# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
if isinstance(spec, six.string_types):
return layers.OPTIMIZER_CLS_NAMES[spec](
learning_rate=_LEARNING_RATE)
elif callable(spec):
return spec()
return spec
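# A hedged sketch of the three `spec` forms `_get_optimizer` accepts (the
# optimizer choices below are illustrative):
#
#   _get_optimizer("SGD")    # name looked up in layers.OPTIMIZER_CLS_NAMES,
#                            # built with learning_rate=_LEARNING_RATE
#   _get_optimizer(lambda: train.FtrlOptimizer(learning_rate=0.1))  # callable
#   _get_optimizer(train.FtrlOptimizer(learning_rate=0.1))  # Optimizer instance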
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and the as_iterable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
columns_to_variables):
"""Adds a fake bias feature column filled with all 1s."""
# TODO(b/31008490): Move definition to a common constants place.
bias_column_name = "tf_virtual_bias_column"
if any(col.name == bias_column_name for col in feature_columns):
raise ValueError("%s is a reserved column name." % bias_column_name)
if not feature_columns:
raise ValueError("feature_columns can't be empty.")
# Loop through input tensors until we can figure out batch_size.
batch_size = None
for column in columns_to_tensors.values():
if isinstance(column, tuple):
column = column[0]
if isinstance(column, sparse_tensor.SparseTensor):
shape = tensor_util.constant_value(column.dense_shape)
if shape is not None:
batch_size = shape[0]
break
else:
batch_size = array_ops.shape(column)[0]
break
if batch_size is None:
raise ValueError("Could not infer batch size from input features.")
bias_column = layers.real_valued_column(bias_column_name)
columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
dtype=dtypes.float32)
columns_to_variables[bias_column] = [bias_variable]
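# For example (a sketch; batch values illustrative): with an inferred
# batch_size of 2, the injected column maps to a constant [[1.], [1.]]
# tensor, so `bias_variable` is trained exactly like the weight of an
# always-on real-valued feature.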
def _linear_model_fn(features, labels, mode, params, config=None):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, an FTRL optimizer is used.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* joint_weights: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It is more efficient, but
incompatible with SDCAOptimizer, and requires that all feature columns be
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
gradient_clip_norm = params.get("gradient_clip_norm", None)
num_ps_replicas = config.num_ps_replicas if config else 0
joint_weights = params.get("joint_weights", False)
if not isinstance(features, dict):
features = {"": features}
parent_scope = "linear"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20)
with variable_scope.variable_scope(
parent_scope,
values=tuple(six.itervalues(features)),
partitioner=partitioner) as scope:
if joint_weights:
logits, _, _ = (
layers.joint_weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope))
else:
logits, _, _ = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=head.logits_dimension,
weight_collections=[parent_scope],
scope=scope))
def _train_op_fn(loss):
global_step = contrib_variables.get_global_step()
my_vars = ops.get_collection(parent_scope)
grads = gradients.gradients(loss, my_vars)
if gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
return (_get_optimizer(optimizer).apply_gradients(
zip(grads, my_vars), global_step=global_step))
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
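# A hedged sketch of the `params` dict `_linear_model_fn` expects (the head
# and column below are illustrative):
#
#   params = {
#       "head": head_lib.multi_class_head(n_classes=2),
#       "feature_columns": [layers.real_valued_column("x")],
#       "optimizer": "Ftrl",  # string, Optimizer instance, callable, or None
#       "gradient_clip_norm": None,
#       "joint_weights": False,
#   }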
def sdca_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
features: A dict of `Tensor` keyed by column name.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `Head` instance. Type must be one of `_BinarySvmHead`,
`_RegressionHead` or `_BinaryLogisticHead`.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: An `SDCAOptimizer` instance.
* weight_column_name: A string defining the weight feature column, or
None if there are no weights.
* update_weights_hook: A `SessionRunHook` object or None. Used to update
model weights.
Returns:
A `ModelFnOps` instance.
Raises:
ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
ValueError: If the type of head is neither `_BinarySvmHead`, nor
`_BinaryLogisticHead`, nor `_RegressionHead`.
ValueError: If mode is not any of the `ModeKeys`.
"""
head = params["head"]
feature_columns = params["feature_columns"]
optimizer = params["optimizer"]
weight_column_name = params["weight_column_name"]
update_weights_hook = params.get("update_weights_hook", None)
if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("Optimizer must be of type SDCAOptimizer")
if isinstance(head, head_lib._BinarySvmHead): # pylint: disable=protected-access
loss_type = "hinge_loss"
elif isinstance(head, head_lib._BinaryLogisticHead): # pylint: disable=protected-access
loss_type = "logistic_loss"
elif isinstance(head, head_lib._RegressionHead): # pylint: disable=protected-access
assert head.logits_dimension == 1, ("SDCA only applies for "
"logits_dimension=1.")
loss_type = "squared_loss"
else:
raise ValueError("Unsupported head type: {}".format(head))
parent_scope = "linear"
with variable_scope.variable_op_scope(
features.values(), parent_scope) as scope:
logits, columns_to_variables, bias = (
layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
num_outputs=1,
scope=scope))
_add_bias_column(feature_columns, features, bias, columns_to_variables)
def _train_op_fn(unused_loss):
global_step = contrib_variables.get_global_step()
sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
weight_column_name,
loss_type, features,
labels, global_step)
if update_weights_hook is not None:
update_weights_hook.set_parameters(sdca_model, train_op)
return train_op
model_fn_ops = head.create_model_fn_ops(
features=features,
labels=labels,
mode=mode,
train_op_fn=_train_op_fn,
logits=logits)
if update_weights_hook is not None:
return model_fn_ops._replace(
training_chief_hooks=(model_fn_ops.training_chief_hooks +
[update_weights_hook]))
return model_fn_ops
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
return train.FtrlOptimizer(learning_rate=learning_rate)
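# A worked example of the default learning rate: with 100 feature columns,
# min(0.2, 1 / sqrt(100)) = min(0.2, 0.1) = 0.1; with 4 columns,
# min(0.2, 0.5) = 0.2, i.e. the historical default acts as a cap.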
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
"""SessionRunHook to update and shrink SDCA model weights."""
def __init__(self):
pass
def set_parameters(self, sdca_model, train_op):
self._sdca_model = sdca_model
self._train_op = train_op
def begin(self):
"""Construct the update_weights op.
The op is implicitly added to the default graph.
"""
self._update_op = self._sdca_model.update_weights(self._train_op)
def before_run(self, run_context):
"""Return the update_weights op so that it is executed during this run."""
return session_run_hook.SessionRunArgs(self._update_op)
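# A sketch of how this hook is wired (a summary of the flow above, using
# names from this module): `sdca_model_fn` calls `set_parameters(sdca_model,
# train_op)` while building the graph; `begin()` then constructs the
# update_weights op before the session is created; and `before_run()`
# returns it so each training step on the chief also updates and shrinks
# the SDCA weights.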
class LinearClassifier(estimator.Estimator):
"""Linear classifier model.
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
# Estimator using the default optimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Or estimator using the FTRL optimizer with regularization.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Or estimator using the SDCAOptimizer.
estimator = LinearClassifier(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
example_id_column='example_id',
num_loss_partitions=...,
symmetric_l2_regularization=2.0
))
# Input builders
def input_fn_train():  # returns x, y (where y represents label's class index).
...
def input_fn_eval():  # returns x, y (where y represents label's class index).
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_weight pylint: disable=invalid-name
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
_joint_weight=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearClassifier` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
Note that class labels are integers representing the class index (i.e.
values from 0 to n_classes-1). For arbitrary label values (e.g. string
labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: The optimizer used to train the model. If specified, it should
be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
the Ftrl optimizer will be used.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
_joint_weight: If True, the weights for all columns will be stored in a
single (possibly partitioned) variable. It is more efficient, but
incompatible with SDCAOptimizer, and requires that all feature columns be
sparse and use the 'sum' combiner.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearClassifier` estimator.
Raises:
ValueError: if n_classes < 2.
"""
# TODO(zoy): Give an unsupported error if enable_centered_bias is
# requested for SDCA once its default changes to False.
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib.multi_class_head(
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert not _joint_weight, ("_joint_weight is incompatible with the"
" SDCAOptimizer")
assert n_classes == 2, "SDCA only applies to binary classification."
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weight,
})
super(LinearClassifier, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_classes, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted classes. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_classes` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns classes.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_classes(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(LinearClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearClassifier, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
class LinearRegressor(estimator.Estimator):
"""Linear regressor model.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
def input_fn_train():  # returns x, y
...
def input_fn_eval():  # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
_joint_weights: If True, use a single (possibly partitioned) variable to
store the weights. It's faster, but requires that all feature columns be
sparse and use the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib.regression_head(
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert label_dimension == 1, "SDCA only applies for label_dimension=1."
assert not _joint_weights, ("_joint_weights is incompatible with"
" SDCAOptimizer.")
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
})
super(LinearRegressor, self).__init__(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(LinearRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
@deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(
examples, self._feature_columns)
return super(LinearRegressor, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or export.regression_signature_fn),
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
class LinearEstimator(estimator.Estimator):
"""Linear model with user specified head.
Train a generalized linear model to predict label value given observation of
feature values.
Example:
To do Poisson regression,
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearEstimator(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
head=head_lib.poisson_regression_head())
# Input builders
def input_fn_train():  # returns x, y
...
def input_fn_eval():  # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
head,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearEstimator` object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
head: An instance of _Head class.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
_joint_weights: If True, use a single (possibly partitioned) variable to
store the weights. It's faster, but requires that all feature columns be
sparse and use the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearEstimator` estimator.
Raises:
ValueError: if optimizer is not supported, e.g., SDCAOptimizer
"""
assert feature_columns
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
raise ValueError("LinearEstimator does not support SDCA optimizer.")
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": optimizer,
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
}
super(LinearEstimator, self).__init__(
model_fn=_linear_model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SeriesDateTimeTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf1(self):
date1 = pd.Series(pd.date_range("2012-1-1 12:45:31", periods=3, freq="M"))
date2 = pd.Series(pd.date_range("2013-3-11 21:45:00", periods=3, freq="W"))
return pd.DataFrame(dict(start_date=date1, end_date=date2))
@property
def pd_start_date(self):
return self.pdf1["start_date"]
@property
def ks_start_date(self):
return ps.from_pandas(self.pd_start_date)
def check_func(self, func):
self.assert_eq(func(self.ks_start_date), func(self.pd_start_date))
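# For example, check_func(lambda x: x.dt.year) asserts that the
# pandas-on-Spark series and the plain pandas series agree on `.dt.year`;
# the parity tests below all reuse this pattern.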
def test_timestamp_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
# These fail on certain OSes, presumably due to different
# timezone behaviours inherited from the C library.
actual = (psdf["end_date"] - psdf["start_date"] - 1).to_pandas()
expected = (pdf["end_date"] - pdf["start_date"]) // np.timedelta64(1, "s") - 1
# self.assert_eq(actual, expected)
actual = (psdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31") - 1).to_pandas()
expected = (pdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31")) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
actual = (pd.Timestamp("2013-3-11 21:45:00") - psdf["start_date"] - 1).to_pandas()
expected = (pd.Timestamp("2013-3-11 21:45:00") - pdf["start_date"]) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "datetime subtraction can only be applied to datetime series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"]
def test_arithmetic_op_exceptions(self):
psser = self.ks_start_date
py_datetime = self.pd_start_date.dt.to_pydatetime()
datetime_index = ps.Index(self.pd_start_date)
for other in [1, 0.1, psser, datetime_index, py_datetime]:
expected_err_msg = "Addition can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser + other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other + psser)
expected_err_msg = "Multiplication can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser * other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other * psser)
expected_err_msg = "True division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser / other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other / psser)
expected_err_msg = "Floor division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser // other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other // psser)
expected_err_msg = "Modulo can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser % other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other % psser)
expected_err_msg = "datetime subtraction can only be applied to datetime series."
for other in [1, 0.1]:
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other - psser)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaises(NotImplementedError, lambda: py_datetime - psser)
def test_date_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf["end_date"].dt.date - psdf["start_date"].dt.date,
(pdf["end_date"].dt.date - pdf["start_date"].dt.date).dt.days,
)
self.assert_eq(
psdf["end_date"].dt.date - datetime.date(2012, 1, 1),
(pdf["end_date"].dt.date - datetime.date(2012, 1, 1)).dt.days,
)
self.assert_eq(
datetime.date(2013, 3, 11) - psdf["start_date"].dt.date,
(datetime.date(2013, 3, 11) - pdf["start_date"].dt.date).dt.days,
)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "date subtraction can only be applied to date series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"].dt.date
@unittest.skip(
"It fails in certain OSs presumably due to different "
"timezone behaviours inherited from C library."
)
def test_div(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
for u in "D", "s", "ms":
duration = np.timedelta64(1, u)
self.assert_eq(
(psdf["end_date"] - psdf["start_date"]) / duration,
(pdf["end_date"] - pdf["start_date"]) / duration,
)
@unittest.skip("It is currently failed probably for the same reason in 'test_subtraction'")
def test_date(self):
self.check_func(lambda x: x.dt.date)
def test_time(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.time)
def test_timetz(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.timetz)
def test_year(self):
self.check_func(lambda x: x.dt.year)
def test_month(self):
self.check_func(lambda x: x.dt.month)
def test_day(self):
self.check_func(lambda x: x.dt.day)
def test_hour(self):
self.check_func(lambda x: x.dt.hour)
def test_minute(self):
self.check_func(lambda x: x.dt.minute)
def test_second(self):
self.check_func(lambda x: x.dt.second)
def test_microsecond(self):
self.check_func(lambda x: x.dt.microsecond)
def test_nanosecond(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.nanosecond)
def test_week(self):
self.check_func(lambda x: x.dt.week)
def test_weekofyear(self):
self.check_func(lambda x: x.dt.weekofyear)
def test_dayofweek(self):
self.check_func(lambda x: x.dt.dayofweek)
def test_weekday(self):
self.check_func(lambda x: x.dt.weekday)
def test_dayofyear(self):
self.check_func(lambda x: x.dt.dayofyear)
def test_quarter(self):
self.check_func(lambda x: x.dt.quarter)
def test_is_month_start(self):
self.check_func(lambda x: x.dt.is_month_start)
def test_is_month_end(self):
self.check_func(lambda x: x.dt.is_month_end)
def test_is_quarter_start(self):
self.check_func(lambda x: x.dt.is_quarter_start)
def test_is_quarter_end(self):
self.check_func(lambda x: x.dt.is_quarter_end)
def test_is_year_start(self):
self.check_func(lambda x: x.dt.is_year_start)
def test_is_year_end(self):
self.check_func(lambda x: x.dt.is_year_end)
def test_is_leap_year(self):
self.check_func(lambda x: x.dt.is_leap_year)
def test_daysinmonth(self):
self.check_func(lambda x: x.dt.daysinmonth)
def test_days_in_month(self):
self.check_func(lambda x: x.dt.days_in_month)
@unittest.expectedFailure
def test_tz_localize(self):
self.check_func(lambda x: x.dt.tz_localize("America/New_York"))
@unittest.expectedFailure
def test_tz_convert(self):
self.check_func(lambda x: x.dt.tz_convert("America/New_York"))
def test_normalize(self):
self.check_func(lambda x: x.dt.normalize())
def test_strftime(self):
self.check_func(lambda x: x.dt.strftime("%Y-%m-%d"))
def test_round(self):
self.check_func(lambda x: x.dt.round(freq="min"))
self.check_func(lambda x: x.dt.round(freq="H"))
def test_floor(self):
self.check_func(lambda x: x.dt.floor(freq="min"))
self.check_func(lambda x: x.dt.floor(freq="H"))
def test_ceil(self):
self.check_func(lambda x: x.dt.ceil(freq="min"))
self.check_func(lambda x: x.dt.ceil(freq="H"))
@unittest.skip("Unsupported locale setting")
def test_month_name(self):
self.check_func(lambda x: x.dt.month_name())
self.check_func(lambda x: x.dt.month_name(locale="en_US.UTF-8"))
@unittest.skip("Unsupported locale setting")
def test_day_name(self):
self.check_func(lambda x: x.dt.day_name())
self.check_func(lambda x: x.dt.day_name(locale="en_US.UTF-8"))
def test_unsupported_type(self):
self.assertRaisesRegex(
ValueError, "Cannot call DatetimeMethods on type LongType", lambda: ps.Series([0]).dt
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_series_datetime import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import summary
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import session_run_hook
# TODO(touts): Share that with the Supervisor.
class Scaffold(object):
"""Structure to create or gather pieces commonly needed to train a model.
When you build a model for training you usually need ops to initialize
variables, a `Saver` to checkpoint them, an op to collect summaries for
the visualizer, and so on.
Various libraries built on top of the core TensorFlow library take care of
creating some or all of these pieces and storing them in well known
collections in the graph. The `Scaffold` class helps pick these pieces from
the graph collections, creating and adding them to the collections if needed.
If you call the scaffold constructor without any arguments, it will pick
pieces from the collections, creating default ones if needed when
`scaffold.finalize()` is called. You can pass arguments to the constructor to
provide your own pieces. Pieces that you pass to the constructor are not
added to the graph collections.
The following pieces are directly accessible as attributes of the `Scaffold`
object:
* `saver`: A `tf.Saver` object taking care of saving the variables. Picked
from and stored into the `SAVERS` collection in the graph.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph.
* `ready_op`: An op to verify that the variables are initialized. Picked
from and stored into the `READY_OP` collection in the graph.
* `local_init_op`: An op to initialize the local variables. Picked
from and stored into the `LOCAL_INIT_OP` collection in the graph.
* `summary_op`: An op to run and merge the summaries in the graph. Picked
from and stored into the `SUMMARY_OP` collection in the graph.
* `global_step`: A tensor containing the global step counter. Picked
from and stored into the `GLOBAL_STEP` collection in the graph.
You can also pass the following additional pieces to the constructor:
* `init_feed_dict`: A session feed dictionary that should be used when
running the init op.
* `init_fn`: A callable to run after the init op to perform additional
initializations. The callable will be called as
`init_fn(scaffold, session)`.
"""
def __init__(self,
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
local_init_op=None,
summary_op=None,
saver=None):
"""Create a scaffold.
Args:
init_op: Optional op for initializing variables.
init_feed_dict: Optional session feed dictionary to use when running the
init_op.
init_fn: Optional function to use to initialize the model after running
the init_op. Will be called as `init_fn(scaffold, session)`.
ready_op: Optional op to verify that the variables are initialized. Must
return an empty scalar string tensor when the variables are
initialized, or a non-empty one listing the names of the
non-initialized variables.
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
saver: Optional `tf.Saver` object to use to save and restore variables.
"""
# NOTE(touts): modifying the init function to be passed the scaffold is a
# hack to make it easy to find the saver. Is there a better way?
if init_fn:
self._init_fn = lambda sess: init_fn(self, sess)
else:
self._init_fn = None
self._init_op = init_op
self._ready_op = ready_op
self._local_init_op = local_init_op
self._summary_op = summary_op
self._saver = saver
self._init_feed_dict = init_feed_dict
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return control_flow_ops.group(
variables.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = Scaffold.get_or_default(
'init_op',
ops.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return array_ops.concat(
0,
[variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()])
self._ready_op = Scaffold.get_or_default(
'ready_op', ops.GraphKeys.READY_OP,
default_ready_op)
if self._local_init_op is None:
self._local_init_op = Scaffold.get_or_default(
'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
Scaffold._default_local_init_op)
if self._summary_op is None:
self._summary_op = Scaffold.get_or_default('summary_op',
ops.GraphKeys.SUMMARY_OP,
summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = Scaffold.get_or_default(
'saver',
ops.GraphKeys.SAVERS,
lambda: training_saver.Saver(sharded=True, allow_empty=True,
write_version=saver_pb2.SaverDef.V2))
# pylint: enable=g-long-lambda
self._saver.build()
ops.get_default_graph().finalize()
return self
@property
def init_fn(self):
return self._init_fn
@property
def init_op(self):
return self._init_op
@property
def ready_op(self):
return self._ready_op
@property
def local_init_op(self):
return self._local_init_op
@property
def summary_op(self):
return self._summary_op
@property
def saver(self):
return self._saver
@property
def init_feed_dict(self):
return self._init_feed_dict
@staticmethod
def get_or_default(arg_name, collection_key, default_constructor):
"""Get from cache or create a default operation."""
elements = ops.get_collection(collection_key)
if elements:
if len(elements) > 1:
raise RuntimeError('More than one item in the collection "%s". '
'Please indicate which one to use by passing it to '
'the tf.Scaffold constructor as: '
'tf.Scaffold(%s=item to use)', collection_key,
arg_name)
return elements[0]
op = default_constructor()
if op is not None:
ops.add_to_collection(collection_key, op)
return op
@staticmethod
def _default_local_init_op():
return control_flow_ops.group(variables.local_variables_initializer(),
data_flow_ops.initialize_all_tables())
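# A hedged usage sketch of `Scaffold` (the saver below is illustrative; any
# piece not passed is picked from collections or defaulted at finalize()):
#
#   scaffold = Scaffold(saver=training_saver.Saver(sharded=True))
#   scaffold.finalize()  # fills in init_op, ready_op, etc., and freezes graph
#   session_creator = ChiefSessionCreator(scaffold=scaffold)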
def MonitoredTrainingSession(master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=100,
config=None):
"""Creates a `MonitoredSession` for training.
For a chief, this utility sets proper session initializer/restorer. It also
creates hooks related to checkpoint and summary saving. For workers, this
utility sets a proper session creator which waits for the chief to
initialize/restore.
Args:
master: `String` the TensorFlow master to use.
is_chief: If `True`, it will take care of initialization and recovery of
the underlying TensorFlow session. If `False`, it will wait on a chief to
initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
hooks: Optional list of `SessionRunHook` objects.
chief_only_hooks: list of `SessionRunHook` objects. These hooks are
activated if `is_chief==True`, and ignored otherwise.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If
`save_summaries_steps` is set to `None`, then the default summary saver
isn't used.
config: an instance of `tf.ConfigProto` used to configure the session.
It's the `config` argument of the `tf.Session` constructor.
Returns:
A `MonitoredSession` object.
"""
hooks = hooks or []
scaffold = scaffold or Scaffold()
if not is_chief:
session_creator = WorkerSessionCreator(
scaffold=scaffold, master=master, config=config)
return MonitoredSession(session_creator=session_creator, hooks=hooks)
if chief_only_hooks:
hooks.extend(chief_only_hooks)
session_creator = ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
if checkpoint_dir:
hooks.append(
basic_session_run_hooks.StepCounterHook(output_dir=checkpoint_dir))
if save_summaries_steps and save_summaries_steps > 0:
hooks.append(basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
output_dir=checkpoint_dir))
if save_checkpoint_secs and save_checkpoint_secs > 0:
hooks.append(basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir, save_secs=save_checkpoint_secs, scaffold=scaffold))
return MonitoredSession(session_creator=session_creator, hooks=hooks)
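# A hedged usage sketch (the checkpoint path and train_op are illustrative):
#
#   with MonitoredTrainingSession(is_chief=True,
#                                 checkpoint_dir="/tmp/train") as sess:
#     while not sess.should_stop():
#       sess.run(train_op)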
class SessionCreator(object):
"""A factory for tf.Session."""
@abc.abstractmethod
def create_session(self):
raise NotImplementedError(
'create_session is not implemented for {}.'.format(self))
class ChiefSessionCreator(SessionCreator):
"""Creates a tf.Session for a chief."""
def __init__(self, scaffold=None, master='', config=None,
checkpoint_dir=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
"""
self._checkpoint_dir = checkpoint_dir
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
class WorkerSessionCreator(SessionCreator):
"""Creates a tf.Session for a worker."""
def __init__(self, scaffold=None, master='', config=None):
"""Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
"""
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().wait_for_session(
self._master, config=self._config)
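# A sketch of how the two creators divide work (this mirrors
# MonitoredTrainingSession above; the master string is illustrative):
#
#   creator = (ChiefSessionCreator(master="grpc://host:2222") if is_chief
#              else WorkerSessionCreator(master="grpc://host:2222"))
#   sess = creator.create_session()  # chief initializes; worker waits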
class MonitoredSession(object):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummaryHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
Initialization: At creation time the monitored session does the following
things in the given order:
* calls `hook.begin()`
* finalizes the graph via `scaffold.finalize()`
* creates the session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
Run: When `run()` is called, the monitored session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns the result of `session.run()` asked for by the user
* if `AbortedError` occurs, it recovers or reinitializes the session before
executing the run() call again
Exit: At `close()`, the monitored session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
* suppresses the `OutOfRange` error, which indicates that all inputs have
been processed, if the monitored_session is used as a context
How to set `tf.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
* In a distributed setting, for a non-chief worker, you can use the following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
"""
def __init__(self, session_creator=None, hooks=None):
"""Creates a MonitoredSession.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` which is the default one.
hooks: An iterable of `SessionRunHook` objects.
"""
self._graph_was_finalized = ops.get_default_graph().finalized
self._hooks = hooks or []
for h in self._hooks:
h.begin()
# Create the session.
self._coordinated_creator = self._CoordinatedSessionCreator(
session_creator=session_creator or ChiefSessionCreator(),
hooks=self._hooks)
self._sess = _RecoverableSession(self._coordinated_creator)
@property
def graph(self):
"""The graph that was launched in this session."""
if self._coordinated_creator.tf_sess is None:
return None
return self._coordinated_creator.tf_sess.graph
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Run ops in the monitored session.
This method is completely compatible with the `tf.Session.run()` method.
Args:
fetches: Same as `tf.Session.run()`.
feed_dict: Same as `tf.Session.run()`.
options: Same as `tf.Session.run()`.
run_metadata: Same as `tf.Session.run()`.
Returns:
Same as `tf.Session.run()`.
"""
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
def should_stop(self):
if self._sess:
return self._sess.should_stop()
return True
def close(self):
self._close_internal()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type in [errors.OutOfRangeError, StopIteration]:
exception_type = None
self._close_internal(exception_type)
# __exit__ should return True to suppress an exception.
return exception_type is None
class _CoordinatedSessionCreator(object):
"""Factory for the _RecoverableSession."""
def __init__(self, session_creator, hooks):
self._session_creator = session_creator
self._hooks = hooks
self.coord = None
self.tf_sess = None
def create_session(self):
"""Creates a coordinated session."""
# Keep the tf_sess for unit testing.
self.tf_sess = self._session_creator.create_session()
# We don't want coordinator to suppress any exception.
self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)
return _CoordinatedSession(
_HookedSession(self.tf_sess, self._hooks), self.coord)
def _close_internal(self, exception_type=None):
try:
if not exception_type:
for h in self._hooks:
h.end(self._coordinated_creator.tf_sess)
finally:
try:
self._sess.close()
finally:
self._sess = None
self._coordinated_creator.tf_sess = None
self._coordinated_creator.coord = None
if not self._graph_was_finalized:
ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access
def _is_closed(self):
"""Return True if the supervised session is closed. For tests only.
Returns:
A boolean.
"""
return self._coordinated_creator.tf_sess is None
class _WrappedSession(object):
"""Wrapper around a `tf.Session`.
This wrapper is used as a base class for various session wrappers
that provide additional functionality such as monitoring, coordination,
and recovery.
In addition to the methods exported by `SessionInterface` the wrapper
provides a method to check for stop and never raises exceptions from
calls to `close()`.
"""
def __init__(self, sess):
"""Creates a `_WrappedSession`.
Args:
sess: A `tf.Session` or `_WrappedSession` object. The wrapped session.
"""
self._sess = sess
self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
def should_stop(self):
"""Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise.
"""
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True
def _check_stop(self):
"""Hook for subclasses to provide their own stop condition.
Returns:
True if the session should stop, False otherwise.
"""
return False
def close(self):
if self._sess:
try:
self._sess.close()
finally:
self._sess = None
def run(self, *args, **kwargs):
return self._sess.run(*args, **kwargs)
class _RecoverableSession(_WrappedSession):
"""A wrapped session that recreates a session on `tf.errors.AbortedError`.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError`, the wrapped session is closed, and a new
one is created by calling the factory again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
sess_creator: A `SessionCreator` to be wrapped by the recoverable session.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._sess_creator.create_session())
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._sess_creator.create_session()
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except errors.AbortedError:
self.close()
self._sess = None
class _CoordinatedSession(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
In addition, after each call to `run()` this session asks the coordinator
whether the session should stop. In that case it will join all the threads
registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
def _check_stop(self):
# Check with the coordinator if we should stop.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join()
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
class _HookedSession(_WrappedSession):
"""A _WrappedSession that calls hooks during calls to run().
The list of hooks to call is passed in the constructor. Before each call
to `run()` the session calls the `before_run()` method of the hooks, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `after_run()` methods of
the hooks, passing the values returned by the `run()` call corresponding to
the ops and tensors that each hook requested.
If any of the hooks requests a stop via `run_context`, the session will be
marked as needing to stop and its `should_stop()` method will then return
`True`.
"""
def __init__(self, sess, hooks):
"""Initializes a _HookedSession object.
Args:
sess: A `tf.Session` or a `_WrappedSession` object.
hooks: An iterable of `SessionRunHook` objects.
"""
_WrappedSession.__init__(self, sess)
self._hooks = hooks
self._should_stop = False
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
actual_fetches = {'caller': fetches}
run_context = session_run_hook.SessionRunContext(
original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),
session=self._sess)
options = options or config_pb2.RunOptions()
feed_dict = self._call_hook_before_run(run_context, actual_fetches,
feed_dict, options)
# Do session run.
run_metadata = run_metadata or config_pb2.RunMetadata()
outputs = _WrappedSession.run(self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
for hook in self._hooks:
hook.after_run(
run_context,
session_run_hook.SessionRunValues(
results=outputs[hook] if hook in outputs else None,
options=options,
run_metadata=run_metadata))
self._should_stop = self._should_stop or run_context.stop_requested
return outputs['caller']
def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict,
options):
"""Calls hooks.before_run and handles requests from hooks."""
hook_feeds = {}
for hook in self._hooks:
request = hook.before_run(run_context)
if request is not None:
if request.fetches is not None:
fetch_dict[hook] = request.fetches
if request.feed_dict:
self._raise_if_feeds_intersects(
hook_feeds, request.feed_dict,
'Same tensor is fed by two hooks.')
hook_feeds.update(request.feed_dict)
if request.options:
self._merge_run_options(options, request.options)
if not hook_feeds:
return user_feed_dict
if not user_feed_dict:
return hook_feeds
self._raise_if_feeds_intersects(
user_feed_dict, hook_feeds,
'Same tensor is fed by a SessionRunHook and user.')
hook_feeds.update(user_feed_dict)
return hook_feeds
def _raise_if_feeds_intersects(self, feeds1, feeds2, message):
intersection = set(feeds1.keys()) & set(feeds2.keys())
if intersection:
raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))
def _merge_run_options(self, options, incoming_options):
"""Merge two instances of RunOptions into the first one.
During the merger, the numerical fields including trace_level,
timeout_in_ms, inter_op_thread_pool are set to the larger one of the two.
The boolean value is set to the logical OR of the two.
debug_tensor_watch_opts of the original options is extended with that from
the incoming one.
Args:
options: The options to merge into.
incoming_options: The options to be merged into the first argument.
"""
options.trace_level = max(options.trace_level, incoming_options.trace_level)
options.timeout_in_ms = max(options.timeout_in_ms,
incoming_options.timeout_in_ms)
options.inter_op_thread_pool = max(options.inter_op_thread_pool,
incoming_options.inter_op_thread_pool)
options.output_partition_graphs = max(
options.output_partition_graphs,
incoming_options.output_partition_graphs)
options.debug_tensor_watch_opts.extend(
incoming_options.debug_tensor_watch_opts)
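# Illustrative sketch (not part of the original module): a minimal
# `SessionRunHook` exercising the contract `_HookedSession` relies on.
# `loss_op` and the divergence threshold are hypothetical placeholders.
class _ExampleLossHook(session_run_hook.SessionRunHook):
  """Fetches a scalar loss every run and requests a stop if it diverges."""

  def __init__(self, loss_op, threshold=1e6):
    self._loss_op = loss_op
    self._threshold = threshold

  def before_run(self, run_context):
    # Ask the session to additionally fetch `loss_op`; the fetched value is
    # handed back to `after_run` as `run_values.results`.
    return session_run_hook.SessionRunArgs(self._loss_op)

  def after_run(self, run_context, run_values):
    if run_values.results > self._threshold:
      # Marks the session as needing to stop; `should_stop()` then
      # returns True on the enclosing `_HookedSession`.
      run_context.request_stop()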
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# =================================================================
# system_test_runner.py
#
# - This script is the test driver for a distributed environment
# system testing framework. It is located at the top level of the
# framework hierarchy (in this case - system_test/).
#
# - This test driver serves as an entry point to launch a series
# of test suites (modules) with multiple functionally similar test
# cases which can be grouped together.
#
# - Please refer to system_test/README.txt for more details on
# how to add a test suite and a test case.
#
# - In most cases, it is not necessary to make any changes to this
# script.
# =================================================================
from optparse import OptionParser
from system_test_env import SystemTestEnv
from utils import system_test_utils
import logging.config
import os
import pprint
import sys
# load the config file for logging
logging.config.fileConfig('logging.conf')
# 'd' is an argument to be merged into the log message (see Python doc for logging).
# In this case, the corresponding class name can be appended to the end of the logging
# message to facilitate debugging.
d = {'name_of_class': '(system_test_runner)'}
class report:
systemTestEnv = None
reportString = ""
reportFileName = "system_test_report.html"
systemTestReport = None
header = """<head>
<title>Kafka System Test Report</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script>
<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.2/js/bootstrap.min.js"></script>
</head>"""
footer = """ """
def __init__(self, systemTestEnv):
self.totalSkipped = 0
self.totalPassed = 0
self.totalTests = 0
self.totalFailed = 0
self.systemTestEnv = systemTestEnv
self.systemTestReport = open(self.reportFileName, 'w')
def __del__(self):
self.systemTestReport.close()
self.systemTestReport = None
def writeHtmlPage(self, body):
html = """
<!DOCTYPE html>
<html lang="en">
"""
html += self.header
html += body
html += self.footer
html += """
</html>
"""
self.systemTestReport.write(html)
def wrapIn(self, tag, content):
html = "\n<" + tag + ">"
html += "\n " + content
html += "\n</" + tag.split(" ")[0] + ">"
return html
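# For example (illustrative), wrapIn("div class=\"row\"", "hello") yields:
#   \n<div class="row">\n hello\n</div>
# Only the first token of the tag is reused for the closing tag.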
def genModal(self, className, caseName, systemTestResult):
key = "validation_status"
id = className + "_" + caseName
info = self.wrapIn("h4", "Validation Status")
for validatedItem in sorted(systemTestResult[key].iterkeys()):
testItemStatus = systemTestResult[key][validatedItem]
info += validatedItem + " : " + testItemStatus
return self.wrapIn("div class=\"modal fade\" id=\"" + id + "\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"" + id + "Label\" aria-hidden=\"true\"",
self.wrapIn("div class=\"modal-dialog\"",
self.wrapIn("div class=\"modal-content\"",
self.wrapIn("div class=\"modal-header\"",
self.wrapIn("h4 class=\"modal-title\" id=\"" + id + "Label\"",
className + " - " + caseName)) +
self.wrapIn("div class=\"modal-body\"",
info) +
self.wrapIn("div class=\"modal-footer\"",
self.wrapIn("button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\"", "Close")))))
def summarize(self):
testItemsTableHeader = self.wrapIn("thead",
self.wrapIn("tr",
self.wrapIn("th", "Test Class Name") +
self.wrapIn("th", "Test Case Name") +
self.wrapIn("th", "Validation Status")))
testItemsTableBody = ""
modals = ""
for systemTestResult in self.systemTestEnv.systemTestResultsList:
self.totalTests += 1
if "_test_class_name" in systemTestResult:
testClassName = systemTestResult["_test_class_name"]
else:
testClassName = ""
if "_test_case_name" in systemTestResult:
testCaseName = systemTestResult["_test_case_name"]
else:
testCaseName = ""
if "validation_status" in systemTestResult:
testItemStatus = "SKIPPED"
for key in systemTestResult["validation_status"].iterkeys():
testItemStatus = systemTestResult["validation_status"][key]
if "FAILED" == testItemStatus:
break
if "FAILED" == testItemStatus:
self.totalFailed += 1
validationStatus = self.wrapIn("div class=\"text-danger\" data-toggle=\"modal\" data-target=\"#" + testClassName + "_" + testCaseName + "\"", "FAILED")
modals += self.genModal(testClassName, testCaseName, systemTestResult)
elif "PASSED" == testItemStatus:
self.totalPassed += 1
validationStatus = self.wrapIn("div class=\"text-success\"", "PASSED")
else:
self.totalSkipped += 1
validationStatus = self.wrapIn("div class=\"text-warning\"", "SKIPPED")
else:
self.reportString += "|"
testItemsTableBody += self.wrapIn("tr",
self.wrapIn("td", testClassName) +
self.wrapIn("td", testCaseName) +
self.wrapIn("td", validationStatus))
testItemsTableBody = self.wrapIn("tbody", testItemsTableBody)
testItemsTable = self.wrapIn("table class=\"table table-striped\"", testItemsTableHeader + testItemsTableBody)
statsTblBody = self.wrapIn("tr class=\"active\"", self.wrapIn("td", "Total tests") + self.wrapIn("td", str(self.totalTests)))
statsTblBody += self.wrapIn("tr class=\"success\"", self.wrapIn("td", "Total tests passed") + self.wrapIn("td", str(self.totalPassed)))
statsTblBody += self.wrapIn("tr class=\"danger\"", self.wrapIn("td", "Total tests failed") + self.wrapIn("td", str(self.totalFailed)))
statsTblBody += self.wrapIn("tr class=\"warning\"", self.wrapIn("td", "Total tests skipped") + self.wrapIn("td", str(self.totalSkipped)))
testStatsTable = self.wrapIn("table class=\"table\"", statsTblBody)
body = self.wrapIn("div class=\"container\"",
self.wrapIn("h2", "Kafka System Test Report") +
self.wrapIn("div class=\"row\"", self.wrapIn("div class=\"col-md-4\"", testStatsTable)) +
self.wrapIn("div class=\"row\"", self.wrapIn("div class=\"col-md-6\"", testItemsTable)) +
modals)
self.writeHtmlPage(self.wrapIn("body", body))
def main():
nLogger = logging.getLogger('namedLogger')
aLogger = logging.getLogger('anonymousLogger')
optionParser = OptionParser()
optionParser.add_option("-p", "--print-test-descriptions-only",
dest="printTestDescriptionsOnly",
default=False,
action="store_true",
help="print test descriptions only - don't run the test")
optionParser.add_option("-n", "--do-not-validate-remote-host",
dest="doNotValidateRemoteHost",
default=False,
action="store_true",
help="do not validate remote host (due to different kafka versions are installed)")
(options, args) = optionParser.parse_args()
print "\n"
aLogger.info("=================================================")
aLogger.info(" System Regression Test Framework")
aLogger.info("=================================================")
print "\n"
testSuiteClassDictList = []
# SystemTestEnv is a class to provide all environment settings for this session
# such as the SYSTEM_TEST_BASE_DIR, SYSTEM_TEST_UTIL_DIR, ...
systemTestEnv = SystemTestEnv()
if options.printTestDescriptionsOnly:
systemTestEnv.printTestDescriptionsOnly = True
if options.doNotValidateRemoteHost:
systemTestEnv.doNotValidateRemoteHost = True
if not systemTestEnv.printTestDescriptionsOnly:
if not systemTestEnv.doNotValidateRemoteHost:
if not system_test_utils.setup_remote_hosts(systemTestEnv):
nLogger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
else:
nLogger.info("SKIPPING : checking remote machines", extra=d)
print
# get all defined names within a module:
definedItemList = dir(SystemTestEnv)
aLogger.debug("=================================================")
aLogger.debug("SystemTestEnv keys:")
for item in definedItemList:
aLogger.debug(" " + item)
aLogger.debug("=================================================")
aLogger.info("=================================================")
aLogger.info("looking up test suites ...")
aLogger.info("=================================================")
# find all test suites in SYSTEM_TEST_BASE_DIR
for dirName in os.listdir(systemTestEnv.SYSTEM_TEST_BASE_DIR):
# make sure this is a valid testsuite directory
if os.path.isdir(dirName) and dirName.endswith(systemTestEnv.SYSTEM_TEST_SUITE_SUFFIX):
print
nLogger.info("found a testsuite : " + dirName, extra=d)
testModulePathName = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + dirName)
if not systemTestEnv.printTestDescriptionsOnly:
system_test_utils.setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName)
# go through all test modules file in this testsuite
for moduleFileName in os.listdir(testModulePathName):
# make sure it is a valid test module
if moduleFileName.endswith(systemTestEnv.SYSTEM_TEST_MODULE_EXT) \
and not moduleFileName.startswith("__"):
# found a test module file
nLogger.info("found a test module file : " + moduleFileName, extra=d)
testModuleClassName = system_test_utils.sys_call("grep ^class " + testModulePathName + "/" + \
moduleFileName + " | sed 's/^class //g' | sed 's/(.*):.*//g'")
testModuleClassName = testModuleClassName.rstrip('\n')
# collect the test suite class data
testSuiteClassDict = {}
testSuiteClassDict["suite"] = dirName
extLenToRemove = -len(systemTestEnv.SYSTEM_TEST_MODULE_EXT)
testSuiteClassDict["module"] = moduleFileName[:extLenToRemove]
testSuiteClassDict["class"] = testModuleClassName
testSuiteClassDictList.append(testSuiteClassDict)
suiteName = testSuiteClassDict["suite"]
moduleName = testSuiteClassDict["module"]
className = testSuiteClassDict["class"]
# add testsuite directory to sys.path such that the module can be loaded
sys.path.append(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/" + suiteName)
if not systemTestEnv.printTestDescriptionsOnly:
aLogger.info("=================================================")
aLogger.info("Running Test for : ")
aLogger.info(" suite : " + suiteName)
aLogger.info(" module : " + moduleName)
aLogger.info(" class : " + className)
aLogger.info("=================================================")
# dynamically loading a module and starting the test class
mod = __import__(moduleName)
theClass = getattr(mod, className)
instance = theClass(systemTestEnv)
instance.runTest()
print
report(systemTestEnv).summarize()
if not systemTestEnv.printTestDescriptionsOnly:
totalFailureCount = 0
print
print "========================================================"
print " TEST REPORTS"
print "========================================================"
for systemTestResult in systemTestEnv.systemTestResultsList:
for key in sorted(systemTestResult.iterkeys()):
if key == "validation_status":
print key, " : "
testItemStatus = None
for validatedItem in sorted(systemTestResult[key].iterkeys()):
testItemStatus = systemTestResult[key][validatedItem]
print " ", validatedItem, " : ", testItemStatus
if "FAILED" == testItemStatus:
totalFailureCount += 1
else:
print key, " : ", systemTestResult[key]
print
print "========================================================"
print
print "========================================================"
print "Total failures count : " + str(totalFailureCount)
print "========================================================"
print
return totalFailureCount
return -1
# =========================
# main entry point
# =========================
sys.exit(main())
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Distribution Strategy.
This is experimental. It's not ready for general use.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import one_device_strategy
from tensorflow.contrib.distribute.python import values
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import device_util
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
def get_tpu_system_metadata(tpu_cluster_resolver):
"""Retrieves TPU system metadata given a TPUClusterResolver."""
master = tpu_cluster_resolver.master()
# pylint: disable=protected-access
cluster_def = (tpu_cluster_resolver.cluster_spec()
or server_lib.ClusterSpec({})).as_cluster_def()
tpu_system_metadata = (
tpu_system_metadata_lib._query_tpu_system_metadata(
master,
cluster_def=cluster_def,
query_topology=True))
return tpu_system_metadata
class TPUStrategy(one_device_strategy.OneDeviceStrategy):
"""Experimental TPU distribution strategy implementation."""
def __init__(self, tpu_cluster_resolver, steps_per_run):
"""Initializes the TPUStrategy object.
Args:
tpu_cluster_resolver: A tf.contrib.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
steps_per_run: Number of steps to run on device before returning to the
host. Note that this can have side-effects on performance, hooks,
metrics, summaries etc.
This parameter is only used when Distribution Strategy is used with
estimator or keras.
"""
# TODO(isaprykin): Generalize the defaults. They are currently tailored for
# the unit test.
super(TPUStrategy, self).__init__('/device:CPU:0')
self._tpu_cluster_resolver = tpu_cluster_resolver
self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
# TODO(priyag): This should not be hardcoded here.
self._host = '/device:CPU:0'
# TODO(sourabhbajaj): Remove this once performance of running one step
# at a time is comparable to multiple steps.
self.steps_per_run = steps_per_run
def distribute_dataset(self, dataset_fn):
# TODO(priyag): Perhaps distribute across cores here.
return self._call_dataset_fn(dataset_fn)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
# TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
# a mechanism to infer the outputs of `fn`. Pending b/110550782.
def _run_steps_on_dataset(self, fn, iterator, iterations,
initial_loop_values=None):
shapes = nest.flatten(iterator.output_shapes)
if any(not s.is_fully_defined() for s in shapes):
raise ValueError(
'TPU currently requires fully defined shapes. Either use '
'set_shape() on the input tensors or use '
'dataset.apply(map_and_batch(..., drop_remainder=True)).')
types = nest.flatten(iterator.output_types)
def enqueue_ops_fn():
"""Enqueue ops for one iteration."""
control_deps = []
sharded_inputs = []
with ops.device(self._host):
for _ in range(self.num_towers):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
inputs = nest.flatten(iterator.get_next())
control_deps.extend(inputs)
sharded_inputs.append(inputs)
enqueue_ops = []
for core_id, shard_input in enumerate(sharded_inputs):
enqueue_ops.append(
tpu_ops.infeed_enqueue_tuple(
inputs=shard_input, shapes=shapes, device_ordinal=core_id))
return enqueue_ops
def enqueue_ops_loop_body(i):
with ops.control_dependencies(enqueue_ops_fn()):
return i + 1
with ops.device(self._host):
enqueue_ops = control_flow_ops.while_loop(
lambda i: i < iterations,
enqueue_ops_loop_body,
[constant_op.constant(0)],
parallel_iterations=1)
def dequeue_fn():
dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes)
return nest.pack_sequence_as(iterator.output_shapes, dequeued)
# Wrap `fn` for repeat.
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = values.MultiStepContext()
def run_fn(*args, **kwargs):
del args, kwargs
fn_inputs = dequeue_fn()
if not isinstance(fn_inputs, tuple):
fn_inputs = (fn_inputs,)
fn_result = fn(ctx, *fn_inputs)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
if flat_last_step_outputs:
with ops.control_dependencies([fn_result]):
return [array_ops.identity(f) for f in flat_last_step_outputs]
else:
return fn_result
# TODO(sourabhbajaj): The input to while loop should be based on the output
# type of the step_fn
def iterate_on_tpu():
return training_loop.repeat(iterations, run_fn, initial_loop_values)
replicate_inputs = [[]] * self.num_towers
replicate_outputs = tpu.replicate(iterate_on_tpu, replicate_inputs)
ctx.run_op = control_flow_ops.group(replicate_outputs, enqueue_ops)
# Filter out any ops from the outputs, typically this would be the case
# when there were no tensor outputs.
last_step_tensor_outputs = [x for x in replicate_outputs
if not isinstance(x, ops.Operation)]
# Outputs are currently of the structure (grouped by device)
# [[output0_device0, output1_device0, output2_device0],
# [output0_device1, output1_device1, output2_device1]]
# Convert this to the following structure instead: (grouped by output)
# [[output0_device0, output0_device1],
# [output1_device0, output1_device1],
# [output2_device0, output2_device1]]
last_step_tensor_outputs = [list(x) for x in zip(*last_step_tensor_outputs)]
# Convert replicate_outputs to the original dict structure of
# last_step_outputs.
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for (name, aggregation) in ctx._last_step_outputs_aggregations.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been aggregated, take the first value
# from the list as each value should be the same. Else return the full
# list of values.
if aggregation is not variables_lib.VariableAggregation.NONE:
# TODO(priyag): Should this return the element or a list with 1 element
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _call_for_each_tower(self, fn, *args, **kwargs):
kwargs.pop('run_concurrently', None)
with one_device_strategy._OneDeviceTowerContext(self): # pylint: disable=protected-access
return fn(*args, **kwargs)
def initialize(self):
if context.executing_eagerly():
# TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError('Eager mode not supported in TPUStrategy.')
else:
return [tpu.initialize_system()]
def finalize(self):
if context.executing_eagerly():
# TODO(priyag): Add appropriate call here when eager is supported for TPUs.
raise NotImplementedError('Eager mode not supported in TPUStrategy.')
else:
return [tpu.shutdown_system()]
def _reduce(self, aggregation, value, destinations):
graph = ops.get_default_graph()
cf_context = graph._get_control_flow_context() # pylint: disable=protected-access
# If we're inside the ReplicateContext, reduction should be done using
# CrossReplicaSum while outside we can directly use an add_n op.
while cf_context:
if isinstance(cf_context, tpu.TPUReplicateContext):
if aggregation == vs.VariableAggregation.MEAN:
# TODO(jhseu): Revisit once we support model-parallelism.
value *= (1. / self.num_towers)
return tpu_ops.cross_replica_sum(value)
cf_context = cf_context.outer_context
# Validate that the destination is same as the host device
# Note we don't do this when in replicate context as the reduction is
# performed on the TPU device itself.
devices = cross_tower_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
assert device_util.canonicalize(devices[0]) == device_util.canonicalize(
self._host)
else:
raise ValueError('Multiple devices are not supported for TPUStrategy')
output = math_ops.add_n(value)
if aggregation == vs.VariableAggregation.MEAN:
return output * (1. / len(value))
return output
def _unwrap(self, value):
if isinstance(value, list):
return value
return [value]
@property
def num_towers(self):
return self._tpu_metadata.num_of_cores_per_host
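# Illustrative usage sketch (the resolver address and step count are
# hypothetical, and a reachable TPU is assumed):
#   resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu='grpc://10.0.0.1:8470')
#   strategy = TPUStrategy(resolver, steps_per_run=100)
#   strategy.num_towers  # TPU cores per host, from the queried system metadata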
| |
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Tests for the IBM FlashSystem iSCSI volume driver.
"""
import mock
import six
import random
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import test_ibm_flashsystem as fscommon
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import flashsystem_iscsi
from cinder.volume import volume_types
class FlashSystemManagementSimulator(fscommon.FlashSystemManagementSimulator):
def __init__(self):
# Default protocol is iSCSI
self._protocol = 'iSCSI'
self._volumes_list = {}
self._hosts_list = {}
self._mappings_list = {}
self._next_cmd_error = {
'lsnode': '',
'lssystem': '',
'lsmdiskgrp': ''
}
self._errors = {
# CMMVC50000 is a fake error which indicates that a command has not
# produced the expected results. It stands in for various kinds of CLI errors.
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed '
'successfully.')
}
class FlashSystemFakeISCSIDriver(flashsystem_iscsi.FlashSystemISCSIDriver):
def __init__(self, *args, **kwargs):
super(FlashSystemFakeISCSIDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _ssh(self, cmd, check_exit_code=True):
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
return ret
class FlashSystemISCSIDriverTestCase(test.TestCase):
def _set_flag(self, flag, value):
group = self.driver.configuration.config_group
self.driver.configuration.set_override(flag, value, group)
def _reset_flags(self):
self.driver.configuration.local_conf.reset()
for k, v in self._def_flags.items():
self._set_flag(k, v)
def _generate_vol_info(self,
vol_name,
vol_size=10,
vol_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
if not vol_name:
vol_name = 'test_volume%s' % rand_id
return {'name': vol_name,
'size': vol_size,
'id': '%s' % rand_id,
'volume_type_id': None,
'status': vol_status,
'mdisk_grp_name': 'mdiskgrp0'}
def _generate_snap_info(self,
vol_name,
vol_id,
vol_size,
vol_status,
snap_status='available'):
rand_id = six.text_type(random.randint(10000, 99999))
return {'name': 'test_snap_%s' % rand_id,
'id': rand_id,
'volume': {'name': vol_name,
'id': vol_id,
'size': vol_size,
'status': vol_status},
'volume_size': vol_size,
'status': snap_status,
'mdisk_grp_name': 'mdiskgrp0'}
def setUp(self):
super(FlashSystemISCSIDriverTestCase, self).setUp()
self._def_flags = {'san_ip': 'hostname',
'san_login': 'username',
'san_password': 'password',
'flashsystem_connection_protocol': 'iSCSI',
'flashsystem_multihostmap_enabled': True,
'iscsi_ip_address': '192.168.1.10',
'flashsystem_iscsi_portid': 1}
self.connector = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
self.sim = FlashSystemManagementSimulator()
self.driver = FlashSystemFakeISCSIDriver(
configuration=conf.Configuration(None))
self.driver.set_fake_storage(self.sim)
self._reset_flags()
self.ctxt = context.get_admin_context()
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
self.sleeppatch.start()
def tearDown(self):
self.sleeppatch.stop()
super(FlashSystemISCSIDriverTestCase, self).tearDown()
def test_flashsystem_do_setup(self):
# case 1: set as iSCSI
self.sim.set_protocol('iSCSI')
self._set_flag('flashsystem_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
self.assertEqual('iSCSI', self.driver._protocol)
# clear environment
self.sim.set_protocol('iSCSI')
self._reset_flags()
def test_flashsystem_validate_connector(self):
conn_neither = {'host': 'host'}
conn_iscsi = {'host': 'host', 'initiator': 'foo'}
conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'}
protocol = self.driver._protocol
# case 1: when protocol is iSCSI
self.driver._protocol = 'iSCSI'
self.driver.validate_connector(conn_iscsi)
self.driver.validate_connector(conn_both)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, conn_neither)
# clear environment
self.driver._protocol = protocol
def test_flashsystem_connection(self):
# case 1: initialize_connection/terminate_connection with iSCSI
self.sim.set_protocol('iSCSI')
self._set_flag('flashsystem_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.driver.initialize_connection(vol1, self.connector)
self.driver.terminate_connection(vol1, self.connector)
# clear environment
self.driver.delete_volume(vol1)
self.sim.set_protocol('iSCSI')
self._reset_flags()
def test_flashsystem_create_host(self):
# case 1: create host with iqn
self.sim.set_protocol('iSCSI')
self._set_flag('flashsystem_connection_protocol', 'iSCSI')
self.driver.do_setup(None)
conn = {
'host': 'flashsystem',
'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
'wwpns': ['abcd000000000001', 'abcd000000000002'],
'initiator': 'iqn.123456'}
host = self.driver._create_host(conn)
# case 2: delete host
self.driver._delete_host(host)
# clear environment
self.sim.set_protocol('iSCSI')
self._reset_flags()
def test_flashsystem_get_vdisk_params(self):
# case 1: use default params
self.driver._get_vdisk_params(None)
# case 2: use extra params from type
opts1 = {'storage_protocol': 'iSCSI'}
opts2 = {'capabilities:storage_protocol': 'iSCSI'}
opts3 = {'storage_protocol': 'FC'}
type1 = volume_types.create(self.ctxt, 'opts1', opts1)
type2 = volume_types.create(self.ctxt, 'opts2', opts2)
type3 = volume_types.create(self.ctxt, 'opts3', opts3)
self.assertEqual(
'iSCSI',
self.driver._get_vdisk_params(type1['id'])['protocol'])
self.assertEqual(
'iSCSI',
self.driver._get_vdisk_params(type2['id'])['protocol'])
self.assertRaises(exception.InvalidInput,
self.driver._get_vdisk_params,
type3['id'])
# clear environment
volume_types.destroy(self.ctxt, type1['id'])
volume_types.destroy(self.ctxt, type2['id'])
volume_types.destroy(self.ctxt, type3['id'])
def test_flashsystem_map_vdisk_to_host(self):
# case 1: no host found
vol1 = self._generate_vol_info(None)
self.driver.create_volume(vol1)
self.assertEqual(
# lun id should begin with 1
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# case 2: host already exists
vol2 = self._generate_vol_info(None)
self.driver.create_volume(vol2)
self.assertEqual(
# lun id should be sequential
2,
self.driver._map_vdisk_to_host(vol2['name'], self.connector))
# case 3: test if already mapped
self.assertEqual(
1,
self.driver._map_vdisk_to_host(vol1['name'], self.connector))
# clean environment
self.driver._unmap_vdisk_from_host(vol1['name'], self.connector)
self.driver._unmap_vdisk_from_host(vol2['name'], self.connector)
self.driver.delete_volume(vol1)
self.driver.delete_volume(vol2)
# case 4: If there is no vdisk mapped to host, host should be removed
self.assertIsNone(self.driver._get_host_from_connector(self.connector))
| |
# -*- coding: utf-8 -*-
"""
Coinkit
~~~~~
:copyright: (c) 2014 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import os
import json
import ecdsa
import hashlib
import binascii
from .entropy import random_secret_exponent
from .passphrase import random_256bit_passphrase, random_160bit_passphrase
from .b58check import b58check_encode, b58check_decode, b58check_unpack, \
b58check_version_byte
from .utils import is_secret_exponent, is_256bit_hex_string, \
is_wif_pk, is_b58check_address, extract_pk_as_int
from characters.hex import is_hex
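# hash160 is the standard Bitcoin public-key hash: RIPEMD-160 of the SHA-256
# of the input bytes.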
def bin_hash160(s):
return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()
_error_messages = {
"CURVE_ORDER_EXCEEDED": ("Invalid passphrase. The SHA256 hash of this "
"passphrase exceeds the curve order. Please try another passphrase."),
"MUST_BE_VALID_PRIVKEY_FORMAT": ("Format must be bin, hex, wif, or "
"b58check."),
"MUST_BE_VALID_PUBKEY_FORMAT": ("Format must be bin or hex"),
"MUST_BE_VALID_HASH160_FORMAT": ("format must be bin, hex or "
"b58check."),
"NOT_A_BRAIN_WALLET": ("No passphrase! This isn't a brain wallet address!")
}
class BitcoinKeypair():
_curve = ecdsa.curves.SECP256k1
_hash_function = hashlib.sha256
_pubkeyhash_version_byte = 0
@classmethod
def version_byte(cls, type='pubkey_hash'):
if type == 'pubkey_hash':
return cls._pubkeyhash_version_byte
elif type == 'private_key':
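# Private-key (WIF) version bytes are offset by 128 from the
# pubkey-hash version byte (e.g. 0 -> 128 on Bitcoin mainnet).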
return (cls._pubkeyhash_version_byte + 128) % 256
else:
raise Exception("type must be 'pubkey_hash' or 'privatekey'")
def __init__(self, private_key=None):
""" Takes in a private key/secret exponent.
"""
if private_key:
secret_exponent = extract_pk_as_int(private_key, self._curve.order)
else:
secret_exponent = random_secret_exponent(self._curve.order)
self._ecdsa_private_key = ecdsa.keys.SigningKey.from_secret_exponent(
secret_exponent, self._curve, self._hash_function
)
self._ecdsa_public_key = self._ecdsa_private_key.get_verifying_key()
@classmethod
def from_private_key(cls, private_key=None):
return cls(private_key)
@classmethod
def from_passphrase(cls, passphrase=None):
""" Create keypair from a passphrase input (a brain wallet keypair)."""
if not passphrase:
# run a rejection sampling algorithm to ensure the private key is
# less than the curve order
while True:
passphrase = random_160bit_passphrase()
hex_private_key = hashlib.sha256(passphrase).hexdigest()
if int(hex_private_key, 16) < cls._curve.order:
break
else:
hex_private_key = hashlib.sha256(passphrase).hexdigest()
if not (int(hex_private_key, 16) < cls._curve.order):
raise Exception(_error_messages["CURVE_ORDER_EXCEEDED"])
keypair = cls(hex_private_key)
keypair._passphrase = passphrase
return keypair
def _bin_private_key(self):
return self._ecdsa_private_key.to_string()
def _bin_public_key(self):
return '\x04' + self._ecdsa_public_key.to_string()
def _bin_hash160(self):
return bin_hash160(self._bin_public_key())
def private_key(self, format='hex'):
if format == 'bin':
return self._bin_private_key()
elif format == 'hex':
return binascii.hexlify(self._bin_private_key())
elif format == 'wif' or format == 'b58check':
return b58check_encode(self._bin_private_key(),
version_byte=self.version_byte('private_key'))
else:
raise Exception(_error_messages["MUST_BE_VALID_PRIVKEY_FORMAT"])
def public_key(self, format='hex'):
if format == 'bin':
return self._bin_public_key()
elif format == 'hex':
return binascii.hexlify(self._bin_public_key())
else:
raise Exception(_error_messages["MUST_BE_VALID_PUBKEY_FORMAT"])
def hash160(self, format='hex'):
if format == 'bin':
return self._bin_hash160()
elif format == 'hex':
return binascii.hexlify(self._bin_hash160())
elif format == 'b58check':
return b58check_encode(self._bin_hash160(),
version_byte=self.version_byte('pubkey_hash'))
else:
raise Exception(_error_messages["MUST_BE_VALID_HASH160_FORMAT"])
def secret_exponent(self):
""" The secret exponent is the private key in int or hex format. """
return self.private_key('hex')
def wif_pk(self):
""" The "wif pk" is the private key in wallet import format. """
return self.private_key('wif')
def address(self):
""" The address is the hash160 in b58check format. """
return self.hash160('b58check')
""" Brain wallet address methods """
def passphrase(self):
if hasattr(self, '_passphrase'):
return self._passphrase
else:
raise Exception(_error_messages["NOT_A_BRAIN_WALLET"])
class LitecoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 48
class NamecoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 52
class PeercoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 55
class PrimecoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 23
class DogecoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 30
class WorldcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 73
class FeathercoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 14
class TerracoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 0
class NovacoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 8
class IxcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 138
class TestnetKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 111
class ProtosharesKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 56
class MemorycoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 50
class QuarkcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 58
class InfinitecoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 102
class CryptogenicbullionKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 11
class AnoncoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 23
class MegacoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 50
class EarthcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 93
class NetcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 112
class HuntercoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 40
class VertcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 71
class ReddcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 61
class GwangcoinKeypair(BitcoinKeypair):
_pubkeyhash_version_byte = 125
# TO DO:
# auroracoin
# counterparty
# darkcoin
# ybcoin
# maxcoin
# mintcoin
# devcoin
# tickets
# freicoin
# zetacoin
# digitalcoin
# copperlark
# applecoin
# unobtanium
# fedoracoin
# cachecoin
# mincoin
# ultracoin
# colossuscoin
# blackcoin
# securecoin
# gridcoin
# billioncoin
# kittehcoin
# karmacoin
# mooncoin
# sexcoin
| |
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold an exception raised during registration.
This allows the serializer registration to cache serializers: if an error
is raised while creating a serializer, it will be raised and passed along
to the caller when the serializer is used.
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
dependency has its dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if field.remote_field:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.remote_field.through._meta.auto_created:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all its dependencies satisfied.
found = all(d not in models or d in model_list for d in deps)
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
| |
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import time
import argparse
import ConfigParser
from vnc_api.vnc_api import *
from cfgm_common.exceptions import *
from vnc_admin_api import VncApiAdmin
class VrouterProvisioner(object):
def __init__(self, args_str=None):
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self._parse_args(args_str)
connected = False
tries = 0
while not connected:
try:
self._vnc_lib = VncApiAdmin(
self._args.use_admin_api,
self._args.admin_user, self._args.admin_password,
self._args.admin_tenant_name,
self._args.api_server_ip,
self._args.api_server_port, '/',
auth_host=self._args.openstack_ip,
api_server_use_ssl=self._args.api_server_use_ssl)
connected = True
except ResourceExhaustionError: # haproxy throws 503
if tries < 10:
tries += 1
time.sleep(3)
else:
raise
gsc_obj = self._vnc_lib.global_system_config_read(
fq_name=['default-global-system-config'])
self._global_system_config_obj = gsc_obj
rt_inst_obj = self._vnc_lib.routing_instance_read(
fq_name=['default-domain', 'default-project',
'ip-fabric', '__default__'])
self._fab_rt_inst_obj = rt_inst_obj
if self._args.oper == 'add':
self.add_vrouter()
if not self._args.disable_vhost_vmi:
self.add_vhost0_vmi()
elif self._args.oper == 'del':
self.del_vhost0_vmi()
self.del_vrouter()
else:
print "Unknown operation %s. Only 'add' and 'del' supported"\
% (self._args.oper)
# end __init__
def _parse_args(self, args_str):
'''
Eg. python provision_vrouter.py --host_name a3s30.contrail.juniper.net
--host_ip 10.1.1.1
--api_server_ip 127.0.0.1
--api_server_port 8082
--api_server_use_ssl False
--oper <add | del>
[--dpdk-enabled]
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'oper': 'add',
'control_names': [],
'router_type': None,
'dpdk_enabled': False,
'disable_vhost_vmi': False,
'enable_vhost_vmi_policy': False,
'sub_cluster_name': None,
}
ksopts = {
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'default-domain'
}
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
# Override with CLI options
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(ksopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--host_name", help="hostname name of compute-node", required=True)
parser.add_argument("--host_ip", help="IP address of compute-node", required=True)
parser.add_argument(
"--control_names",
help="List of control-node names compute node connects to")
parser.add_argument("--api_server_port", help="Port of api server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument(
"--oper", default='add',
help="Provision operation to be done(add or del)")
parser.add_argument(
"--admin_user", help="Name of keystone admin user")
parser.add_argument(
"--admin_password", help="Password of keystone admin user")
parser.add_argument(
"--admin_tenant_name", help="Tenamt name for keystone admin user")
parser.add_argument(
"--openstack_ip", help="IP address of openstack node")
parser.add_argument(
"--router_type", help="Type of the virtual router (tor-service-node,embedded or none)")
parser.add_argument(
"--dpdk_enabled", action="store_true", help="Whether forwarding mode on vrouter is DPDK based")
parser.add_argument(
"--disable_vhost_vmi", action="store_true", help="Do not create vhost0 vmi if flag is set")
parser.add_argument(
"--enable_vhost_vmi_policy", action="store_true", help="Enable vhost0 vmi policy if flag is set")
parser.add_argument(
"--sub_cluster_name", help="Sub cluster this vrouter to be part of")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--api_server_ip", help="IP address of api server",
nargs='+', type=str)
group.add_argument("--use_admin_api",
default=False,
help = "Connect to local api-server on admin port",
action="store_true")
self._args = parser.parse_args(remaining_argv)
# end _parse_args
def add_vrouter(self):
gsc_obj = self._global_system_config_obj
vrouter_obj = VirtualRouter(
self._args.host_name, gsc_obj,
virtual_router_ip_address=self._args.host_ip)
vrouter_exists = True
try:
vrouter_obj = self._vnc_lib.virtual_router_read(
fq_name=vrouter_obj.get_fq_name())
except NoIdError:
vrouter_exists = False
if self._args.sub_cluster_name:
sub_cluster_obj = SubCluster(self._args.sub_cluster_name)
try:
sub_cluster_obj = self._vnc_lib.sub_cluster_read(
fq_name=sub_cluster_obj.get_fq_name())
except NoIdError:
raise RuntimeError("Sub cluster has to be provisioned first")
vrouter_obj.add_sub_cluster(sub_cluster_obj)
# Configure router type
if self._args.router_type:
vrouter_obj.set_virtual_router_type(self._args.router_type)
if self._args.dpdk_enabled:
vrouter_obj.set_virtual_router_dpdk_enabled(True)
else:
vrouter_obj.set_virtual_router_dpdk_enabled(False)
if vrouter_exists:
self.vrouter_fq_name = vrouter_obj.get_fq_name()
self._vnc_lib.virtual_router_update(vrouter_obj)
else:
try:
self.vrouter_fq_name = vrouter_obj.get_fq_name()
self._vnc_lib.virtual_router_create(vrouter_obj)
except RefsExistError:
print "Already created!"
# end add_vrouter
def add_vhost0_vmi(self):
vrouter_exists = True
try:
vrouter_obj = self._vnc_lib.virtual_router_read(
fq_name=self.vrouter_fq_name)
except NoIdError:
vrouter_exists = False
if not vrouter_exists:
print "No vrouter object found cannot add vhost0 vmi !"
return
try:
            # Copy the list; appending to self.vrouter_fq_name directly would
            # mutate it in place, since list assignment only aliases.
            vhost0_vmi_fq_name = list(self.vrouter_fq_name)
            vhost0_vmi_fq_name.append('vhost0')
vhost0_vmi = self._vnc_lib.virtual_machine_interface_read(
                fq_name=vhost0_vmi_fq_name)
vhost0_vmi_exists = True
except NoIdError:
vhost0_vmi_exists = False
            vhost0_vmi = VirtualMachineInterface(name="vhost0", parent_obj=vrouter_obj)
        ip_fab_vn = self._vnc_lib.virtual_network_read(fq_name=[u'default-domain', u'default-project', u'ip-fabric'])
vhost0_vmi.set_virtual_network(ip_fab_vn)
# Enable/Disable policy on the vhost0 vmi
if self._args.enable_vhost_vmi_policy:
vhost0_vmi.set_virtual_machine_interface_disable_policy(False)
else:
vhost0_vmi.set_virtual_machine_interface_disable_policy(True)
if vhost0_vmi_exists:
self._vnc_lib.virtual_machine_interface_update(vhost0_vmi)
else:
try:
self._vnc_lib.virtual_machine_interface_create(vhost0_vmi)
except RefsExistError:
print "vhost0 vmi already created!"
# end add_vhost0_vmi
def del_vrouter(self):
gsc_obj = self._global_system_config_obj
vrouter_obj = VirtualRouter(self._args.host_name, gsc_obj)
vrouter_exists = True
try:
vrouter = self._vnc_lib.virtual_router_read(
fq_name=vrouter_obj.get_fq_name())
except NoIdError:
vrouter_exists = False
if vrouter_exists:
self._vnc_lib.virtual_router_delete(
fq_name=vrouter_obj.get_fq_name())
else:
print " vrouter object not found "
# end del_vrouter
def del_vhost0_vmi(self):
gsc_obj = self._global_system_config_obj
vrouter_obj = VirtualRouter(self._args.host_name, gsc_obj)
vhost0_vmi_fq_name = vrouter_obj.get_fq_name()
vhost0_vmi_fq_name.append('vhost0')
vhost0_vmi_exists = True
try:
vhost0_vmi = self._vnc_lib.virtual_machine_interface_read(
                fq_name=vhost0_vmi_fq_name)
except NoIdError:
vhost0_vmi_exists = False
if vhost0_vmi_exists:
self._vnc_lib.virtual_machine_interface_delete(fq_name=vhost0_vmi_fq_name)
print " Deleted vhost0 vmi %s " % vhost0_vmi_fq_name
else:
print " No vhost0 vmi found "
# end del_vhost0_vmi
# end class VrouterProvisioner
def main(args_str=None):
VrouterProvisioner(args_str)
# end main
if __name__ == "__main__":
main()
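# A minimal usage sketch (the script name, host names and addresses below are
# hypothetical; real values come from your deployment):
#
#   python provision_vrouter.py --host_name compute-1 --host_ip 10.0.0.5 \
#       --api_server_ip 10.0.0.2 --oper add
#
# Passing "--oper del" with the same --host_name removes the vrouter again
# (handled by del_vrouter above).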
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._webhooks_operations import build_create_request_initial, build_delete_request_initial, build_get_callback_config_request, build_get_request, build_list_events_request, build_list_request, build_ping_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WebhooksOperations:
"""WebhooksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
**kwargs: Any
) -> "_models.Webhook":
"""Gets the properties of the specified webhook.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Webhook, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.Webhook
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Webhook"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Webhook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
webhook_create_parameters: "_models.WebhookCreateParameters",
**kwargs: Any
) -> "_models.Webhook":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Webhook"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(webhook_create_parameters, 'WebhookCreateParameters')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Webhook', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Webhook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
webhook_create_parameters: "_models.WebhookCreateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.Webhook"]:
"""Creates a webhook for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param webhook_create_parameters: The parameters for creating a webhook.
:type webhook_create_parameters:
~azure.mgmt.containerregistry.v2020_11_01_preview.models.WebhookCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Webhook or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.Webhook]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Webhook"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
webhook_create_parameters=webhook_create_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Webhook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a webhook from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
webhook_update_parameters: "_models.WebhookUpdateParameters",
**kwargs: Any
) -> "_models.Webhook":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Webhook"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(webhook_update_parameters, 'WebhookUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Webhook', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Webhook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
webhook_update_parameters: "_models.WebhookUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.Webhook"]:
"""Updates a webhook with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:param webhook_update_parameters: The parameters for updating a webhook.
:type webhook_update_parameters:
~azure.mgmt.containerregistry.v2020_11_01_preview.models.WebhookUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Webhook or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2020_11_01_preview.models.Webhook]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Webhook"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
webhook_update_parameters=webhook_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Webhook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.WebhookListResult"]:
"""Lists all the webhooks for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebhookListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2020_11_01_preview.models.WebhookListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebhookListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("WebhookListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks'} # type: ignore
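    # Usage sketch (assumes an authenticated async management client named
    # `client`; the resource names are placeholders). The AsyncItemPaged
    # returned by list() is consumed with `async for`, which follows
    # next_link pages transparently:
    #
    #   async for webhook in client.webhooks.list("my-rg", "myregistry"):
    #       print(webhook.name)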
@distributed_trace_async
async def ping(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
**kwargs: Any
) -> "_models.EventInfo":
"""Triggers a ping event to be sent to the webhook.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventInfo, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.EventInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_ping_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
template_url=self.ping.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
ping.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/ping'} # type: ignore
@distributed_trace_async
async def get_callback_config(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
**kwargs: Any
) -> "_models.CallbackConfig":
"""Gets the configuration of service URI and custom headers for the webhook.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CallbackConfig, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2020_11_01_preview.models.CallbackConfig
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CallbackConfig"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_callback_config_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
template_url=self.get_callback_config.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CallbackConfig', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_callback_config.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/getCallbackConfig'} # type: ignore
@distributed_trace
def list_events(
self,
resource_group_name: str,
registry_name: str,
webhook_name: str,
**kwargs: Any
) -> AsyncIterable["_models.EventListResult"]:
"""Lists recent events for the specified webhook.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param webhook_name: The name of the webhook.
:type webhook_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2020_11_01_preview.models.EventListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_events_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
template_url=self.list_events.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_events_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
webhook_name=webhook_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("EventListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_events.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/webhooks/{webhookName}/listEvents'} # type: ignore
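# A minimal end-to-end sketch (not part of the generated module; the
# subscription id and resource names are placeholders). The operation group
# is reached through the async management client rather than constructed
# directly:
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.containerregistry.aio import ContainerRegistryManagementClient
#
#   async def ping_webhook():
#       async with ContainerRegistryManagementClient(
#               DefaultAzureCredential(), "<subscription-id>") as client:
#           event = await client.webhooks.ping("my-rg", "myregistry", "mywebhook")
#           print(event.id)
#
#   asyncio.run(ping_webhook())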
| |
#!/usr/bin/env python
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command Line Interface to LunR Admin API
"""
from lunr.common.subcommand import SubCommand, SubCommandParser,\
opt, noargs, SubCommandError, confirm, Displayable
from urllib2 import urlopen, Request, HTTPError, URLError
from lunr.db import models
from lunr.common.exc import HTTPClientError
from lunr.common.config import LunrConfig
from httplib import HTTPException
from lunr.common import logger
from urlparse import urlparse
from urllib import urlencode
from functools import wraps
from json import loads
from StringIO import StringIO
import sys
import os
class Console(SubCommand):
def __init__(self):
# let the base class setup methods in our class
SubCommand.__init__(self)
# Add global arguments for this subcommand
self.opt('-c', '--config', default=None,
help="config file (default: /etc/lunr/storage-server.conf)")
self.opt('-v', '--verbose', action='count',
help="be verbose (-vv is more verbose)")
self.opt('-u', '--url', default=None, help="lunr admin api url")
def lookup_id(self, id):
params = {'name': id}
volumes = self.request('/volumes', params=params)
found = None
for volume in volumes:
            if volume['status'] != 'DELETED':
if found:
raise HTTPError('unused', 409, 'Conflict',
{}, StringIO('{"reason": "conflict"}'))
found = volume
if not found:
raise HTTPError('unused', 404, 'Not Found',
{}, StringIO('{"reason": "not found"}'))
return found['id']
def node_request(self, id, uri, **kwargs):
warn_on_errors = kwargs.pop('warn_on_errors', True)
node = self.request('/nodes/%s' % id)
try:
url = "http://%s:%s%s" % (node['hostname'], node['port'], uri)
return (self.urlopen(url, **kwargs), node)
except HTTPClientError, e:
node['error'] = e
if warn_on_errors:
print "** %s" % e
return (False, node)
def request(self, uri, **kwargs):
if not self.url:
conf = self.load_conf(self.config)
self.url = conf.string('storage', 'api_server',
'http://localhost:8080')
url = self.ip_to_url(self.url)
return self.urlopen("%s/v1.0/admin%s" % (url, uri), **kwargs)
def ip_to_url(self, url):
"""
Allows the user to specify a host or host:port
example: -u 192.168.5.1 or -u 192.168.5.1:8080
"""
uri = urlparse(url)
# If no scheme, assume it's an ip/hostname
if not uri.scheme:
return self.ip_to_url('http://%s' % url)
# If no port, assume we want port 8080
if not uri.port:
return self.ip_to_url('%s:8080' % url)
return url
def urlopen(self, url, method='GET', params=None, headers=None):
params = params or {}
headers = headers or {}
data = urlencode(params)
if method in ('GET', 'HEAD', 'DELETE') and data:
url += '?' + data
req = Request(url, data, headers)
req.get_method = lambda *args, **kwargs: method
try:
if self.verbose:
print "-- %s on %s with %s "\
% (req.get_method(), req.get_full_url(), params)
resp = urlopen(req)
return loads(''.join(resp.readlines()))
except (HTTPError, URLError, HTTPException), e:
raise HTTPClientError(req, e)
def unused(self, _dict):
""" Remove unused parameters from the dict """
result = {}
for key, value in _dict.items():
if value is not None:
result[key] = value
return result
def only(self, _dict, allowed):
""" Return a dict of only the allowed keys """
result = {}
for key, value in _dict.items():
if key in allowed:
result[key] = value
return result
def load_conf(self, file):
try:
return LunrConfig.from_conf(file or LunrConfig.lunr_storage_config)
except IOError, e:
if file or self.verbose:
print 'Warning: %s' % e
return LunrConfig()
class TypeConsole(Console, Displayable):
def __init__(self):
# let the base class setup methods in our class
Console.__init__(self)
# Give our sub command a name
self._name = 'type'
@noargs
def list(self):
""" List all volume types """
resp = self.request('/volume_types')
self.display(resp, ['name', 'status', 'min_size', 'max_size',
'read_iops', 'write_iops'])
@opt('name', help="name of the volume type")
def get(self, name=None):
""" Display details of a volume type """
resp = self.request('/volume_types/%s' % name)
self.display(resp)
@opt('--min-size', help="minimum volume size")
@opt('--max-size', help="maximum volume size")
@opt('-r', '--read-iops', help="read iops")
@opt('-w', '--write-iops', help="write iops")
@opt('name', help="name of the volume type")
def create(self, args):
""" Create a new volume type """
# Only these args are parameters
params = self.only(args, models.VolumeType.get_mutable_column_names())
# Remove any parameters that are None
params = self.unused(params)
resp = self.request('/volume_types', method='POST', params=params)
self.display(resp)
@opt('name', help="name of the volume type")
def delete(self, name):
""" Mark a volume type as deleted """
resp = self.request('/volume_types/%s' % name,
method='DELETE')
self.display(resp)
class NodeConsole(Console, Displayable):
def __init__(self):
# let the base class setup methods in our class
Console.__init__(self)
# Give our sub command a name
self._name = 'node'
@noargs
def list(self):
""" List all nodes """
resp = self.request('/nodes')
self.display(resp, ['id', 'name', 'status', 'volume_type_name',
'hostname', 'storage_hostname'])
@opt('id', help="id of the node")
def get(self, id):
""" Display details of a given node """
result = self.request('/nodes/%s' % id)
self.display(result)
@opt('-H', '--hostname', required=True, help='api hostname')
@opt('-P', '--port', required=True, help='api port')
@opt('-S', '--storage-hostname', required=True, help='storage hostname')
@opt('-t', '--volume-type-name', required=True,
help='type of storage (volume_type_name)')
@opt('-s', '--size', required=True, help='size in GB')
@opt('--status', default='PENDING',
help="status of node (Default is 'PENDING'")
@opt('name', help="name of the new node")
def create(self, args):
""" Create a new node """
# Only these args are parameters
params = self.only(args, models.Node.get_mutable_column_names())
# Remove any parameters that are None
params = self.unused(params)
resp = self.request('/nodes', method='POST', params=params)
self.display(resp)
@opt('-H', '--hostname', help='api hostname')
@opt('-P', '--port', help='api port')
@opt('-S', '--storage-hostname', help='storage hostname')
@opt('-t', '--volume-type-name',
help='type of storage (volume_type_name)')
@opt('-s', '--size', help='size in GB')
@opt('--status', help='status of node')
@opt('id', help="id of the node to update")
def update(self, id, args):
""" Update a node """
# Only these args are parameters
params = self.only(args, models.Node.get_mutable_column_names())
# Remove any parameters that are None
params = self.unused(params)
# Post the update parameters
resp = self.request('/nodes/%s' % id, method='POST', params=params)
self.display(resp)
@opt('-a', '--all', action='store_true', help='deploy all nodes')
@opt('id', nargs='?', help="id of the node to deploy")
def deploy(self, id=None, all=None):
""" Mark a PENDING node(s) ACTIVE """
if not all and not id:
return self._parser.print_help()
if all:
nodes = self.request('/nodes')
nodes = [n for n in nodes if n['status'] == 'PENDING']
else:
node = self.request('/nodes/%s' % id)
if node['status'] != 'PENDING':
if not confirm("Node '%s' status is '%s' set to 'ACTIVE'"
% (node['id'], node['status'])):
return 1
nodes = [node]
results = []
for node in nodes:
resp = self.request('/nodes/%s' % node['id'], method='POST',
params={'status': 'ACTIVE'})
results.append(resp)
if results:
self.display(results, ['id', 'status'])
return 0
print "No nodes in 'PENDING' status"
@opt('id', help="id of the node to delete")
def delete(self, id):
""" Mark a given node as deleted """
resp = self.request('/nodes/%s' % id, method='DELETE')
self.display(resp)
class AccountConsole(Console, Displayable):
def __init__(self):
# let the base class setup methods in our class
Console.__init__(self)
# Give our sub command a name
self._name = 'account'
@noargs
def list(self):
""" List all accounts for everyone """
resp = self.request('/accounts')
self.display(resp, ['id', 'status'])
@opt('account', help="account to get (id or name)")
def get(self, account):
""" List details for a specific account """
self.display(self.request('/accounts/%s' % account))
class VolumeConsole(Console, Displayable):
def __init__(self):
# let the base class setup methods in our class
Console.__init__(self)
# Give our sub command a name
self._name = 'volume'
@opt('-s', '--status', help="Filter the list by status")
@opt('-a', '--account-id', help="Filter the list by account_id")
@opt('-n', '--node-id', help="Filter the list by node_id")
@opt('-i', '--id', help="Filter the list by volume id")
@opt('-r', '--restore-of', help="Filter the list by restore_of")
@opt('-N', '--name', help="Filter the list by name")
def list(self, args):
""" List all volumes for everyone """
filters = self.remove(args, ['config', 'verbose', 'url'])
resp = self.request('/volumes', params=self.unused(filters))
self.display(resp, ['id', 'name', 'status',
'size', 'volume_type_name'])
def _print_volume(self, volume):
self.display(volume)
# Get the node info for this volume
print "\n-- Node %s --" % volume['node_id']
node = self.request('/nodes/%s' % volume['node_id'])
self.display(node)
@opt('id', help="id of the volume to get")
def get(self, id):
""" List details for a specific volume """
# Get volume info
volume = self.request('/volumes/%s' % id)
self._print_volume(volume)
if volume['status'] == 'DELETED':
try:
volume_id = self.lookup_id(id)
volume = self.request('/volumes/%s' % volume_id)
print "\n-- Migrated volume %s --" % volume['id']
self._print_volume(volume)
except Exception, e:
if e.code != 404:
raise
@opt('id', help="id of the volume to delete")
def delete(self, id):
""" Delete a specific volume """
resp = self.request('/volumes/%s' % id, method='DELETE')
self.display(resp)
class ExportConsole(Console, Displayable):
_name = 'export'
@opt('id', help="id of the volume to get export")
def get(self, id):
""" List export details for a specific volume """
# Get export info
resp = self.request('/volumes/%s/export' % id)
self.display(resp)
volume = self.request('/volumes/%s' % id)
if volume['status'] == 'DELETED':
try:
volume_id = self.lookup_id(id)
resp = self.request('/volumes/%s/export' % volume_id)
print "\n-- Migrated volume %s --" % volume['id']
self.display(resp)
except Exception, e:
if e.code != 404:
raise
@opt('id', help="id of the volume to create export")
def create(self, id):
"""Create an export for a specific volume"""
self.request('/volumes/%s/export' % id, method='PUT')
@opt('id', help="id of the volume to delete export")
def delete(self, id):
"""Delete an export for a specific volume"""
self.request('/volumes/%s/export' % id, method='DELETE')
class BackupConsole(Console, Displayable):
def __init__(self):
# let the base class setup methods in our class
Console.__init__(self)
# Give our sub command a name
self._name = 'backup'
@opt('-a', '--account-id', help='filter results by account id')
@opt('-V', '--volume-id', help='filter results by volume id')
def list(self, args):
""" List all backups for everyone """
# Only these args are parameters
params = self.only(args, models.Backup.get_mutable_column_names())
# Remove any parameters that are None
params = self.unused(params)
resp = self.request('/backups', params=params)
self.display(resp, ['id', 'status', 'size', 'volume_id'])
@opt('id', help="id of the backup to get")
def get(self, id):
""" get details for a specific backup """
resp = self.request('/backups/%s' % id)
self.display(resp)
@opt('id', help="id of the backup to delete")
def delete(self, id):
""" Delete a specific backup """
resp = self.request('/backups/%s' % id, method='DELETE')
self.display(resp)
class ToolConsole(Console, Displayable):
def __init__(self):
# let the base class setup methods in our class
Console.__init__(self)
# Give our sub command a name
self._name = 'tools'
def _is_connected(self, payload):
if not payload:
return '(error)'
sessions = payload.get('sessions', [])
ips = []
for session in sessions:
ip = session.get('ip', False)
if ip:
ips.append(ip)
if ips:
return ', '.join(ips)
return '(not connected)'
def _iqn(self, payload):
if not payload:
return '(error)'
return payload.get('name', '(not exported)')
@opt('-d', '--deleted', action='store_true',
help="include deleted volumes in the listing")
@opt('account', help="account id")
def account(self, account, deleted=None):
"""
Display all available information about the account
        and the status of its volumes
"""
results = []
volumes = self.request('/volumes', params={'account_id': account})
# Get a list of all volumes for this account
for volume in volumes:
if volume['status'] == 'DELETED' and deleted is None:
continue
# Get the status of the volume from the storage node
(payload, node) = self.node_request(volume['node_id'],
'/volumes/%s' % volume['id'])
results.append({'volume_id': volume['id'],
'volume': volume['name'],
'size': volume['size'],
'node': "http://%s:%s" % (node['hostname'],
node['port']),
'in-use': self._is_connected(payload),
'status': volume['status']})
self.display(self.request('/accounts/%s' % account),
['name', 'status', 'last_modified', 'created_at'])
if results:
return self.display(results, ['volume', 'status', 'size',
'node', 'in-use'])
print "-- This account has no active volumes --"
def _print_volume(self, volume):
(payload, node) = self.node_request(volume['node_id'],
'/volumes/%s/export' %
volume['id'],
warn_on_errors=False)
if 'error' in node:
if node['error'].code == 404:
# no export, fill payload
payload = {'connected': False}
else:
print '** %s' % node['error']
volume['node-url'] = "http://%s:%s/volumes/%s" % (node['hostname'],
node['port'],
volume['id'])
volume['in-use'] = self._is_connected(payload)
volume['iqn'] = self._iqn(payload)
self.display(volume, ['account_id', 'status', 'size', 'node_id',
'node-url', 'in-use', 'iqn', 'created_at',
'last_modified'])
@opt('id', help="volume id")
def volume(self, id):
""" Display all available information about the volume """
volume = self.request('/volumes/%s' % id)
self._print_volume(volume)
if volume['status'] == 'DELETED':
try:
volume_id = self.lookup_id(id)
volume = self.request('/volumes/%s' % volume_id)
print "\n-- Migrated volume %s --" % volume['id']
self._print_volume(volume)
except Exception, e:
if e.code != 404:
raise
def main(argv=sys.argv[1:]):
logger.configure(log_to_console=True, level=logger.DEBUG,
lunr_log_level=logger.DEBUG, capture_stdio=False)
# Create the top-level parser
parser = SubCommandParser([
TypeConsole(), NodeConsole(), AccountConsole(), VolumeConsole(),
BackupConsole(), ExportConsole(), ToolConsole(),
], desc=__doc__.strip())
# execute the command requested
try:
return parser.run(argv)
except HTTPClientError, e:
print str(e)
return 1
if __name__ == "__main__":
sys.exit(main())
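# Example invocations, assuming this module is installed as a console script
# (the name 'lunr-admin' and the ids below are hypothetical):
#
#   lunr-admin type list
#   lunr-admin node deploy --all
#   lunr-admin -v tools account <account-id>
#
# The admin API URL defaults to the 'api_server' setting of the storage
# config and can be overridden with -u HOST[:PORT].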
| |
from __future__ import unicode_literals
import json
from moto.core.responses import BaseResponse
from .models import redshift_backends
class RedshiftResponse(BaseResponse):
@property
def redshift_backend(self):
return redshift_backends[self.region]
def create_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param('ClusterIdentifier'),
"node_type": self._get_param('NodeType'),
"master_username": self._get_param('MasterUsername'),
"master_user_password": self._get_param('MasterUserPassword'),
"db_name": self._get_param('DBName'),
"cluster_type": self._get_param('ClusterType'),
"cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'),
"vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'),
"cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
"availability_zone": self._get_param('AvailabilityZone'),
"preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
"cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
"automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
"port": self._get_int_param('Port'),
"cluster_version": self._get_param('ClusterVersion'),
"allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
"number_of_nodes": self._get_int_param('NumberOfNodes'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"region": self.region,
}
cluster = self.redshift_backend.create_cluster(**cluster_kwargs)
return json.dumps({
"CreateClusterResponse": {
"CreateClusterResult": {
"Cluster": cluster.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_clusters(self):
cluster_identifier = self._get_param("ClusterIdentifier")
clusters = self.redshift_backend.describe_clusters(cluster_identifier)
return json.dumps({
"DescribeClustersResponse": {
"DescribeClustersResult": {
"Clusters": [cluster.to_json() for cluster in clusters]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def modify_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param('ClusterIdentifier'),
"new_cluster_identifier": self._get_param('NewClusterIdentifier'),
"node_type": self._get_param('NodeType'),
"master_user_password": self._get_param('MasterUserPassword'),
"cluster_type": self._get_param('ClusterType'),
"cluster_security_groups": self._get_multi_param('ClusterSecurityGroups.member'),
"vpc_security_group_ids": self._get_multi_param('VpcSecurityGroupIds.member'),
"cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
"preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
"cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
"automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
"cluster_version": self._get_param('ClusterVersion'),
"allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
"number_of_nodes": self._get_int_param('NumberOfNodes'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
}
cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
return json.dumps({
"ModifyClusterResponse": {
"ModifyClusterResult": {
"Cluster": cluster.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster(self):
cluster_identifier = self._get_param("ClusterIdentifier")
cluster = self.redshift_backend.delete_cluster(cluster_identifier)
return json.dumps({
"DeleteClusterResponse": {
"DeleteClusterResult": {
"Cluster": cluster.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_subnet_group(self):
cluster_subnet_group_name = self._get_param('ClusterSubnetGroupName')
description = self._get_param('Description')
subnet_ids = self._get_multi_param('SubnetIds.member')
subnet_group = self.redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=cluster_subnet_group_name,
description=description,
subnet_ids=subnet_ids,
)
return json.dumps({
"CreateClusterSubnetGroupResponse": {
"CreateClusterSubnetGroupResult": {
"ClusterSubnetGroup": subnet_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_subnet_groups(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(subnet_identifier)
return json.dumps({
"DescribeClusterSubnetGroupsResponse": {
"DescribeClusterSubnetGroupsResult": {
"ClusterSubnetGroups": [subnet_group.to_json() for subnet_group in subnet_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_subnet_group(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
return json.dumps({
"DeleteClusterSubnetGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_security_group(self):
cluster_security_group_name = self._get_param('ClusterSecurityGroupName')
description = self._get_param('Description')
security_group = self.redshift_backend.create_cluster_security_group(
cluster_security_group_name=cluster_security_group_name,
description=description,
)
return json.dumps({
"CreateClusterSecurityGroupResponse": {
"CreateClusterSecurityGroupResult": {
"ClusterSecurityGroup": security_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_security_groups(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
security_groups = self.redshift_backend.describe_cluster_security_groups(cluster_security_group_name)
return json.dumps({
"DescribeClusterSecurityGroupsResponse": {
"DescribeClusterSecurityGroupsResult": {
"ClusterSecurityGroups": [security_group.to_json() for security_group in security_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_security_group(self):
security_group_identifier = self._get_param("ClusterSecurityGroupName")
self.redshift_backend.delete_cluster_security_group(security_group_identifier)
return json.dumps({
"DeleteClusterSecurityGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param('ParameterGroupName')
group_family = self._get_param('ParameterGroupFamily')
description = self._get_param('Description')
parameter_group = self.redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name,
group_family,
description,
)
return json.dumps({
"CreateClusterParameterGroupResponse": {
"CreateClusterParameterGroupResult": {
"ClusterParameterGroup": parameter_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_parameter_groups(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(cluster_parameter_group_name)
return json.dumps({
"DescribeClusterParameterGroupsResponse": {
"DescribeClusterParameterGroupsResult": {
"ParameterGroups": [parameter_group.to_json() for parameter_group in parameter_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
self.redshift_backend.delete_cluster_parameter_group(cluster_parameter_group_name)
return json.dumps({
"DeleteClusterParameterGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
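# Usage sketch showing how this response class is typically exercised (the
# boto call below reflects the era this module targets and should be treated
# as an assumption, not a guaranteed signature):
#
#   import boto.redshift
#   from moto import mock_redshift
#
#   @mock_redshift
#   def test_create_cluster():
#       conn = boto.redshift.connect_to_region("us-east-1")
#       conn.create_cluster("my-cluster", node_type="dw.hs1.xlarge",
#                           master_username="admin",
#                           master_user_password="Password1")
#       conn.describe_clusters()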
| |
''' Generative structure search (SearchSPN). '''
import numpy as np
import logging
from spnss.mathutil import argmin_fair
from fast import compute_weights_of
from history import NetHistory
import operators as ops
import nbc
import knobs
import cc
import deps
def compute_edges(net, data):
nodes = net.pot + net.variables
edges = {n:[] for n in nodes}
    for d in data:
net.set(d)
net.forward_log()
max_edges = net.backward_max_edges_v2_log()
for n, ci in max_edges.items():
edges[n].append( ci )
for n in edges.keys():
edges[n] = np.array(edges[n], dtype=np.int)
return edges
def compute_score_of(pn, edges):
w = compute_weights_of(pn, edges)
return np.log(w).sum()
def pn_select(scores, blacklist):
pnl = [pn for pn in scores.iterkeys() if pn not in blacklist]
if len(pnl) == 0:
return None # no suitable product node found
pnsl = [scores[pn] for pn in pnl]
pni = argmin_fair(pnsl)
return pnl[pni]
def chll_select(pn, edges, nmi_cache={}, threshold=0.1):
ctest = deps.dep_tester2(pn, edges, threshold, nmi_cache)
ccl = cc.components(ctest, range(len(pn.children)))
chll = [[pn.children[i] for i in c] for c in ccl]
chll = [chl for chl in chll if len(chl) >= 2] # ignore singleton components
return chll
def is_pn_ok(pn, edges):
if len(edges[pn]) < knobs.min_instances:
logging.debug(' product node has too few instances.')
return False
if len(pn.children) <= 1:
logging.debug(' product node has too few children.')
return False
return True
def is_chll_ok(pn, chll):
if len(chll) == 0 or len(chll) == len(pn.children):
logging.debug(' product node has only singleton child subsets.')
return False
return True
class SSData:
def __init__(self, net, data):
self.edges = compute_edges(net, data)
self.nmi_cache = {}
self.pn_scores = {pn:compute_score_of(pn, self.edges) for pn in net.pot if pn.is_prod()}
self.pn_blacklist = set()
def update_scores_of(self, pnl):
for pn in pnl:
self.pn_scores[pn] = compute_score_of(pn, self.edges)
def select_step(ssdata, threshold):
while True:
while True:
if len(ssdata.pn_blacklist) == len(ssdata.pn_scores): # give up if all product nodes have been blacklisted
return None, None
pn = pn_select(ssdata.pn_scores, ssdata.pn_blacklist)
if is_pn_ok(pn, ssdata.edges):
break
ssdata.pn_blacklist.add(pn)
chll = chll_select(pn, ssdata.edges, ssdata.nmi_cache, threshold)
if is_chll_ok(pn, chll):
break
ssdata.pn_blacklist.add(pn)
logging.info( '\tinst x ch: %d x %d' % (len(ssdata.edges[pn]), len(pn.children)) + \
'\tchll/total: %s / %d' % (str([len(chl) for chl in chll if len(chl) != 1]), len(pn.children)) )
return pn, chll
# SS stands for Structure Search
class SS:
def __init__(self, net, trn, vld, threshold):
assert trn.ndim == 2
self.net = net # IMPORTANT: we assume that 'net' has already been trained using 'trn'
self.ssdata = SSData(net, trn)
self.net_history = NetHistory(net, vld)
self.thresh = threshold
def print_stats(self, i=None, name=''):
if i is not None:
logging.warning('\t================ %d =============== %s' % (i, name))
msg = '\tsize: %d' % len(self.net.pot)
msg += '\tvld: %f' % self.net_history.vld_hist[-1]
msg += '\tthresh: %f' % self.thresh
logging.warning(msg)
def step(self):
''' Take a step in the search space of SPN graphs. '''
pn, chll = select_step(self.ssdata, self.thresh)
if (pn, chll) == (None, None):
return False
mol = [ops.MixOp(self.net, pn, chl) for chl in chll]
for mo in mol:
# cluster
            data = np.hstack([self.ssdata.edges[c][:, np.newaxis] for c in mo.chl])
            assert len(data) >= knobs.min_instances
nvals = [len(c.weights) if c.is_sum() else len(c.masses) for c in mo.chl]
qa, nc = nbc.kcluster(data, nvals)
#qa, nc = nbc.inc_hard_em(data, nvals)
# change the graph
mo.connect(qa.max()+1)
ops.compute_params(mo, qa, self.ssdata.edges, knobs.laplace_smooth)
ops.adjust_edges(mo, qa, self.ssdata.edges)
self.ssdata.update_scores_of( [n for n in mo.prod_nodes()] )
self.net_history.add_op_node_list(mol)
logging.info('\tncl: %s' % str([len(mo.pnl) for mo in mol]))
return True
def step_ahead(self, num_steps):
''' Take up to 'num_steps' steps; return the number actually taken. '''
taken = 0
for j in xrange(num_steps):
if not self.step():
logging.warning('\tran out of steps to take.')
break
taken += 1
self.net_history.save_vld()
return taken
def skip_search(self, num_steps):
''' This is the main structure search algorithm. '''
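# The search advances in strides of 'num_steps' graph edits; the validation
# score is checkpointed after each stride. Indices i0 < i1 < i2 into
# net_history.vh bracket the last three checkpoints, and the loop below stops
# once a stride fails to improve, rolling back to the best network seen.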
assert num_steps >= 1
nh = self.net_history
i0 = 0
i1 = i0 + self.step_ahead(num_steps)
i2 = i1 + self.step_ahead(num_steps)
if nh.vh[i1] <= nh.vh[i0]:
bni = nh.best_net_index(i0, i1)
nh.move_to(bni)
assert self.net == nh.net
return nh.net
MAX_STEPS = 100000000
for i in xrange(MAX_STEPS):
self.print_stats(i)
logging.info('\t%f' % nh.vh[-1])
if nh.vh[i2] <= nh.vh[i1]:
break
taken = self.step_ahead(num_steps)
nh.vh[i0] = None
i0 = i1
i1 = i2
i2 += taken
if i == MAX_STEPS-1:
raise Exception('skip_search() just took a HUGE number of steps; that is not expected.')
bni = nh.best_net_index(i0, i2)
nh.move_to(bni)
assert self.net == nh.net
return nh.net
# TODO: DONE! some kind of history with undo/redo capabilities
# TODO: MOSTLY DONE investigate using Naive-Bayes EM clustering instead of k-means
# TODO: MOSTLY DONE (doesn't seem to work as well). investigate using hamming clustering instead of k-means
# TODO: MOSTLY DONE investigate early-stopping or other criteria to end learning
# TODO: MOSTLY DONE (helps a bit; slows it down) investigate re-computing edges/weights every once in a while
def search_spn(net, trn, vld, t, skip):
salg = SS(net, trn, vld, t)
net = salg.skip_search(skip)
return net
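# A minimal usage sketch (assumptions: 'net' is an SPN whose parameters were
# already trained on 'trn', and trn/vld are 2-D numpy arrays of instances;
# the threshold and stride values below are illustrative):
#
#   best_net = search_spn(net, trn, vld, t=0.1, skip=5)
#
# search_spn() grows the graph via MixOps and returns the network with the
# best validation score found by skip_search().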
| |
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import posixpath
import shutil
import stat
import tarfile
from naclports import configuration, package, util, error
PAYLOAD_DIR = 'payload'
INSTALL_PREFIX = '/naclports-dummydir'
def MakeDirIfNeeded(filename):
dirname = os.path.dirname(filename)
if not os.path.isdir(dirname):
util.Makedirs(dirname)
def FilterOutExecutables(filenames, root):
"""Filter out ELF binaries in the bin directory.
We don't want NaCl executables installed in the toolchain's bin directory
since we add this to the PATH during the build process, and NaCl executables
can't be run on the host system (not without sel_ldr anyway).
"""
rtn = []
for name in filenames:
full_name = os.path.join(root, name)
if os.path.split(name)[0] == 'bin':
if not os.path.splitext(name)[1] and util.IsElfFile(full_name):
continue
rtn.append(name)
return rtn
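# Illustrative example (hypothetical file list): given
#   ['bin/minigzip', 'bin/zlib-config', 'lib/libz.a']
# an extensionless file under bin/ is dropped only if it is an ELF binary
# (here 'bin/minigzip'), while a text script such as 'bin/zlib-config' and
# anything outside bin/ are kept.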
def InstallFile(filename, old_root, new_root):
"""Install a single file by moving it into a new location.
Args:
filename: Relative name of file to install.
old_root: The current location of the file.
new_root: The new desired root for the file.
"""
oldname = os.path.join(old_root, filename)
util.LogVerbose('install: %s' % filename)
newname = os.path.join(new_root, filename)
dirname = os.path.dirname(newname)
if not os.path.isdir(dirname):
util.Makedirs(dirname)
os.rename(oldname, newname)
# When installing ELF binaries into the toolchain directories, remove
# the X bit so that they are not found when searching the PATH.
if util.IsElfFile(newname) or util.IsPexeFile(newname):
mode = os.stat(newname).st_mode
mode = mode & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
os.chmod(newname, mode)
def RelocateFile(filename, dest):
"""Perform in-place mutations on file contents to handle new location.
There are a few file types that have absolute pathnames embedded
and need to be modified in some way when being installed to
a particular location. For most file types this method does nothing.
"""
# Only relocate certain file types.
modify = False
# boost build scripts
# TODO(sbc): move this to the boost package metadata
if filename.startswith('build-1'):
modify = True
# pkg_config (.pc) files
if filename.startswith('lib/pkgconfig'):
modify = True
if filename.startswith('share/pkgconfig'):
modify = True
# <foo>-config scripts that live in usr/bin
if filename.startswith('bin') and filename.endswith('-config'):
modify = True
# libtool's .la files which can contain absolute paths to
# dependencies.
if filename.endswith('.la'):
modify = True
# headers can sometimes also contain absolute paths.
if filename.startswith('include/') and filename.endswith('.h'):
modify = True
filename = os.path.join(dest, filename)
if modify:
with open(filename) as f:
data = f.read()
mode = os.stat(filename).st_mode
os.chmod(filename, 0777)
# Open with 'w' so the file is truncated, in case the replacement text
# is shorter than the original.
with open(filename, 'w') as f:
f.write(data.replace(INSTALL_PREFIX, dest))
os.chmod(filename, mode)
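# Illustrative example (hypothetical dest path): a pkg-config file containing
#   prefix=/naclports-dummydir
# installed under dest='/some/sdk/toolchain/usr' is rewritten in place to
#   prefix=/some/sdk/toolchain/usr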
class BinaryPackage(package.Package):
"""Representation of binary package packge file.
This class is initialised with the filename of a binary package
and its attributes are set according the file name and contents.
Operations such as installation can be performed on the package.
"""
extra_keys = package.EXTRA_KEYS
def __init__(self, filename):
super(BinaryPackage, self).__init__()
self.filename = filename
self.info = filename
self.VerifyArchiveFormat()
info = self.GetPkgInfo()
self.ParseInfo(info)
self.config = configuration.Configuration(self.BUILD_ARCH,
self.BUILD_TOOLCHAIN,
self.BUILD_CONFIG == 'debug')
def VerifyArchiveFormat(self):
if not os.path.exists(self.filename):
raise error.Error('package archive not found: %s' % self.filename)
basename, extension = os.path.splitext(os.path.basename(self.filename))
basename = os.path.splitext(basename)[0]
if extension != '.bz2':
raise error.Error('invalid file extension: %s' % extension)
try:
with tarfile.open(self.filename) as tar:
if './pkg_info' not in tar.getnames():
raise error.PkgFormatError('package does not contain pkg_info file')
except tarfile.TarError as e:
raise error.PkgFormatError(e)
def IsInstallable(self):
"""Determine if a binary package can be installed in the
currently configured SDK.
Currently only packages built with the same SDK major version
are installable.
"""
return self.BUILD_SDK_VERSION == util.GetSDKVersion()
def GetPkgInfo(self):
"""Extract the contents of the pkg_info file from the binary package."""
with tarfile.open(self.filename) as tar:
return tar.extractfile('./pkg_info').read()
def Install(self, force):
"""Install binary package into toolchain directory."""
with util.InstallLock(self.config):
self._Install(force)
def _Install(self, force):
if self.TOOLCHAIN_INSTALL != '0':
self._InstallFiles(force)
self.WriteStamp()
def _InstallFiles(self, force):
dest = util.GetInstallRoot(self.config)
dest_tmp = os.path.join(dest, 'install_tmp')
if os.path.exists(dest_tmp):
shutil.rmtree(dest_tmp)
if self.IsAnyVersionInstalled():
raise error.Error('package already installed: %s' % self.InfoString())
self.LogStatus('Installing')
util.LogVerbose('installing from: %s' % self.filename)
util.Makedirs(dest_tmp)
names = []
try:
with tarfile.open(self.filename) as tar:
for info in tar:
if info.isdir():
continue
name = posixpath.normpath(info.name)
if name == 'pkg_info':
continue
if not name.startswith(PAYLOAD_DIR + '/'):
raise error.PkgFormatError('invalid file in package: %s' % name)
name = name[len(PAYLOAD_DIR) + 1:]
names.append(name)
if not force:
for name in names:
full_name = os.path.join(dest, name)
if os.path.exists(full_name):
raise error.Error('file already exists: %s' % full_name)
tar.extractall(dest_tmp)
payload_tree = os.path.join(dest_tmp, PAYLOAD_DIR)
names = FilterOutExecutables(names, payload_tree)
for name in names:
InstallFile(name, payload_tree, dest)
finally:
shutil.rmtree(dest_tmp)
for name in names:
RelocateFile(name, dest)
self.WriteFileList(names)
def WriteStamp(self):
"""Write stamp file containing pkg_info."""
filename = util.GetInstallStamp(self.NAME, self.config)
MakeDirIfNeeded(filename)
util.LogVerbose('stamp: %s' % filename)
pkg_info = self.GetPkgInfo()
with open(filename, 'w') as f:
f.write(pkg_info)
def WriteFileList(self, file_names):
"""Write the file list for this package."""
filename = self.GetListFile()
MakeDirIfNeeded(filename)
with open(filename, 'w') as f:
for name in file_names:
f.write(name + '\n')
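# A minimal usage sketch (the archive name below is hypothetical; assumes a
# configured naclports checkout so util.GetInstallRoot() and friends resolve):
#
#   pkg = BinaryPackage('out/packages/zlib_1.2.8_x86-64_newlib.tar.bz2')
#   if pkg.IsInstallable():
#     pkg.Install(force=False)
#
# Install() takes the per-configuration install lock, unpacks the payload into
# the toolchain root, and records a stamp plus the installed file list.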
| |
#
# Preferences.py -- Preferences plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
from ginga.misc import Widgets, ParamSet
from ginga import cmap, imap
from ginga import GingaPlugin
from ginga import AutoCuts, ColorDist
from ginga.util import wcsmod
from ginga.misc import Bunch
class Preferences(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
# superclass defines some variables for us, like logger
super(Preferences, self).__init__(fv, fitsimage)
self.chname = self.fv.get_channelName(self.fitsimage)
self.cmap_names = cmap.get_names()
self.imap_names = imap.get_names()
self.zoomalg_names = ('step', 'rate')
self.autocuts_cache = {}
self.gui_up = False
self.calg_names = ColorDist.get_dist_names()
self.autozoom_options = self.fitsimage.get_autozoom_options()
self.autocut_options = self.fitsimage.get_autocuts_options()
self.autocut_methods = self.fitsimage.get_autocut_methods()
self.t_ = self.fitsimage.get_settings()
self.t_.getSetting('autocuts').add_callback('set',
self.autocuts_changed_ext_cb)
self.t_.getSetting('autozoom').add_callback('set',
self.autozoom_changed_ext_cb)
for key in ['pan']:
self.t_.getSetting(key).add_callback('set',
self.pan_changed_ext_cb)
for key in ['scale']:
self.t_.getSetting(key).add_callback('set',
self.scale_changed_ext_cb)
self.t_.getSetting('zoom_algorithm').add_callback('set', self.set_zoomalg_ext_cb)
self.t_.getSetting('zoom_rate').add_callback('set', self.set_zoomrate_ext_cb)
for key in ['scale_x_base', 'scale_y_base']:
self.t_.getSetting(key).add_callback('set', self.scalebase_changed_ext_cb)
self.t_.getSetting('rot_deg').add_callback('set', self.set_rotate_ext_cb)
for name in ('flip_x', 'flip_y', 'swap_xy'):
self.t_.getSetting(name).add_callback('set', self.set_transform_ext_cb)
for name in ('autocut_method', 'autocut_params'):
self.t_.getSetting(name).add_callback('set', self.set_autocuts_ext_cb)
## for key in ['color_algorithm', 'color_hashsize', 'color_map',
## 'intensity_map']:
## self.t_.getSetting(key).add_callback('set', self.cmap_changed_ext_cb)
self.t_.setdefault('wcs_coords', 'icrs')
self.t_.setdefault('wcs_display', 'sexagesimal')
def build_gui(self, container):
top = Widgets.VBox()
top.set_border_width(4)
vbox, sw, orientation = Widgets.get_oriented_box(container)
self.orientation = orientation
#vbox.set_border_width(4)
vbox.set_spacing(2)
# COLOR DISTRIBUTION OPTIONS
fr = Widgets.Frame("Color Distribution")
captions = (('Algorithm:', 'label', 'Algorithm', 'combobox'),
#('Table Size:', 'label', 'Table Size', 'entry'),
('Dist Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.calg_choice = b.algorithm
#self.w.table_size = b.table_size
b.algorithm.set_tooltip("Choose a color distribution algorithm")
#b.table_size.set_tooltip("Set size of the distribution hash table")
b.dist_defaults.set_tooltip("Restore color distribution defaults")
b.dist_defaults.add_callback('activated',
lambda w: self.set_default_distmaps())
combobox = b.algorithm
options = []
index = 0
for name in self.calg_names:
options.append(name)
combobox.append_text(name)
index += 1
index = self.calg_names.index(self.t_.get('color_algorithm', "linear"))
combobox.set_index(index)
combobox.add_callback('activated', self.set_calg_cb)
## entry = b.table_size
## entry.set_text(str(self.t_.get('color_hashsize', 65535)))
## entry.add_callback('activated', self.set_tablesize_cb)
fr.set_widget(w)
vbox.add_widget(fr)
# COLOR MAPPING OPTIONS
fr = Widgets.Frame("Color Mapping")
captions = (('Colormap:', 'label', 'Colormap', 'combobox'),
('Intensity:', 'label', 'Intensity', 'combobox'),
('Color Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
self.w.cmap_choice = b.colormap
self.w.imap_choice = b.intensity
b.color_defaults.add_callback('activated',
lambda w: self.set_default_cmaps())
b.colormap.set_tooltip("Choose a color map for this image")
b.intensity.set_tooltip("Choose an intensity map for this image")
b.color_defaults.set_tooltip("Restore default color and intensity maps")
fr.set_widget(w)
vbox.add_widget(fr)
combobox = b.colormap
options = []
index = 0
for name in self.cmap_names:
options.append(name)
combobox.append_text(name)
index += 1
cmap_name = self.t_.get('color_map', "ramp")
try:
index = self.cmap_names.index(cmap_name)
except Exception:
index = self.cmap_names.index('ramp')
combobox.set_index(index)
combobox.add_callback('activated', self.set_cmap_cb)
combobox = b.intensity
options = []
index = 0
for name in self.imap_names:
options.append(name)
combobox.append_text(name)
index += 1
imap_name = self.t_.get('intensity_map', "ramp")
try:
index = self.imap_names.index(imap_name)
except Exception:
index = self.imap_names.index('ramp')
combobox.set_index(index)
combobox.add_callback('activated', self.set_imap_cb)
# AUTOCUTS OPTIONS
fr = Widgets.Frame("Auto Cuts")
vbox2 = Widgets.VBox()
fr.set_widget(vbox2)
captions = (('Auto Method:', 'label', 'Auto Method', 'combobox'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
# Setup auto cuts method choice
combobox = b.auto_method
index = 0
method = self.t_.get('autocut_method', "histogram")
for name in self.autocut_methods:
combobox.append_text(name)
index += 1
index = self.autocut_methods.index(method)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocut_method_cb)
b.auto_method.set_tooltip("Choose algorithm for auto levels")
vbox2.add_widget(w, stretch=0)
self.w.acvbox = Widgets.VBox()
vbox2.add_widget(self.w.acvbox, stretch=1)
vbox.add_widget(fr, stretch=0)
# TRANSFORM OPTIONS
fr = Widgets.Frame("Transform")
captions = (('Flip X', 'checkbutton', 'Flip Y', 'checkbutton',
'Swap XY', 'checkbutton'),
('Rotate:', 'label', 'Rotate', 'spinfloat'),
('Restore', 'button'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
for name in ('flip_x', 'flip_y', 'swap_xy'):
btn = b[name]
btn.set_state(self.t_.get(name, False))
btn.add_callback('activated', self.set_transforms_cb)
b.flip_x.set_tooltip("Flip the image around the X axis")
b.flip_y.set_tooltip("Flip the image around the Y axis")
b.swap_xy.set_tooltip("Swap the X and Y axes in the image")
b.rotate.set_tooltip("Rotate the image around the pan position")
b.restore.set_tooltip("Clear any transforms and center image")
b.restore.add_callback('activated', self.restore_cb)
b.rotate.set_limits(0.00, 359.99999999, incr_value=10.0)
b.rotate.set_value(0.00)
b.rotate.set_decimals(8)
b.rotate.add_callback('value-changed', self.rotate_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# WCS OPTIONS
fr = Widgets.Frame("WCS")
captions = (('WCS Coords:', 'label', 'WCS Coords', 'combobox'),
('WCS Display:', 'label', 'WCS Display', 'combobox'),
)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.wcs_coords.set_tooltip("Set WCS coordinate system")
b.wcs_display.set_tooltip("Set WCS display format")
# Setup WCS coords method choice
combobox = b.wcs_coords
index = 0
for name in wcsmod.coord_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_coords', "")
try:
index = wcsmod.coord_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
# Setup WCS display format method choice
combobox = b.wcs_display
index = 0
for name in wcsmod.display_types:
combobox.append_text(name)
index += 1
method = self.t_.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
combobox.set_index(index)
except ValueError:
pass
combobox.add_callback('activated', self.set_wcs_params_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# ZOOM OPTIONS
fr = Widgets.Frame("Zoom")
captions = (('Zoom Alg:', 'label', 'Zoom Alg', 'combobox'),
('Zoom Rate:', 'label', 'Zoom Rate', 'spinfloat'),
('Stretch XY:', 'label', 'Stretch XY', 'combobox'),
('Stretch Factor:', 'label', 'Stretch Factor', 'spinfloat'),
('Scale X:', 'label', 'Scale X', 'entry'),
('Scale Y:', 'label', 'Scale Y', 'entry'),
('Scale Min:', 'label', 'Scale Min', 'spinfloat'),
('Scale Max:', 'label', 'Scale Max', 'spinfloat'),
('Zoom Defaults', 'button'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
index = 0
for name in self.zoomalg_names:
b.zoom_alg.append_text(name.capitalize())
index += 1
zoomalg = self.t_.get('zoom_algorithm', "step")
index = self.zoomalg_names.index(zoomalg)
b.zoom_alg.set_index(index)
b.zoom_alg.set_tooltip("Choose Zoom algorithm")
b.zoom_alg.add_callback('activated', self.set_zoomalg_cb)
index = 0
for name in ('X', 'Y'):
b.stretch_xy.append_text(name)
index += 1
b.stretch_xy.set_index(0)
b.stretch_xy.set_tooltip("Stretch pixels in X or Y")
b.stretch_xy.add_callback('activated', self.set_stretch_cb)
b.stretch_factor.set_limits(1.0, 10.0, incr_value=0.10)
b.stretch_factor.set_value(1.0)
b.stretch_factor.set_decimals(8)
b.stretch_factor.add_callback('value-changed', self.set_stretch_cb)
b.stretch_factor.set_tooltip("Length of pixel relative to 1 on other side")
b.stretch_factor.set_enabled(zoomalg != 'step')
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
b.zoom_rate.set_limits(1.1, 3.0, incr_value=0.1)
b.zoom_rate.set_value(zoomrate)
b.zoom_rate.set_decimals(8)
b.zoom_rate.set_enabled(zoomalg != 'step')
b.zoom_rate.set_tooltip("Step rate of increase/decrease per zoom level")
b.zoom_rate.add_callback('value-changed', self.set_zoomrate_cb)
b.zoom_defaults.add_callback('activated', self.set_zoom_defaults_cb)
scale_x, scale_y = self.fitsimage.get_scale_xy()
b.scale_x.set_tooltip("Set the scale in X axis")
b.scale_x.set_text(str(scale_x))
b.scale_x.add_callback('activated', self.set_scale_cb)
b.scale_y.set_tooltip("Set the scale in Y axis")
b.scale_y.set_text(str(scale_y))
b.scale_y.add_callback('activated', self.set_scale_cb)
scale_min, scale_max = self.t_['scale_min'], self.t_['scale_max']
b.scale_min.set_limits(0.00001, 1.0, incr_value=1.0)
b.scale_min.set_value(scale_min)
b.scale_min.set_decimals(8)
b.scale_min.add_callback('value-changed', self.set_scale_limit_cb)
b.scale_min.set_tooltip("Set the minimum allowed scale in any axis")
b.scale_max.set_limits(1.0, 10000.0, incr_value=1.0)
b.scale_max.set_value(scale_max)
b.scale_max.set_decimals(8)
b.scale_max.add_callback('value-changed', self.set_scale_limit_cb)
b.scale_max.set_tooltip("Set the maximum allowed scale in any axis")
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
# PAN OPTIONS
fr = Widgets.Frame("Panning")
captions = (('Pan X:', 'label', 'Pan X', 'entry'),
('Pan Y:', 'label', 'Pan Y', 'entry'),
('Center Image', 'button'),
('Mark Center', 'checkbutton'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
pan_x, pan_y = self.fitsimage.get_pan()
b.pan_x.set_tooltip("Set the pan position in X axis")
b.pan_x.set_text(str(pan_x+0.5))
b.pan_x.add_callback('activated', self.set_pan_cb)
b.pan_y.set_tooltip("Set the pan position in Y axis")
b.pan_y.set_text(str(pan_y+0.5))
b.pan_y.add_callback('activated', self.set_pan_cb)
b.center_image.set_tooltip("Set the pan position to center of the image")
b.center_image.add_callback('activated', self.center_image_cb)
b.mark_center.set_tooltip("Mark the center (pan locator)")
b.mark_center.add_callback('activated', self.set_misc_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
fr = Widgets.Frame("New Images")
captions = (('Cut New:', 'label', 'Cut New', 'combobox'),
('Zoom New:', 'label', 'Zoom New', 'combobox'),
('Center New', 'checkbutton', 'Follow New', 'checkbutton'),
('Raise New', 'checkbutton', 'Create thumbnail', 'checkbutton'),)
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
combobox = b.cut_new
index = 0
for name in self.autocut_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autocuts', "off")
index = self.autocut_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autocuts_cb)
b.cut_new.set_tooltip("Automatically set cut levels for new images")
combobox = b.zoom_new
index = 0
for name in self.autozoom_options:
combobox.append_text(name)
index += 1
option = self.t_.get('autozoom', "off")
index = self.autozoom_options.index(option)
combobox.set_index(index)
combobox.add_callback('activated', self.set_autozoom_cb)
b.zoom_new.set_tooltip("Automatically fit new images to window")
b.center_new.set_tooltip("Automatically center new images")
b.follow_new.set_tooltip("View new images as they arrive")
b.raise_new.set_tooltip("Raise and focus tab for new images")
b.create_thumbnail.set_tooltip("Create thumbnail for new images")
self.w.center_new.set_state(True)
self.w.center_new.add_callback('activated', self.set_chprefs_cb)
self.w.follow_new.set_state(True)
self.w.follow_new.add_callback('activated', self.set_chprefs_cb)
self.w.raise_new.set_state(True)
self.w.raise_new.add_callback('activated', self.set_chprefs_cb)
self.w.create_thumbnail.set_state(True)
self.w.create_thumbnail.add_callback('activated', self.set_chprefs_cb)
fr.set_widget(w)
vbox.add_widget(fr, stretch=0)
top.add_widget(sw, stretch=1)
btns = Widgets.HBox()
btns.set_spacing(4)
btns.set_border_width(4)
btn = Widgets.Button("Close")
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn)
btn = Widgets.Button("Save Settings")
btn.add_callback('activated', lambda w: self.save_preferences())
btns.add_widget(btn)
btns.add_widget(Widgets.Label(''), stretch=1)
top.add_widget(btns, stretch=0)
container.add_widget(top, stretch=1)
self.gui_up = True
def set_cmap_cb(self, w, index):
"""This callback is invoked when the user selects a new color
map from the preferences pane."""
name = cmap.get_names()[index]
self.set_cmap_byname(name)
self.t_.set(color_map=name)
def set_cmap_byname(self, name, redraw=True):
# Get colormap
try:
cm = cmap.get_cmap(name)
except KeyError:
raise ValueError("No such color map name: '%s'" % (name))
rgbmap = self.fitsimage.get_rgbmap()
rgbmap.set_cmap(cm)
def set_imap_cb(self, w, index):
"""This callback is invoked when the user selects a new intensity
map from the preferences pane."""
name = imap.get_names()[index]
self.set_imap_byname(name)
self.t_.set(intensity_map=name)
def set_imap_byname(self, name, redraw=True):
# Get intensity map
try:
im = imap.get_imap(name)
except KeyError:
raise ValueError("No such intensity map name: '%s'" % (name))
rgbmap = self.fitsimage.get_rgbmap()
rgbmap.set_imap(im)
def set_calg_cb(self, w, index):
"""This callback is invoked when the user selects a new color
hashing algorithm from the preferences pane."""
#index = w.get_index()
name = self.calg_names[index]
self.set_calg_byname(name)
def set_tablesize_cb(self, w):
value = int(w.get_text())
rgbmap = self.fitsimage.get_rgbmap()
rgbmap.set_hash_size(value)
self.t_.set(color_hashsize=value)
def set_calg_byname(self, name, redraw=True):
# Get color mapping algorithm
rgbmap = self.fitsimage.get_rgbmap()
try:
rgbmap.set_hash_algorithm(name)
except KeyError:
raise ValueError("No such color algorithm name: '%s'" % (name))
# Doesn't this force a redraw? Following redraw should be unnecessary.
self.t_.set(color_algorithm=name)
if redraw:
self.fitsimage.redraw(whence=2)
def set_default_cmaps(self):
cmap_name = "ramp"
imap_name = "ramp"
index = self.cmap_names.index(cmap_name)
self.w.cmap_choice.set_index(index)
index = self.imap_names.index(imap_name)
self.w.imap_choice.set_index(index)
self.set_cmap_byname(cmap_name)
self.t_.set(color_map=cmap_name)
self.set_imap_byname(imap_name)
self.t_.set(intensity_map=imap_name)
def set_default_distmaps(self):
name = 'linear'
index = self.calg_names.index(name)
self.w.calg_choice.set_index(index)
self.set_calg_byname(name)
self.t_.set(color_algorithm=name)
hashsize = 65535
self.t_.set(color_hashsize=hashsize)
## self.w.table_size.set_text(str(hashsize))
rgbmap = self.fitsimage.get_rgbmap()
rgbmap.set_hash_size(hashsize)
def set_zoomrate_cb(self, w, rate):
self.t_.set(zoom_rate=rate)
def set_zoomrate_ext_cb(self, setting, value):
if not self.gui_up:
return
self.w.zoom_rate.set_value(value)
def set_zoomalg_cb(self, w, idx):
self.t_.set(zoom_algorithm=self.zoomalg_names[idx])
def set_zoomalg_ext_cb(self, setting, value):
if not self.gui_up:
return
if value == 'step':
self.w.zoom_alg.set_index(0)
self.w.zoom_rate.set_enabled(False)
self.w.stretch_factor.set_enabled(False)
else:
self.w.zoom_alg.set_index(1)
self.w.zoom_rate.set_enabled(True)
self.w.stretch_factor.set_enabled(True)
def scalebase_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
scale_x_base, scale_y_base = self.fitsimage.get_scale_base_xy()
ratio = float(scale_x_base) / float(scale_y_base)
if ratio < 1.0:
# Y is stretched
idx = 1
ratio = 1.0 / ratio
elif ratio > 1.0:
# X is stretched
idx = 0
else:
idx = self.w.stretch_xy.get_index()
# Update stretch controls to reflect actual scale
self.w.stretch_xy.set_index(idx)
self.w.stretch_factor.set_value(ratio)
def set_zoom_defaults_cb(self, w):
rate = math.sqrt(2.0)
self.w.stretch_factor.set_value(1.0)
self.t_.set(zoom_algorithm='step', zoom_rate=rate,
scale_x_base=1.0, scale_y_base=1.0)
def set_stretch_cb(self, *args):
axis = self.w.stretch_xy.get_index()
value = self.w.stretch_factor.get_value()
if axis == 0:
self.t_.set(scale_x_base=value, scale_y_base=1.0)
else:
self.t_.set(scale_x_base=1.0, scale_y_base=value)
def pan_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
pan_x, pan_y = value
fits_x, fits_y = pan_x + 0.5, pan_y + 0.5
self.w.pan_x.set_text(str(fits_x))
self.w.pan_y.set_text(str(fits_y))
def set_scale_cb(self, w):
scale_x = float(self.w.scale_x.get_text())
scale_y = float(self.w.scale_y.get_text())
self.fitsimage.scale_to(scale_x, scale_y)
def scale_changed_ext_cb(self, setting, value):
if not self.gui_up:
return
scale_x, scale_y = value
self.w.scale_x.set_text(str(scale_x))
self.w.scale_y.set_text(str(scale_y))
def set_scale_limit_cb(self, w, val):
scale_min = float(self.w.scale_min.get_value())
scale_max = float(self.w.scale_max.get_value())
self.t_.set(scale_min=scale_min, scale_max=scale_max)
def set_autozoom_cb(self, w, idx):
option = self.autozoom_options[idx]
self.fitsimage.enable_autozoom(option)
self.t_.set(autozoom=option)
def autozoom_changed_ext_cb(self, setting, option):
if not self.gui_up:
return
index = self.autozoom_options.index(option)
self.w.zoom_new.set_index(index)
def config_autocut_params(self, method):
index = self.autocut_methods.index(method)
self.w.auto_method.set_index(index)
# remove old params
self.w.acvbox.remove_all()
# Create new autocuts object of the right kind
ac = AutoCuts.get_autocuts(method)(self.logger)
# Build up a set of control widgets for the autocuts
# algorithm tweakable parameters
paramlst = ac.get_params_metadata()
params = self.autocuts_cache.setdefault(method, {})
self.ac_params = ParamSet.ParamSet(self.logger, params)
w = self.ac_params.build_params(paramlst,
orientation=self.orientation)
self.ac_params.add_callback('changed', self.autocut_params_changed_cb)
self.w.acvbox.add_widget(w, stretch=1)
def set_autocuts_ext_cb(self, setting, value):
if not self.gui_up:
return
if setting.name == 'autocut_method':
self.config_autocut_params(value)
elif setting.name == 'autocut_params':
params = dict(value)
self.ac_params.params.update(params)
self.ac_params.sync_params()
def set_autocut_method_cb(self, w, idx):
#idx = self.w.auto_method.get_index()
method = self.autocut_methods[idx]
self.config_autocut_params(method)
params = self.ac_params.get_params()
params = list(params.items())
self.t_.set(autocut_method=method, autocut_params=params)
def autocut_params_changed_cb(self, paramObj, params):
params = list(params.items())
self.t_.set(autocut_params=params)
def set_autocuts_cb(self, w, index):
option = self.autocut_options[index]
self.fitsimage.enable_autocuts(option)
self.t_.set(autocuts=option)
def autocuts_changed_ext_cb(self, setting, option):
self.logger.debug("autocuts changed to %s" % option)
index = self.autocut_options.index(option)
if self.gui_up:
self.w.cut_new.set_index(index)
def set_transforms_cb(self, *args):
flip_x = self.w.flip_x.get_state()
flip_y = self.w.flip_y.get_state()
swap_xy = self.w.swap_xy.get_state()
self.t_.set(flip_x=flip_x, flip_y=flip_y, swap_xy=swap_xy)
return True
def set_transform_ext_cb(self, setting, value):
if not self.gui_up:
return
flip_x, flip_y, swap_xy = \
self.t_['flip_x'], self.t_['flip_y'], self.t_['swap_xy']
self.w.flip_x.set_state(flip_x)
self.w.flip_y.set_state(flip_y)
self.w.swap_xy.set_state(swap_xy)
def rotate_cb(self, w, deg):
#deg = self.w.rotate.get_value()
self.t_.set(rot_deg=deg)
return True
def set_rotate_ext_cb(self, setting, value):
if not self.gui_up:
return
self.w.rotate.set_value(value)
return True
def center_image_cb(self, *args):
self.fitsimage.center_image()
return True
def set_pan_cb(self, *args):
pan_x = float(self.w.pan_x.get_text()) - 0.5
pan_y = float(self.w.pan_y.get_text()) - 0.5
self.fitsimage.set_pan(pan_x, pan_y)
return True
def restore_cb(self, *args):
self.t_.set(flip_x=False, flip_y=False, swap_xy=False,
rot_deg=0.0)
self.fitsimage.center_image()
return True
def set_misc_cb(self, *args):
markc = (self.w.mark_center.get_state() != 0)
self.t_.set(show_pan_position=markc)
self.fitsimage.show_pan_mark(markc)
return True
def set_chprefs_cb(self, *args):
autocenter = (self.w.center_new.get_state() != 0)
switchnew = (self.w.follow_new.get_state() != 0)
raisenew = (self.w.raise_new.get_state() != 0)
genthumb = (self.w.create_thumbnail.get_state() != 0)
self.t_.set(switchnew=switchnew, raisenew=raisenew,
autocenter=autocenter, genthumb=genthumb)
def set_wcs_params_cb(self, *args):
idx = self.w.wcs_coords.get_index()
try:
ctype = wcsmod.coord_types[idx]
except IndexError:
ctype = 'icrs'
idx = self.w.wcs_display.get_index()
dtype = wcsmod.display_types[idx]
self.t_.set(wcs_coords=ctype, wcs_display=dtype)
def preferences_to_controls(self):
prefs = self.t_
# color map
rgbmap = self.fitsimage.get_rgbmap()
cm = rgbmap.get_cmap()
try:
index = self.cmap_names.index(cm.name)
except ValueError:
# may be a custom color map installed
index = 0
self.w.cmap_choice.set_index(index)
# color dist algorithm
calg = rgbmap.get_hash_algorithm()
index = self.calg_names.index(calg)
self.w.calg_choice.set_index(index)
## size = rgbmap.get_hash_size()
## self.w.table_size.set_text(str(size))
# intensity map
im = rgbmap.get_imap()
try:
index = self.imap_names.index(im.name)
except ValueError:
# may be a custom intensity map installed
index = 0
self.w.imap_choice.set_index(index)
# TODO: this is a HACK to get around Qt's callbacks
# on setting widget values--need a way to disable callbacks
# for direct setting
# zoom settings
zoomalg = prefs.get('zoom_algorithm', "step")
index = self.zoomalg_names.index(zoomalg)
self.w.zoom_alg.set_index(index)
zoomrate = self.t_.get('zoom_rate', math.sqrt(2.0))
self.w.zoom_rate.set_value(zoomrate)
self.w.zoom_rate.set_enabled(zoomalg!='step')
self.w.stretch_factor.set_enabled(zoomalg!='step')
self.scalebase_changed_ext_cb(prefs, None)
scale_x, scale_y = self.fitsimage.get_scale_xy()
self.w.scale_x.set_text(str(scale_x))
self.w.scale_y.set_text(str(scale_y))
scale_min = prefs.get('scale_min', 0.00001)
self.w.scale_min.set_value(scale_min)
scale_max = prefs.get('scale_max', 10000.0)
self.w.scale_max.set_value(scale_max)
# panning settings
pan_x, pan_y = self.fitsimage.get_pan()
self.w.pan_x.set_text(str(pan_x+0.5))
self.w.pan_y.set_text(str(pan_y+0.5))
self.w.mark_center.set_state(prefs.get('show_pan_position', False))
# transform settings
self.w.flip_x.set_state(prefs.get('flip_x', False))
self.w.flip_y.set_state(prefs.get('flip_y', False))
self.w.swap_xy.set_state(prefs.get('swap_xy', False))
self.w.rotate.set_value(prefs.get('rot_deg', 0.00))
# auto cuts settings
autocuts = prefs.get('autocuts', 'off')
index = self.autocut_options.index(autocuts)
self.w.cut_new.set_index(index)
autocut_method = prefs.get('autocut_method', None)
if autocut_method is None:
autocut_method = 'histogram'
else:
params = prefs.get('autocut_params', {})
p = self.autocuts_cache.setdefault(autocut_method, {})
p.update(params)
self.config_autocut_params(autocut_method)
# auto zoom settings
auto_zoom = prefs.get('autozoom', 'off')
index = self.autozoom_options.index(auto_zoom)
self.w.zoom_new.set_index(index)
# wcs settings
method = prefs.get('wcs_coords', "icrs")
try:
index = wcsmod.coord_types.index(method)
self.w.wcs_coords.set_index(index)
except ValueError:
pass
method = prefs.get('wcs_display', "sexagesimal")
try:
index = wcsmod.display_types.index(method)
self.w.wcs_display.set_index(index)
except ValueError:
pass
# misc settings
prefs.setdefault('autocenter', False)
self.w.center_new.set_state(prefs['autocenter'])
prefs.setdefault('switchnew', True)
self.w.follow_new.set_state(prefs['switchnew'])
prefs.setdefault('raisenew', True)
self.w.raise_new.set_state(prefs['raisenew'])
prefs.setdefault('genthumb', True)
self.w.create_thumbnail.set_state(prefs['genthumb'])
def save_preferences(self):
self.t_.save()
def close(self):
self.fv.stop_local_plugin(self.chname, str(self))
return True
def start(self):
self.preferences_to_controls()
def pause(self):
pass
def resume(self):
pass
def stop(self):
self.gui_up = False
def redo(self):
pass
def __str__(self):
return 'preferences'
#END
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.threadpool}
"""
import pickle, time, weakref, gc, threading
from twisted.trial import unittest
from twisted.python import threadpool, threadable, failure, context
from twisted.internet import reactor
from twisted.internet.defer import Deferred
#
# See the end of this module for the remainder of the imports.
#
class Synchronization(object):
failures = 0
def __init__(self, N, waiting):
self.N = N
self.waiting = waiting
self.lock = threading.Lock()
self.runs = []
def run(self):
# This is the testy part: this is supposed to be invoked
# serially from multiple threads. If that is actually the
# case, we will never fail to acquire this lock. If it is
# *not* the case, we might get here while someone else is
# holding the lock.
if self.lock.acquire(False):
if not len(self.runs) % 5:
time.sleep(0.0002) # Constant selected based on
# empirical data to maximize the
# chance of a quick failure if this
# code is broken.
self.lock.release()
else:
self.failures += 1
# This is just the only way I can think of to wake up the test
# method. It doesn't actually have anything to do with the
# test.
self.lock.acquire()
self.runs.append(None)
if len(self.runs) == self.N:
self.waiting.release()
self.lock.release()
synchronized = ["run"]
threadable.synchronize(Synchronization)
class ThreadPoolTestCase(unittest.TestCase):
"""
Test threadpools.
"""
def _waitForLock(self, lock):
for i in xrange(1000000):
if lock.acquire(False):
break
time.sleep(1e-5)
else:
self.fail("A long time passed without succeeding")
def test_attributes(self):
"""
L{ThreadPool.min} and L{ThreadPool.max} are set to the values passed to
L{ThreadPool.__init__}.
"""
pool = threadpool.ThreadPool(12, 22)
self.assertEqual(pool.min, 12)
self.assertEqual(pool.max, 22)
def test_start(self):
"""
L{ThreadPool.start} creates the minimum number of threads specified.
"""
pool = threadpool.ThreadPool(0, 5)
pool.start()
self.addCleanup(pool.stop)
self.assertEqual(len(pool.threads), 0)
pool = threadpool.ThreadPool(3, 10)
self.assertEqual(len(pool.threads), 0)
pool.start()
self.addCleanup(pool.stop)
self.assertEqual(len(pool.threads), 3)
def test_threadCreationArguments(self):
"""
Test that creating threads in the threadpool with application-level
objects as arguments doesn't result in those objects never being
freed because the thread maintains a reference to them for as long
as it exists.
"""
tp = threadpool.ThreadPool(0, 1)
tp.start()
self.addCleanup(tp.stop)
# Sanity check - no threads should have been started yet.
self.assertEqual(tp.threads, [])
# Here's our function
def worker(arg):
pass
# weakref needs an object subclass
class Dumb(object):
pass
# And here's the unique object
unique = Dumb()
workerRef = weakref.ref(worker)
uniqueRef = weakref.ref(unique)
# Put some work in
tp.callInThread(worker, unique)
# Add an event to wait completion
event = threading.Event()
tp.callInThread(event.set)
event.wait(self.getTimeout())
del worker
del unique
gc.collect()
self.assertEqual(uniqueRef(), None)
self.assertEqual(workerRef(), None)
def test_threadCreationArgumentsCallInThreadWithCallback(self):
"""
As C{test_threadCreationArguments} above, but for
callInThreadWithCallback.
"""
tp = threadpool.ThreadPool(0, 1)
tp.start()
self.addCleanup(tp.stop)
# Sanity check - no threads should have been started yet.
self.assertEqual(tp.threads, [])
# this holds references obtained in onResult
refdict = {} # name -> ref value
onResultWait = threading.Event()
onResultDone = threading.Event()
resultRef = []
# result callback
def onResult(success, result):
onResultWait.wait(self.getTimeout())
refdict['workerRef'] = workerRef()
refdict['uniqueRef'] = uniqueRef()
onResultDone.set()
resultRef.append(weakref.ref(result))
# Here's our function
def worker(arg, test):
return Dumb()
# weakref needs an object subclass
class Dumb(object):
pass
# And here's the unique object
unique = Dumb()
onResultRef = weakref.ref(onResult)
workerRef = weakref.ref(worker)
uniqueRef = weakref.ref(unique)
# Put some work in
tp.callInThreadWithCallback(onResult, worker, unique, test=unique)
del worker
del unique
gc.collect()
# let onResult collect the refs
onResultWait.set()
# wait for onResult
onResultDone.wait(self.getTimeout())
self.assertEqual(uniqueRef(), None)
self.assertEqual(workerRef(), None)
# XXX There's a race right here - has onResult in the worker thread
# returned and the locals in _worker holding it and the result been
# deleted yet?
del onResult
gc.collect()
self.assertEqual(onResultRef(), None)
self.assertEqual(resultRef[0](), None)
def test_persistence(self):
"""
Threadpools can be pickled and unpickled, which should preserve the
number of threads and other parameters.
"""
pool = threadpool.ThreadPool(7, 20)
self.assertEqual(pool.min, 7)
self.assertEqual(pool.max, 20)
# check that unpickled threadpool has same number of threads
copy = pickle.loads(pickle.dumps(pool))
self.assertEqual(copy.min, 7)
self.assertEqual(copy.max, 20)
def _threadpoolTest(self, method):
"""
Test synchronization of calls made with C{method}, which should be
one of the mechanisms of the threadpool to execute work in threads.
"""
# This test conflates two concerns: it exercises both the
# callInThread()/dispatch() behavior of the ThreadPool as well
# as the serialization behavior of threadable.synchronize(). It
# would probably make more sense as two much simpler tests.
N = 10
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
waiting = threading.Lock()
waiting.acquire()
actor = Synchronization(N, waiting)
for i in xrange(N):
method(tp, actor)
self._waitForLock(waiting)
self.failIf(actor.failures, "run() re-entered %d times" %
(actor.failures,))
def test_callInThread(self):
"""
Call C{_threadpoolTest} with C{callInThread}.
"""
return self._threadpoolTest(
lambda tp, actor: tp.callInThread(actor.run))
def test_callInThreadException(self):
"""
L{ThreadPool.callInThread} logs exceptions raised by the callable it
is passed.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(raiseError)
tp.start()
tp.stop()
errors = self.flushLoggedErrors(NewError)
self.assertEqual(len(errors), 1)
def test_callInThreadWithCallback(self):
"""
L{ThreadPool.callInThreadWithCallback} calls C{onResult} with a
two-tuple of C{(True, result)} where C{result} is the value returned
by the callable supplied.
"""
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
waiter.release()
results.append(success)
results.append(result)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, lambda : "test")
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
self.assertTrue(results[0])
self.assertEqual(results[1], "test")
def test_callInThreadWithCallbackExceptionInCallback(self):
"""
L{ThreadPool.callInThreadWithCallback} calls C{onResult} with a
two-tuple of C{(False, failure)} where C{failure} represents the
exception raised by the callable supplied.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
waiter.release()
results.append(success)
results.append(result)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, raiseError)
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
self.assertFalse(results[0])
self.assertTrue(isinstance(results[1], failure.Failure))
self.assertTrue(issubclass(results[1].type, NewError))
def test_callInThreadWithCallbackExceptionInOnResult(self):
"""
L{ThreadPool.callInThreadWithCallback} logs the exception raised by
C{onResult}.
"""
class NewError(Exception):
pass
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
results.append(success)
results.append(result)
raise NewError()
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, lambda : None)
tp.callInThread(waiter.release)
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
errors = self.flushLoggedErrors(NewError)
self.assertEqual(len(errors), 1)
self.assertTrue(results[0])
self.assertEqual(results[1], None)
def test_callbackThread(self):
"""
L{ThreadPool.callInThreadWithCallback} calls the function it is
given and the C{onResult} callback in the same thread.
"""
threadIds = []
import thread
event = threading.Event()
def onResult(success, result):
threadIds.append(thread.get_ident())
event.set()
def func():
threadIds.append(thread.get_ident())
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, func)
tp.start()
self.addCleanup(tp.stop)
event.wait(self.getTimeout())
self.assertEqual(len(threadIds), 2)
self.assertEqual(threadIds[0], threadIds[1])
def test_callbackContext(self):
"""
The context L{ThreadPool.callInThreadWithCallback} is invoked in is
shared by the context the callable and C{onResult} callback are
invoked in.
"""
myctx = context.theContextTracker.currentContext().contexts[-1]
myctx['testing'] = 'this must be present'
contexts = []
event = threading.Event()
def onResult(success, result):
ctx = context.theContextTracker.currentContext().contexts[-1]
contexts.append(ctx)
event.set()
def func():
ctx = context.theContextTracker.currentContext().contexts[-1]
contexts.append(ctx)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, func)
tp.start()
self.addCleanup(tp.stop)
event.wait(self.getTimeout())
self.assertEqual(len(contexts), 2)
self.assertEqual(myctx, contexts[0])
self.assertEqual(myctx, contexts[1])
def test_existingWork(self):
"""
Work added to the threadpool before its start should be executed once
the threadpool is started: this is ensured by trying to release a lock
previously acquired.
"""
waiter = threading.Lock()
waiter.acquire()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(waiter.release) # before start()
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
class RaceConditionTestCase(unittest.TestCase):
def setUp(self):
self.event = threading.Event()
self.threadpool = threadpool.ThreadPool(0, 10)
self.threadpool.start()
def tearDown(self):
del self.event
self.threadpool.stop()
del self.threadpool
def test_synchronization(self):
"""
Test a race condition: ensure that actions run in the pool synchronize
with actions run in the main thread.
"""
timeout = self.getTimeout()
self.threadpool.callInThread(self.event.set)
self.event.wait(timeout)
self.event.clear()
for i in range(3):
self.threadpool.callInThread(self.event.wait)
self.threadpool.callInThread(self.event.set)
self.event.wait(timeout)
if not self.event.isSet():
self.event.set()
self.fail("Actions not synchronized")
def test_singleThread(self):
"""
The submission of a new job to a thread pool in response to the
C{onResult} callback does not cause a new thread to be added to the
thread pool.
This requires the thread which calls C{onResult} to have first
marked itself as available so that, when the new job is queued, that
thread may be considered to run it. This is desirable so that when
only N jobs are ever being executed in the thread pool at once, only
N threads will ever be created.
"""
# Ensure no threads running
self.assertEqual(self.threadpool.workers, 0)
loopDeferred = Deferred()
def onResult(success, counter):
reactor.callFromThread(submit, counter)
def submit(counter):
if counter:
self.threadpool.callInThreadWithCallback(
onResult, lambda: counter - 1)
else:
loopDeferred.callback(None)
def cbLoop(ignored):
# Ensure there is only one thread running.
self.assertEqual(self.threadpool.workers, 1)
loopDeferred.addCallback(cbLoop)
submit(10)
return loopDeferred
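# A minimal usage sketch of the pool APIs exercised above (assumes Twisted is
# installed; the names below are illustrative):
#
#   from twisted.python import threadpool
#
#   def work():
#       return 42
#
#   def onResult(success, result):
#       print success, result   # True 42 when work() succeeds
#
#   tp = threadpool.ThreadPool(minthreads=0, maxthreads=2)
#   tp.start()
#   tp.callInThreadWithCallback(onResult, work)
#   tp.stop()   # joins the worker threads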
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage to BigQuery operator."""
import json
from typing import TYPE_CHECKING, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToBigQueryOperator(BaseOperator):
"""
Loads files from Google Cloud Storage into BigQuery.
The schema to be used for the BigQuery table may be specified in one of
two ways. You may either directly pass the schema fields in, or you may
point the operator to a Google Cloud Storage object name. The object in
Google Cloud Storage must be a JSON file with the schema fields in it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToBigQueryOperator`
:param bucket: The bucket to load from. (templated)
:type bucket: str
:param source_objects: String or List of Google Cloud Storage URIs to load from. (templated)
If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
:type source_objects: str, list[str]
:param destination_project_dataset_table: The dotted
``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to load data into.
If ``<project>`` is not included, project will be the project defined in
the connection json. (templated)
:type destination_project_dataset_table: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Should not be set when source_format is 'DATASTORE_BACKUP'.
Parameter must be defined if 'schema_object' is null and autodetect is False.
:type schema_fields: list
:param schema_object: If set, a GCS object path pointing to a .json file that
contains the schema for the table. (templated)
Parameter must be defined if 'schema_fields' is null and autodetect is False.
:type schema_object: str
:param source_format: File format to export.
:type source_format: str
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result. Only applicable to CSV, ignored
for other formats.
:type allow_jagged_rows: bool
:param encoding: The character encoding of the data. See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).csvOptions.encoding
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
:param max_id_key: If set, the name of a column in the BigQuery table
that's to be loaded. This will be used to select the MAX value from
BigQuery after the load occurs. The results will be returned by the
execute() command, which in turn gets stored in XCom for future
operators to use. This can be helpful with incremental loads--during
future executions, you can pick up from the max ID.
:type max_id_key: str
:param bigquery_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the BigQuery service.
:type bigquery_conn_id: str
:param google_cloud_storage_conn_id: (Optional) The connection ID used to connect to Google Cloud
and interact with the Google Cloud Storage service.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: list
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param external_table: Flag to specify if the destination table should be
a BigQuery external table. Default Value is False.
:type external_table: bool
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
Note that 'field' is not available in concurrency with
dataset.table$partition.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. BigQuery supports clustering for both partitioned and
non-partitioned tables. The order of columns given determines the sort order.
Not applicable for external tables.
:type cluster_fields: list[str]
:param autodetect: [Optional] Indicates if we should automatically infer the
options and schema for CSV and JSON sources. (Default: ``True``).
Parameter must be set to True if 'schema_fields' and 'schema_object' are undefined.
It is suggested to set this to True if the table is created outside of Airflow.
:type autodetect: bool
:param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
**Example**: ::
encryption_configuration = {
"kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
}
:type encryption_configuration: dict
:param location: [Optional] The geographic location of the job. Required except for US and EU.
See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:param labels: [Optional] Labels for the BigQuery table.
:type labels: dict
:param description: [Optional] Description for the BigQuery table.
:type description: str
"""
template_fields: Sequence[str] = (
'bucket',
'source_objects',
'schema_object',
'destination_project_dataset_table',
'impersonation_chain',
)
template_ext: Sequence[str] = ('.sql',)
ui_color = '#f0eee4'
def __init__(
self,
*,
bucket,
source_objects,
destination_project_dataset_table,
schema_fields=None,
schema_object=None,
source_format='CSV',
compression='NONE',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
encoding="UTF-8",
max_id_key=None,
bigquery_conn_id='google_cloud_default',
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
schema_update_options=(),
src_fmt_configs=None,
external_table=False,
time_partitioning=None,
cluster_fields=None,
autodetect=True,
encryption_configuration=None,
location=None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
labels=None,
description=None,
**kwargs,
):
super().__init__(**kwargs)
# GCS config
if src_fmt_configs is None:
src_fmt_configs = {}
if time_partitioning is None:
time_partitioning = {}
self.bucket = bucket
self.source_objects = source_objects
self.schema_object = schema_object
# BQ config
self.destination_project_dataset_table = destination_project_dataset_table
self.schema_fields = schema_fields
self.source_format = source_format
self.compression = compression
self.create_disposition = create_disposition
self.skip_leading_rows = skip_leading_rows
self.write_disposition = write_disposition
self.field_delimiter = field_delimiter
self.max_bad_records = max_bad_records
self.quote_character = quote_character
self.ignore_unknown_values = ignore_unknown_values
self.allow_quoted_newlines = allow_quoted_newlines
self.allow_jagged_rows = allow_jagged_rows
self.external_table = external_table
self.encoding = encoding
self.max_id_key = max_id_key
self.bigquery_conn_id = bigquery_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.schema_update_options = schema_update_options
self.src_fmt_configs = src_fmt_configs
self.time_partitioning = time_partitioning
self.cluster_fields = cluster_fields
self.autodetect = autodetect
self.encryption_configuration = encryption_configuration
self.location = location
self.impersonation_chain = impersonation_chain
self.labels = labels
self.description = description
def execute(self, context: 'Context'):
bq_hook = BigQueryHook(
bigquery_conn_id=self.bigquery_conn_id,
delegate_to=self.delegate_to,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
if not self.schema_fields:
if self.schema_object and self.source_format != 'DATASTORE_BACKUP':
gcs_hook = GCSHook(
gcp_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
blob = gcs_hook.download(
bucket_name=self.bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
else:
schema_fields = None
else:
schema_fields = self.schema_fields
self.source_objects = (
self.source_objects if isinstance(self.source_objects, list) else [self.source_objects]
)
source_uris = [f'gs://{self.bucket}/{source_object}' for source_object in self.source_objects]
conn = bq_hook.get_conn()
cursor = conn.cursor()
if self.external_table:
cursor.create_external_table(
external_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
compression=self.compression,
skip_leading_rows=self.skip_leading_rows,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
src_fmt_configs=self.src_fmt_configs,
encryption_configuration=self.encryption_configuration,
labels=self.labels,
description=self.description,
)
else:
cursor.run_load(
destination_project_dataset_table=self.destination_project_dataset_table,
schema_fields=schema_fields,
source_uris=source_uris,
source_format=self.source_format,
autodetect=self.autodetect,
create_disposition=self.create_disposition,
skip_leading_rows=self.skip_leading_rows,
write_disposition=self.write_disposition,
field_delimiter=self.field_delimiter,
max_bad_records=self.max_bad_records,
quote_character=self.quote_character,
ignore_unknown_values=self.ignore_unknown_values,
allow_quoted_newlines=self.allow_quoted_newlines,
allow_jagged_rows=self.allow_jagged_rows,
encoding=self.encoding,
schema_update_options=self.schema_update_options,
src_fmt_configs=self.src_fmt_configs,
time_partitioning=self.time_partitioning,
cluster_fields=self.cluster_fields,
encryption_configuration=self.encryption_configuration,
labels=self.labels,
description=self.description,
)
if cursor.use_legacy_sql:
escaped_table_name = f'[{self.destination_project_dataset_table}]'
else:
escaped_table_name = f'`{self.destination_project_dataset_table}`'
if self.max_id_key:
select_command = f'SELECT MAX({self.max_id_key}) FROM {escaped_table_name}'
cursor.execute(select_command)
row = cursor.fetchone()
if row:
max_id = row[0] if row[0] else 0
self.log.info(
'Loaded BQ data with max %s.%s=%s',
self.destination_project_dataset_table,
self.max_id_key,
max_id,
)
else:
                raise RuntimeError(f"The query {select_command} returned no rows!")
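# Example usage (a hedged sketch: the operator class name and the surrounding
# DAG are assumptions; the keyword arguments map onto __init__ above):
#
#     load_csv = GCSToBigQueryOperator(
#         task_id='gcs_to_bq_example',
#         bucket='my-bucket',
#         source_objects=['data/sales_*.csv'],
#         destination_project_dataset_table='my_project.my_dataset.sales',
#         schema_object='schemas/sales.json',
#         source_format='CSV',
#         write_disposition='WRITE_TRUNCATE',
#         max_id_key='id',  # logs MAX(id) on the target table after the load
#     )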
| |
from random import randrange
from matplotlib import patches
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import pickle
'''
Creates a dataset of digit sequences by concatenating MNIST digits
Params:
dataset - dataset to use (mnist.train, mnist.validation or mnist.test)
num_examples - how many examples to create (size of created dataset)
length - the length of digit sequences
debug - show generated examples
'''
def make_dataset(dataset, num_examples, length, debug=False):
examples = []
labels = []
permutation = np.random.permutation(dataset.num_examples)
permutation_index = 0
for i in range(num_examples):
example = []
label = []
for j in range(length):
            # Index through the shuffled permutation so digits are sampled in random order.
            number = np.reshape(dataset.images[permutation[permutation_index]], [28, 28])
            if len(example) == 0:
                example = number
                label = dataset.labels[permutation[permutation_index]]
            else:
                example = np.append(example, number, axis=1)
                label = np.vstack([label, dataset.labels[permutation[permutation_index]]])
permutation_index += 1
if permutation_index >= len(permutation):
permutation = np.random.permutation(dataset.num_examples)
permutation_index = 0
if debug:
print(label)
plt.imshow(example, cmap='gray')
plt.show()
examples.append(example)
labels.append(label)
return {"examples": examples, "labels": labels}
'''
Makes a dataset for localization
Params:
    dataset - dataset of sequences to use (created by function make_dataset())
    pickle_file_name - name of the file in which the newly created dataset will be saved
    height - height of new images
    width - width of new images
    debug - show generated examples
'''
def make_localization_dataset(dataset, pickle_file_name, height, width, debug=False):
f = open(pickle_file_name, 'wb')
for i in range(len(dataset["examples"])):
example = dataset["examples"][i]
label = dataset["labels"][i]
new_example = np.zeros([height, width])
max_h = height - example.shape[0]
max_w = width - example.shape[1]
h_transition = randrange(max_h)
w_transition = randrange(max_w)
x = w_transition + int(example.shape[1] / 2)
y = h_transition + int(example.shape[0] / 2)
h = example.shape[0]
w = example.shape[1]
new_example[h_transition:h_transition + example.shape[0],
w_transition:w_transition + example.shape[1]] = example
position = [x, y, h, w]
pickle.dump({"example": new_example, "label": label, "position": position}, f)
if debug:
print(position)
fig, ax = plt.subplots(1)
ax.imshow(new_example, cmap='gray')
rect = patches.Rectangle((x - (w / 2), y - (h / 2)), w, h, linewidth=1, edgecolor='r', facecolor='none')
point = patches.Rectangle((x, y), 1, 1, linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
ax.add_patch(point)
plt.show()
f.close()
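# The stored position is [x, y, h, w], where (x, y) is the centre of the
# pasted sequence. A small helper (a sketch, not used elsewhere in this
# script) recovers the top-left corner that the Rectangle above is drawn from:
def position_to_corner(position):
    """Converts a centre-based [x, y, h, w] position to (top, left, h, w)."""
    x, y, h, w = position
    return (y - h // 2, x - w // 2, h, w)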
'''
Makes a dataset for localization with variable-length sequences
Params:
    dataset - dataset to use (mnist.train, mnist.validation or mnist.test)
pickle_file_name - name of file in which the newly created dataset will be saved
num_examples - number of created examples
max_length - maximal length of sequence
height - height of new images
width - width of new images
debug - show generated examples
'''
def make_variable_length_dataset(dataset, pickle_file_name, num_examples, max_length, height, width, debug=False):
f = open(pickle_file_name, 'wb')
permutation = np.random.permutation(dataset.num_examples)
permutation_index = 0
for i in range(num_examples):
example = []
label = []
length = randrange(1, max_length + 1)
for j in range(max_length):
            # Index through the shuffled permutation so digits are sampled in random order.
            number = np.reshape(dataset.images[permutation[permutation_index]], [28, 28])
            if j < length:
                if len(example) == 0:
                    example = number
                    label = np.append(dataset.labels[permutation[permutation_index]], [0])
                else:
                    example = np.append(example, number, axis=1)
                    label = np.vstack([label, np.append(dataset.labels[permutation[permutation_index]], [0])])
else:
label = np.vstack([label, np.append(np.zeros([10]), [1])])
permutation_index += 1
if permutation_index >= len(permutation):
permutation = np.random.permutation(dataset.num_examples)
permutation_index = 0
if debug:
print(label)
plt.imshow(example, cmap='gray')
plt.show()
new_example = np.zeros([height, width])
max_h = height - example.shape[0]
max_w = width - example.shape[1]
h_transition = randrange(max_h)
w_transition = randrange(max_w)
x = w_transition + int(example.shape[1] / 2)
y = h_transition + int(example.shape[0] / 2)
h = example.shape[0]
w = example.shape[1]
new_example[h_transition:h_transition + example.shape[0],
w_transition:w_transition + example.shape[1]] = example
position = [x, y, h, w]
pickle.dump({"example": new_example, "label": label, "position": position}, f)
if debug:
print(label)
print(position)
fig, ax = plt.subplots(1)
ax.imshow(new_example, cmap='gray')
rect = patches.Rectangle((x - (w / 2), y - (h / 2)), w, h, linewidth=1, edgecolor='r', facecolor='none')
point = patches.Rectangle((x, y), 1, 1, linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
ax.add_patch(point)
plt.show()
print()
f.close()
'''
Loads dataset from pickle file
Params:
file_name - name of pickle file
debug - show generated examples
'''
def load_dataset(file_name, debug=False):
dataset = pickle.load(open(file_name, "rb"))
if debug:
for i in range(len(dataset["examples"])):
print(dataset["labels"][i])
plt.imshow(dataset["examples"][i], cmap='gray')
plt.show()
return dataset
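# Note: make_localization_dataset() and make_variable_length_dataset() write
# one pickled record per example, so the single pickle.load() above returns
# only the first record of those files. A hedged sketch of a loader for such
# multi-record files (the function name is ours):
def load_multi_record_dataset(file_name):
    """Reads every pickled record from a file written record-by-record."""
    records = []
    with open(file_name, 'rb') as f:
        while True:
            try:
                records.append(pickle.load(f))
            except EOFError:
                break
    return records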
if __name__ == '__main__':
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
print("Creating testing dataset")
train_dataset = make_dataset(mnist.train, 165000, 5)
pickle.dump(train_dataset, open("train.p", "wb"))
print("Creating testing dataset for localization")
make_localization_dataset(train_dataset, "train_localization.p", 128, 256, False)
train_dataset = []
print("Done")
print("Creating validation dataset")
validation_dataset = make_dataset(mnist.validation, 15000, 5)
pickle.dump(validation_dataset, open("validation.p", "wb"))
print("Creating validation dataset for localization")
make_localization_dataset(validation_dataset, "validation_localization.p", 128, 256, False)
validation_dataset = []
print("Done")
print("Creating testing dataset")
test_dataset = make_dataset(mnist.test, 30000, 5)
pickle.dump(test_dataset, open("test.p", "wb"))
print("Creating testing dataset for localization")
make_localization_dataset(test_dataset, "test_localization.p", 128, 256, False)
test_dataset = []
print("Done")
print("Creating variable length localization training dataset")
make_variable_length_dataset(mnist.train, "train_variable_localization.p", 165000, 5, 128, 256, debug=False)
print("Done")
print("Creating variable length localization validation dataset")
make_variable_length_dataset(mnist.validation, "validation_variable_localization.p", 15000, 5, 128, 256,
debug=False)
print("Done")
print("Creating variable length localization testing dataset")
make_variable_length_dataset(mnist.validation, "test_variable_localization.p", 30000, 5, 128, 256, debug=False)
print("Done")
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pickle
import unittest
import numpy as np
import properties
class HP1(properties.HasProperties):
a = properties.Integer('int a')
class HP2(properties.HasProperties):
inst1 = properties.Instance('b', HP1)
class HP3(properties.HasProperties):
inst2 = properties.Instance('c', HP2)
class TestSerialization(unittest.TestCase):
def test_pickle(self):
hp1 = HP1(a=10)
hp2 = HP2(inst1=hp1)
hp3 = HP3(inst2=hp2)
hp3_copy = pickle.loads(pickle.dumps(hp3))
assert isinstance(hp3_copy, HP3)
assert isinstance(hp3_copy.inst2, HP2)
assert isinstance(hp3_copy.inst2.inst1, HP1)
assert hp3_copy.inst2.inst1.a == 10
def test_serialize(self):
hp1 = HP1(a=10)
hp2 = HP2(inst1=hp1)
hp3 = HP3(inst2=hp2)
hp3_dict = {
'__class__': 'HP3',
'inst2': {
'__class__': 'HP2',
'inst1': {
'__class__': 'HP1',
'a': 10,
},
},
}
hp3_dict_no_class = {
'inst2': {
'inst1': {
'a': 10,
},
},
}
assert hp3.serialize() == hp3_dict
assert hp3.serialize(include_class=False) == hp3_dict_no_class
assert not isinstance(
properties.HasProperties.deserialize(hp3_dict), HP3
)
assert not isinstance(
properties.HasProperties.deserialize(hp3_dict_no_class), HP3
)
assert not isinstance(
properties.HasProperties.deserialize(
hp3_dict_no_class, trusted=True
), HP3
)
assert isinstance(properties.HasProperties.deserialize(
{'__class__': 'HP9'}, trusted=True
), properties.HasProperties)
assert isinstance(HP3.deserialize(hp3_dict), HP3)
assert isinstance(HP3.deserialize(hp3_dict_no_class), HP3)
assert isinstance(
properties.HasProperties.deserialize(hp3_dict, trusted=True), HP3
)
with self.assertRaises(ValueError):
HP1.deserialize(5)
with self.assertRaises(ValueError):
properties.HasProperties.deserialize(hp3_dict, strict=True)
assert isinstance(
HP3.deserialize(hp3_dict, strict=True), HP3
)
hp3_extra = {
'__class__': 'HP3',
'inst2': {
'__class__': 'HP2',
'inst1': {
'__class__': 'HP1',
'a': 10,
}
},
'b': 1,
}
with self.assertRaises(ValueError):
HP3.deserialize(hp3_extra, strict=True)
assert isinstance(HP3.deserialize({}), HP3)
with self.assertRaises(properties.ValidationError):
HP3.deserialize({}).validate()
with self.assertRaises(properties.ValidationError):
HP3.deserialize({}, assert_valid=True)
hp3_incomplete = {
'__class__': 'HP3',
'inst2': {
'__class__': 'HP2',
'inst1': {
'__class__': 'HP1',
}
}
}
assert isinstance(HP3.deserialize(hp3_incomplete, strict=True), HP3)
with self.assertRaises(properties.ValidationError):
HP3.deserialize(hp3_incomplete, assert_valid=True)
hp3_subextra = {
'__class__': 'HP3',
'inst2': {
'__class__': 'HP2',
'inst1': {
'__class__': 'HP1',
'a': 10,
'b': 2,
}
}
}
assert isinstance(HP3.deserialize(hp3_subextra, assert_valid=True), HP3)
with self.assertRaises(ValueError):
HP3.deserialize(hp3_subextra, strict=True)
class Invalid(properties.HasProperties):
def validate(self):
return False
assert isinstance(Invalid.deserialize({}), Invalid)
with self.assertRaises(properties.ValidationError):
Invalid.deserialize({}, assert_valid=True)
with self.assertRaises(ValueError):
HP3.deserialize(hp3_dict, _instance=Invalid())
def test_immutable_serial(self):
class UidModel(properties.HasProperties):
uid = properties.Uuid('unique id')
um1 = UidModel()
um2 = UidModel.deserialize(um1.serialize())
assert properties.equal(um1, um2)
def test_none_serial(self):
class ManyProperties(properties.HasProperties):
mystr = properties.String(
'my string',
required=False,
)
myarr = properties.Array(
'my array',
required=False,
)
myinst = properties.Instance(
'my HP1',
instance_class=HP1,
required=False,
)
mylist = properties.List(
'my list of HP1',
prop=HP1,
required=False,
default=properties.utils.undefined
)
myunion = properties.Union(
'string or HP1',
props=(HP1, properties.String('')),
required=False,
)
many = ManyProperties()
assert many.serialize(include_class=False) == {}
def test_serializer(self):
with self.assertRaises(TypeError):
properties.GettableProperty('bad serial', serializer=5)
with self.assertRaises(TypeError):
properties.GettableProperty('bad deserial', deserializer=5)
def reverse(value):
return ''.join(v for v in value[::-1])
def to_string(value):
return ', '.join(v for v in value.astype(str))
def from_string(value):
return np.array(value.split(', ')).astype(int)
def serialize_a_only(value):
return value.a
def deserialize_from_a(value):
return HP1(a=value)
def sum_of_a(value):
return sum(inst.a for inst in value)
def from_sum(value):
return [HP1(a=value)]
def just_the_classname(value):
return value.__class__.__name__
class ManyProperties(properties.HasProperties):
mystr = properties.String(
'my string',
serializer=reverse,
deserializer=reverse,
)
myarr = properties.Array(
'my array',
serializer=to_string,
deserializer=from_string,
)
myinst = properties.Instance(
'my HP1',
instance_class=HP1,
serializer=serialize_a_only,
deserializer=deserialize_from_a,
)
mylist = properties.List(
'my list of HP1',
prop=HP1,
serializer=sum_of_a,
deserializer=from_sum,
)
myunion = properties.Union(
'string or HP1',
props=(HP1, properties.String('')),
serializer=just_the_classname,
deserializer=reverse,
)
many = ManyProperties(
mystr='abcd',
myarr=[1, 2, 3],
myinst=HP1(a=10),
mylist=[HP1(a=1), HP1(a=2), HP1(a=3)],
myunion=HP1(a=10)
)
many_serialized = {
'mystr': 'dcba',
'myarr': '1, 2, 3',
'myinst': 10,
'mylist': 6,
'myunion': 'HP1'
}
assert many.serialize(include_class=False) == many_serialized
many = ManyProperties.deserialize(many_serialized)
assert many.mystr == 'abcd'
assert isinstance(many.myarr, np.ndarray)
assert np.all(many.myarr == [1, 2, 3])
assert isinstance(many.myinst, HP1)
assert many.myinst.a == 10
assert isinstance(many.mylist, list)
assert len(many.mylist) == 1
assert isinstance(many.mylist[0], HP1)
assert many.mylist[0].a == 6
assert many.myunion == '1PH'
assert isinstance(ManyProperties.deserialize({'mystr': 'hi'}), ManyProperties)
def kwarg_multiplier(value, **kwargs):
mult = kwargs.get('mult', 1)
return value * mult
def kwarg_divider(value, **kwargs):
mult = kwargs.get('mult', 1)
return value / mult
class HasInt(properties.HasProperties):
my_int = properties.Integer(
'Integer serialized as mult',
serializer=kwarg_multiplier,
deserializer=kwarg_divider,
)
hi = HasInt(my_int=5)
hi_ser = hi.serialize()
assert hi_ser['my_int'] == 5
hi_copy = HasInt.deserialize(hi_ser)
assert hi_copy.my_int == 5
hi_ser = hi.serialize(mult=2)
assert hi_ser['my_int'] == 10
hi_copy = HasInt.deserialize(hi_ser, mult=2)
assert hi_copy.my_int == 5
def test_dynamic_serial(self):
class DynamicModel(properties.HasProperties):
a = properties.Integer('')
b = properties.Renamed('a')
@properties.Integer('')
def c(self):
return self.a
dm1 = DynamicModel()
dm1.a = 5
assert dm1.b == 5
assert dm1.c == 5
dm_save_dynamic = dm1.serialize(save_dynamic=True)
dm_skip_dynamic = dm1.serialize()
assert dm_skip_dynamic == {'__class__': 'DynamicModel', 'a': 5}
assert dm_save_dynamic == {'__class__': 'DynamicModel',
'a': 5, 'b': 5, 'c': 5}
dm2 = DynamicModel.deserialize(dm_skip_dynamic)
assert properties.equal(dm1, dm2)
dm3 = DynamicModel.deserialize(dm_save_dynamic)
assert properties.equal(dm1, dm3)
def test_instance_deserializer(self):
class DeserializeClass(object):
def __call__(self, value):
print('deserializing')
class HasDeserializer(properties.HasProperties):
my_int = properties.Integer(
'Int with deserializer',
deserializer=DeserializeClass(),
)
if __name__ == '__main__':
unittest.main()
| |
import requests
import re
import json
import sys
import datetime
import logging
from logging.handlers import TimedRotatingFileHandler
import ConfigParser
# Importing db manager
sys.path.insert(0, '../../resource-contextualization-import-db/abstraction')
from DB_Factory import DBFactory
# Importing utils
sys.path.insert(0, '../util')
import util
"""
Dictionary with the relationships between input resource types and output resource types.
"""
resource_types_relations = {
'Tool (analysis)' :['Tool'],
'Tool (query and retrieval)' :['Tool'],
'Tool (utility)' :['Tool'],
'Tool (deposition)' :['Tool'],
'Tool (visualiser)' :['Tool'],
'Tool' :['Tool'],
'Workflow' :['Tool'],
'Library' :['Tool'],
'Database' :['Database'],
'Suite' :['Tool'],
'Framework' :['Tool'],
'Other' :['Tool'],
'Widget' :['Tool']
}
logger = None
def init_logger():
"""
Function that initialises logging system
"""
global logger
logger = logging.getLogger('elixir_registry_logs')
    if (len(logger.handlers) == 0):  # We only attach handlers if none exist yet
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.INFO)
filehandler = logging.handlers.TimedRotatingFileHandler('../../resource-contextualization-logs/context-elixir_registry.log', when='w0')
filehandler.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamhandler.setFormatter(formatter)
filehandler.setFormatter(formatter)
# add formatters to logger
logger.addHandler(streamhandler)
logger.addHandler(filehandler)
def get_records():
"""
    Get all registry data from the Elixir registry ("https://bio.tools").
    * {list} registry data. In this script we will need these
      variables {string}:
"title" - Title for the data registry.
"description" - Description for the data registry.
"link" - Link to the data registry.
"field" - Default ('Services Registry');
None if there is any error.
"""
try:
elixirData = requests.get('https://bio.tools/api/tool')
records_list = json.loads(elixirData.text)
return records_list
except Exception as e:
logger.error ("Exception asking for Elixir data")
logger.error (e)
return None
def get_one_field_from_registry_data(record, field_name):
"""
Generic function to get one field value from the data of one record.
    * record {dict} one Elixir record.
    * field_name {string} name of the field to be obtained.
    * {string} Return the requested field value. None if there is any error.
"""
try:
return format(record[field_name])
except Exception as e:
logger.error("Error getting "+field_name+" from Elixir record:")
logger.error(record)
logger.error(e)
return None
def get_title(data):
"""
Get 'title' field from the data of one record.
    * data {dict} one Elixir record.
* {string} Return 'title' value from the list. None if there is any error.
"""
return get_one_field_from_registry_data(data, 'name')
def get_description(data):
"""
Get 'description' field from the data of one record.
    * data {dict} one Elixir record.
* {string} Return 'description' value from the list. None if there is any error.
"""
return get_one_field_from_registry_data(data, 'description')
def get_link(data):
"""
Get 'link' field from the data of one record.
    * data {dict} one Elixir record.
* {string} Return 'link' value from the list. None if there is any error.
"""
return get_one_field_from_registry_data(data, 'homepage')
def get_field(data):
"""
    Get 'field' field from the data of one Elixir registry entity.
* data {list} one record of Elixir registry data.
* {string or list} Return 'field' value from the list. None if there is any error.
"""
my_field = get_one_field_from_registry_data(data, 'topic')
if my_field is not None:
        # format() returned the list's repr, so eval() turns it back into a
        # list (ast.literal_eval would be safer for untrusted input).
        my_field_converted = eval(my_field)
return_value = []
for each_field in my_field_converted:
try:
term = each_field.get('term')
return_value.append(term)
except Exception as e:
logger.error("Error getting 'term' field of "+my_field+" topic:")
logger.error(e)
return return_value
else:
return None
def get_resource_types_value(original_value):
"""
Converts one original resource type to our own resource type names.
* original_value {string} original resource type value.
    * {List} Return 'resource type' value adapted to our own necessities.
"""
if original_value is not None:
global resource_types_relations
return resource_types_relations.get(original_value,['Tool'])
else:
return []
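# Example (following the mapping and the fallback above):
#     get_resource_types_value('Database')  # -> ['Database']
#     get_resource_types_value('Workflow')  # -> ['Tool']
#     get_resource_types_value('Unmapped')  # -> ['Tool'] (default)
#     get_resource_types_value(None)        # -> []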
def get_resource_type_field(data):
"""
Get the resource type of any registry obtained with this script.
    * data {dict} one Elixir record.
* {string} Return resource type value.
"""
resource_types = []
my_field = get_one_field_from_registry_data(data, 'resourceType')
if my_field is not None:
clear_value = remove_unicode_chars(my_field)
if isinstance(clear_value, basestring):
resource_types = get_resource_types_value(clear_value)
return resource_types
else:
for each_value in clear_value:
resource_types = resource_types + get_resource_types_value(each_value)
return resource_types
else:
return None
def get_source_field():
"""
Get the source token of any registry obtained with this script.
* {string} Return source token.
"""
return get_elixir_registry_source()
def get_elixir_registry_source():
"""
Get the specific source of fields related with Elixir registry.
* {string} Returns a representative token of the Elixir registry fields source.
"""
return 'elixir_registry'
def get_insertion_date_field():
"""
Get insertion date of any registry obtained with this script.
    * {date} Return the current date and time.
"""
return datetime.datetime.now()
def remove_unicode_chars(variable):
"""
Utility function to remove special Unicode chars.
    * variable {string} string variable with Unicode markers. It can contain more than one string.
    * {list} Return the strings without Unicode markers. None if there is any error.
"""
    if variable is not None and isinstance(variable, basestring):
listFull = []
strText = variable.replace("[u'", "")
strClear = strText.replace("']", "")
if re.search("', u'", strClear):
array = strClear.split("', u'")
for index in array:
strEach = index
listFull.append(strEach)
return listFull
else:
listFull.append(strClear)
return listFull
else:
return None
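# Example (derived from the string handling above): the function strips the
# "[u'...']" wrapping that format() produces for a list of unicode strings:
#     remove_unicode_chars("[u'Genomics', u'Proteomics']")
#     # -> ['Genomics', 'Proteomics']
#     remove_unicode_chars("[u'Genomics']")  # -> ['Genomics']
#     remove_unicode_chars(None)             # -> None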
### ENTRY POINTS
def main():
"""
Executes main_options function with default configurations
"""
main_options(None)
def mainFullUpdating():
"""
Executes main_options function updating all registries and erasing all previous
Elixir registry data
"""
my_options = {}
my_options['delete_all_old_data'] = True
my_options['updateRegistries'] = True
main_options(my_options)
def mainFullDeleting():
"""
Executes main_options function erasing all previous Elixir registry data
"""
my_options = {}
my_options['delete_all_old_data'] = True
my_options['updateRegistries'] = False
main_options(my_options)
def main_options(options):
"""
Executes the main functionality of this script: it extracts JSON data from each record found on Elixir's registry
and inserts its main data into the DB.
* options {list} specific configurations for initialization.
ds_name {string} specific dataset/database to use with the DB manager
        delete_all_old_data {boolean} specifies if we should delete all previous Elixir registry data in our database
        registriesFromTime {date} time from which registries will be obtained
        updateRegistries {boolean} whether we want to fetch new registries or not
In this script we will insert these fields into each registry:
"title" {string} Title for the data registry.
"notes" {string} Description for the data registry.
"link" {string} Link to the data registry.
"field" {string} Default ('Services Registry');
"source" {string} Default ('ckan');
"insertion date" {date} Current date and time.
"""
init_logger()
ds_name = None
delete_all_old_data = False
registriesFromTime = None
updateRegistries = True
if options is not None:
logger.info ('>> Starting Elixir registry importing process... params: ')
if ('ds_name' in options.keys()):
ds_name = options['ds_name']
logger.info ('ds_name='+ds_name)
if ('delete_all_old_data' in options.keys()):
delete_all_old_data = options['delete_all_old_data']
logger.info ('delete_all_old_data='+str(delete_all_old_data))
if ('updateRegistries' in options.keys()):
updateRegistries = options['updateRegistries']
logger.info ('updateRegistries='+str(updateRegistries))
else:
logger.info ('>> Starting Elixir registry importing process...')
records = None
if updateRegistries:
records = get_records()
user = None
passw = None
try:
config = ConfigParser.RawConfigParser()
config.read('ConfigFile.properties')
        usertemp = config.get('AuthenticationSection', 'database.user')
        passwtemp = config.get('AuthenticationSection', 'database.password')
user = usertemp
passw = passwtemp
except Exception as e:
logger.info ("Not user info found, using anonymous user... ")
logger.info (e)
dbFactory = DBFactory()
dbManager = dbFactory.get_default_db_manager_with_username(ds_name,user,passw)
if (delete_all_old_data is not None and delete_all_old_data):
registry_conditions = [['EQ','source',get_source_field()]]
previous_count = dbManager.count_data_by_conditions(registry_conditions)
dbManager.delete_data_by_conditions(registry_conditions)
new_count = dbManager.count_data_by_conditions(registry_conditions)
if (previous_count is not None and new_count is not None):
logger.info ('Deleted '+str( (previous_count-new_count) )+' registries')
if records is not None:
numSuccess = 0
for record in records:
# exists = util.existURL(get_link(record))
# logger.info ('Exists? '+get_link(record)+' :'+str(exists))
# if (exists):
success = dbManager.insert_data({
"title":get_title(record),
"description":get_description(record),
"link":get_link(record),
"field":get_field(record),
"source":get_source_field(),
"resource_type":get_resource_type_field(record),
"insertion_date":get_insertion_date_field()
})
if success:
numSuccess=numSuccess+1
logger.info ('Inserted '+str(numSuccess)+' new registries')
logger.info('<< Finished Elixir registry importing process...')
if __name__ == "__main__":
#main_options({"ds_name":'test_core'})
mainFullUpdating()
| |
from __future__ import print_function, unicode_literals
from future.builtins import open
import os
import re
import sys
from contextlib import contextmanager
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from importlib import import_module
from posixpath import join
from mezzanine.utils.conf import real_project_name
from fabric.api import abort, env, cd, prefix, sudo as _sudo, run as _run, \
hide, task, local
from fabric.context_managers import settings as fab_settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists, upload_template
from fabric.contrib.project import rsync_project
from fabric.colors import yellow, green, blue, red
from fabric.decorators import hosts
################
# Config setup #
################
env.proj_app = real_project_name("{{ project_name }}")
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
conf = import_module("%s.settings" % env.proj_app).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", env.proj_app)
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s/.virtualenvs" % env.user)
env.venv_path = join(env.venv_home, env.proj_name)
env.proj_path = "/home/%s/mezzanine/%s" % (env.user, env.proj_name)
env.manage = "%s/bin/python %s/manage.py" % (env.venv_path, env.proj_path)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_regex = "|".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.vcs_tools = ["git", "hg"]
env.deploy_tool = conf.get("DEPLOY_TOOL", "rsync")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.num_workers = conf.get("NUM_WORKERS",
"multiprocessing.cpu_count() * 2 + 1")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
# Remote git repos need to be "bare" and reside separately from the project
if env.deploy_tool == "git":
env.repo_path = "/home/%s/git/%s.git" % (env.user, env.proj_name)
else:
env.repo_path = env.proj_path
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, but only if its
# contents have changed, in which case the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf.template",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf.template",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl update gunicorn_%(proj_name)s",
},
"cron": {
"local_path": "deploy/crontab.template",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/%(proj_app)s/local_settings.py",
},
}
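# Example: upload_template_and_reload("nginx") (defined below) renders the
# "nginx" entry above against env (filling placeholders such as
# %(proj_name)s), uploads it only if the rendered file differs from the
# remote copy, and then runs its reload_command ("service nginx restart").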
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_path):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True, *args, **kwargs):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command, *args, **kwargs)
@task
def sudo(command, show=True, *args, **kwargs):
"""
Runs a command as sudo on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command, *args, **kwargs)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
        header = "-" * len(func.__name__)
        _print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
    Uploads a template only if it has changed, and if so, reloads the
    related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def rsync_upload():
"""
Uploads the project with rsync excluding some files and folders.
"""
excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
"local_settings.py", "/static", "/.git", "/.hg"]
local_dir = os.getcwd() + os.sep
return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
exclude=excludes)
def vcs_upload():
"""
Uploads the project with the selected VCS tool.
"""
if env.deploy_tool == "git":
remote_path = "ssh://%s@%s%s" % (env.user, env.host_string,
env.repo_path)
if not exists(env.repo_path):
run("mkdir -p %s" % env.repo_path)
with cd(env.repo_path):
run("git init --bare")
local("git push -f %s master" % remote_path)
with cd(env.repo_path):
run("GIT_WORK_TREE=%s git checkout -f master" % env.proj_path)
run("GIT_WORK_TREE=%s git reset --hard" % env.proj_path)
elif env.deploy_tool == "hg":
remote_path = "ssh://%s@%s/%s" % (env.user, env.host_string,
env.repo_path)
with cd(env.repo_path):
if not exists("%s/.hg" % env.repo_path):
run("hg init")
print(env.repo_path)
with fab_settings(warn_only=True):
push = local("hg push -f %s" % remote_path)
if push.return_code == 255:
abort()
run("hg update")
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return run("pip install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return sudo(command, show=show, user="postgres")
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the project database.
"""
tmp_file = "/tmp/%s" % filename
# We dump to /tmp because user "postgres" can't write to other user folders
# We cd to / because user "postgres" might not have read permissions
# elsewhere.
with cd("/"):
postgres("pg_dump -Fc %s > %s" % (env.proj_name, tmp_file))
run("cp %s ." % tmp_file)
sudo("rm -f %s" % tmp_file)
@task
def restore(filename):
"""
Restores the project database from a previous backup.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os;" \
"os.environ[\'DJANGO_SETTINGS_MODULE\']=\'%s.settings\';" \
"import django;" \
"django.setup();" % env.proj_app
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
if show:
print_command(code)
result = run(full_code, show=False)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print(settings.STATIC_ROOT)", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
###########################
# Security best practices #
###########################
@task
@log_call
@hosts(["root@%s" % host for host in env.hosts])
def secure(new_user=env.user):
"""
Minimal security steps for brand new servers.
    Installs system updates, creates a new user (with sudo privileges) for
    future use, and disables root login via SSH.
"""
run("apt-get update -q")
run("apt-get upgrade -y -q")
run("adduser --gecos '' %s" % new_user)
run("usermod -G sudo %s" % new_user)
run("sed -i 's:RootLogin yes:RootLogin no:' /etc/ssh/sshd_config")
run("service ssh restart")
print(green("Security steps completed. Log in to the server as '%s' from "
"now on." % new_user, bold=True))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
# Install system requirements
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor python-pip")
run("mkdir -p /home/%s/logs" % env.user)
# Install Python requirements
sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
# Set up virtualenv
run("mkdir -p %s" % env.venv_home)
run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home,
env.user))
run("echo 'source /usr/local/bin/virtualenvwrapper.sh' >> "
"/home/%s/.bashrc" % env.user)
print(green("Successfully set up git, mercurial, pip, virtualenv, "
"supervisor, memcached.", bold=True))
@task
@log_call
def create():
"""
Creates the environment needed to host the project.
The environment consists of: system locales, virtualenv, database, project
files, SSL certificate, and project-specific Python requirements.
"""
# Generate project locale
locale = env.locale.replace("UTF-8", "utf8")
with hide("stdout"):
if locale not in run("locale -a"):
sudo("locale-gen %s" % env.locale)
sudo("update-locale %s" % env.locale)
sudo("service postgresql restart")
run("exit")
# Create project path
run("mkdir -p %s" % env.proj_path)
# Set up virtual env
run("mkdir -p %s" % env.venv_home)
with cd(env.venv_home):
if exists(env.proj_name):
if confirm("Virtualenv already exists in host server: %s"
"\nWould you like to replace it?" % env.proj_name):
run("rm -rf %s" % env.proj_name)
else:
abort()
run("virtualenv %s" % env.proj_name)
# Upload project files
if env.deploy_tool in env.vcs_tools:
vcs_upload()
else:
rsync_upload()
# Create DB and DB user
pw = db_pass()
    # Escape single quotes for SQL by doubling them.
    user_sql_args = (env.proj_name, pw.replace("'", "''"))
user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Install project-specific requirements
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle psycopg2 "
"django-compressor python-memcached")
# Bootstrap the DB
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
% env.domains[0])
for domain in env.domains:
python("from django.contrib.sites.models import Site;"
"Site.objects.get_or_create(domain='%s');" % domain)
if env.admin_pass:
pw = env.admin_pass
user_py = ("from django.contrib.auth import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
run("rm -rf %s" % env.venv_path)
if exists(env.proj_path):
run("rm -rf %s" % env.proj_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
if exists(env.repo_path):
run("rm -rf %s" % env.repo_path)
sudo("supervisorctl update")
psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
psql("DROP USER IF EXISTS %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
If the processes are not running, they will be started.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
run("kill -HUP `cat %s`" % pid_path)
else:
sudo("supervisorctl update")
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Backup current version of the project, push latest version of the project
via version control or rsync, install new requirements, sync and migrate
the database, collect any new static assets, and restart gunicorn's worker
processes for the project.
"""
if not exists(env.proj_path):
if confirm("Project does not exist in host server: %s"
"\nWould you like to create it?" % env.proj_name):
create()
else:
abort()
# Backup current version of the project
with cd(env.proj_path):
backup("last.db")
if env.deploy_tool in env.vcs_tools:
with cd(env.repo_path):
if env.deploy_tool == "git":
run("git rev-parse HEAD > %s/last.commit" % env.proj_path)
elif env.deploy_tool == "hg":
run("hg id -i > last.commit")
with project():
static_dir = static()
if exists(static_dir):
run("tar -cf static.tar --exclude='*.thumbnails' %s" %
static_dir)
else:
with cd(join(env.proj_path, "..")):
excludes = ["*.pyc", "*.pio", "*.thumbnails"]
exclude_arg = " ".join("--exclude='%s'" % e for e in excludes)
run("tar -cf {0}.tar {1} {0}".format(env.proj_name, exclude_arg))
# Deploy latest version of the project
with update_changed_requirements():
if env.deploy_tool in env.vcs_tools:
vcs_upload()
else:
rsync_upload()
with project():
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
for name in get_templates():
upload_template_and_reload(name)
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the project files, the database, and all static
files. Calling rollback will revert all of these to their state prior to
the last deploy.
"""
with update_changed_requirements():
if env.deploy_tool in env.vcs_tools:
with cd(env.repo_path):
if env.deploy_tool == "git":
run("GIT_WORK_TREE={0} git checkout -f "
"`cat {0}/last.commit`".format(env.proj_path))
elif env.deploy_tool == "hg":
run("hg update -C `cat last.commit`")
with project():
with cd(join(static(), "..")):
run("tar -xf %s/static.tar" % env.proj_path)
else:
with cd(env.proj_path.rsplit("/", 1)[0]):
run("rm -rf %s" % env.proj_name)
run("tar -xf %s.tar" % env.proj_name)
with cd(env.proj_path):
restore("last.db")
restart()
@task
@log_call
def all():
"""
    Installs everything required on a new system and deploys the project,
    from the base software up to the deployed project.
"""
install()
if create():
deploy()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine.resources import stack_user
from heat.engine import scheduler
from heat.engine import template
from heat.tests import common
from heat.tests import utils
from mistralclient.api.v2 import executions
from .. import client # noqa
from ..resources import workflow # noqa
workflow_template = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: direct
tasks:
- name: hello
action: std.echo output='Good morning!'
publish:
result: <% $.hello %>
"""
workflow_template_full = """
heat_template_version: 2013-05-23
resources:
create_vm:
type: OS::Mistral::Workflow
properties:
name: create_vm
type: direct
input:
name: create_test_server
image: 31d8eeaf-686e-4e95-bb27-765014b9f20b
flavor: 2
output:
vm_id: <% $.vm_id %>
tasks:
- name: create_server
action: |
nova.servers_create name=<% $.name %> image=<% $.image %>
flavor=<% $.flavor %>
publish:
vm_id: <% $.create_server.id %>
on_success:
- check_server_exists
- name: check_server_exists
action: nova.servers_get server=<% $.vm_id %>
publish:
server_exists: True
on_success:
- wait_instance
- name: wait_instance
action: nova.servers_find id=<% $.vm_id %> status='ACTIVE'
policies:
retry:
delay: 5
count: 15
"""
workflow_template_bad = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: direct
tasks:
- name: second_task
action: std.noop
requires: [first_task]
- name: first_task
action: std.noop
"""
workflow_template_bad_reverse = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: reverse
tasks:
- name: second_task
action: std.noop
requires: [first_task]
- name: first_task
action: std.noop
"""
workflow_template_update_replace = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
name: hello_action
type: direct
tasks:
- name: hello
action: std.echo output='Good evening!'
publish:
result: <% $.hello %>
"""
workflow_template_update = """
heat_template_version: 2013-05-23
resources:
workflow:
type: OS::Mistral::Workflow
properties:
type: direct
description: just testing workflow resource
tasks:
- name: hello
action: std.echo output='Good evening!'
publish:
result: <% $.hello %>
"""
class FakeWorkflow(object):
def __init__(self, name):
self.name = name
class TestWorkflow(common.HeatTestCase):
def setUp(self):
super(TestWorkflow, self).setUp()
utils.setup_dummy_db()
self.ctx = utils.dummy_context()
tmpl = template_format.parse(workflow_template)
self.stack = utils.parse_stack(tmpl, stack_name='test_stack')
resource_defns = self.stack.t.resource_definitions(self.stack)
self.rsrc_defn = resource_defns['workflow']
self.mistral = mock.Mock()
self.patchobject(workflow.Workflow, 'mistral',
return_value=self.mistral)
mock.patch.object(stack_user.StackUser, '_create_user').start()
mock.patch.object(signal_responder.SignalResponder,
'_create_keypair').start()
mock.patch.object(client, 'mistral_base').start()
mock.patch.object(client.MistralClientPlugin, '_create').start()
self.client = client.MistralClientPlugin(self.ctx)
def _create_resource(self, name, snippet, stack):
wf = workflow.Workflow(name, snippet, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.create)()
return wf
def test_create(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
expected_state = (wf.CREATE, wf.COMPLETE)
self.assertEqual(expected_state, wf.state)
self.assertEqual('test_stack-workflow-b5fiekfci3yc', wf.resource_id)
def test_create_with_name(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = workflow.Workflow('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
expected_state = (wf.CREATE, wf.COMPLETE)
self.assertEqual(expected_state, wf.state)
self.assertEqual('create_vm', wf.resource_id)
def test_attributes(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
self.assertEqual({'name': 'test_stack-workflow-b5fiekfci3yc',
'input': None}, wf.FnGetAtt('data'))
self.assertEqual([], wf.FnGetAtt('executions'))
def test_direct_workflow_validation_error(self):
error_msg = ("Mistral resource validation error : "
"workflow.properties.tasks.second_task.requires: "
"task second_task contains property 'requires' "
"in case of direct workflow. Only reverse workflows "
"can contain property 'requires'.")
self._test_validation_failed(workflow_template_bad, error_msg)
def test_wrong_params_using(self):
error_msg = ("Mistral resource validation error : "
"workflow.properties.params: 'task_name' is not assigned "
"in 'params' in case of reverse type workflow.")
self._test_validation_failed(workflow_template_bad_reverse, error_msg)
    def _test_validation_failed(self, template_str, error_msg):
        tmpl = template_format.parse(template_str)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
wf = workflow.Workflow('workflow', rsrc_defns, stack)
exc = self.assertRaises(exception.StackValidationFailed,
wf.validate)
self.assertEqual(error_msg, six.text_type(exc))
def test_create_wrong_definition(self):
tmpl = template_format.parse(workflow_template)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['workflow']
wf = workflow.Workflow('workflow', rsrc_defns, stack)
self.mistral.workflows.create.side_effect = Exception('boom!')
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.create))
expected_state = (wf.CREATE, wf.FAILED)
self.assertEqual(expected_state, wf.state)
self.assertIn('Exception: boom!', six.text_type(exc))
def test_update_replace(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
t = template_format.parse(workflow_template_update_replace)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_workflow = rsrc_defns['workflow']
new_workflows = [FakeWorkflow('hello_action')]
self.mistral.workflows.update.return_value = new_workflows
self.mistral.workflows.delete.return_value = None
err = self.assertRaises(resource.UpdateReplace,
scheduler.TaskRunner(wf.update,
new_workflow))
msg = 'The Resource workflow requires replacement.'
self.assertEqual(msg, six.text_type(err))
def test_update(self):
wf = self._create_resource('workflow', self.rsrc_defn,
self.stack)
t = template_format.parse(workflow_template_update)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_wf = rsrc_defns['workflow']
self.mistral.workflows.update.return_value = [
FakeWorkflow('test_stack-workflow-b5fiekfci3yc')]
scheduler.TaskRunner(wf.update, new_wf)()
self.mistral.workflows.update.assert_called_once()
self.assertEqual((wf.UPDATE, wf.COMPLETE), wf.state)
def test_update_failed(self):
wf = self._create_resource('workflow', self.rsrc_defn,
self.stack)
t = template_format.parse(workflow_template_update)
rsrc_defns = template.Template(t).resource_definitions(self.stack)
new_wf = rsrc_defns['workflow']
self.mistral.workflows.update.side_effect = Exception('boom!')
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.update, new_wf))
self.assertEqual((wf.UPDATE, wf.FAILED), wf.state)
def test_delete(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
def test_delete_no_data(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
wf.data_delete('executions')
self.assertEqual([], wf.FnGetAtt('executions'))
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
def test_delete_not_found(self):
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
self.mistral.workflows.delete.side_effect = (
self.mistral.mistral_base.APIException(error_code=404))
scheduler.TaskRunner(wf.delete)()
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
@mock.patch.object(resource.Resource, 'client_plugin')
def test_delete_other_errors(self, mock_plugin):
"""We mock client_plugin for returning correct mistral client."""
mock_plugin.return_value = self.client
client.mistral_base.APIException = exception.Error
wf = self._create_resource('workflow', self.rsrc_defn, self.stack)
self.mistral.workflows.delete.side_effect = (Exception('boom!'))
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.delete))
self.assertEqual((wf.DELETE, wf.FAILED), wf.state)
self.assertIn('boom!', six.text_type(exc))
def test_resource_mapping(self):
mapping = workflow.resource_mapping()
self.assertEqual(1, len(mapping))
self.assertEqual(workflow.Workflow,
mapping['OS::Mistral::Workflow'])
def test_signal_failed(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = workflow.Workflow('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'flavor': '3'}}
self.mistral.executions.create.side_effect = Exception('boom!')
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
self.assertEqual('Exception: boom!', six.text_type(err))
def test_signal_wrong_input_and_params_type(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = workflow.Workflow('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': '3'}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: Signal data error : Input in"
" signal data must be a map, find a <type 'str'>")
self.assertEqual(error_message, six.text_type(err))
details = {'params': '3'}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: Signal data error : Params "
"must be a map, find a <type 'str'>")
self.assertEqual(error_message, six.text_type(err))
def test_signal_wrong_input_key(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = workflow.Workflow('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'1': '3'}}
err = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(wf.signal, details))
error_message = ("StackValidationFailed: Signal data error :"
" Unknown input 1")
self.assertEqual(error_message, six.text_type(err))
def test_signal_and_delete_with_executions(self):
tmpl = template_format.parse(workflow_template_full)
stack = utils.parse_stack(tmpl)
rsrc_defns = stack.t.resource_definitions(stack)['create_vm']
wf = workflow.Workflow('create_vm', rsrc_defns, stack)
self.mistral.workflows.create.return_value = [
FakeWorkflow('create_vm')]
scheduler.TaskRunner(wf.create)()
details = {'input': {'flavor': '3'}}
execution = mock.Mock()
execution.id = '12345'
# Invoke the real create method (bug 1453539)
exec_manager = executions.ExecutionManager(wf.client('mistral'))
self.mistral.executions.create.side_effect = (
lambda *args, **kw: exec_manager.create(*args, **kw))
self.patchobject(exec_manager, '_create', return_value=execution)
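        # (Editor's note) the side_effect above forwards the mocked call to a
        # real ExecutionManager whose low-level _create() is stubbed, so the
        # manager's own argument handling is still exercised by this test.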
scheduler.TaskRunner(wf.signal, details)()
self.assertEqual({'executions': '12345'}, wf.data())
scheduler.TaskRunner(wf.delete)()
self.assertEqual(1, self.mistral.executions.delete.call_count)
self.assertEqual((wf.DELETE, wf.COMPLETE), wf.state)
| |
# -*- coding: utf-8 -*-
"""
sphinx.writers.html
~~~~~~~~~~~~~~~~~~~
docutils writers handling Sphinx' custom nodes.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import posixpath
import os
import copy
import warnings
from six import string_types
from docutils import nodes
from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
from sphinx.util.images import get_image_size
from sphinx.util.smartypants import sphinx_smarty_pants
# A good overview of the purpose behind these classes can be found here:
# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
class HTMLWriter(Writer):
# override embed-stylesheet default value to 0.
settings_spec = copy.deepcopy(Writer.settings_spec)
for _setting in settings_spec[2]:
if '--embed-stylesheet' in _setting[1]:
_setting[2]['default'] = 0
def __init__(self, builder):
Writer.__init__(self)
self.builder = builder
def translate(self):
# sadly, this is mostly copied from parent class
self.visitor = visitor = self.builder.translator_class(self.builder,
self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'fragment',
'body_suffix', 'meta', 'title', 'subtitle', 'header',
'footer', 'html_prolog', 'html_head', 'html_title',
'html_subtitle', 'html_body', ):
setattr(self, attr, getattr(visitor, attr, None))
self.clean_meta = ''.join(visitor.meta[2:])
class HTMLTranslator(BaseTranslator):
"""
Our custom HTML translator.
"""
def __init__(self, builder, *args, **kwds):
BaseTranslator.__init__(self, *args, **kwds)
self.highlighter = builder.highlighter
self.no_smarty = 0
self.builder = builder
self.highlightlang = self.highlightlang_base = \
builder.config.highlight_language
self.highlightopts = builder.config.highlight_options
self.highlightlinenothreshold = sys.maxsize
self.docnames = [builder.current_docname] # for singlehtml builder
self.protect_literal_text = 0
self.permalink_text = builder.config.html_add_permalinks
# support backwards-compatible setting to a bool
if not isinstance(self.permalink_text, string_types):
self.permalink_text = self.permalink_text and u'\u00B6' or ''
self.permalink_text = self.encode(self.permalink_text)
self.secnumber_suffix = builder.config.html_secnumber_suffix
self.param_separator = ''
self.optional_param_level = 0
self._table_row_index = 0
def visit_start_of_file(self, node):
# only occurs in the single-file builder
self.docnames.append(node['docname'])
self.body.append('<span id="document-%s"></span>' % node['docname'])
def depart_start_of_file(self, node):
self.docnames.pop()
def visit_desc(self, node):
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
# the id is set automatically
self.body.append(self.starttag(node, 'dt'))
# anchor for per-desc interactive data
if node.parent['objtype'] != 'describe' \
and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
pass
def depart_desc_signature_line(self, node):
if node.get('add_permalink'):
# the permalink info is on the parent desc_signature node
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
self.body.append('</code>')
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' → ')
def depart_desc_returns(self, node):
pass
def visit_desc_name(self, node):
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
self.body.append('<span class="sig-paren">(</span>')
self.first_param = 1
self.optional_param_level = 0
# How many required parameters are left.
self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
for c in node.children])
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append('<span class="sig-paren">)</span>')
# If required parameters are still to come, then put the comma after
# the parameter. Otherwise, put the comma before. This ensures that
# signatures like the following render correctly (see issue #1001):
#
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
self.body.append(self.param_separator)
if self.optional_param_level == 0:
self.required_params_left -= 1
if not node.hasattr('noemph'):
self.body.append('<em>')
def depart_desc_parameter(self, node):
if not node.hasattr('noemph'):
self.body.append('</em>')
if self.required_params_left:
self.body.append(self.param_separator)
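    # Worked trace (editor's note) for foo([a, ]b, c[, d]): while required
    # parameters remain, depart_desc_parameter appends the comma after each
    # one ('a, ', 'b, '); once required_params_left reaches zero,
    # visit_desc_parameter emits the comma before the parameter instead
    # ('[, d'), which keeps the optional brackets tight against the commas.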
def visit_desc_optional(self, node):
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
self.body.append('</em>')
def visit_desc_content(self, node):
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
self.body.append('</dd>')
def visit_versionmodified(self, node):
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
self.body.append('</div>\n')
# overwritten
def visit_reference(self, node):
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = node['refuri'] or '#'
if self.settings.cloak_email_addresses and \
atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = 1
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
atts['href'] = '#' + node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
if 'target' in node:
atts['target'] = node['target']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) %
'.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
self.visit_reference(node)
def depart_number_reference(self, node):
self.depart_reference(node)
# overwritten -- we don't want source comments to show up in the HTML
def visit_comment(self, node):
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
node.insert(0, nodes.title(name, admonitionlabels[name]))
self.set_first_last(node)
def visit_seealso(self, node):
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
self.depart_admonition(node)
def add_secnumber(self, node):
if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
elif isinstance(node.parent, nodes.section):
if self.builder.name == 'singlehtml':
docname = self.docnames[-1]
anchorname = '#' + node.parent['ids'][0]
if (docname, anchorname) not in self.builder.secnumbers:
anchorname = (docname, '') # try first heading which has no anchor
else:
anchorname = (docname, anchorname)
else:
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
anchorname = '' # try first heading which has no anchor
if self.builder.secnumbers.get(anchorname):
numbers = self.builder.secnumbers[anchorname]
self.body.append('.'.join(map(str, numbers)) +
self.secnumber_suffix)
def add_fignumber(self, node):
def append_fignumber(figtype, figure_id):
if self.builder.name == 'singlehtml':
key = (self.docnames[-1], figtype)
else:
key = figtype
if figure_id in self.builder.fignumbers.get(key, {}):
self.body.append('<span class="caption-number">')
prefix = self.builder.config.numfig_format.get(figtype)
if prefix is None:
msg = 'numfig_format is not defined for %s' % figtype
self.builder.warn(msg)
else:
numbers = self.builder.fignumbers[key][figure_id]
self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
self.body.append('</span>')
figtype = self.builder.env.domains['std'].get_figtype(node)
if figtype:
if len(node['ids']) == 0:
                msg = 'No IDs assigned for %s node' % node.tagname
self.builder.env.warn_node(msg, node)
else:
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
def generate_targets_for_listing(self, node):
"""Generate hyperlink targets for listings.
        The original visit_bullet_list(), visit_definition_list() and
        visit_enumerated_list() generate hyperlink targets inside the listing
        tags (<ul>, <ol> and <dl>) when multiple IDs are assigned to a
        listing, which is an invalid DOM structure (a bug in docutils <= 0.12).
        This method emits the hyperlink targets before the listing instead,
        producing a valid DOM structure.
"""
for id in node['ids'][1:]:
self.body.append('<span id="%s"></span>' % id)
node['ids'].remove(id)
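    # For example (editor's note): a bullet list carrying ids ['id1', 'id2']
    # is rendered as '<span id="id2"></span><ul id="id1">...' -- the extra
    # target is hoisted out in front instead of being nested inside the <ul>.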
# overwritten
def visit_bullet_list(self, node):
if len(node) == 1 and node[0].tagname == 'toctree':
# avoid emitting empty <ul></ul>
raise nodes.SkipNode
self.generate_targets_for_listing(node)
BaseTranslator.visit_bullet_list(self, node)
# overwritten
def visit_enumerated_list(self, node):
self.generate_targets_for_listing(node)
BaseTranslator.visit_enumerated_list(self, node)
# overwritten
def visit_title(self, node):
BaseTranslator.visit_title(self, node)
self.add_secnumber(node)
self.add_fignumber(node.parent)
if isinstance(node.parent, nodes.table):
self.body.append('<span class="caption-text">')
def depart_title(self, node):
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and
node.parent.hasattr('ids') and node.parent['ids']):
# add permalink anchor
if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'):
self.body.append(u'</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] +
u'title="%s">%s' % (
_('Permalink to this headline'),
self.permalink_text))
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
self.add_permalink_ref(node.parent, _('Permalink to this table'))
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
BaseTranslator.depart_title(self, node)
# overwritten
def visit_literal_block(self, node):
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
return BaseTranslator.visit_literal_block(self, node)
lang = self.highlightlang
linenos = node.rawsource.count('\n') >= \
self.highlightlinenothreshold - 1
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
if lang is self.highlightlang_base:
# only pass highlighter options for original language
opts = self.highlightopts
else:
opts = {}
def warner(msg, **kwargs):
self.builder.warn(msg, (self.builder.current_docname, node.line), **kwargs)
highlighted = self.highlighter.highlight_block(
node.rawsource, lang, opts=opts, warn=warner, linenos=linenos,
**highlight_args)
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s' % lang)
self.body.append(starttag + highlighted + '</div>\n')
raise nodes.SkipNode
def visit_caption(self, node):
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('<div class="code-block-caption">')
else:
BaseTranslator.visit_caption(self, node)
self.add_fignumber(node.parent)
self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
def depart_caption(self, node):
self.body.append('</span>')
# append permalink if available
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.add_permalink_ref(node.parent, _('Permalink to this code'))
elif isinstance(node.parent, nodes.figure):
image_nodes = node.parent.traverse(nodes.image)
target_node = image_nodes and image_nodes[0] or node.parent
self.add_permalink_ref(target_node, _('Permalink to this image'))
elif node.parent.get('toctree'):
self.add_permalink_ref(node.parent.parent, _('Permalink to this toctree'))
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('</div>\n')
else:
BaseTranslator.depart_caption(self, node)
def visit_doctest_block(self, node):
self.visit_literal_block(node)
# overwritten to add the <div> (for XHTML compliance)
def visit_block_quote(self, node):
self.body.append(self.starttag(node, 'blockquote') + '<div>')
def depart_block_quote(self, node):
self.body.append('</div></blockquote>\n')
# overwritten
def visit_literal(self, node):
self.body.append(self.starttag(node, 'code', '',
CLASS='docutils literal'))
self.protect_literal_text += 1
def depart_literal(self, node):
self.protect_literal_text -= 1
self.body.append('</code>')
def visit_productionlist(self, node):
self.body.append(self.starttag(node, 'pre'))
names = []
for production in node:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
for production in node:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.starttag(production, 'strong', ''))
self.body.append(lastname + '</strong> ::= ')
elif lastname is not None:
self.body.append('%s ' % (' ' * len(lastname)))
production.walkabout(self)
self.body.append('\n')
self.body.append('</pre>\n')
raise nodes.SkipNode
def depart_productionlist(self, node):
pass
def visit_production(self, node):
pass
def depart_production(self, node):
pass
def visit_centered(self, node):
self.body.append(self.starttag(node, 'p', CLASS="centered") +
'<strong>')
def depart_centered(self, node):
self.body.append('</strong></p>')
# overwritten
def should_be_compact_paragraph(self, node):
"""Determine if the <p> tags around paragraph can be omitted."""
if isinstance(node.parent, addnodes.desc_content):
# Never compact desc_content items.
return False
if isinstance(node.parent, addnodes.versionmodified):
# Never compact versionmodified nodes.
return False
return BaseTranslator.should_be_compact_paragraph(self, node)
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_highlightlang(self, node):
self.highlightlang = node['lang']
self.highlightlinenothreshold = node['linenothreshold']
def depart_highlightlang(self, node):
pass
def visit_download_reference(self, node):
if self.builder.download_support and node.hasattr('filename'):
self.body.append(
'<a class="reference download internal" href="%s" download="">' %
posixpath.join(self.builder.dlpath, node['filename']))
self.context.append('</a>')
else:
self.context.append('')
def depart_download_reference(self, node):
self.body.append(self.context.pop())
# overwritten
def visit_image(self, node):
olduri = node['uri']
# rewrite the URI if the environment knows about it
if olduri in self.builder.images:
node['uri'] = posixpath.join(self.builder.imgpath,
self.builder.images[olduri])
uri = node['uri']
        if uri.lower().endswith(('.svg', '.svgz')):
atts = {'src': uri}
if 'width' in node:
atts['width'] = node['width']
if 'height' in node:
atts['height'] = node['height']
atts['alt'] = node.get('alt', uri)
if 'align' in node:
self.body.append('<div align="%s" class="align-%s">' %
(node['align'], node['align']))
self.context.append('</div>\n')
else:
self.context.append('')
self.body.append(self.emptytag(node, 'img', '', **atts))
return
if 'scale' in node:
# Try to figure out image height and width. Docutils does that too,
# but it tries the final file name, which does not necessarily exist
# yet at the time the HTML file is written.
if not ('width' in node and 'height' in node):
size = get_image_size(os.path.join(self.builder.srcdir, olduri))
if size is None:
self.builder.env.warn_node('Could not obtain image size. '
':scale: option is ignored.', node)
else:
if 'width' not in node:
node['width'] = str(size[0])
if 'height' not in node:
node['height'] = str(size[1])
BaseTranslator.visit_image(self, node)
# overwritten
def depart_image(self, node):
        if node['uri'].lower().endswith(('.svg', '.svgz')):
self.body.append(self.context.pop())
else:
BaseTranslator.depart_image(self, node)
def visit_toctree(self, node):
# this only happens when formatting a toc from env.tocs -- in this
# case we don't want to include the subtree
raise nodes.SkipNode
def visit_index(self, node):
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
raise nodes.SkipNode
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_acks(self, node):
pass
def depart_acks(self, node):
pass
def visit_hlist(self, node):
self.body.append('<table class="hlist"><tr>')
def depart_hlist(self, node):
self.body.append('</tr></table>\n')
def visit_hlistcol(self, node):
self.body.append('<td>')
def depart_hlistcol(self, node):
self.body.append('</td>')
def visit_option_group(self, node):
BaseTranslator.visit_option_group(self, node)
        self.context[-2] = self.context[-2].replace('&nbsp;', '&#160;')
def bulk_text_processor(self, text):
return text
# overwritten
def visit_Text(self, node):
text = node.astext()
encoded = self.encode(text)
if self.protect_literal_text:
# moved here from base class's visit_literal to support
# more formatting in literal nodes
for token in self.words_and_spaces.findall(encoded):
if token.strip():
# protect literal text from line wrapping
self.body.append('<span class="pre">%s</span>' % token)
elif token in ' \n':
# allow breaks at whitespace
self.body.append(token)
else:
# protect runs of multiple spaces; the last one can wrap
                    self.body.append('&#160;' * (len(token) - 1) + ' ')
else:
if self.in_mailto and self.settings.cloak_email_addresses:
encoded = self.cloak_email(encoded)
else:
encoded = self.bulk_text_processor(encoded)
self.body.append(encoded)
def visit_note(self, node):
self.visit_admonition(node, 'note')
def depart_note(self, node):
self.depart_admonition(node)
def visit_warning(self, node):
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
self.depart_admonition(node)
def visit_attention(self, node):
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
        self.depart_admonition(node)
def visit_caution(self, node):
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
        self.depart_admonition(node)
def visit_danger(self, node):
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
        self.depart_admonition(node)
def visit_error(self, node):
self.visit_admonition(node, 'error')
def depart_error(self, node):
        self.depart_admonition(node)
def visit_hint(self, node):
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
        self.depart_admonition(node)
def visit_important(self, node):
self.visit_admonition(node, 'important')
def depart_important(self, node):
        self.depart_admonition(node)
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
def depart_tip(self, node):
        self.depart_admonition(node)
# these are only handled specially in the SmartyPantsHTMLTranslator
def visit_literal_emphasis(self, node):
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
return self.visit_strong(node)
def depart_literal_strong(self, node):
return self.depart_strong(node)
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_termsep(self, node):
        warnings.warn('sphinx.addnodes.termsep will be removed in Sphinx 1.6. '
                      'This warning is displayed because some Sphinx extension '
                      'uses sphinx.addnodes.termsep. Please report it to the '
                      'author of the extension.', RemovedInSphinx16Warning)
self.body.append('<br />')
raise nodes.SkipNode
def visit_manpage(self, node):
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
return self.depart_literal_emphasis(node)
# overwritten to add even/odd classes
def visit_table(self, node):
self._table_row_index = 0
return BaseTranslator.visit_table(self, node)
def visit_row(self, node):
self._table_row_index += 1
if self._table_row_index % 2 == 0:
node['classes'].append('row-even')
else:
node['classes'].append('row-odd')
self.body.append(self.starttag(node, 'tr', ''))
node.column = 0
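    # (Editor's note) the counter starts at 0 and is incremented before the
    # check, so rows alternate <tr class="row-odd">, <tr class="row-even">,
    # ... giving themes a zebra-striping hook.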
def visit_entry(self, node):
BaseTranslator.visit_entry(self, node)
        if self.body[-1] == '&nbsp;':
            self.body[-1] = '&#160;'
def visit_field_list(self, node):
self._fieldlist_row_index = 0
return BaseTranslator.visit_field_list(self, node)
def visit_field(self, node):
self._fieldlist_row_index += 1
if self._fieldlist_row_index % 2 == 0:
node['classes'].append('field-even')
else:
node['classes'].append('field-odd')
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def visit_field_name(self, node):
context_count = len(self.context)
BaseTranslator.visit_field_name(self, node)
if context_count != len(self.context):
            self.context[-1] = self.context[-1].replace('&nbsp;', '&#160;')
def visit_math(self, node, math_env=''):
self.builder.warn('using "math" markup without a Sphinx math extension '
'active, please use one of the math extensions '
'described at http://sphinx-doc.org/ext/math.html',
(self.builder.current_docname, node.line))
raise nodes.SkipNode
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
class SmartyPantsHTMLTranslator(HTMLTranslator):
"""
Handle ordinary text via smartypants, converting quotes and dashes
to the correct entities.
"""
def __init__(self, *args, **kwds):
self.no_smarty = 0
HTMLTranslator.__init__(self, *args, **kwds)
def visit_literal(self, node):
self.no_smarty += 1
try:
# this raises SkipNode
HTMLTranslator.visit_literal(self, node)
finally:
self.no_smarty -= 1
def visit_literal_block(self, node):
self.no_smarty += 1
try:
HTMLTranslator.visit_literal_block(self, node)
except nodes.SkipNode:
# HTMLTranslator raises SkipNode for simple literal blocks,
# but not for parsed literal blocks
self.no_smarty -= 1
raise
def depart_literal_block(self, node):
HTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
def visit_literal_emphasis(self, node):
self.no_smarty += 1
self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
self.depart_emphasis(node)
self.no_smarty -= 1
def visit_literal_strong(self, node):
self.no_smarty += 1
self.visit_strong(node)
def depart_literal_strong(self, node):
self.depart_strong(node)
self.no_smarty -= 1
def visit_desc_signature(self, node):
self.no_smarty += 1
HTMLTranslator.visit_desc_signature(self, node)
def depart_desc_signature(self, node):
self.no_smarty -= 1
HTMLTranslator.depart_desc_signature(self, node)
def visit_productionlist(self, node):
self.no_smarty += 1
try:
HTMLTranslator.visit_productionlist(self, node)
finally:
self.no_smarty -= 1
def visit_option(self, node):
self.no_smarty += 1
HTMLTranslator.visit_option(self, node)
def depart_option(self, node):
self.no_smarty -= 1
HTMLTranslator.depart_option(self, node)
def bulk_text_processor(self, text):
if self.no_smarty <= 0:
return sphinx_smarty_pants(text)
return text
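# Illustrative sketch (editor's addition, not part of the original module): a
# Sphinx extension would typically install a customized translator such as the
# classes above via Sphinx.set_translator(), available since Sphinx 1.3.  The
# subclass and its overridden method below are hypothetical.
def _example_setup(app):
    class CSSHookTranslator(HTMLTranslator):
        def visit_emphasis(self, node):
            # add a CSS class hook on top of the default <em> markup;
            # depart_emphasis from the base class still emits </em>
            self.body.append(self.starttag(node, 'em', '', CLASS='doc-em'))
    app.set_translator('html', CSSHookTranslator)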
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.object_storage import base
from tempest import test
class ContainerTest(base.BaseObjectTest):
def setUp(self):
super(ContainerTest, self).setUp()
self.containers = []
def tearDown(self):
self.delete_containers(self.containers)
super(ContainerTest, self).tearDown()
def _create_container(self):
# setup container
container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(container_name)
self.containers.append(container_name)
return container_name
def _create_object(self, container_name, object_name=None):
# setup object
if object_name is None:
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.arbitrary_string()
self.object_client.create_object(container_name,
object_name,
data)
return object_name
@test.attr(type='smoke')
@test.idempotent_id('92139d73-7819-4db1-85f8-3f2f22a8d91f')
def test_create_container(self):
container_name = data_utils.rand_name(name='TestContainer')
resp, body = self.container_client.create_container(container_name)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@test.attr(type='smoke')
@test.idempotent_id('49f866ed-d6af-4395-93e7-4187eb56d322')
def test_create_container_overwrite(self):
# overwrite container with the same name
container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(container_name)
self.containers.append(container_name)
resp, _ = self.container_client.create_container(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
@test.attr(type='smoke')
@test.idempotent_id('c2ac4d59-d0f5-40d5-ba19-0635056d48cd')
def test_create_container_with_metadata_key(self):
        # create container with a blank metadata value
container_name = data_utils.rand_name(name='TestContainer')
metadata = {'test-container-meta': ''}
resp, _ = self.container_client.create_container(
container_name,
metadata=metadata)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
container_name)
        # if the value of metadata is blank, metadata is not registered
        # on the server
self.assertNotIn('x-container-meta-test-container-meta', resp)
@test.attr(type='smoke')
@test.idempotent_id('e1e8df32-7b22-44e1-aa08-ccfd8d446b58')
def test_create_container_with_metadata_value(self):
# create container with metadata value
container_name = data_utils.rand_name(name='TestContainer')
metadata = {'test-container-meta': 'Meta1'}
resp, _ = self.container_client.create_container(
container_name,
metadata=metadata)
self.containers.append(container_name)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertIn('x-container-meta-test-container-meta', resp)
self.assertEqual(resp['x-container-meta-test-container-meta'],
metadata['test-container-meta'])
@test.attr(type='smoke')
@test.idempotent_id('24d16451-1c0c-4e4f-b59c-9840a3aba40e')
def test_create_container_with_remove_metadata_key(self):
        # create container with a blank value in remove_metadata
container_name = data_utils.rand_name(name='TestContainer')
metadata_1 = {'test-container-meta': 'Meta1'}
self.container_client.create_container(
container_name,
metadata=metadata_1)
self.containers.append(container_name)
metadata_2 = {'test-container-meta': ''}
resp, _ = self.container_client.create_container(
container_name,
remove_metadata=metadata_2)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertNotIn('x-container-meta-test-container-meta', resp)
@test.attr(type='smoke')
@test.idempotent_id('8a21ebad-a5c7-4e29-b428-384edc8cd156')
def test_create_container_with_remove_metadata_value(self):
# create container with remove metadata
container_name = data_utils.rand_name(name='TestContainer')
metadata = {'test-container-meta': 'Meta1'}
self.container_client.create_container(container_name,
metadata=metadata)
self.containers.append(container_name)
resp, _ = self.container_client.create_container(
container_name,
remove_metadata=metadata)
self.assertHeaders(resp, 'Container', 'PUT')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertNotIn('x-container-meta-test-container-meta', resp)
@test.attr(type='smoke')
@test.idempotent_id('95d3a249-b702-4082-a2c4-14bb860cf06a')
def test_delete_container(self):
# create a container
container_name = self._create_container()
# delete container, success asserted within
resp, _ = self.container_client.delete_container(container_name)
self.assertHeaders(resp, 'Container', 'DELETE')
self.containers.remove(container_name)
@test.attr(type='smoke')
@test.idempotent_id('312ff6bd-5290-497f-bda1-7c5fec6697ab')
def test_list_container_contents(self):
# get container contents list
container_name = self._create_container()
object_name = self._create_object(container_name)
resp, object_list = self.container_client.list_container_contents(
container_name)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name, object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('4646ac2d-9bfb-4c7d-a3c5-0f527402b3df')
def test_list_container_contents_with_no_object(self):
# get empty container contents list
container_name = self._create_container()
resp, object_list = self.container_client.list_container_contents(
container_name)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual('', object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('fe323a32-57b9-4704-a996-2e68f83b09bc')
def test_list_container_contents_with_delimiter(self):
# get container contents list using delimiter param
container_name = self._create_container()
object_name = data_utils.rand_name(name='TestObject/')
self._create_object(container_name, object_name)
params = {'delimiter': '/'}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name.split('/')[0], object_list.strip('/\n'))
@test.attr(type='smoke')
@test.idempotent_id('55b4fa5c-e12e-4ca9-8fcf-a79afe118522')
def test_list_container_contents_with_end_marker(self):
# get container contents list using end_marker param
container_name = self._create_container()
object_name = self._create_object(container_name)
params = {'end_marker': 'ZzzzObject1234567890'}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name, object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('196f5034-6ab0-4032-9da9-a937bbb9fba9')
def test_list_container_contents_with_format_json(self):
# get container contents list using format_json param
container_name = self._create_container()
self._create_object(container_name)
params = {'format': 'json'}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertIsNotNone(object_list)
self.assertTrue([c['name'] for c in object_list])
self.assertTrue([c['hash'] for c in object_list])
self.assertTrue([c['bytes'] for c in object_list])
self.assertTrue([c['content_type'] for c in object_list])
self.assertTrue([c['last_modified'] for c in object_list])
@test.attr(type='smoke')
@test.idempotent_id('655a53ca-4d15-408c-a377-f4c6dbd0a1fa')
def test_list_container_contents_with_format_xml(self):
# get container contents list using format_xml param
container_name = self._create_container()
self._create_object(container_name)
params = {'format': 'xml'}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertIsNotNone(object_list)
self.assertEqual(object_list.tag, 'container')
        self.assertIn('name', object_list.keys())
self.assertEqual(object_list.find(".//object").tag, 'object')
self.assertEqual(object_list.find(".//name").tag, 'name')
self.assertEqual(object_list.find(".//hash").tag, 'hash')
self.assertEqual(object_list.find(".//bytes").tag, 'bytes')
self.assertEqual(object_list.find(".//content_type").tag,
'content_type')
self.assertEqual(object_list.find(".//last_modified").tag,
'last_modified')
@test.attr(type='smoke')
@test.idempotent_id('297ec38b-2b61-4ff4-bcd1-7fa055e97b61')
def test_list_container_contents_with_limit(self):
# get container contents list using limit param
container_name = self._create_container()
object_name = self._create_object(container_name)
params = {'limit': data_utils.rand_int_id(1, 10000)}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name, object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('c31ddc63-2a58-4f6b-b25c-94d2937e6867')
def test_list_container_contents_with_marker(self):
# get container contents list using marker param
container_name = self._create_container()
object_name = self._create_object(container_name)
params = {'marker': 'AaaaObject1234567890'}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name, object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('58ca6cc9-6af0-408d-aaec-2a6a7b2f0df9')
def test_list_container_contents_with_path(self):
# get container contents list using path param
container_name = self._create_container()
object_name = data_utils.rand_name(name='Swift/TestObject')
self._create_object(container_name, object_name)
params = {'path': 'Swift'}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name, object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('77e742c7-caf2-4ec9-8aa4-f7d509a3344c')
def test_list_container_contents_with_prefix(self):
# get container contents list using prefix param
container_name = self._create_container()
object_name = self._create_object(container_name)
prefix_key = object_name[0:8]
params = {'prefix': prefix_key}
resp, object_list = self.container_client.list_container_contents(
container_name,
params=params)
self.assertHeaders(resp, 'Container', 'GET')
self.assertEqual(object_name, object_list.strip('\n'))
@test.attr(type='smoke')
@test.idempotent_id('96e68f0e-19ec-4aa2-86f3-adc6a45e14dd')
def test_list_container_metadata(self):
# List container metadata
container_name = self._create_container()
metadata = {'name': 'Pictures'}
self.container_client.update_container_metadata(
container_name,
metadata=metadata)
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertHeaders(resp, 'Container', 'HEAD')
self.assertIn('x-container-meta-name', resp)
self.assertEqual(resp['x-container-meta-name'], metadata['name'])
@test.attr(type='smoke')
@test.idempotent_id('a2faf936-6b13-4f8d-92a2-c2278355821e')
def test_list_no_container_metadata(self):
# HEAD container without metadata
container_name = self._create_container()
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertHeaders(resp, 'Container', 'HEAD')
self.assertNotIn('x-container-meta-', str(resp))
@test.attr(type='smoke')
@test.idempotent_id('cf19bc0b-7e16-4a5a-aaed-cb0c2fe8deef')
    def test_update_container_metadata_with_create_and_delete_metadata(self):
# Send one request of adding and deleting metadata
container_name = data_utils.rand_name(name='TestContainer')
metadata_1 = {'test-container-meta1': 'Meta1'}
self.container_client.create_container(container_name,
metadata=metadata_1)
self.containers.append(container_name)
metadata_2 = {'test-container-meta2': 'Meta2'}
resp, _ = self.container_client.update_container_metadata(
container_name,
metadata=metadata_2,
remove_metadata=metadata_1)
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertNotIn('x-container-meta-test-container-meta1', resp)
self.assertIn('x-container-meta-test-container-meta2', resp)
self.assertEqual(resp['x-container-meta-test-container-meta2'],
metadata_2['test-container-meta2'])
@test.attr(type='smoke')
@test.idempotent_id('2ae5f295-4bf1-4e04-bfad-21e54b62cec5')
def test_update_container_metadata_with_create_metadata(self):
# update container metadata using add metadata
container_name = self._create_container()
metadata = {'test-container-meta1': 'Meta1'}
resp, _ = self.container_client.update_container_metadata(
container_name,
metadata=metadata)
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertIn('x-container-meta-test-container-meta1', resp)
self.assertEqual(resp['x-container-meta-test-container-meta1'],
metadata['test-container-meta1'])
@test.attr(type='smoke')
@test.idempotent_id('3a5ce7d4-6e4b-47d0-9d87-7cd42c325094')
def test_update_container_metadata_with_delete_metadata(self):
# update container metadata using delete metadata
container_name = data_utils.rand_name(name='TestContainer')
metadata = {'test-container-meta1': 'Meta1'}
self.container_client.create_container(container_name,
metadata=metadata)
self.containers.append(container_name)
resp, _ = self.container_client.delete_container_metadata(
container_name,
metadata=metadata)
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertNotIn('x-container-meta-test-container-meta1', resp)
@test.attr(type='smoke')
@test.idempotent_id('31f40a5f-6a52-4314-8794-cd89baed3040')
    def test_update_container_metadata_with_create_metadata_key(self):
        # update container metadata with a blank value of metadata
container_name = self._create_container()
metadata = {'test-container-meta1': ''}
resp, _ = self.container_client.update_container_metadata(
container_name,
metadata=metadata)
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(
container_name)
self.assertNotIn('x-container-meta-test-container-meta1', resp)
@test.attr(type='smoke')
@test.idempotent_id('a2e36378-6f1f-43f4-840a-ffd9cfd61914')
def test_update_container_metadata_with_delete_metadata_key(self):
        # update container metadata with a blank value of metadata
container_name = data_utils.rand_name(name='TestContainer')
metadata = {'test-container-meta1': 'Meta1'}
self.container_client.create_container(container_name,
metadata=metadata)
self.containers.append(container_name)
metadata = {'test-container-meta1': ''}
resp, _ = self.container_client.delete_container_metadata(
container_name,
metadata=metadata)
self.assertHeaders(resp, 'Container', 'POST')
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertNotIn('x-container-meta-test-container-meta1', resp)
| |
# Copyright (C) 2007 Alexandre Conrad, alexandre (dot) conrad (at) gmail (dot) com
#
# This module is part of FormAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import cgi
import warnings
import logging
logger = logging.getLogger('formalchemy.' + __name__)
MIN_SA_VERSION = '0.4.5'
from sqlalchemy import __version__
# compare numeric version components; a plain string comparison would sort '0.10' before '0.4.5'
if [int(x) for x in __version__.split('.') if x.isdigit()] < [int(x) for x in MIN_SA_VERSION.split('.') if x.isdigit()]:
    raise ImportError('Version %s or later of SQLAlchemy required' % MIN_SA_VERSION)
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import SynonymProperty
from sqlalchemy.orm import compile_mappers, object_session, class_mapper
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy.orm.dynamic import DynamicAttributeImpl
from sqlalchemy.util import OrderedDict
from formalchemy import fields
from formalchemy import renderers
from formalchemy import fatypes
compile_mappers() # initializes InstrumentedAttributes
try:
# 0.5
from sqlalchemy.orm.attributes import manager_of_class
def _get_attribute(cls, p):
manager = manager_of_class(cls)
return manager[p.key]
except ImportError:
# 0.4
def _get_attribute(cls, p):
return getattr(cls, p.key)
def prettify(text):
"""
Turn an attribute name into something prettier, for a default label where none is given.
>>> prettify("my_column_name")
'My column name'
"""
return text.replace("_", " ").capitalize()
class SimpleMultiDict(dict):
"""
Adds `getone`, `getall` methods to dict. Assumes that values are either
a string or a list of strings.
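    Illustrative doctest (editor's sketch, following the module's own
    doctest style):
    >>> d = SimpleMultiDict({'a': '1', 'b': ['2', '3']})
    >>> d.getone('a')
    '1'
    >>> d.getone('b')
    '2'
    >>> d.getall('b')
    ['2', '3']
    >>> d.getall('missing')
    []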
"""
def getone(self, key):
if key not in self:
raise KeyError(key)
v = dict.get(self, key)
if v is None or isinstance(v, basestring) or isinstance(v, cgi.FieldStorage):
return v
return v[0]
def getall(self, key):
v = dict.get(self, key)
if v is None:
return []
elif isinstance(v, basestring):
return [v]
return v
class ModelRenderer(object):
"""
The `ModelRenderer` class is the superclass for all classes needing to deal
with `model` access and supporting rendering capabilities.
"""
prettify = staticmethod(prettify)
def __init__(self, model, session=None, data=None, prefix=None):
"""
- `model`:
a SQLAlchemy mapped class or instance. New object creation
should be done by passing the class, which will need a default
(no-parameter) constructor. After construction or binding of
the :class:`~formalchemy.forms.FieldSet`, the instantiated object will be available as
the `.model` attribute.
- `session=None`:
the session to use for queries (for relations). If `model` is associated
with a session, that will be used by default. (Objects mapped with a
`scoped_session
<http://www.sqlalchemy.org/docs/05/session.html#contextual-thread-local-sessions>`_
will always have a session. Other objects will
also have a session if they were loaded by a Query.)
- `data=None`:
dictionary-like object of user-submitted data to validate and/or
sync to the `model`. Scalar attributes should have a single
value in the dictionary; multi-valued relations should have a
list, even if there are zero or one values submitted. Currently,
pylons request.params() objects and plain dictionaries are known
to work.
- `prefix=None`:
the prefix to prepend to html name attributes. This is useful to avoid
field name conflicts when there are two fieldsets creating objects
from the same model in one html page. (This is not needed when
editing existing objects, since the object primary key is used as part
of the field name.)
Only the `model` parameter is required.
After binding, :class:`~formalchemy.forms.FieldSet`'s `model` attribute will always be an instance.
If you bound to a class, `FormAlchemy` will call its constructor with no
arguments to create an appropriate instance.
.. NOTE::
This instance will not be added to the current session, even if you are using `Session.mapper`.
All of these parameters may be overridden by the `bind` or `rebind`
methods. The `bind` method returns a new instance bound as specified,
while `rebind` modifies the current :class:`~formalchemy.forms.FieldSet` and has
no return value. (You may not `bind` to a different type of SQLAlchemy
model than the initial one -- if you initially bind to a `User`, you
must subsequently bind `User`'s to that :class:`~formalchemy.forms.FieldSet`.)
Typically, you will configure a :class:`~formalchemy.forms.FieldSet` once in
your common form library, then `bind` specific instances later for editing. (The
`bind` method is thread-safe; `rebind` is not.) Thus:
load stuff:
>>> from formalchemy.tests import FieldSet, User, session
now, in `library.py`
>>> fs = FieldSet(User)
>>> fs.configure(options=[]) # put all configuration stuff here
and in `controller.py`
>>> from library import fs
>>> user = session.query(User).first()
>>> fs2 = fs.bind(user)
>>> html = fs2.render()
The `render_fields` attribute is an OrderedDict of all the `Field`'s
that have been configured, keyed by name. The order of the fields
is the order in `include`, or the order they were declared
in the SQLAlchemy model class if no `include` is specified.
The `_fields` attribute is an OrderedDict of all the `Field`'s
the ModelRenderer knows about, keyed by name, in their
unconfigured state. You should not normally need to access
`_fields` directly.
(Note that although equivalent `Field`'s (fields referring to
the same attribute on the SQLAlchemy model) will equate with
the == operator, they are NOT necessarily the same `Field`
instance. Stick to referencing `Field`'s from their parent
`FieldSet` to always get the "right" instance.)
"""
self._fields = OrderedDict()
self._render_fields = OrderedDict()
self.model = self.session = None
self.prefix = prefix
if not model:
raise Exception('model parameter may not be None')
ModelRenderer.rebind(self, model, session, data)
cls = isinstance(self.model, type) and self.model or type(self.model)
try:
class_mapper(cls)
except:
# this class is not managed by SA. extract any raw Fields defined on it.
keys = cls.__dict__.keys()
keys.sort(lambda a, b: cmp(a.lower(), b.lower())) # 2.3 support
for key in keys:
field = cls.__dict__[key]
if isinstance(field, fields.Field):
if field.name and field.name != key:
                        raise Exception('Fields in a non-mapped class must have the same name as their attribute. Do not manually give them a name.')
field.name = field.key = key
self.append(field)
if not self._fields:
raise Exception("not bound to a SA instance, and no manual Field definitions found")
else:
# SA class.
# load synonyms so we can ignore them
synonyms = set(p for p in class_mapper(cls).iterate_properties
if isinstance(p, SynonymProperty))
# attributes we're interested in
attrs = []
for p in class_mapper(cls).iterate_properties:
attr = _get_attribute(cls, p)
if ((isinstance(p, SynonymProperty) or attr.property.key not in (s.name for s in synonyms))
and not isinstance(attr.impl, DynamicAttributeImpl)):
attrs.append(attr)
# sort relations last before storing in the OrderedDict
L = [fields.AttributeField(attr, self) for attr in attrs]
L.sort(lambda a, b: cmp(a.is_relation, b.is_relation)) # note, key= not used for 2.3 support
self._fields.update((field.key, field) for field in L)
def append(self, field):
"""Append a Field to the FieldSet.
By default, this Field will be included in the rendered form or table.
"""
if not isinstance(field, fields.Field):
raise ValueError('Can only add Field objects; got %s instead' % field)
field.parent = self
_fields = self._render_fields or self._fields
_fields[field.name] = field
return self # Cascade pattern
def add(self, field):
warnings.warn(DeprecationWarning('FieldSet.add is deprecated. Use FieldSet.append instead.'))
self.append(field)
def extend(self, fields):
"""Add a list of fields. By default, each Field will be included in the
rendered form or table."""
for field in fields:
self.append(field)
return self # Cascade pattern
def insert(self, field, new_field):
"""Insert a new field before an existing field"""
fields_ = self._render_fields or self._fields
if not isinstance(new_field, fields.Field):
            raise ValueError('Can only add Field objects; got %s instead' % new_field)
if isinstance(field, fields.AbstractField):
try:
index = fields_.keys().index(field.name)
except ValueError:
raise ValueError('%s not in fields' % field.name)
else:
raise TypeError('field must be a Field. Got %r' % new_field)
items = fields_.items()
new_field.parent = self
items.insert(index, (new_field.name, new_field))
if self._render_fields:
self._render_fields = OrderedDict(items)
else:
self._fields = OrderedDict(items)
return self # Cascade pattern
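    # e.g. (editor's illustration; 'nickname' is a hypothetical manual Field):
    #     fs.insert(fs.email, Field('nickname'))
    # places the new 'nickname' field immediately before the 'email' field.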
def modify(self, *args):
"""Modify fields with their new value, without modifying the order"""
for override in args:
if override.name not in self._render_fields.keys():
raise ValueError("Field %s isn't part of the fields to render, or you didn't configure you FieldSet yet" % override)
for i, field in enumerate(self._render_fields):
if field == override.key:
self._render_fields[field] = override
break
return self
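    # e.g. (editor's illustration): fs.modify(fs.orders.checkbox()) swaps the
    # already-configured 'orders' field for its checkbox-rendered variant
    # without changing the field order.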
def render_fields(self):
"""
The set of attributes that will be rendered, as a (ordered)
dict of `{fieldname: Field}` pairs. If you haven't called configure
with exclude/include, then this will be the list of default Fields
as found by introspecting the SQLAlchemy model.
"""
if not self._render_fields:
self._render_fields = OrderedDict([(field.key, field) for field in self._get_fields()])
return self._render_fields
render_fields = property(render_fields)
def configure(self, pk=False, exclude=[], include=[], options=[]):
"""
The `configure` method specifies a set of attributes to be rendered.
By default, all attributes are rendered except primary keys and
foreign keys. But, relations `based on` foreign keys `will` be
rendered. For example, if an `Order` has a `user_id` FK and a `user`
relation based on it, `user` will be rendered (as a select box of
`User`'s, by default) but `user_id` will not.
Parameters:
* `pk=False`:
set to True to include primary key columns
* `exclude=[]`:
an iterable of attributes to exclude. Other attributes will
be rendered normally
* `include=[]`:
an iterable of attributes to include. Other attributes will
not be rendered
* `options=[]`:
an iterable of modified attributes. The set of attributes to
be rendered is unaffected
* `global_validator=None`:
              `global_validator` should be a function that performs
validations that need to know about the entire form.
* `focus=True`:
the attribute (e.g., `fs.orders`) whose rendered input element
gets focus. Default value is True, meaning, focus the first
element. False means do not focus at all.
Only one of {`include`, `exclude`} may be specified.
Note that there is no option to include foreign keys. This is
deliberate. Use `include` if you really need to manually edit FKs.
If `include` is specified, fields will be rendered in the order given
in `include`. Otherwise, fields will be rendered in alphabetical
order.
Examples: given a `FieldSet` `fs` bound to a `User` instance as a
model with primary key `id` and attributes `name` and `email`, and a
relation `orders` of related Order objects, the default will be to
render `name`, `email`, and `orders`. To render the orders list as
checkboxes instead of a select, you could specify::
>>> from formalchemy.tests import FieldSet, User
>>> fs = FieldSet(User)
>>> fs.configure(options=[fs.orders.checkbox()])
To render only name and email,
>>> fs.configure(include=[fs.name, fs.email])
or
>>> fs.configure(exclude=[fs.orders])
Of course, you can include modifications to a field in the `include`
parameter, such as here, to render name and options-as-checkboxes:
>>> fs.configure(include=[fs.name, fs.orders.checkbox()])
"""
self._render_fields = OrderedDict([(field.key, field) for field in self._get_fields(pk, exclude, include, options)])
def bind(self, model=None, session=None, data=None):
"""
Return a copy of this FieldSet or Grid, bound to the given
`model`, `session`, and `data`. The parameters to this method are the
same as in the constructor.
Often you will create and `configure` a FieldSet or Grid at application
startup, then `bind` specific instances to it for actual editing or display.
"""
if not (model or session or data):
raise Exception('must specify at least one of {model, session, data}')
if not model:
if not self.model:
raise Exception('model must be specified when none is already set')
model = fields._pk(self.model) is None and type(self.model) or self.model
# copy.copy causes a stacktrace on python 2.5.2/OSX + pylons. unable to reproduce w/ simpler sample.
mr = object.__new__(self.__class__)
mr.__dict__ = dict(self.__dict__)
# two steps so bind's error checking can work
ModelRenderer.rebind(mr, model, session, data)
mr._fields = OrderedDict([(key, renderer.bind(mr)) for key, renderer in self._fields.iteritems()])
if self._render_fields:
mr._render_fields = OrderedDict([(field.key, field) for field in
[field.bind(mr) for field in self._render_fields.itervalues()]])
return mr
def rebind(self, model=None, session=None, data=None):
"""
Like `bind`, but acts on this instance. No return value.
Not all parameters are treated the same; specifically, what happens if they are NOT specified is different:
* if `model` is not specified, the old model is used
* if `session` is not specified, FA tries to re-guess session from the model
* if data is not specified, it is rebound to None.
"""
original_model = model
if model:
if isinstance(model, type):
try:
model = model()
except:
raise Exception('%s appears to be a class, not an instance, but FormAlchemy cannot instantiate it. (Make sure all constructor parameters are optional!)' % model)
# take object out of session, if present
try:
_obj_session = object_session(model)
except AttributeError:
pass # non-SA object; doesn't need session
else:
if _obj_session:
_obj_session.expunge(model)
elif object_session(model):
# for instances of mapped classes, require that the instance have a PK already
try:
class_mapper(type(model))
except:
pass
else:
if fields._pk(model) is None:
raise Exception('Mapped instances to be bound must either have a primary key set or not be in a Session. When creating a new object, bind the class instead [i.e., bind(User), not bind(User())]')
if self.model and type(self.model) != type(model):
raise ValueError('You can only bind to another object of the same type you originally bound to (%s), not %s' % (type(self.model), type(model)))
self.model = model
self._bound_pk = fields._pk(model)
# Assign new data
if data is None:
self.data = None
elif hasattr(data, 'getall') and hasattr(data, 'getone'):
self.data = data
else:
try:
self.data = SimpleMultiDict(data)
except:
                raise Exception('unsupported data object %s. currently only dicts and Paste multidicts are supported' % data)
# Reset Field deserialization caches:
_fields = self._render_fields or self._fields
for f in _fields:
self[f]._reset_cache()
if session:
if not isinstance(session, Session) and not isinstance(session, ScopedSession):
raise ValueError('Invalid SQLAlchemy session object %s' % session)
self.session = session
elif model:
if '_obj_session' in locals():
# model may be a temporary object, expunged from its session -- grab the existing reference
self.session = _obj_session
else:
try:
o_session = object_session(model)
except AttributeError:
pass # non-SA object
else:
if o_session:
self.session = o_session
# if we didn't just instantiate (in which case object_session will be None),
# the session should be the same as the object_session
if self.session and model == original_model:
try:
o_session = object_session(self.model)
except AttributeError:
pass # non-SA object
else:
if o_session and self.session is not o_session:
raise Exception('You may not explicitly bind to a session when your model already belongs to a different one')
def sync(self):
"""
Sync (copy to the corresponding attributes) the data passed to the constructor or `bind` to the `model`.
"""
if self.data is None:
raise Exception("No data bound; cannot sync")
for field in self.render_fields.itervalues():
field.sync()
if self.session:
self.session.add(self.model)
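    # Typical flow (editor's sketch): fs = fs.bind(user, data=form_data);
    # if fs.validate(): fs.sync()  -- validate() lives on the FieldSet
    # subclass, not in this base module.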
def _raw_fields(self):
return self._fields.values()
def _get_fields(self, pk=False, exclude=[], include=[], options=[]):
# sanity check
if include and exclude:
raise Exception('Specify at most one of include, exclude')
# help people who meant configure(include=[X]) but just wrote configure(X), resulting in pk getting the positional argument
if pk not in [True, False]:
raise ValueError('pk option must be True or False, not %s' % pk)
        # verify that parameters that should be iterables of Fields are
        for name, value in (('include', include), ('exclude', exclude), ('options', options)):
            try:
                L = list(value)
            except:
                raise ValueError('`%s` parameter should be an iterable' % name)
            for field in L:
                if not isinstance(field, fields.AbstractField):
                    raise TypeError('non-AbstractField object `%s` found in `%s`' % (field, name))
                if field not in self._fields.values():
                    raise ValueError('Unrecognized Field `%s` in `%s` -- did you mean to call append() first?' % (field, name))
# if include is given, those are the fields used. otherwise, include those not explicitly (or implicitly) excluded.
if not include:
ignore = list(exclude) # don't modify `exclude` directly to avoid surprising caller
if not pk:
ignore.extend([wrapper for wrapper in self._raw_fields() if wrapper.is_pk and not wrapper.is_collection])
ignore.extend([wrapper for wrapper in self._raw_fields() if wrapper.is_raw_foreign_key])
include = [field for field in self._raw_fields() if field not in ignore]
# in the returned list, replace any fields in `include` w/ the corresponding one in `options`, if present.
# this is a bit clunky because we want to
# 1. preserve the order given in `include`
# 2. not modify `include` (or `options`) directly; that could surprise the caller
options_dict = {} # create + update for 2.3's benefit
options_dict.update(dict([(wrapper, wrapper) for wrapper in options]))
L = []
for wrapper in include:
if wrapper in options_dict:
L.append(options_dict[wrapper])
else:
L.append(wrapper)
return L
def __getattr__(self, attrname):
try:
return self._render_fields[attrname]
except KeyError:
try:
return self._fields[attrname]
except KeyError:
raise AttributeError(attrname)
__getitem__ = __getattr__
def __setattr__(self, attrname, value):
if attrname not in ('_fields', '__dict__', 'focus') and \
(attrname in self._fields or isinstance(value, fields.AbstractField)):
raise AttributeError('Do not set field attributes manually. Use append() or configure() instead')
object.__setattr__(self, attrname, value)
def __delattr__(self, attrname):
if attrname in self._render_fields:
del self._render_fields[attrname]
elif attrname in self._fields:
raise RuntimeError("You try to delete a field but your form is not configured")
else:
raise AttributeError("field %s does not exist" % attrname)
__delitem__ = __delattr__
def render(self, **kwargs):
raise NotImplementedError()
class EditableRenderer(ModelRenderer):
default_renderers = {
fatypes.String: renderers.TextFieldRenderer,
fatypes.Integer: renderers.IntegerFieldRenderer,
fatypes.Float: renderers.FloatFieldRenderer,
fatypes.Numeric: renderers.FloatFieldRenderer,
fatypes.Boolean: renderers.CheckBoxFieldRenderer,
fatypes.DateTime: renderers.DateTimeFieldRenderer,
fatypes.Date: renderers.DateFieldRenderer,
fatypes.Time: renderers.TimeFieldRenderer,
fatypes.Binary: renderers.FileFieldRenderer,
'dropdown': renderers.SelectFieldRenderer,
'checkbox': renderers.CheckBoxSet,
'radio': renderers.RadioSet,
'password': renderers.PasswordFieldRenderer,
'textarea': renderers.TextAreaFieldRenderer,
}
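# A hedged usage sketch of the bind/validate/sync cycle implemented above.
# `FieldSet`, `User`, `session`, and `post_data` are assumed names from a
# typical FormAlchemy + SQLAlchemy setup, not definitions from this module.
def _example_bind_sync(FieldSet, User, session, post_data):
    fs = FieldSet(User)  # bind to the class when creating a new row
    fs = fs.bind(User, session=session, data=post_data)
    if fs.validate():
        fs.sync()  # copy the validated data onto fs.model, add it to the session
        return fs.model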
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
class DType(object):
"""Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
* `tf.float16`: 16-bit half-precision floating-point.
* `tf.float32`: 32-bit single-precision floating-point.
* `tf.float64`: 64-bit double-precision floating-point.
* `tf.bfloat16`: 16-bit truncated floating-point.
* `tf.complex64`: 64-bit single-precision complex.
* `tf.complex128`: 128-bit double-precision complex.
* `tf.int8`: 8-bit signed integer.
* `tf.uint8`: 8-bit unsigned integer.
* `tf.uint16`: 16-bit unsigned integer.
* `tf.int16`: 16-bit signed integer.
* `tf.int32`: 32-bit signed integer.
* `tf.int64`: 64-bit signed integer.
* `tf.bool`: Boolean.
* `tf.string`: String.
* `tf.qint8`: Quantized 8-bit signed integer.
* `tf.quint8`: Quantized 8-bit unsigned integer.
* `tf.qint16`: Quantized 16-bit signed integer.
* `tf.quint16`: Quantized 16-bit unsigned integer.
* `tf.qint32`: Quantized 32-bit signed integer.
In addition, variants of these types with the `_ref` suffix are
defined for reference-typed tensors.
The `tf.as_dtype()` function converts numpy types and string type
names to a `DType` object.
@@is_compatible_with
@@name
@@base_dtype
@@real_dtype
@@is_ref_dtype
@@as_ref
@@is_floating
@@is_complex
@@is_integer
@@is_quantized
@@is_unsigned
@@as_numpy_dtype
@@as_datatype_enum
"""
def __init__(self, type_enum):
"""Creates a new `DataType`.
NOTE(mrry): In normal circumstances, you should not need to
construct a `DataType` object directly. Instead, use the
`tf.as_dtype()` function.
Args:
type_enum: A `types_pb2.DataType` enum value.
Raises:
      TypeError: If `type_enum` is not a valid `types_pb2.DataType`.
"""
# TODO(mrry): Make the necessary changes (using __new__) to ensure
# that calling this returns one of the interned values.
type_enum = int(type_enum)
if (type_enum not in types_pb2.DataType.values()
or type_enum == types_pb2.DT_INVALID):
raise TypeError(
"type_enum is not a valid types_pb2.DataType: %s" % type_enum)
self._type_enum = type_enum
@property
def is_ref_dtype(self):
"""Returns `True` if this `DType` represents a reference type."""
return self._type_enum > 100
@property
def as_ref(self):
"""Returns a reference `DType` based on this `DType`."""
if self.is_ref_dtype:
return self
else:
return _INTERN_TABLE[self._type_enum + 100]
@property
def base_dtype(self):
"""Returns a non-reference `DType` based on this `DType`."""
if self.is_ref_dtype:
return _INTERN_TABLE[self._type_enum - 100]
else:
return self
@property
def real_dtype(self):
"""Returns the dtype correspond to this dtype's real part."""
base = self.base_dtype
if base == complex64:
return float32
elif base == complex128:
return float64
else:
return self
@property
def as_numpy_dtype(self):
"""Returns a `numpy.dtype` based on this `DType`."""
return _TF_TO_NP[self._type_enum]
@property
def as_datatype_enum(self):
"""Returns a `types_pb2.DataType` enum value based on this `DType`."""
return self._type_enum
@property
def is_integer(self):
"""Returns whether this is a (non-quantized) integer type."""
return (not self.is_quantized and
issubclass(self.as_numpy_dtype, np.integer))
@property
def is_floating(self):
"""Returns whether this is a (real) floating point type."""
return issubclass(self.as_numpy_dtype, np.floating)
@property
def is_complex(self):
"""Returns whether this is a complex floating point type."""
return self.base_dtype in (complex64, complex128)
@property
def is_quantized(self):
"""Returns whether this is a quantized data type."""
return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]
@property
def is_unsigned(self):
"""Returns whether this type is unsigned.
Non-numeric, unordered, and quantized types are not considered unsigned, and
this function returns `False`.
Returns:
Whether a `DType` is unsigned.
"""
try:
return self.min == 0
except TypeError:
return False
@property
def min(self):
"""Returns the minimum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
raise TypeError("Cannot find minimum value of %s." % self)
    # There is no simple way to get the min value of a dtype; we have to try
    # float and int types separately.
try:
return np.finfo(self.as_numpy_dtype()).min
    except:  # bare except, since the exceptions finfo may raise are not documented
try:
return np.iinfo(self.as_numpy_dtype()).min
except:
raise TypeError("Cannot find minimum value of %s." % self)
@property
def max(self):
"""Returns the maximum representable value in this data type.
Raises:
TypeError: if this is a non-numeric, unordered, or quantized type.
"""
if (self.is_quantized or self.base_dtype in
(bool, string, complex64, complex128)):
raise TypeError("Cannot find maximum value of %s." % self)
    # There is no simple way to get the max value of a dtype; we have to try
    # float and int types separately.
try:
return np.finfo(self.as_numpy_dtype()).max
    except:  # bare except, since the exceptions finfo may raise are not documented
try:
return np.iinfo(self.as_numpy_dtype()).max
except:
raise TypeError("Cannot find maximum value of %s." % self)
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
DType(T) .is_compatible_with(DType(T).as_ref) == True
DType(T).as_ref.is_compatible_with(DType(T)) == False
DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = as_dtype(other)
return self._type_enum in (
other.as_datatype_enum, other.base_dtype.as_datatype_enum)
def __eq__(self, other):
"""Returns True iff this DType refers to the same type as `other`."""
if other is None:
return False
try:
dtype = as_dtype(other).as_datatype_enum
return self._type_enum == dtype
except TypeError:
return False
def __ne__(self, other):
"""Returns True iff self != other."""
return not self.__eq__(other)
@property
def name(self):
"""Returns the string name for this `DType`."""
return _TYPE_TO_STRING[self._type_enum]
def __str__(self):
return "<dtype: %r>" % self.name
def __repr__(self):
return "tf." + self.name
def __hash__(self):
return self._type_enum
@property
def size(self):
return np.dtype(self.as_numpy_dtype).itemsize
# Define standard wrappers for the types_pb2.DataType enum.
float16 = DType(types_pb2.DT_HALF)
half = float16
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
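# A hedged sketch of DType introspection using the wrappers defined above;
# the `_example_` prefix marks it as illustrative only (call after import).
def _example_dtype_properties():
  assert float32.is_floating and not float32.is_integer
  assert int64.min == -(2 ** 63) and int64.max == 2 ** 63 - 1
  assert complex64.real_dtype is float32
  assert float32_ref.base_dtype is float32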
# Maintain an intern table so that we don't have to create a large
# number of small objects.
_INTERN_TABLE = {
types_pb2.DT_HALF: float16,
types_pb2.DT_FLOAT: float32,
types_pb2.DT_DOUBLE: float64,
types_pb2.DT_INT32: int32,
types_pb2.DT_UINT8: uint8,
types_pb2.DT_UINT16: uint16,
types_pb2.DT_INT16: int16,
types_pb2.DT_INT8: int8,
types_pb2.DT_STRING: string,
types_pb2.DT_COMPLEX64: complex64,
types_pb2.DT_COMPLEX128: complex128,
types_pb2.DT_INT64: int64,
types_pb2.DT_BOOL: bool,
types_pb2.DT_QINT8: qint8,
types_pb2.DT_QUINT8: quint8,
types_pb2.DT_QINT16: qint16,
types_pb2.DT_QUINT16: quint16,
types_pb2.DT_QINT32: qint32,
types_pb2.DT_BFLOAT16: bfloat16,
types_pb2.DT_HALF_REF: float16_ref,
types_pb2.DT_FLOAT_REF: float32_ref,
types_pb2.DT_DOUBLE_REF: float64_ref,
types_pb2.DT_INT32_REF: int32_ref,
types_pb2.DT_UINT8_REF: uint8_ref,
types_pb2.DT_UINT16_REF: uint16_ref,
types_pb2.DT_INT16_REF: int16_ref,
types_pb2.DT_INT8_REF: int8_ref,
types_pb2.DT_STRING_REF: string_ref,
types_pb2.DT_COMPLEX64_REF: complex64_ref,
types_pb2.DT_COMPLEX128_REF: complex128_ref,
types_pb2.DT_INT64_REF: int64_ref,
types_pb2.DT_BOOL_REF: bool_ref,
types_pb2.DT_QINT8_REF: qint8_ref,
types_pb2.DT_QUINT8_REF: quint8_ref,
types_pb2.DT_QINT16_REF: qint16_ref,
types_pb2.DT_QUINT16_REF: quint16_ref,
types_pb2.DT_QINT32_REF: qint32_ref,
types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
types_pb2.DT_HALF: "float16",
types_pb2.DT_FLOAT: "float32",
types_pb2.DT_DOUBLE: "float64",
types_pb2.DT_INT32: "int32",
types_pb2.DT_UINT8: "uint8",
types_pb2.DT_UINT16: "uint16",
types_pb2.DT_INT16: "int16",
types_pb2.DT_INT8: "int8",
types_pb2.DT_STRING: "string",
types_pb2.DT_COMPLEX64: "complex64",
types_pb2.DT_COMPLEX128: "complex128",
types_pb2.DT_INT64: "int64",
types_pb2.DT_BOOL: "bool",
types_pb2.DT_QINT8: "qint8",
types_pb2.DT_QUINT8: "quint8",
types_pb2.DT_QINT16: "qint16",
types_pb2.DT_QUINT16: "quint16",
types_pb2.DT_QINT32: "qint32",
types_pb2.DT_BFLOAT16: "bfloat16",
types_pb2.DT_HALF_REF: "float16_ref",
types_pb2.DT_FLOAT_REF: "float32_ref",
types_pb2.DT_DOUBLE_REF: "float64_ref",
types_pb2.DT_INT32_REF: "int32_ref",
types_pb2.DT_UINT8_REF: "uint8_ref",
types_pb2.DT_UINT16_REF: "uint16_ref",
types_pb2.DT_INT16_REF: "int16_ref",
types_pb2.DT_INT8_REF: "int8_ref",
types_pb2.DT_STRING_REF: "string_ref",
types_pb2.DT_COMPLEX64_REF: "complex64_ref",
types_pb2.DT_COMPLEX128_REF: "complex128_ref",
types_pb2.DT_INT64_REF: "int64_ref",
types_pb2.DT_BOOL_REF: "bool_ref",
types_pb2.DT_QINT8_REF: "qint8_ref",
types_pb2.DT_QUINT8_REF: "quint8_ref",
types_pb2.DT_QINT16_REF: "qint16_ref",
types_pb2.DT_QUINT16_REF: "quint16_ref",
types_pb2.DT_QINT32_REF: "qint32_ref",
types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
}
_STRING_TO_TF = {value: _INTERN_TABLE[key]
for key, value in _TYPE_TO_STRING.items()}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
_NP_TO_TF = frozenset([
(np.float16, float16),
(np.float32, float32),
(np.float64, float64),
(np.int32, int32),
(np.int64, int64),
(np.uint8, uint8),
(np.uint16, uint16),
(np.int16, int16),
(np.int8, int8),
(np.complex64, complex64),
(np.complex128, complex128),
(np.object, string),
(np.bool, bool),
(_np_qint8, qint8),
(_np_quint8, quint8),
(_np_qint16, qint16),
(_np_quint16, quint16),
(_np_qint32, qint32),
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
types_pb2.DT_HALF: np.float16,
types_pb2.DT_FLOAT: np.float32,
types_pb2.DT_DOUBLE: np.float64,
types_pb2.DT_INT32: np.int32,
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_UINT16: np.uint16,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT8: np.int8,
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,
types_pb2.DT_COMPLEX128: np.complex128,
types_pb2.DT_INT64: np.int64,
types_pb2.DT_BOOL: np.bool,
types_pb2.DT_QINT8: _np_qint8,
types_pb2.DT_QUINT8: _np_quint8,
types_pb2.DT_QINT16: _np_qint16,
types_pb2.DT_QUINT16: _np_quint16,
types_pb2.DT_QINT32: _np_qint32,
types_pb2.DT_BFLOAT16: np.uint16,
# Ref types
types_pb2.DT_HALF_REF: np.float16,
types_pb2.DT_FLOAT_REF: np.float32,
types_pb2.DT_DOUBLE_REF: np.float64,
types_pb2.DT_INT32_REF: np.int32,
types_pb2.DT_UINT8_REF: np.uint8,
types_pb2.DT_UINT16_REF: np.uint16,
types_pb2.DT_INT16_REF: np.int16,
types_pb2.DT_INT8_REF: np.int8,
types_pb2.DT_STRING_REF: np.object,
types_pb2.DT_COMPLEX64_REF: np.complex64,
types_pb2.DT_COMPLEX128_REF: np.complex128,
types_pb2.DT_INT64_REF: np.int64,
types_pb2.DT_BOOL_REF: np.bool,
types_pb2.DT_QINT8_REF: _np_qint8,
types_pb2.DT_QUINT8_REF: _np_quint8,
types_pb2.DT_QINT16_REF: _np_qint16,
types_pb2.DT_QUINT16_REF: _np_quint16,
types_pb2.DT_QINT32_REF: _np_qint32,
types_pb2.DT_BFLOAT16_REF: np.uint16,
}
QUANTIZED_DTYPES = frozenset(
[qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
quint16_ref, qint32_ref])
def as_dtype(type_value):
"""Converts the given `type_value` to a `DType`.
Args:
type_value: A value that can be converted to a `tf.DType`
object. This may currently be a `tf.DType` object, a
[`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
a string type name, or a `numpy.dtype`.
Returns:
A `DType` corresponding to `type_value`.
Raises:
TypeError: If `type_value` cannot be converted to a `DType`.
"""
if isinstance(type_value, DType):
return type_value
try:
return _INTERN_TABLE[type_value]
except KeyError:
pass
try:
return _STRING_TO_TF[type_value]
except KeyError:
pass
if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length. We cannot compare it
    # against a single constant (np.string does not exist) to decide whether
    # it is a "string" type, so we compare dtype.type instead.
if type_value.type == np.string_ or type_value.type == np.unicode_:
return string
for key, val in _NP_TO_TF:
if key == type_value:
return val
raise TypeError(
"Cannot convert value %r to a TensorFlow DType." % type_value)
| |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service registry for apitools."""
import collections
import logging
import re
import textwrap
from apitools.base.py import base_api
from apitools.gen import util
# We're a code generator. I don't care.
# pylint:disable=too-many-statements
_MIME_PATTERN_RE = re.compile(r'(?i)[a-z0-9_*-]+/[a-z0-9_*-]+')
class ServiceRegistry(object):
"""Registry for service types."""
def __init__(self, client_info, message_registry, command_registry,
names, root_package, base_files_package,
unelidable_request_methods):
self.__client_info = client_info
self.__package = client_info.package
self.__names = names
self.__service_method_info_map = collections.OrderedDict()
self.__message_registry = message_registry
self.__command_registry = command_registry
self.__root_package = root_package
self.__base_files_package = base_files_package
self.__unelidable_request_methods = unelidable_request_methods
self.__all_scopes = set(self.__client_info.scopes)
def Validate(self):
self.__message_registry.Validate()
@property
def scopes(self):
return sorted(list(self.__all_scopes))
def __GetServiceClassName(self, service_name):
return self.__names.ClassName(
'%sService' % self.__names.ClassName(service_name))
def __PrintDocstring(self, printer, method_info, method_name, name):
"""Print a docstring for a service method."""
if method_info.description:
description = util.CleanDescription(method_info.description)
            first_line, newline, remaining = description.partition('\n')
if not first_line.endswith('.'):
first_line = '%s.' % first_line
description = '%s%s%s' % (first_line, newline, remaining)
else:
description = '%s method for the %s service.' % (method_name, name)
with printer.CommentContext():
printer('"""%s' % description)
printer()
printer('Args:')
printer(' request: (%s) input message', method_info.request_type_name)
printer(' global_params: (StandardQueryParameters, default: None) '
'global arguments')
if method_info.upload_config:
printer(' upload: (Upload, default: None) If present, upload')
printer(' this stream with the request.')
if method_info.supports_download:
printer(
' download: (Download, default: None) If present, download')
printer(' data from the request via this stream.')
printer('Returns:')
printer(' (%s) The response message.', method_info.response_type_name)
printer('"""')
def __WriteSingleService(
self, printer, name, method_info_map, client_class_name):
printer()
class_name = self.__GetServiceClassName(name)
printer('class %s(base_api.BaseApiService):', class_name)
with printer.Indent():
printer('"""Service class for the %s resource."""', name)
printer()
printer('_NAME = %s', repr(name))
# Print the configs for the methods first.
printer()
printer('def __init__(self, client):')
with printer.Indent():
printer('super(%s.%s, self).__init__(client)',
client_class_name, class_name)
printer('self._upload_configs = {')
with printer.Indent(indent=' '):
for method_name, method_info in method_info_map.items():
upload_config = method_info.upload_config
if upload_config is not None:
printer(
"'%s': base_api.ApiUploadInfo(", method_name)
with printer.Indent(indent=' '):
attrs = sorted(
x.name for x in upload_config.all_fields())
for attr in attrs:
printer('%s=%r,',
attr, getattr(upload_config, attr))
printer('),')
printer('}')
# Now write each method in turn.
for method_name, method_info in method_info_map.items():
printer()
params = ['self', 'request', 'global_params=None']
if method_info.upload_config:
params.append('upload=None')
if method_info.supports_download:
params.append('download=None')
printer('def %s(%s):', method_name, ', '.join(params))
with printer.Indent():
self.__PrintDocstring(
printer, method_info, method_name, name)
printer("config = self.GetMethodConfig('%s')", method_name)
upload_config = method_info.upload_config
if upload_config is not None:
printer("upload_config = self.GetUploadConfig('%s')",
method_name)
arg_lines = [
'config, request, global_params=global_params']
if method_info.upload_config:
arg_lines.append(
'upload=upload, upload_config=upload_config')
if method_info.supports_download:
arg_lines.append('download=download')
printer('return self._RunMethod(')
with printer.Indent(indent=' '):
for line in arg_lines[:-1]:
printer('%s,', line)
printer('%s)', arg_lines[-1])
printer()
printer('{0}.method_config = lambda: base_api.ApiMethodInfo('
.format(method_name))
with printer.Indent(indent=' '):
method_info = method_info_map[method_name]
attrs = sorted(
x.name for x in method_info.all_fields())
for attr in attrs:
if attr in ('upload_config', 'description'):
continue
value = getattr(method_info, attr)
if value is not None:
printer('%s=%r,', attr, value)
printer(')')
def __WriteProtoServiceDeclaration(self, printer, name, method_info_map):
"""Write a single service declaration to a proto file."""
printer()
printer('service %s {', self.__GetServiceClassName(name))
with printer.Indent():
for method_name, method_info in method_info_map.items():
for line in textwrap.wrap(method_info.description,
printer.CalculateWidth() - 3):
printer('// %s', line)
printer('rpc %s (%s) returns (%s);',
method_name,
method_info.request_type_name,
method_info.response_type_name)
printer('}')
def WriteProtoFile(self, printer):
"""Write the services in this registry to out as proto."""
self.Validate()
client_info = self.__client_info
printer('// Generated services for %s version %s.',
client_info.package, client_info.version)
printer()
printer('syntax = "proto2";')
printer('package %s;', self.__package)
printer('import "%s";', client_info.messages_proto_file_name)
printer()
for name, method_info_map in self.__service_method_info_map.items():
self.__WriteProtoServiceDeclaration(printer, name, method_info_map)
def WriteFile(self, printer):
"""Write the services in this registry to out."""
self.Validate()
client_info = self.__client_info
printer('"""Generated client library for %s version %s."""',
client_info.package, client_info.version)
printer('# NOTE: This file is autogenerated and should not be edited '
'by hand.')
printer('from %s import base_api', self.__base_files_package)
if self.__root_package:
import_prefix = 'from {0} '.format(self.__root_package)
else:
import_prefix = ''
printer('%simport %s as messages', import_prefix,
client_info.messages_rule_name)
printer()
printer()
printer('class %s(base_api.BaseApiClient):',
client_info.client_class_name)
with printer.Indent():
printer(
'"""Generated client library for service %s version %s."""',
client_info.package, client_info.version)
printer()
printer('MESSAGES_MODULE = messages')
printer('BASE_URL = {0!r}'.format(client_info.base_url))
printer()
printer('_PACKAGE = {0!r}'.format(client_info.package))
printer('_SCOPES = {0!r}'.format(
client_info.scopes or
['https://www.googleapis.com/auth/userinfo.email']))
printer('_VERSION = {0!r}'.format(client_info.version))
printer('_CLIENT_ID = {0!r}'.format(client_info.client_id))
printer('_CLIENT_SECRET = {0!r}'.format(client_info.client_secret))
printer('_USER_AGENT = {0!r}'.format(client_info.user_agent))
printer('_CLIENT_CLASS_NAME = {0!r}'.format(
client_info.client_class_name))
printer('_URL_VERSION = {0!r}'.format(client_info.url_version))
printer('_API_KEY = {0!r}'.format(client_info.api_key))
printer()
printer("def __init__(self, url='', credentials=None,")
with printer.Indent(indent=' '):
printer('get_credentials=True, http=None, model=None,')
printer('log_request=False, log_response=False,')
printer('credentials_args=None, default_global_params=None,')
printer('additional_http_headers=None, '
'response_encoding=None):')
with printer.Indent():
printer('"""Create a new %s handle."""', client_info.package)
printer('url = url or self.BASE_URL')
printer(
'super(%s, self).__init__(', client_info.client_class_name)
printer(' url, credentials=credentials,')
printer(' get_credentials=get_credentials, http=http, '
'model=model,')
printer(' log_request=log_request, '
'log_response=log_response,')
printer(' credentials_args=credentials_args,')
printer(' default_global_params=default_global_params,')
printer(' additional_http_headers=additional_http_headers,')
printer(' response_encoding=response_encoding)')
for name in self.__service_method_info_map.keys():
printer('self.%s = self.%s(self)',
name, self.__GetServiceClassName(name))
for name, method_info in self.__service_method_info_map.items():
self.__WriteSingleService(
printer, name, method_info, client_info.client_class_name)
def __RegisterService(self, service_name, method_info_map):
if service_name in self.__service_method_info_map:
raise ValueError(
'Attempt to re-register descriptor %s' % service_name)
self.__service_method_info_map[service_name] = method_info_map
def __CreateRequestType(self, method_description, body_type=None):
"""Create a request type for this method."""
schema = {}
schema['id'] = self.__names.ClassName('%sRequest' % (
self.__names.ClassName(method_description['id'], separator='.'),))
schema['type'] = 'object'
schema['properties'] = collections.OrderedDict()
if 'parameterOrder' not in method_description:
ordered_parameters = list(method_description.get('parameters', []))
else:
ordered_parameters = method_description['parameterOrder'][:]
for k in method_description['parameters']:
if k not in ordered_parameters:
ordered_parameters.append(k)
for parameter_name in ordered_parameters:
field_name = self.__names.CleanName(parameter_name)
field = dict(method_description['parameters'][parameter_name])
if 'type' not in field:
raise ValueError('No type found in parameter %s' % field)
schema['properties'][field_name] = field
if body_type is not None:
body_field_name = self.__GetRequestField(
method_description, body_type)
if body_field_name in schema['properties']:
raise ValueError('Failed to normalize request resource name')
if 'description' not in body_type:
body_type['description'] = (
'A %s resource to be passed as the request body.' % (
self.__GetRequestType(body_type),))
schema['properties'][body_field_name] = body_type
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __CreateVoidResponseType(self, method_description):
"""Create an empty response type."""
schema = {}
method_name = self.__names.ClassName(
method_description['id'], separator='.')
schema['id'] = self.__names.ClassName('%sResponse' % method_name)
schema['type'] = 'object'
schema['description'] = 'An empty %s response.' % method_name
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id']
def __NeedRequestType(self, method_description, request_type):
"""Determine if this method needs a new request type created."""
if not request_type:
return True
method_id = method_description.get('id', '')
if method_id in self.__unelidable_request_methods:
return True
message = self.__message_registry.LookupDescriptorOrDie(request_type)
if message is None:
return True
field_names = [x.name for x in message.fields]
parameters = method_description.get('parameters', {})
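        # for/else: the `else` clause runs only if the loop finishes without
        # `break`, i.e. every parameter is a path parameter whose cleaned
        # name already exists as a field on the candidate request message;
        # only then can the existing request type be reused as-is.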
for param_name, param_info in parameters.items():
if (param_info.get('location') != 'path' or
self.__names.CleanName(param_name) not in field_names):
break
else:
return False
return True
def __MaxSizeToInt(self, max_size):
"""Convert max_size to an int."""
size_groups = re.match(r'(?P<size>\d+)(?P<unit>.B)?$', max_size)
if size_groups is None:
raise ValueError('Could not parse maxSize')
size, unit = size_groups.group('size', 'unit')
shift = 0
if unit is not None:
unit_dict = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
shift = unit_dict.get(unit.upper())
if shift is None:
raise ValueError('Unknown unit %s' % unit)
return int(size) * (1 << shift)
def __ComputeUploadConfig(self, media_upload_config, method_id):
"""Fill out the upload config for this method."""
config = base_api.ApiUploadInfo()
if 'maxSize' in media_upload_config:
config.max_size = self.__MaxSizeToInt(
media_upload_config['maxSize'])
if 'accept' not in media_upload_config:
logging.warn(
'No accept types found for upload configuration in '
'method %s, using */*', method_id)
config.accept.extend([
str(a) for a in media_upload_config.get('accept', '*/*')])
for accept_pattern in config.accept:
if not _MIME_PATTERN_RE.match(accept_pattern):
logging.warn('Unexpected MIME type: %s', accept_pattern)
protocols = media_upload_config.get('protocols', {})
for protocol in ('simple', 'resumable'):
media = protocols.get(protocol, {})
for attr in ('multipart', 'path'):
if attr in media:
setattr(config, '%s_%s' % (protocol, attr), media[attr])
return config
def __ComputeMethodInfo(self, method_description, request, response,
request_field):
"""Compute the base_api.ApiMethodInfo for this method."""
relative_path = self.__names.NormalizeRelativePath(
''.join((self.__client_info.base_path,
method_description['path'])))
method_id = method_description['id']
ordered_params = []
for param_name in method_description.get('parameterOrder', []):
param_info = method_description['parameters'][param_name]
if param_info.get('required', False):
ordered_params.append(param_name)
method_info = base_api.ApiMethodInfo(
relative_path=relative_path,
method_id=method_id,
http_method=method_description['httpMethod'],
description=util.CleanDescription(
method_description.get('description', '')),
query_params=[],
path_params=[],
ordered_params=ordered_params,
request_type_name=self.__names.ClassName(request),
response_type_name=self.__names.ClassName(response),
request_field=request_field,
)
flat_path = method_description.get('flatPath', None)
if flat_path is not None:
flat_path = self.__names.NormalizeRelativePath(
self.__client_info.base_path + flat_path)
if flat_path != relative_path:
method_info.flat_path = flat_path
if method_description.get('supportsMediaUpload', False):
method_info.upload_config = self.__ComputeUploadConfig(
method_description.get('mediaUpload'), method_id)
method_info.supports_download = method_description.get(
'supportsMediaDownload', False)
self.__all_scopes.update(method_description.get('scopes', ()))
for param, desc in method_description.get('parameters', {}).items():
param = self.__names.CleanName(param)
location = desc['location']
if location == 'query':
method_info.query_params.append(param)
elif location == 'path':
method_info.path_params.append(param)
else:
raise ValueError(
'Unknown parameter location %s for parameter %s' % (
location, param))
method_info.path_params.sort()
method_info.query_params.sort()
return method_info
def __BodyFieldName(self, body_type):
if body_type is None:
return ''
return self.__names.FieldName(body_type['$ref'])
def __GetRequestType(self, body_type):
return self.__names.ClassName(body_type.get('$ref'))
def __GetRequestField(self, method_description, body_type):
"""Determine the request field for this method."""
body_field_name = self.__BodyFieldName(body_type)
if body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_resource' % body_field_name)
# It's exceedingly unlikely that we'd get two name collisions, which
# means it's bound to happen at some point.
while body_field_name in method_description.get('parameters', {}):
body_field_name = self.__names.FieldName(
'%s_body' % body_field_name)
return body_field_name
def AddServiceFromResource(self, service_name, methods):
"""Add a new service named service_name with the given methods."""
method_descriptions = methods.get('methods', {})
method_info_map = collections.OrderedDict()
items = sorted(method_descriptions.items())
for method_name, method_description in items:
method_name = self.__names.MethodName(method_name)
# NOTE: According to the discovery document, if the request or
# response is present, it will simply contain a `$ref`.
body_type = method_description.get('request')
if body_type is None:
request_type = None
else:
request_type = self.__GetRequestType(body_type)
if self.__NeedRequestType(method_description, request_type):
request = self.__CreateRequestType(
method_description, body_type=body_type)
request_field = self.__GetRequestField(
method_description, body_type)
else:
request = request_type
request_field = base_api.REQUEST_IS_BODY
if 'response' in method_description:
response = method_description['response']['$ref']
else:
response = self.__CreateVoidResponseType(method_description)
method_info_map[method_name] = self.__ComputeMethodInfo(
method_description, request, response, request_field)
self.__command_registry.AddCommandForMethod(
service_name, method_name, method_info_map[method_name],
request, response)
nested_services = methods.get('resources', {})
services = sorted(nested_services.items())
for subservice_name, submethods in services:
new_service_name = '%s_%s' % (service_name, subservice_name)
self.AddServiceFromResource(new_service_name, submethods)
self.__RegisterService(service_name, method_info_map)
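# A hedged illustration of the `methods` resource shape consumed by
# AddServiceFromResource above; the keys mirror a Google API discovery
# document, and all names/values here are illustrative only.
_EXAMPLE_RESOURCE = {
    'methods': {
        'get': {
            'id': 'example.objects.get',
            'path': 'objects/{object}',
            'httpMethod': 'GET',
            'parameters': {
                'object': {'type': 'string', 'location': 'path'},
            },
            'parameterOrder': ['object'],
            'response': {'$ref': 'Object'},
        },
    },
}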
| |
""" discovery and running of std-library "unittest" style tests. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import traceback
import _pytest._code
from _pytest.compat import getimfunc
from _pytest.config import hookimpl
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.python import Class
from _pytest.python import Function
from _pytest.python import Module
from _pytest.python import transfer_markers
def pytest_pycollect_makeitem(collector, name, obj):
# has unittest been imported and is obj a subclass of its TestCase?
try:
if not issubclass(obj, sys.modules["unittest"].TestCase):
return
except Exception:
return
# yes, so let's collect it
return UnitTestCase(name, parent=collector)
class UnitTestCase(Class):
    # marker for fixturemanager.getfixtureinfo()
# to declare that our children do not support funcargs
nofuncargs = True
def setup(self):
cls = self.obj
if getattr(cls, "__unittest_skip__", False):
return # skipped
setup = getattr(cls, "setUpClass", None)
if setup is not None:
setup()
teardown = getattr(cls, "tearDownClass", None)
if teardown is not None:
self.addfinalizer(teardown)
super(UnitTestCase, self).setup()
def collect(self):
from unittest import TestLoader
cls = self.obj
if not getattr(cls, "__test__", True):
return
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = TestLoader()
module = self.getparent(Module).obj
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
if not getattr(x, "__test__", True):
continue
funcobj = getimfunc(x)
transfer_markers(funcobj, cls, module)
yield TestCaseFunction(name, parent=self, callobj=funcobj)
foundsomething = True
if not foundsomething:
runtest = getattr(self.obj, "runTest", None)
if runtest is not None:
ut = sys.modules.get("twisted.trial.unittest", None)
if ut is None or runtest != ut.TestCase.runTest:
yield TestCaseFunction("runTest", parent=self)
class TestCaseFunction(Function):
nofuncargs = True
_excinfo = None
_testcase = None
def setup(self):
self._testcase = self.parent.obj(self.name)
self._fix_unittest_skip_decorator()
self._obj = getattr(self._testcase, self.name)
if hasattr(self._testcase, "setup_method"):
self._testcase.setup_method(self._obj)
if hasattr(self, "_request"):
self._request._fillfixtures()
def _fix_unittest_skip_decorator(self):
"""
The @unittest.skip decorator calls functools.wraps(self._testcase)
The call to functools.wraps() fails unless self._testcase
has a __name__ attribute. This is usually automatically supplied
        if the test is a function or method, but we need to add it
        manually here.
See issue #1169
"""
if sys.version_info[0] == 2:
setattr(self._testcase, "__name__", self.name)
def teardown(self):
if hasattr(self._testcase, "teardown_method"):
self._testcase.teardown_method(self._obj)
# Allow garbage collection on TestCase instance attributes.
self._testcase = None
self._obj = None
def startTest(self, testcase):
pass
def _addexcinfo(self, rawexcinfo):
# unwrap potential exception info (see twisted trial support below)
rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
try:
excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
except TypeError:
try:
try:
values = traceback.format_exception(*rawexcinfo)
values.insert(
0,
"NOTE: Incompatible Exception Representation, "
"displaying natively:\n\n",
)
fail("".join(values), pytrace=False)
except (fail.Exception, KeyboardInterrupt):
raise
except: # noqa
fail(
"ERROR: Unknown Incompatible Exception "
"representation:\n%r" % (rawexcinfo,),
pytrace=False,
)
except KeyboardInterrupt:
raise
except fail.Exception:
excinfo = _pytest._code.ExceptionInfo()
self.__dict__.setdefault("_excinfo", []).append(excinfo)
def addError(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addFailure(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addSkip(self, testcase, reason):
try:
skip(reason)
except skip.Exception:
self._skipped_by_mark = True
self._addexcinfo(sys.exc_info())
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
try:
xfail(str(reason))
except xfail.Exception:
self._addexcinfo(sys.exc_info())
def addUnexpectedSuccess(self, testcase, reason=""):
self._unexpectedsuccess = reason
def addSuccess(self, testcase):
pass
def stopTest(self, testcase):
pass
def _handle_skip(self):
# implements the skipping machinery (see #2137)
# analog to pythons Lib/unittest/case.py:run
testMethod = getattr(self._testcase, self._testcase._testMethodName)
if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
):
# If the class or method was skipped.
skip_why = getattr(
self._testcase.__class__, "__unittest_skip_why__", ""
) or getattr(testMethod, "__unittest_skip_why__", "")
try: # PY3, unittest2 on PY2
self._testcase._addSkip(self, self._testcase, skip_why)
except TypeError: # PY2
if sys.version_info[0] != 2:
raise
self._testcase._addSkip(self, skip_why)
return True
return False
def runtest(self):
if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
self._testcase(result=self)
else:
# disables tearDown and cleanups for post mortem debugging (see #1890)
if self._handle_skip():
return
self._testcase.debug()
def _prunetraceback(self, excinfo):
Function._prunetraceback(self, excinfo)
traceback = excinfo.traceback.filter(
lambda x: not x.frame.f_globals.get("__unittest")
)
if traceback:
excinfo.traceback = traceback
@hookimpl(tryfirst=True)
def pytest_runtest_makereport(item, call):
if isinstance(item, TestCaseFunction):
if item._excinfo:
call.excinfo = item._excinfo.pop(0)
try:
del call.result
except AttributeError:
pass
# twisted trial support
@hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
ut = sys.modules["twisted.python.failure"]
Failure__init__ = ut.Failure.__init__
check_testcase_implements_trial_reporter()
def excstore(
self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
):
if exc_value is None:
self._rawexcinfo = sys.exc_info()
else:
if exc_type is None:
exc_type = type(exc_value)
self._rawexcinfo = (exc_type, exc_value, exc_tb)
try:
Failure__init__(
self, exc_value, exc_type, exc_tb, captureVars=captureVars
)
except TypeError:
Failure__init__(self, exc_value, exc_type, exc_tb)
ut.Failure.__init__ = excstore
yield
ut.Failure.__init__ = Failure__init__
else:
yield
def check_testcase_implements_trial_reporter(done=[]):
if done:
return
from zope.interface import classImplements
from twisted.trial.itrial import IReporter
classImplements(TestCaseFunction, IReporter)
done.append(1)
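# A hedged illustration of what this plugin collects: a plain stdlib
# TestCase like the one below is what pytest_pycollect_makeitem above
# wraps in a UnitTestCase collector. Illustrative only, not part of the
# plugin itself.
def _example_collected_testcase():
    import unittest

    class TestUpper(unittest.TestCase):
        def test_upper(self):
            self.assertEqual("foo".upper(), "FOO")

    return TestUpper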
| |
import os
import sys
# dmrs sub-directory
directory = os.path.dirname(os.path.realpath(__file__))
# add pydmrs submodule to Python path
sys.path.insert(1, os.path.join(directory, 'pydmrs'))
from pydmrs.components import Pred, Sortinfo, EventSortinfo, InstanceSortinfo
from pydmrs.core import Link, ListDmrs
from pydmrs.graphlang.graphlang import parse_graphlang
from pydmrs.mapping.paraphrase import paraphrase
def create_sortinfo(cvarsort, features):
assert len(cvarsort) == 1 and cvarsort != 'i'
assert isinstance(features, tuple)
    # keep a tuple so the feature-set comparisons below can match
    features = tuple(feature.replace('-', '_dash_').replace('.', '_dot_') for feature in features)
if cvarsort == 'e' and features == ('sf', 'tense', 'mood', 'perf', 'prog'):
return EventSortinfo
elif cvarsort == 'x' and features == ('pers', 'num', 'gend', 'ind', 'pt'):
return InstanceSortinfo
else:
return type(str(cvarsort.upper() + 'Sortinfo'), (Sortinfo,), dict(cvarsort=cvarsort, __slots__=features))
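# A hedged usage sketch: cvarsort/feature combinations other than the two
# special-cased above fall through to a dynamically created Sortinfo
# subclass. Illustrative only.
def _example_create_sortinfo():
    cls = create_sortinfo('x', ('pers', 'num'))
    assert cls.__name__ == 'XSortinfo' and issubclass(cls, Sortinfo)
    return cls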
class Dmrs(ListDmrs):
# composable dmrs
__slots__ = ('nodes', 'links', 'index', 'top', 'anchors')
def __init__(self, nodes=(), links=(), index=None, top=None, **kwargs):
super(Dmrs, self).__init__(nodes=nodes, links=links, index=index, top=top)
self.anchors = dict()
@staticmethod
def parse(string, sortinfo_classes=None, sortinfo_shortforms=None):
anchors = dict()
dmrs = parse_graphlang(string, cls=Dmrs, anchors=anchors, sortinfo_classes=sortinfo_classes, sortinfo_shortforms=sortinfo_shortforms)
dmrs.anchors = anchors
return dmrs
def subgraph(self, nodeid, exclude=()):
subgraph_nodeid = nodeid
subgraph = {subgraph_nodeid}
for link in self.get_links(nodeid=subgraph_nodeid):
stack = list()
stack.append(link.start)
stack.append(link.end)
visited = {subgraph_nodeid}
while stack:
nodeid = stack.pop()
# if self.index is not None and nodeid == self.index.nodeid: # or top?
# break
if nodeid in exclude and nodeid != subgraph_nodeid:
break
if nodeid not in visited:
visited.add(nodeid)
for link in self.get_links(nodeid=nodeid):
stack.append(link.start)
stack.append(link.end)
else:
subgraph |= visited
return SubDmrs(dmrs=self, nodeids=subgraph)
def compose(self, other, fusion=None, other_head=False, hierarchy=None):
assert isinstance(other, Dmrs)
nodeid_mapping = dict()
# unify anchors
if fusion is None:
for anchor1 in self.anchors:
for anchor2 in other.anchors:
if anchor1 != anchor2:
continue
node1 = self.anchors[anchor1]
node2 = other.anchors[anchor2]
node1.unify(node2, hierarchy=hierarchy)
nodeid_mapping[node2.nodeid] = node1.nodeid
else:
for anchor1, anchor2 in fusion.items():
# if anchor1 not in self.anchors or anchor2 not in other.anchors:
# continue
node1 = self.anchors[anchor1]
node2 = other.anchors[anchor2]
node1.unify(node2, hierarchy=hierarchy)
nodeid_mapping[node2.nodeid] = node1.nodeid
# add missing nodes, update node ids
for node2 in other.iter_nodes():
nodeid2 = node2.nodeid
if nodeid2 in nodeid_mapping:
node2.nodeid = nodeid_mapping[nodeid2]
else:
node2.nodeid = None
nodeid_mapping[nodeid2] = self.add_node(node2)
# add missing links, update existing links
links1 = set((link1.start, link1.end) for link1 in self.iter_links())
for link2 in other.iter_links():
start = nodeid_mapping[link2.start]
end = nodeid_mapping[link2.end]
if (start, end) not in links1:
link1 = Link(start, end, link2.rargname, link2.post)
self.add_link(link1)
if other_head and (start, end) in links1:
self.remove_link((start, end))
link1 = Link(start, end, link2.rargname, link2.post)
self.add_link(link1)
# update index and top
if other_head:
if other.index is None:
self.index = None
else:
self.index = self[other.index.nodeid]
if other.top is None:
self.top = None
else:
self.top = self[other.top.nodeid]
# set anchors
if other_head:
self.anchors = {anchor: self[node2.nodeid] for anchor, node2 in other.anchors.items()}
def apply_paraphrases(self, paraphrases, hierarchy=None, match_top_index=True):
return paraphrase(dmrs=self, paraphrases=paraphrases, hierarchy=hierarchy, match_top_index=match_top_index)
def remove_underspecifications(self):
for node in list(self.iter_nodes()):
if type(node.pred) is Pred:
self.remove_node(node.nodeid)
self.remove_links(link for link in self.iter_links() if link.start == node.nodeid or link.end == node.nodeid)
continue
# TODO: remove underspecification in partially underspecified predicate
if node.sortinfo is not None:
node.sortinfo = node.sortinfo.__class__(**{key: None if node.sortinfo[key] in ('u', '?') else node.sortinfo[key] for key in node.sortinfo if key != 'cvarsort'})
if node.carg == '?':
node.carg = None
def get_mrs(self, requires_rel_suffix=False):
# labels = dict(zip(self, range(1, len(self) + 1)))
# redirected = []
quantifiers = dict()
# labels = dict(zip(self, self))
labels = {nodeid: [nodeid] for nodeid in self}
for link in self.iter_links():
assert isinstance(link.start, int) and isinstance(link.end, int)
assert link.rargname is not None or link.post == 'EQ' # ('ARG1', 'ARG2', 'ARG3', 'ARG4', 'ARG', 'RSTR', 'BODY', 'L-INDEX', 'R-INDEX', 'L-HNDL', 'R-HNDL')
assert link.post in ('NEQ', 'EQ', 'H', 'HEQ')
if link.post == 'EQ':
# upper, lower = (link.start, link.end) if link.start > link.end else (link.end, link.start)
# labels[upper] = lower
labels[link.start].append(link.end)
labels[link.end].append(link.start)
elif link.rargname == 'RSTR' and link.post == 'H':
quantifiers[link.start] = link.end
# for upper, lower in labels.items():
# lower_lower = labels[lower]
# while lower_lower != lower:
# lower = lower_lower
# lower_lower = labels[lower]
# labels[upper] = lower
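        # Merge EQ-connected nodes into a single label: flood each node's
        # EQ-neighbour list transitively, then point every member of the
        # connected component at its smallest nodeid.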
for nodeid in labels:
eqs = labels[nodeid]
if isinstance(eqs, int):
continue
lowest = nodeid
n = 0
while n < len(eqs):
lbl = eqs[n]
if lbl < lowest:
lowest = lbl
for nodeid in labels[lbl]:
if nodeid not in eqs:
eqs.append(nodeid)
n += 1
for lbl in eqs:
labels[lbl] = lowest
ordered = sorted(labels.values())
labels = {nodeid: ordered.index(label) + 1 for nodeid, label in labels.items()}
predicates = dict()
cargs = dict()
variables = dict()
index = max(labels.values())
for node in self.iter_nodes():
assert node.nodeid in self
assert node.pred is not None
predicates[node.nodeid] = str(node.pred)
if node.carg is not None:
cargs[node.nodeid] = node.carg
if node.nodeid not in quantifiers:
assert node.sortinfo is not None
index += 1
variables[node.nodeid] = (node.sortinfo.cvarsort + str(index), node.sortinfo)
args = dict()
hcons = dict()
if self.top is not None:
hcons[0] = labels[self.top.nodeid]
for link in self.iter_links():
if link.start not in args and link.rargname is not None:
args[link.start] = dict()
if link.post == 'NEQ':
assert link.rargname not in args[link.start]
args[link.start][link.rargname] = variables[link.end][0]
elif link.post == 'EQ':
if link.rargname is not None:
assert link.rargname not in args[link.start]
args[link.start][link.rargname] = variables[link.end][0]
elif link.post == 'H':
assert link.rargname not in args[link.start]
index += 1
args[link.start][link.rargname] = 'h' + str(index)
hcons[index] = labels[link.end]
elif link.post == 'HEQ':
args[link.start][link.rargname] = 'h' + str(labels[link.end])
else:
assert False
elempreds = []
for nodeid in self:
carg_string = 'CARG: "{}" '.format(cargs[nodeid]) if nodeid in cargs else ''
if nodeid in quantifiers:
intrinsic_string = variables[quantifiers[nodeid]][0]
# TODO: Hack for Chinese
if intrinsic_string[0] == 'e':
for nodeid2, (variable, _) in variables.items():
if intrinsic_string == variable:
for nodeid3, label in labels.items():
if label == hcons[int(args[nodeid2]['ARG1'][1:])]:
intrinsic_string = variables[nodeid3][0]
break
else:
assert False
break
else:
assert False
else:
intrinsic_string = '{} [ {} {}]'.format(variables[nodeid][0], variables[nodeid][1].cvarsort, ''.join('{}: {} '.format(feature.upper().replace('_DASH_', '-').replace('_DOT_', '.'), value.lower()) for feature, value in variables[nodeid][1].iter_specified() if value is not None))
args_string = ''.join('{}: {} '.format(role.upper(), arg) for role, arg in args[nodeid].items()) if nodeid in args else ''
if requires_rel_suffix:
elempred_string = '[ {}_rel LBL: h{} {}ARG0: {} {}]'.format(predicates[nodeid], labels[nodeid], carg_string, intrinsic_string, args_string)
else:
elempred_string = '[ {} LBL: h{} {}ARG0: {} {}]'.format(predicates[nodeid], labels[nodeid], carg_string, intrinsic_string, args_string)
elempreds.append(elempred_string)
top_string = '' if self.top is None else 'TOP: h0 '
index_string = '' if self.index is None else 'INDEX: {} '.format(variables[self.index.nodeid][0])
eps_string = ' '.join(elempreds)
hcons_string = ' '.join('h{} qeq h{}'.format(*qeq) for qeq in hcons.items())
mrs_string = '[ {}{}RELS: < {} > HCONS: < {} > ICONS: < > ]'.format(top_string, index_string, eps_string, hcons_string)
return mrs_string
class SubDmrs(Dmrs):
def __init__(self, dmrs, nodeids=None):
self.dmrs = dmrs
self.nodeids = set(dmrs) if nodeids is None else set(nodeids)
if dmrs.index is not None and dmrs.index.nodeid in self.nodeids:
self.index = dmrs.index
else:
self.index = None
if dmrs.top is not None and dmrs.top.nodeid in self.nodeids:
self.top = dmrs.top
else:
self.top = None
self.cfrom = dmrs.cfrom
self.cto = dmrs.cto
self.surface = dmrs.surface
self.ident = dmrs.ident
def __iter__(self):
for nodeid in self.dmrs:
if nodeid in self.nodeids:
yield nodeid
def __len__(self):
return sum(1 for nodeid in self.dmrs if nodeid in self.nodeids)
def __getitem__(self, nodeid):
if nodeid in self.nodeids:
return self.dmrs[nodeid]
raise KeyError(nodeid)
def iter_nodes(self):
for node in self.dmrs.iter_nodes():
if node.nodeid in self.nodeids:
yield node
def iter_links(self):
for link in self.dmrs.iter_links():
if link.start in self.nodeids and link.end in self.nodeids:
yield link
def add_node(self, node):
raise NotImplementedError
def add_link(self, link):
raise NotImplementedError
def remove_node(self, nodeid):
if nodeid in self.nodeids:
self.nodeids.remove(nodeid)
if self.index is not None and self.index.nodeid == nodeid:
self.index = None
if self.top is not None and self.top.nodeid == nodeid:
self.top = None
def remove_link(self, link):
raise NotImplementedError
def renumber_node(self, old_id, new_id):
raise NotImplementedError
| |
"""
High level functional tests for minitrue.
"""
import urlparse
from twisted.internet import reactor
from twisted.trial.unittest import TestCase
from twisted.web import server, resource
from minitrue import misdirection, proxy
from minitrue.utils import StringIO, Constructor
from minitrue.test.observer import ObserverMixin, SubstringObserver
from minitrue.test.connect import getWithProxy, getWithoutProxy
class _News(resource.Resource):
"""
A news source in Oceania.
"""
message = "Chocolate rations have been decreased to 20g per week."
def render_GET(self, request):
accept = request.getHeader("Accept-Language")
if accept is not None and "oldspeak" in accept:
language = "oldspeak"
else:
language = "newspeak"
request.setHeader('Content-Language', language)
return self.message
class _Book(resource.Resource):
def render_GET(self, _):
return "Chapter I: Ignorance is Strength\n\n..."
def buildTarget():
root = resource.Resource()
root.putChild("news", _News())
root.putChild("book", _Book())
return server.Site(root)
class _MinitrueConstructor(Constructor):
factory = proxy.MinitrueFactory
misdirectingProxyConstructor = _MinitrueConstructor()
requestManglingProxyConstructor = _MinitrueConstructor()
responseManglingProxyConstructor = _MinitrueConstructor()
@misdirectingProxyConstructor.kwarg(kwargName="requestMangler")
@misdirection.misdirector
def misdirector(url):
"""
Misdirects requests to The Book towards a reputable news source.
"""
url = urlparse.urlsplit(url)
if url.path == "/book":
url = url._replace(path="/news")
return urlparse.urlunsplit(url)
@requestManglingProxyConstructor.kwarg()
def requestMangler(request):
"""
Modifies requests for oldspeak into requests for newspeak.
"""
headers = request.requestHeaders
headers.setRawHeaders("Accept-Language", ["newspeak"])
@responseManglingProxyConstructor.kwarg()
def responseMangler(response):
"""
Modifies some response content, because the chocolate rations have
ostensibly not been decreased.
"""
request = response.client.father
content = response.content.read()
if "news" in request.uri:
response.content = StringIO(content.replace("decreased", "increased"))
class ProxyTestMixin(object):
def setUp(self):
self.listeningPorts = {}
proxyFactory = self.proxyConstructor()
port = reactor.listenTCP(0, proxyFactory)
self.listeningPorts["proxy"] = port
target = buildTarget()
port = reactor.listenTCP(0, target)
self.listeningPorts["target"] = port
def get(self, path="/", query="", fragment="", headers=None, proxy=True):
url = self._buildURL(path, query, fragment)
if proxy:
proxy = self.listeningPorts["proxy"].getHost()
return getWithProxy(url, proxy.host, proxy.port, headers)
else:
return getWithoutProxy(url, headers)
def _buildURL(self, path, query, fragment):
target = self.listeningPorts["target"].getHost()
netloc = "%s:%s" % (target.host, target.port)
parts = "http", netloc, path, query, fragment
return urlparse.urlunsplit(parts)
def tearDown(self):
for port in self.listeningPorts.values():
port.stopListening()
class MisdirectionTest(ProxyTestMixin, ObserverMixin, TestCase):
proxyConstructor = misdirectingProxyConstructor
def setUp(self):
ProxyTestMixin.setUp(self)
ObserverMixin.setUp(self)
self.misdirectionObserver = SubstringObserver("Misdirecting")
self.addObserver(self.misdirectionObserver)
def tearDown(self):
ProxyTestMixin.tearDown(self)
ObserverMixin.tearDown(self)
def verifyMisdirected(self, content, expected):
if expected:
self.assertIn("Chocolate rations", content)
else:
self.assertIn("Ignorance is Strength", content)
return content
def _misdirectionTest(self, withProxy):
d = self.get("/book", proxy=withProxy).deferred
d.addCallback(self.verifyMisdirected, withProxy)
observer = self.misdirectionObserver
d.addCallback(self.verifyObserved, observer, withProxy)
return d
def test_notMisdirected(self):
return self._misdirectionTest(False)
def test_misdirected(self):
"""
Checks that requests to the book are misdirected somewhere else.
"""
return self._misdirectionTest(True)
class RequestManglingTest(ProxyTestMixin, TestCase):
proxyConstructor = requestManglingProxyConstructor
def getInOldspeak(self, path="/", query="", fragment="", proxy=True):
headers = {"Accept-Language": "oldspeak"}
return self.get(path, query, fragment, headers, proxy)
def verifyRequestMangled(self, content, factory, expectMangled=True):
language, = factory.responseHeaders["content-language"]
if expectMangled:
self.assertEqual(language, "newspeak")
else:
self.assertEqual(language, "oldspeak")
return content
def _requestManglingTest(self, withProxy):
factory = self.getInOldspeak("/news", proxy=withProxy)
d = factory.deferred
d.addCallback(self.verifyRequestMangled, factory, withProxy)
return d
def test_requestNotMangled(self):
return self._requestManglingTest(False)
def test_requestMangled(self):
return self._requestManglingTest(True)
class ResponseManglingTest(ProxyTestMixin, TestCase):
proxyConstructor = responseManglingProxyConstructor
def verifyResponseMangled(self, content, expectMangled):
mangled = "increased" in content and "decreased" not in content
if expectMangled:
self.assertTrue(mangled)
else:
self.assertFalse(mangled)
return content
def _responseManglingTest(self, withProxy):
d = self.get("/news", proxy=withProxy).deferred
d.addCallback(self.verifyResponseMangled, withProxy)
return d
def test_responseNotMangled(self):
return self._responseManglingTest(False)
def test_responseMangled(self):
return self._responseManglingTest(True)
| |
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class YAML_parser:
def __init__(self):
self.data = {
'name': '', # project name
'mcu' : '',
'board' : '',
'core': '', # core
'linker_file': '', # linker command file
'include_paths': [], # include paths
'source_paths': [], # source paths
'source_files_c': [], # c source files
'source_files_cpp': [], # c++ source files
'source_files_s': [], # assembly source files
'source_files_obj': [], # object files
'source_files_lib': [], # libraries
'macros': [], # macros (defines)
'project_dir': {
'name': '', # the name of the project
'path' : '' }, # the path where will be generated
# tool specific settings, will be parsed separately
'misc': [],
}
def process_files(self, source_list, group_name):
"""Process files accroding to the extension."""
for source_file in source_list:
extension = source_file.split(".")[-1]
if extension == 'c':
self.data['source_files_c'][group_name].append(source_file)
elif extension == 's':
self.data['source_files_s'][group_name].append(source_file)
elif extension == 'cpp':
self.data['source_files_cpp'][group_name].append(source_file)
def find_group_name(self, common_attributes):
""" Creates new dictionaries based on group_name """
group_name = None
try:
for k, v in common_attributes.items():
if k == 'group_name':
group_name = v[0]
except KeyError:
pass
self.data['source_files_c'] = {}
self.data['source_files_c'][group_name] = []
self.data['source_files_cpp'] = {}
self.data['source_files_cpp'][group_name] = []
self.data['source_files_s'] = {}
self.data['source_files_s'][group_name] = []
return group_name
def find_paths(self, common_attributes):
""" Find defined include and source paths """
include_paths = []
source_paths = []
try:
for k, v in common_attributes.items():
if k == 'source_paths':
source_paths = (v)
elif k == 'include_paths':
include_paths = (v)
except KeyError:
pass
self.data['source_paths'] = source_paths
self.data['include_paths'] = include_paths
def find_source_files(self, common_attributes, group_name):
try:
for k, v in common_attributes.items():
if k == 'source_files':
self.process_files(v, group_name)
except KeyError:
pass
def find_macros(self, common_attributes):
macros = []
try:
for k, v in common_attributes.items():
if k == 'macros':
macros = v
except KeyError:
pass
self.data['macros'] = macros
def parse_yaml(self, dic, tool, toolchain):
""" Parse single yaml file, find all data in records and return it. """
# load all common attributes (paths, files, groups)
common_attributes = _finditem(dic, 'common')
# prepare dic based on found groups in common
group_name = self.find_group_name(common_attributes)
self.find_paths(common_attributes)
self.find_source_files(common_attributes, group_name)
self.find_macros(common_attributes)
project_dir = _finditem(common_attributes, 'project_dir')
if project_dir:
self.data['project_dir'].update(project_dir)
self.data['core'] = _finditem(common_attributes, 'core')
self.data['board'] = _finditem(common_attributes, 'board')
# load all specific files
specific_dic = {}
specific_attributes = _finditem(dic, 'tool_specific')
if specific_attributes:
try:
for k, v in specific_attributes.items():
if k == tool or k == toolchain:
specific_dic = v
except KeyError:
pass
for k, v in specific_dic.items():
if "source_files" == k:
# source files might have virtual dir
self.process_files(v, group_name)
elif "misc" == k:
self.data[k] = v
elif "include_paths" == k or "source_paths" == k:
self.data[k] += (v)
elif "macros" == k:
self.data[k] += (v)
elif "project_dir" == k:
self.data[k].update(v)
else:
self.data[k] = v
# need to consider all object names (.o, .obj)
obj = get_source_files_by_extension(dic, 'o')
if obj:
self.data['source_files_obj'].append(obj)
obj = get_source_files_by_extension(dic, 'obj')
if obj:
self.data['source_files_obj'].append(obj)
# need to consider all library names (.lib, .ar)
lib = get_source_files_by_extension(dic, 'lib')
if lib:
self.data['source_files_lib'].append(lib)
lib = get_source_files_by_extension(dic, 'ar')
if lib:
self.data['source_files_lib'].append(lib)
lib = get_source_files_by_extension(dic, 'a')
if lib:
self.data['source_files_lib'].append(lib)
return self.data
def parse_yaml_list(self, project_list):
""" Process list of dictionaries from yaml files. """
for dic in project_list:
mcu = _finditem(dic, 'mcu') # TODO fix naming
if mcu:
self.data['mcu'] = mcu[0]
board = _finditem(dic, 'board')
if board:
self.data['board'] = board[0]
            project_dir = _finditem(dic, 'project_dir')
            if project_dir:
                if project_dir.get('name'):
                    self.data['project_dir']['name'] = project_dir['name'][0]
                if project_dir.get('path'):
                    self.data['project_dir']['path'] = project_dir['path'][0]
include_paths = _finditem(dic, 'include_paths')
if include_paths:
self.data['include_paths'] += (include_paths)
source_paths = _finditem(dic, 'source_paths')
if source_paths:
self.data['source_paths'] += (source_paths)
linker_file = _finditem(dic, 'linker_file')
if linker_file:
if len(linker_file) != 1:
raise RuntimeError(
"Defined %s linker files. Only one allowed." % len(linker_file))
self.data['linker_file'] = linker_file[0]
source_c = _finditem(dic, 'source_files_c')
if source_c:
self.data['source_files_c'].append(source_c)
source_cpp = _finditem(dic, 'source_files_cpp')
if source_cpp:
self.data['source_files_cpp'].append(source_cpp)
source_s = _finditem(dic, 'source_files_s')
if source_s:
self.data['source_files_s'].append(source_s)
source_obj = _finditem(dic, 'source_files_obj')
if source_obj:
self.data['source_files_obj'].append(source_obj)
source_lib = _finditem(dic, 'source_files_lib')
if source_lib:
self.data['source_files_lib'].append(source_lib)
core = _finditem(dic, 'core')
if core:
self.data['core'] = core[0]
macros = _finditem(dic, 'macros')
if macros:
self.data['macros'] += (macros)
misc = _finditem(dic, 'misc')
if misc:
self.data['misc'].append(misc)
return self.data
def set_name(self, project_name):
self.data['name'] = project_name
def get_source_files_by_extension(dic, extension):
""" Returns list of source files based on defined extension. """
find_extension = 'source_files_' + extension
return _finditem(dic, find_extension)
def find_all_values(obj, key):
files = []
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
item = find_all_values(v, key)
if item:
files.append(item)
return files
def _finditem(obj, key):
if key in obj:
return obj[key]
for k, v in obj.items():
if isinstance(v, dict):
item = _finditem(v, key)
if item is not None:
return item
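# A small doctest-style sketch (not part of the original module) of how
# _finditem walks nested dictionaries: the first match found depth-first wins.
#
#     >>> record = {'tool_specific': {'uvision': {'misc': ['--no-rtti']}}}
#     >>> _finditem(record, 'misc')
#     ['--no-rtti']
#
# The 'uvision' key and the flag value are made up for this example.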
| |
from __future__ import annotations
from datetime import datetime, timedelta, tzinfo
from typing import Any, ClassVar, Sequence
import attrs
from tzlocal import get_localzone
from ...abc import Trigger
from ...marshalling import marshal_date, marshal_timezone, unmarshal_date, unmarshal_timezone
from ...util import timezone_repr
from ...validators import as_aware_datetime, as_timezone, require_state_version
from .fields import (
DEFAULT_VALUES, BaseField, DayOfMonthField, DayOfWeekField, MonthField, WeekField)
@attrs.define(kw_only=True)
class CronTrigger(Trigger):
"""
Triggers when current time matches all specified time constraints, similarly to how the UNIX
cron scheduler works.
:param year: 4-digit year
:param month: month (1-12)
    :param day: day of the month (1-31)
:param week: ISO week (1-53)
:param day_of_week: number or name of weekday (0-7 or sun,mon,tue,wed,thu,fri,sat,sun)
:param hour: hour (0-23)
:param minute: minute (0-59)
:param second: second (0-59)
:param start_time: earliest possible date/time to trigger on (defaults to current time)
:param end_time: latest possible date/time to trigger on
:param timezone: time zone to use for the date/time calculations
(defaults to the local timezone)
.. note:: The first weekday is always **monday**.
"""
FIELDS_MAP: ClassVar[list[tuple[str, type[BaseField]]]] = [
('year', BaseField),
('month', MonthField),
('day', DayOfMonthField),
('week', WeekField),
('day_of_week', DayOfWeekField),
('hour', BaseField),
('minute', BaseField),
('second', BaseField)
]
year: int | str | None = None
month: int | str | None = None
day: int | str | None = None
week: int | str | None = None
day_of_week: int | str | None = None
hour: int | str | None = None
minute: int | str | None = None
second: int | str | None = None
start_time: datetime = attrs.field(converter=as_aware_datetime, factory=datetime.now)
end_time: datetime | None = None
timezone: tzinfo | str = attrs.field(converter=as_timezone, factory=get_localzone)
_fields: list[BaseField] = attrs.field(init=False, eq=False, factory=list)
_last_fire_time: datetime | None = attrs.field(init=False, eq=False, default=None)
def __attrs_post_init__(self) -> None:
self._set_fields([self.year, self.month, self.day, self.week, self.day_of_week, self.hour,
self.minute, self.second])
self._last_fire_time: datetime | None = None
def _set_fields(self, values: Sequence[int | str | None]) -> None:
self._fields = []
assigned_values = {field_name: value
for (field_name, _), value in zip(self.FIELDS_MAP, values)
if value is not None}
for field_name, field_class in self.FIELDS_MAP:
exprs = assigned_values.pop(field_name, None)
if exprs is None:
exprs = '*' if assigned_values else DEFAULT_VALUES[field_name]
field = field_class(field_name, exprs)
self._fields.append(field)
@classmethod
def from_crontab(cls, expr: str, timezone: str | tzinfo = 'local') -> CronTrigger:
"""
Create a :class:`~CronTrigger` from a standard crontab expression.
See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here.
:param expr: minute, hour, day of month, month, day of week
:param timezone: time zone to use for the date/time calculations
(defaults to local timezone if omitted)
"""
values = expr.split()
if len(values) != 5:
raise ValueError(f'Wrong number of fields; got {len(values)}, expected 5')
return cls(minute=values[0], hour=values[1], day=values[2], month=values[3],
day_of_week=values[4], timezone=timezone)
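    # A short usage sketch (an illustration, not part of the original class):
    #
    #     >>> trigger = CronTrigger.from_crontab('*/5 * * * *')
    #
    # builds a trigger that fires every five minutes in the local timezone;
    # the five fields map to minute, hour, day, month and day_of_week.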
def _increment_field_value(self, dateval: datetime, fieldnum: int) -> tuple[datetime, int]:
"""
Increments the designated field and resets all less significant fields to their minimum
values.
:return: a tuple containing the new date, and the number of the field that was actually
incremented
"""
values = {}
i = 0
while i < len(self._fields):
field = self._fields[i]
if not field.real:
if i == fieldnum:
fieldnum -= 1
i -= 1
else:
i += 1
continue
if i < fieldnum:
values[field.name] = field.get_value(dateval)
i += 1
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
i += 1
else:
value = field.get_value(dateval)
maxval = field.get_max(dateval)
if value == maxval:
fieldnum -= 1
i -= 1
else:
values[field.name] = value + 1
i += 1
difference = datetime(**values) - dateval.replace(tzinfo=None)
dateval = datetime.fromtimestamp(dateval.timestamp() + difference.total_seconds(),
self.timezone)
return dateval, fieldnum
def _set_field_value(self, dateval: datetime, fieldnum: int, new_value: int) -> datetime:
values = {}
for i, field in enumerate(self._fields):
if field.real:
if i < fieldnum:
values[field.name] = field.get_value(dateval)
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
else:
values[field.name] = new_value
return datetime(**values, tzinfo=self.timezone)
def next(self) -> datetime | None:
if self._last_fire_time:
start_time = self._last_fire_time + timedelta(microseconds=1)
else:
start_time = self.start_time
fieldnum = 0
next_time = datetime_ceil(start_time).astimezone(self.timezone)
while 0 <= fieldnum < len(self._fields):
field = self._fields[fieldnum]
curr_value = field.get_value(next_time)
next_value = field.get_next_value(next_time)
if next_value is None:
# No valid value was found
next_time, fieldnum = self._increment_field_value(next_time, fieldnum - 1)
elif next_value > curr_value:
                # A valid value, higher than the starting one, was found
if field.real:
next_time = self._set_field_value(next_time, fieldnum, next_value)
fieldnum += 1
else:
next_time, fieldnum = self._increment_field_value(next_time, fieldnum)
else:
# A valid value was found, no changes necessary
fieldnum += 1
# Return if the date has rolled past the end date
if self.end_time and next_time > self.end_time:
return None
if fieldnum >= 0:
self._last_fire_time = next_time
return next_time
def __getstate__(self) -> dict[str, Any]:
return {
'version': 1,
'timezone': marshal_timezone(self.timezone),
'fields': [str(f) for f in self._fields],
'start_time': marshal_date(self.start_time),
'end_time': marshal_date(self.end_time),
'last_fire_time': marshal_date(self._last_fire_time)
}
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
self.timezone = unmarshal_timezone(state['timezone'])
self.start_time = unmarshal_date(state['start_time'])
self.end_time = unmarshal_date(state['end_time'])
self._last_fire_time = unmarshal_date(state['last_fire_time'])
self._set_fields(state['fields'])
def __repr__(self) -> str:
fields = [f'{field.name}={str(field)!r}' for field in self._fields]
fields.append(f'start_time={self.start_time.isoformat()!r}')
if self.end_time:
fields.append(f'end_time={self.end_time.isoformat()!r}')
fields.append(f'timezone={timezone_repr(self.timezone)!r}')
return f'CronTrigger({", ".join(fields)})'
def datetime_ceil(dateval: datetime) -> datetime:
"""Round the given datetime object upwards."""
if dateval.microsecond > 0:
return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
return dateval
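# A quick illustration (not part of the original module) of datetime_ceil:
# any nonzero microsecond component rounds up to the next whole second.
#
#     >>> datetime_ceil(datetime(2020, 1, 1, 12, 0, 0, 1))
#     datetime.datetime(2020, 1, 1, 12, 0, 1)
#     >>> datetime_ceil(datetime(2020, 1, 1, 12, 0))
#     datetime.datetime(2020, 1, 1, 12, 0)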
| |
#!/usr/bin/env python
""" This file contains code for detecting cones """
import numpy as np, cv2, argparse, time, sys
import rospy, threading
# Needed for publishing the messages
from sensor_msgs.msg import Image
from robo_magellan.msg import pose_data
from robo_magellan.msg import location_msgs as location_data
from cv_bridge import CvBridge, CvBridgeError
from robo_magellan.cone_finder import ConeFinder, ConeSeeker
class RosColorDepth:
node_name = "cone_finder"
bridge = CvBridge()
thread_lock = threading.Lock()
ts = time.clock()
cf = None
cs = None
lc = 0
pub = rospy.Publisher('cone_finder/locations', location_data, queue_size=10)
colorPub = rospy.Publisher("cone_finder/colorImage", Image, queue_size=10)
depthPub = rospy.Publisher("cone_finder/depthImage", Image, queue_size=10)
def __init__(self):
rospy.init_node('cone_finder')
self.started = True
self.show_all_contours = rospy.get_param('~show_all_contours', False)
minArea = rospy.get_param("~minConeArea", 300)
min_aspect_ratio = rospy.get_param('~min_aspect_ratio', 1.1)
# By default, allow a large Y coordinate for cones.
max_y = rospy.get_param('~max_y', 10000)
self.cf = ConeFinder(minArea, min_aspect_ratio, max_y)
self.cs = ConeSeeker()
max_contour_vertices = rospy.get_param('~max_contour_vertices', 1E9)
self.cf.set_max_contour_vertices(max_contour_vertices)
rospy.Subscriber("/camera/color/image_raw", Image, self.imageCallback)
self.depthImage = None
rospy.Subscriber("/camera/depth/image_raw", Image, self.depthCallback)
self.capture_video = rospy.get_param("~captureVideo", False)
self.publish_images = rospy.get_param("~publishImages", False)
self.thresholdAlgorithm = rospy.get_param('~thresholdAlgorithm', 'bin')
self.cf.setThresholdAlgorithm(self.thresholdAlgorithm)
self.contourFilterAlgorithm = rospy.get_param('~contourFilterAlgorithm',
'convexNull')
self.cf.setContourFilterAlgorithm(self.contourFilterAlgorithm)
if rospy.has_param('~binConfig'):
binConfig = rospy.get_param('~binConfig')
rospy.loginfo('Using bin configuration from %s' % binConfig)
self.cf.setBinConfiguration(binConfig)
self.cs.setIgnorePriorDetections(rospy.get_param('~ignorePriorDetections', True))
rospy.loginfo('Threshold algorithm %s' % self.thresholdAlgorithm)
rospy.loginfo("[%s] Initialized." %(self.node_name))
rospy.spin()
def imageCallback(self, colorImage):
thread = threading.Thread(target=self.processImage, args=(colorImage, self.depthImage))
thread.setDaemon(True)
thread.start()
def depthCallback(self, depthImage):
self.depthImage = depthImage
def start(self):
self.started = True
def pause(self):
self.started = False
def stop(self):
self.started = False
def markVideo(self, imghull, contours, poses):
(cl, conf, sadj, tadj) = self.cs.seek_cone(poses)
(ih, iw) = imghull.shape[:2]
if self.show_all_contours:
img_height = imghull.shape[0]
for c in contours:
if len(c) >= 3:
cv2.polylines(imghull, [c], True, (0,255,255), 1)
x,y,w,h = cv2.boundingRect(c)
area = cv2.contourArea(c)
msg_str = 'a={0:.0f} y={1:d}'.format(area, ih-y-h)
cv2.putText(imghull, msg_str, (x+w, y+h),
cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 255, 255), 2, cv2.LINE_AA)
if conf > 0.1:
pt1 = (iw/2 + cl.x - cl.w/2, ih - cl.y - cl.h)
pt2 = (iw/2 + cl.x + cl.w/2, ih - cl.y)
cv2.rectangle(imghull, pt1, pt2, (0, 0, 255), 3)
#conf_str = '{0:.2f}'.format(conf)
#cv2.putText(imghull, conf_str, pt1, cv2.FONT_HERSHEY_SIMPLEX,
# 1, (0, 0, 255), 2, cv2.LINE_AA)
msg_str = 'a={0:.0f} y={1:d}'.format(cl.area, cl.y)
cv2.putText(imghull, msg_str, pt1,
cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 2, cv2.LINE_AA)
msg_str = 'time:{0:.3f}'.format((time.clock() - self.ts)/self.lc)
        msgY = ih - 80  # shape[:2] is (height, width); anchor the text near the bottom edge
cv2.putText(imghull, msg_str, (10, msgY), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 0, 0), 2, cv2.LINE_AA)
alg_str = 'alg:{0}/{1}'.format(self.thresholdAlgorithm,
self.contourFilterAlgorithm)
cv2.putText(imghull, alg_str, (250, msgY), cv2.FONT_HERSHEY_SIMPLEX,
1, (0,0,255), 2, cv2.LINE_AA)
def publishImages(self, imghull, colorImage, depthImage):
ts = rospy.Time.now()
# Convert from the OpenCV images to ROS image messages and
# publish the marked-up images.
# (Why are we publishing the depth image when we haven't
# added any information to it?)
colorMsg = self.bridge.cv2_to_imgmsg(imghull, "bgr8")
colorMsg.header.stamp = ts
colorMsg.header.frame_id = colorImage.header.frame_id
self.colorPub.publish(colorMsg)
if depthImage is not None:
depthMsg = depthImage
depthMsg.header.stamp = ts
#depthMsg.header.frame_id = 'camera_link'
self.depthPub.publish(depthMsg)
def processImage(self, colorImage, depthImage):
# Skip this image if we're already processing an image.
# That is, if we cannot acquire the thread lock with no
# delay, then another thread has the lock and is processing
# an image. This reduces our effective frame rate to what
# we can process.
if not self.thread_lock.acquire(False):
return
        if not self.started:
            # Release the lock before bailing out; otherwise no later frame
            # could ever acquire it and processing would stop permanently.
            self.thread_lock.release()
            return
# Convert from a ROS image message to an OpenCV image.
cvRGB = self.bridge.imgmsg_to_cv2(colorImage, "bgr8")
ch, cw = cvRGB.shape[:2]
# Convert the depth image, if we have one.
if depthImage is not None:
cvDepth = self.bridge.imgmsg_to_cv2(depthImage)
dh, dw = cvDepth.shape[:2]
else:
cvDepth = None
dh, dw = ch, cw
        # If the depth image is a different size than the color image,
# resize the color image to match. (This assumes that the
# field of view of the two cameras is the same. Is this true?
# It seems better to convert the coordinates of the color
# image to world coordinates and then to depth image coordinates
# to find what portion of the depth image can be correlated to
# the RGB image, then either padding the depth image with zeros,
# if smaller than the RGB, or cropping to match the RGB field
# of view, if larger than the RGB.)
        if (ch != dh) or (cw != dw):  # resize if either dimension differs
cvRGB = cv2.resize(cvRGB, (dw, dh))
ch, cw = dh, dw
try:
self.lc = self.lc + 1
contours, poses, listOfCones = self.cf.find_cones(cvRGB, cvDepth)
# Use this function to capture video - here unmodified video
if self.capture_video:
self.cf.captureFrames(cvRGB, cvDepth)
imghull = cvRGB.copy()
loc = location_data()
loc.poses = poses
loc.header.stamp = rospy.Time.now()
self.pub.publish(loc)
if self.publish_images:
if len(poses):
                    # Outline the three largest cones - they are sorted by area
                    cv2.drawContours(imghull, listOfCones[0:3], -1, (0, 255, 0), 3)
self.markVideo(imghull, contours, poses)
self.publishImages(imghull, colorImage, depthImage)
if self.lc % 100 == 0:
msg_str = 'Frames: {0:d} time per frame: {1:.3f}s'.format(self.lc, (time.clock() - self.ts)/self.lc)
rospy.loginfo(msg_str)
self.lc = 0
self.ts = time.clock()
except CvBridgeError as e:
rospy.logerr(e)
self.thread_lock.release()
def find_cones_main():
r = RosColorDepth()
if __name__ == "__main__":
try:
find_cones_main()
except rospy.ROSInterruptException:
pass
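# A standalone sketch (hypothetical names, not part of this node) of the
# frame-dropping pattern processImage relies on: a non-blocking acquire
# skips the frame while a previous one is still being processed, and a
# try/finally guarantees the lock is released on every path.
#
#     import threading
#
#     lock = threading.Lock()
#
#     def handle(frame):
#         if not lock.acquire(False):
#             return  # drop this frame; another one is in flight
#         try:
#             process(frame)  # placeholder for the real per-frame work
#         finally:
#             lock.release()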
| |
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy import exists, and_
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import select
from csv2 import config
from lib.schema import *
import time
'''
dev code
= db_session.query(Cloud).filter(Cloud.cloud_type=="openstack")
db_session.merge(new_flav)
db_session.commit()
'''
def get_limits(group_name=None):
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Limit = Base.classes.cloud_limits
if group_name is None:
limit_list = db_session.query(Limit)
else:
limit_list = db_session.query(Limit).filter(Limit.group_name==group_name)
return limit_list
#
# This function accepts a group name and cloud name and returns all virtual
# machines related to that group and cloud; if no group or cloud name is
# given, it returns the entire list of VMs.
#
def get_vms(group_name=None, cloud_name=None):
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
VM = Base.classes.csv2_vms
if group_name is None:
vm_list = db_session.query(VM)
else:
if cloud_name is None:
vm_list = db_session.query(VM).filter(VM.group_name==group_name)
else:
vm_list = db_session.query(VM).filter(VM.group_name==group_name, VM.cloud_name==cloud_name)
return vm_list
def get_flavors(filter=None):
    Base = automap_base()  # automap is required for Base.prepare()/Base.classes below
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Flavors = Base.classes.cloud_flavors
flavor_list = db_session.query(Flavors)
return flavor_list
def get_images(filter=None):
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Images = Base.classes.cloud_images
image_list = db_session.query(Images)
return image_list
def get_networks(filter=None):
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Networks = Base.classes.cloud_networks
network_list = db_session.query(Networks)
return network_list
def get_groups(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
conn = engine.connect()
s = select([view_group_with_yaml])
return conn.execute(s)
def get_group_resources(group_name):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
conn = engine.connect()
s = select([view_group_resources]).where(view_group_resources.c.group_name == group_name)
return conn.execute(s)
def get_counts(group_name=None):
metadata = MetaData()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
conn = engine.connect()
s = select([view_group_list]).where(view_group_list.c.group_name == group_name)
return conn.execute(s)
def get_cloud_status(group_name):
metadata = MetaData()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
conn = engine.connect()
s = select([view_cloud_status]).where(view_cloud_status.c.group_name == group_name)
return conn.execute(s)
#
# This function accepts a user name and retrieves & returns all groups associated with the user
#
def get_user_groups(user):
user_group_list = []
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
user_groups = Base.classes.csv2_user_groups
user_group_rows = db_session.query(user_groups).filter(user_groups.username==user)
if user_group_rows is not None:
for row in user_group_rows:
user_group_list.append(row.group_name)
return user_group_list
#
# This function accepts a group name and returns all jobs related to that
# group; if no group name is given, it returns the entire list of jobs.
#
def get_condor_jobs(group_name=None):
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Jobs = Base.classes.condor_jobs
if group_name is None:
job_list = db_session.query(Jobs)
else:
job_list = db_session.query(Jobs).filter(Jobs.group_name==group_name)
return job_list
def get_condor_machines(group_name=None):
Base = automap_base()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Machines = Base.classes.condor_machines
if group_name is None:
machine_list = db_session.query(Machines)
else:
# machines do not currently have group_name in their classad (and therefore not in database)
# there are several possible solutions
# 1. find a way to inject it on boot
# 2. have the cscollector update the classad by cross referencing the job_id
# 3. do not add group_name to machine classads and instead cross reference job_ids every time to get machine list
#machine_list = db_session.query(Machines).filter(Machines.group_name=group_name)
machine_list = []
return machine_list
# add new group resources
def put_group_resources(query_dict):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
metadata = MetaData(bind=engine)
table = Table('csv2_group_resources', metadata, autoload=True)
db_session = Session(engine)
columns = table.c
action = query_dict['action']
    # Only accept data if the column exists in the table.
query_filtered = {}
for key in query_dict:
if key in columns:
query_filtered.update({key:query_dict[key]})
if action =="add":
if(db_session.query(exists().where(table.c.cloud_name==query_dict['cloud_name'] and table.c.group_name==query_dict['group_name'])).scalar()):
return 0
else:
ins = table.insert().values(query_filtered)
elif action =="modify":
#ins = table.update().where(table.c.cloud_name==query_dict['cloud_name_orig'] and table.c.group_name==query_dict['group_name']).values(query_filtered)
ins = table.update().where(table.c.cloud_name==query_dict['cloud_name'] and table.c.group_name==query_dict['group_name']).values(query_filtered)
elif action =="delete":
ins = table.delete(table.c.cloud_name==query_dict['cloud_name'] and table.c.group_name==query_dict['group_name'])
else:
return 0
conn = engine.connect()
conn.execute(ins)
return 1
# add new groups
def put_groups(query_dict):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
metadata = MetaData(bind=engine)
table = Table('csv2_groups', metadata, autoload=True)
db_session = Session(engine)
columns = table.c
action = query_dict['action']
    # Only accept data if the column exists in the table.
query_filtered = {}
for key in query_dict:
if key in columns:
query_filtered.update({key:query_dict[key]})
if action =="add":
if(db_session.query(exists().where(table.c.group_name==query_dict['group_name'])).scalar()):
return 0
else:
#if group_name in user_groups:
ins = table.insert().values(query_filtered)
elif action =="modify":
#ins = table.update().where(table.c.cloud_name==query_dict['cloud_name_orig'] and table.c.group_name==query_dict['group_name']).values(query_filtered)
ins = table.update().where(table.c.group_name==query_dict['group_name']).values(query_filtered)
elif action =="delete":
        ins = table.delete().where(table.c.group_name == query_dict['group_name'])
else:
return 0
conn = engine.connect()
conn.execute(ins)
return 1
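# A possible refactor sketch (an assumption, not part of csv2): every helper
# above creates its own engine and re-reflects the schema on each call, which
# is expensive. A lazily built module-level engine/Base pair could be shared.
_shared_engine = None
_shared_base = None

def _get_shared_session():
    """Sketch: return (Session, Base) bound to one lazily created engine."""
    global _shared_engine, _shared_base
    if _shared_engine is None:
        _shared_engine = create_engine(
            "mysql://" + config.db_user + ":" + config.db_password + "@" +
            config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
        _shared_base = automap_base()
        _shared_base.prepare(_shared_engine, reflect=True)
    return Session(_shared_engine), _shared_base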
| |
from __future__ import print_function, division
from math import log as _log
from .sympify import _sympify
from .cache import cacheit
from .core import C
from .logic import _fuzzy_group
from .singleton import S
from .expr import Expr
from .evalf import PrecisionExhausted
from .function import (_coeff_isneg, expand_complex, expand_multinomial,
expand_mul)
from .logic import fuzzy_bool
from .compatibility import as_int, xrange
from .evaluate import global_evaluate
from sympy.mpmath.libmp import sqrtrem as mpmath_sqrtrem
from sympy.utilities.iterables import sift
def integer_nthroot(y, n):
"""
Return a tuple containing x = floor(y**(1/n))
and a boolean indicating whether the result is exact (that is,
whether x**n == y).
>>> from sympy import integer_nthroot
>>> integer_nthroot(16,2)
(4, True)
>>> integer_nthroot(26,2)
(5, False)
"""
y, n = int(y), int(n)
if y < 0:
raise ValueError("y must be nonnegative")
if n < 1:
raise ValueError("n must be positive")
if y in (0, 1):
return y, True
if n == 1:
return y, True
if n == 2:
x, rem = mpmath_sqrtrem(y)
return int(x), not rem
if n > y:
return 1, False
# Get initial estimate for Newton's method. Care must be taken to
# avoid overflow
try:
guess = int(y**(1./n) + 0.5)
except OverflowError:
exp = _log(y, 2)/n
if exp > 53:
shift = int(exp - 53)
guess = int(2.0**(exp - shift) + 1) << shift
else:
guess = int(2.0**exp)
#print n
if guess > 2**50:
# Newton iteration
xprev, x = -1, guess
while 1:
t = x**(n - 1)
#xprev, x = x, x - (t*x-y)//(n*t)
xprev, x = x, ((n - 1)*x + y//t)//n
#print n, x-xprev, abs(x-xprev) < 2
if abs(x - xprev) < 2:
break
else:
x = guess
# Compensate
t = x**n
while t < y:
x += 1
t = x**n
while t > y:
x -= 1
t = x**n
return x, t == y
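# Note on the branch above (descriptive only): for large guesses the loop is
# integer Newton iteration for the n-th root, repeating
#     x_{k+1} = ((n - 1)*x_k + y // x_k**(n - 1)) // n
# until successive iterates differ by less than 2; the final compensation
# loops then nudge x so that x**n <= y < (x + 1)**n.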
class Pow(Expr):
"""
Defines the expression x**y as "x raised to a power y"
Singleton definitions involving (0, 1, -1, oo, -oo):
+--------------+---------+-----------------------------------------------+
| expr | value | reason |
+==============+=========+===============================================+
| z**0 | 1 | Although arguments over 0**0 exist, see [2]. |
+--------------+---------+-----------------------------------------------+
| z**1 | z | |
+--------------+---------+-----------------------------------------------+
| (-oo)**(-1) | 0 | |
+--------------+---------+-----------------------------------------------+
| (-1)**-1 | -1 | |
+--------------+---------+-----------------------------------------------+
| S.Zero**-1 | oo | This is not strictly true, as 0**-1 may be |
    |              |         | undefined, but is convenient in some contexts |
| | | where the base is assumed to be positive. |
+--------------+---------+-----------------------------------------------+
| 1**-1 | 1 | |
+--------------+---------+-----------------------------------------------+
| oo**-1 | 0 | |
+--------------+---------+-----------------------------------------------+
| 0**oo | 0 | Because for all complex numbers z near |
| | | 0, z**oo -> 0. |
+--------------+---------+-----------------------------------------------+
    | 0**-oo       | oo      | This is not strictly true, as 0**-oo may be   |
| | | oscillating between positive and negative |
| | | values or rotating in the complex plane. |
| | | It is convenient, however, when the base |
| | | is positive. |
+--------------+---------+-----------------------------------------------+
| 1**oo | nan | Because there are various cases where |
| 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), |
| | | but lim( x(t)**y(t), t) != 1. See [3]. |
+--------------+---------+-----------------------------------------------+
| (-1)**oo | nan | Because of oscillations in the limit. |
| (-1)**(-oo) | | |
+--------------+---------+-----------------------------------------------+
| oo**oo | oo | |
+--------------+---------+-----------------------------------------------+
| oo**-oo | 0 | |
+--------------+---------+-----------------------------------------------+
| (-oo)**oo | nan | |
| (-oo)**-oo | | |
+--------------+---------+-----------------------------------------------+
    Because symbolic computations are more flexible than floating point
calculations and we prefer to never return an incorrect answer,
we choose not to conform to all IEEE 754 conventions. This helps
us avoid extra test-case code in the calculation of limits.
See Also
========
Infinity
NegativeInfinity
NaN
References
==========
.. [1] http://en.wikipedia.org/wiki/Exponentiation
.. [2] http://en.wikipedia.org/wiki/Exponentiation#Zero_to_the_power_of_zero
.. [3] http://en.wikipedia.org/wiki/Indeterminate_forms
"""
is_Pow = True
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, b, e, evaluate=None):
if evaluate is None:
evaluate = global_evaluate[0]
from sympy.functions.elementary.exponential import exp_polar
b = _sympify(b)
e = _sympify(e)
if evaluate:
if e is S.Zero:
return S.One
elif e is S.One:
return b
elif e.is_integer and _coeff_isneg(b):
if e.is_even:
b = -b
elif e.is_odd:
return -Pow(-b, e)
if b is S.One:
if e in (S.NaN, S.Infinity, -S.Infinity):
return S.NaN
return S.One
elif S.NaN in (b, e): # XXX S.NaN**x -> S.NaN under assumption that x != 0
return S.NaN
else:
# recognize base as E
if not e.is_Atom and b is not S.Exp1 and b.func is not exp_polar:
from sympy import numer, denom, log, sign, im, factor_terms
c, ex = factor_terms(e, sign=False).as_coeff_Mul()
den = denom(ex)
if den.func is log and den.args[0] == b:
return S.Exp1**(c*numer(ex))
elif den.is_Add:
s = sign(im(b))
if s.is_Number and s and den == \
log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi:
return S.Exp1**(c*numer(ex))
obj = b._eval_power(e)
if obj is not None:
return obj
obj = Expr.__new__(cls, b, e)
obj.is_commutative = (b.is_commutative and e.is_commutative)
return obj
@property
def base(self):
return self._args[0]
@property
def exp(self):
return self._args[1]
@classmethod
def class_key(cls):
return 3, 2, cls.__name__
def _eval_power(self, other):
b, e = self.as_base_exp()
if b is S.NaN:
return (b**e)**other # let __new__ handle it
s = None
if other.is_integer:
s = 1
elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)...
s = 1
elif e.is_real is not None:
# helper functions ===========================
def _half(e):
"""Return True if the exponent has a literal 2 as the
denominator, else None."""
if getattr(e, 'q', None) == 2:
return True
n, d = e.as_numer_denom()
if n.is_integer and d == 2:
return True
def _n2(e):
"""Return ``e`` evaluated to a Number with 2 significant
digits, else None."""
try:
rv = e.evalf(2, strict=True)
if rv.is_Number:
return rv
except PrecisionExhausted:
pass
# ===================================================
if e.is_real:
# we need _half(other) with constant floor or
# floor(S.Half - e*arg(b)/2/pi) == 0
# handle -1 as special case
if (e == -1) == True:
# floor arg. is 1/2 + arg(b)/2/pi
if _half(other):
if b.is_negative is True:
return S.NegativeOne**other*Pow(-b, e*other)
if b.is_real is False:
return Pow(b.conjugate()/C.Abs(b)**2, other)
elif e.is_even:
if b.is_real:
b = abs(b)
if b.is_imaginary:
b = abs(C.im(b))*S.ImaginaryUnit
if (abs(e) < 1) == True or (e == 1) == True:
s = 1 # floor = 0
elif b.is_nonnegative:
s = 1 # floor = 0
elif C.re(b).is_nonnegative and (abs(e) < 2) == True:
s = 1 # floor = 0
elif C.im(b).is_nonzero and (abs(e) == 2) == True:
s = 1 # floor = 0
elif _half(other):
s = C.exp(2*S.Pi*S.ImaginaryUnit*other*C.floor(
S.Half - e*C.arg(b)/(2*S.Pi)))
if s.is_real and _n2(C.sign(s) - s) == 0:
s = C.sign(s)
else:
s = None
else:
# e.is_real is False requires:
# _half(other) with constant floor or
# floor(S.Half - im(e*log(b))/2/pi) == 0
try:
s = C.exp(2*S.ImaginaryUnit*S.Pi*other*
C.floor(S.Half - C.im(e*C.log(b))/2/S.Pi))
# be careful to test that s is -1 or 1 b/c sign(I) == I:
# so check that s is real
if s.is_real and _n2(C.sign(s) - s) == 0:
s = C.sign(s)
else:
s = None
except PrecisionExhausted:
s = None
if s is not None:
return s*Pow(b, e*other)
def _eval_is_even(self):
if self.exp.is_integer and self.exp.is_positive:
return self.base.is_even
def _eval_is_positive(self):
if self.base.is_positive:
if self.exp.is_real:
return True
elif self.base.is_negative:
if self.exp.is_even:
return True
if self.exp.is_odd:
return False
elif self.base.is_nonpositive:
if self.exp.is_odd:
return False
elif self.base.is_imaginary:
if self.exp.is_integer:
m = self.exp % 4
if m.is_zero:
return True
if m.is_integer and m.is_zero is False:
return False
if self.exp.is_imaginary:
return C.log(self.base).is_imaginary
def _eval_is_negative(self):
if self.base.is_negative:
if self.exp.is_odd:
return True
if self.exp.is_even:
return False
elif self.base.is_positive:
if self.exp.is_real:
return False
elif self.base.is_nonnegative:
if self.exp.is_real:
return False
elif self.base.is_nonpositive:
if self.exp.is_even:
return False
elif self.base.is_real:
if self.exp.is_even:
return False
def _eval_is_integer(self):
b, e = self.args
c1 = b.is_integer
c2 = e.is_integer
if c1 is None or c2 is None:
return None
if not c1 and e.is_nonnegative: # rat**nonneg
return False
if c1 and c2: # int**int
if b is S.NegativeOne:
return True
if e.is_nonnegative or e.is_positive:
return True
if self.exp.is_negative:
return False
if c1 and e.is_negative and e.is_bounded: # int**neg
return False
if b.is_Number and e.is_Number:
# int**nonneg or rat**?
check = self.func(*self.args)
return check.is_Integer
def _eval_is_real(self):
real_b = self.base.is_real
if real_b is None:
if self.base.func == C.exp and self.base.args[0].is_imaginary:
return self.exp.is_imaginary
return
real_e = self.exp.is_real
if real_e is None:
return
if real_b and real_e:
if self.base.is_positive:
return True
elif self.base.is_nonnegative:
if self.exp.is_nonnegative:
return True
else:
if self.exp.is_integer:
return True
elif self.base.is_negative:
if self.exp.is_Rational:
return False
if real_e and self.exp.is_negative:
return Pow(self.base, -self.exp).is_real
im_b = self.base.is_imaginary
im_e = self.exp.is_imaginary
if im_b:
if self.exp.is_integer:
if self.exp.is_even:
return True
elif self.exp.is_odd:
return False
elif im_e and C.log(self.base).is_imaginary:
return True
elif self.exp.is_Add:
c, a = self.exp.as_coeff_Add()
if c and c.is_Integer:
return C.Mul(
self.base**c, self.base**a, evaluate=False).is_real
elif self.base in (-S.ImaginaryUnit, S.ImaginaryUnit):
if (self.exp/2).is_integer is False:
return False
if real_b and im_e:
if self.base is S.NegativeOne:
return True
c = self.exp.coeff(S.ImaginaryUnit)
if c:
ok = (c*C.log(self.base)/S.Pi).is_Integer
if ok is not None:
return ok
if real_b is False: # we already know it's not imag
i = C.arg(self.base)*self.exp/S.Pi
return i.is_integer
def _eval_is_complex(self):
if all(a.is_complex for a in self.args):
return True
def _eval_is_imaginary(self):
if self.base.is_imaginary:
if self.exp.is_integer:
odd = self.exp.is_odd
if odd is not None:
return odd
return
if self.exp.is_imaginary:
imlog = C.log(self.base).is_imaginary
if imlog is not None:
return False # I**i -> real; (2*I)**i -> complex ==> not imaginary
if self.base.is_real and self.exp.is_real:
if self.base.is_positive:
return False
else:
rat = self.exp.is_rational
if not rat:
return rat
if self.exp.is_integer:
return False
else:
half = (2*self.exp).is_integer
if half:
return self.base.is_negative
return half
if self.base.is_real is False: # we already know it's not imag
i = C.arg(self.base)*self.exp/S.Pi
return (2*i).is_odd
def _eval_is_odd(self):
if self.exp.is_integer:
if self.exp.is_positive:
return self.base.is_odd
elif self.exp.is_nonnegative and self.base.is_odd:
return True
elif self.base is S.NegativeOne:
return True
def _eval_is_bounded(self):
if self.exp.is_negative:
if self.base.is_zero:
return False
if self.base.is_unbounded:
return True
c1 = self.base.is_bounded
if c1 is None:
return
c2 = self.exp.is_bounded
if c2 is None:
return
if c1 and c2:
if self.exp.is_nonnegative or self.base.is_nonzero:
return True
def _eval_is_polar(self):
return self.base.is_polar
def _eval_subs(self, old, new):
def _check(ct1, ct2, old):
"""Return bool, pow where, if bool is True, then the exponent of
Pow `old` will combine with `pow` so the substitution is valid,
            otherwise bool will be False.

            cti are the coefficient and terms of an exponent of self or old.
In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y)
will give y**2 since (b**x)**2 == b**(2*x); if that equality does
not hold then the substitution should not occur so `bool` will be
False.
"""
coeff1, terms1 = ct1
coeff2, terms2 = ct2
if terms1 == terms2:
pow = coeff1/coeff2
try:
pow = as_int(pow)
combines = True
except ValueError:
combines = Pow._eval_power(
Pow(*old.as_base_exp(), evaluate=False),
pow) is not None
return combines, pow
return False, None
if old == self.base:
return new**self.exp._subs(old, new)
if old.func is self.func and self.base is old.base:
if self.exp.is_Add is False:
ct1 = self.exp.as_independent(C.Symbol, as_Add=False)
ct2 = old.exp.as_independent(C.Symbol, as_Add=False)
ok, pow = _check(ct1, ct2, old)
if ok:
# issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2
return self.func(new, pow)
else: # b**(6*x+a).subs(b**(3*x), y) -> y**2 * b**a
# exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2))
oarg = old.exp
new_l = []
o_al = []
ct2 = oarg.as_coeff_mul()
for a in self.exp.args:
newa = a._subs(old, new)
ct1 = newa.as_coeff_mul()
ok, pow = _check(ct1, ct2, old)
if ok:
new_l.append(new**pow)
continue
o_al.append(newa)
if new_l:
new_l.append(Pow(self.base, Add(*o_al), evaluate=False))
return Mul(*new_l)
if old.func is C.exp and self.exp.is_real and self.base.is_positive:
ct1 = old.args[0].as_independent(C.Symbol, as_Add=False)
ct2 = (self.exp*C.log(self.base)).as_independent(
C.Symbol, as_Add=False)
ok, pow = _check(ct1, ct2, old)
if ok:
return self.func(new, pow) # (2**x).subs(exp(x*log(2)), z) -> z
def as_base_exp(self):
"""Return base and exp of self.
If base is 1/Integer, then return Integer, -exp. If this extra
processing is not needed, the base and exp properties will
give the raw arguments
Examples
========
>>> from sympy import Pow, S
>>> p = Pow(S.Half, 2, evaluate=False)
>>> p.as_base_exp()
(2, -2)
>>> p.args
(1/2, 2)
"""
b, e = self.args
if b.is_Rational and b.p == 1:
return Integer(b.q), -e
return b, e
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import adjoint
i, p = self.exp.is_integer, self.base.is_positive
if i:
return adjoint(self.base)**self.exp
if p:
return self.base**adjoint(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return adjoint(expanded)
def _eval_conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
i, p = self.exp.is_integer, self.base.is_positive
if i:
return c(self.base)**self.exp
if p:
return self.base**c(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return c(expanded)
def _eval_transpose(self):
from sympy.functions.elementary.complexes import transpose
i, p = self.exp.is_integer, self.base.is_complex
if p:
return self.base**self.exp
if i:
return transpose(self.base)**self.exp
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return transpose(expanded)
def _eval_expand_power_exp(self, **hints):
"""a**(n+m) -> a**n*a**m"""
b = self.base
e = self.exp
if e.is_Add and e.is_commutative:
expr = []
for x in e.args:
expr.append(self.func(self.base, x))
return Mul(*expr)
return self.func(b, e)
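    # Doctest-style sketch of the hint this method implements (assuming
    # sympy's public expand API):
    #
    #     >>> from sympy.abc import a, b, x
    #     >>> (x**(a + b)).expand(power_exp=True)
    #     x**a*x**b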
def _eval_expand_power_base(self, **hints):
"""(a*b)**n -> a**n * b**n"""
force = hints.get('force', False)
b = self.base
e = self.exp
if not b.is_Mul:
return self
cargs, nc = b.args_cnc(split_1=False)
# expand each term - this is top-level-only
# expansion but we have to watch out for things
# that don't have an _eval_expand method
if nc:
nc = [i._eval_expand_power_base(**hints)
if hasattr(i, '_eval_expand_power_base') else i
for i in nc]
if e.is_Integer:
if e.is_positive:
rv = Mul(*nc*e)
else:
rv = 1/Mul(*nc*-e)
if cargs:
rv *= Mul(*cargs)**e
return rv
if not cargs:
return self.func(Mul(*nc), e, evaluate=False)
nc = [Mul(*nc)]
# sift the commutative bases
def pred(x):
if x is S.ImaginaryUnit:
return S.ImaginaryUnit
polar = x.is_polar
if polar:
return True
if polar is None:
return fuzzy_bool(x.is_nonnegative)
sifted = sift(cargs, pred)
nonneg = sifted[True]
other = sifted[None]
neg = sifted[False]
imag = sifted[S.ImaginaryUnit]
if imag:
I = S.ImaginaryUnit
i = len(imag) % 4
if i == 0:
pass
elif i == 1:
other.append(I)
elif i == 2:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(S.NegativeOne)
else:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(S.NegativeOne)
other.append(I)
del imag
# bring out the bases that can be separated from the base
if force or e.is_integer:
# treat all commutatives the same and put nc in other
cargs = nonneg + neg + other
other = nc
else:
# this is just like what is happening automatically, except
# that now we are doing it for an arbitrary exponent for which
# no automatic expansion is done
assert not e.is_Integer
# handle negatives by making them all positive and putting
# the residual -1 in other
if len(neg) > 1:
o = S.One
if not other and neg[0].is_Number:
o *= neg.pop(0)
if len(neg) % 2:
o = -o
for n in neg:
nonneg.append(-n)
if o is not S.One:
other.append(o)
elif neg and other:
if neg[0].is_Number and neg[0] is not S.NegativeOne:
other.append(S.NegativeOne)
nonneg.append(-neg[0])
else:
other.extend(neg)
else:
other.extend(neg)
del neg
cargs = nonneg
other += nc
rv = S.One
if cargs:
rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs])
if other:
rv *= self.func(Mul(*other), e, evaluate=False)
return rv
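    # Doctest-style sketch of the hint this method implements (assuming
    # sympy's public expand_power_base helper):
    #
    #     >>> from sympy import expand_power_base
    #     >>> from sympy.abc import n, x, y
    #     >>> expand_power_base((x*y)**n, force=True)
    #     x**n*y**n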
def _eval_expand_multinomial(self, **hints):
"""(a+b+..) ** n -> a**n + n*a**(n-1)*b + .., n is nonzero integer"""
base, exp = self.args
result = self
if exp.is_Rational and exp.p > 0 and base.is_Add:
if not exp.is_Integer:
n = Integer(exp.p // exp.q)
if not n:
return result
else:
radical, result = self.func(base, exp - n), []
expanded_base_n = self.func(base, n)
if expanded_base_n.is_Pow:
expanded_base_n = \
expanded_base_n._eval_expand_multinomial()
for term in Add.make_args(expanded_base_n):
result.append(term*radical)
return Add(*result)
n = int(exp)
if base.is_commutative:
order_terms, other_terms = [], []
for b in base.args:
if b.is_Order:
order_terms.append(b)
else:
other_terms.append(b)
if order_terms:
# (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
f = Add(*other_terms)
o = Add(*order_terms)
if n == 2:
return expand_multinomial(f**n, deep=False) + n*f*o
else:
g = expand_multinomial(f**(n - 1), deep=False)
return expand_mul(f*g, deep=False) + n*g*o
if base.is_number:
# Efficiently expand expressions of the form (a + b*I)**n
# where 'a' and 'b' are real numbers and 'n' is integer.
a, b = base.as_real_imag()
if a.is_Rational and b.is_Rational:
if not a.is_Integer:
if not b.is_Integer:
k = self.func(a.q * b.q, n)
a, b = a.p*b.q, a.q*b.p
else:
k = self.func(a.q, n)
a, b = a.p, a.q*b
elif not b.is_Integer:
k = self.func(b.q, n)
a, b = a*b.q, b.p
else:
k = 1
a, b, c, d = int(a), int(b), 1, 0
while n:
if n & 1:
c, d = a*c - b*d, b*c + a*d
n -= 1
a, b = a*a - b*b, 2*a*b
n //= 2
I = S.ImaginaryUnit
if k == 1:
return c + I*d
else:
return Integer(c)/k + I*d/k
p = other_terms
# (x+y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
# in this particular example:
# p = [x,y]; n = 3
# so now it's easy to get the correct result -- we get the
# coefficients first:
from sympy import multinomial_coefficients
from sympy.polys.polyutils import basic_from_dict
expansion_dict = multinomial_coefficients(len(p), n)
# in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
# and now construct the expression.
return basic_from_dict(expansion_dict, *p)
else:
if n == 2:
return Add(*[f*g for f in base.args for g in base.args])
else:
multi = (base**(n - 1))._eval_expand_multinomial()
if multi.is_Add:
return Add(*[f*g for f in base.args
for g in multi.args])
else:
# XXX can this ever happen if base was an Add?
return Add(*[f*multi for f in base.args])
elif (exp.is_Rational and exp.p < 0 and base.is_Add and
abs(exp.p) > exp.q):
return 1 / self.func(base, -exp)._eval_expand_multinomial()
elif exp.is_Add and base.is_Number:
            # n**(a + b) --> n**a * n**b, where n, a and b are Numbers
coeff, tail = S.One, S.Zero
for term in exp.args:
if term.is_Number:
coeff *= self.func(base, term)
else:
tail += term
return coeff * self.func(base, tail)
else:
return result
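    # Doctest-style sketch of the expansion this method implements (via the
    # expand_multinomial helper imported at the top of this module):
    #
    #     >>> from sympy import expand_multinomial
    #     >>> from sympy.abc import x, y
    #     >>> expand_multinomial((x + y)**3)
    #     x**3 + 3*x**2*y + 3*x*y**2 + y**3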
def as_real_imag(self, deep=True, **hints):
from sympy.polys.polytools import poly
if self.exp.is_Integer:
exp = self.exp
re, im = self.base.as_real_imag(deep=deep)
if not im:
return self, S.Zero
a, b = symbols('a b', cls=Dummy)
if exp >= 0:
if re.is_Number and im.is_Number:
# We can be more efficient in this case
expr = expand_multinomial(self.base**exp)
return expr.as_real_imag()
expr = poly(
(a + b)**exp) # a = re, b = im; expr = (a + b*I)**exp
else:
mag = re**2 + im**2
re, im = re/mag, -im/mag
if re.is_Number and im.is_Number:
# We can be more efficient in this case
expr = expand_multinomial((re + im*S.ImaginaryUnit)**-exp)
return expr.as_real_imag()
expr = poly((a + b)**-exp)
# Terms with even b powers will be real
r = [i for i in expr.terms() if not i[0][1] % 2]
re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
# Terms with odd b powers will be imaginary
r = [i for i in expr.terms() if i[0][1] % 4 == 1]
im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
r = [i for i in expr.terms() if i[0][1] % 4 == 3]
im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
return (re_part.subs({a: re, b: S.ImaginaryUnit*im}),
im_part1.subs({a: re, b: im}) + im_part3.subs({a: re, b: -im}))
elif self.exp.is_Rational:
re, im = self.base.as_real_imag(deep=deep)
if im.is_zero and self.exp is S.Half:
if re.is_nonnegative:
return self, S.Zero
if re.is_nonpositive:
return S.Zero, (-self.base)**self.exp
# XXX: This is not totally correct since for x**(p/q) with
# x being imaginary there are actually q roots, but
# only a single one is returned from here.
r = self.func(self.func(re, 2) + self.func(im, 2), S.Half)
t = C.atan2(im, re)
rp, tp = self.func(r, self.exp), t*self.exp
return (rp*C.cos(tp), rp*C.sin(tp))
else:
if deep:
hints['complex'] = False
expanded = self.expand(deep, **hints)
if hints.get('ignore') == expanded:
return None
else:
return (C.re(expanded), C.im(expanded))
else:
return (C.re(self), C.im(self))
def _eval_derivative(self, s):
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * C.log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
if exp.is_negative and base.is_number and base.is_real is False:
base = base.conjugate() / (base * base.conjugate())._evalf(prec)
exp = -exp
return self.func(base, exp).expand()
return self.func(base, exp)
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return bool(self.base._eval_is_polynomial(syms) and
self.exp.is_Integer and (self.exp >= 0))
else:
return True
def _eval_is_rational(self):
p = self.func(*self.as_base_exp()) # in case it's unevaluated
if not p.is_Pow:
return p.is_rational
b, e = p.as_base_exp()
if e.is_Rational and b.is_Rational:
# we didn't check that e is not an Integer
# because Rational**Integer autosimplifies
return False
if e.is_integer:
return b.is_rational
def _eval_is_rational_function(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_rational_function(syms) and \
self.exp.is_Integer
else:
return True
def _eval_is_algebraic_expr(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_algebraic_expr(syms) and \
self.exp.is_Rational
else:
return True
def as_numer_denom(self):
if not self.is_commutative:
return self, S.One
base, exp = self.as_base_exp()
n, d = base.as_numer_denom()
# this should be the same as ExpBase.as_numer_denom wrt
# exponent handling
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
int_exp = exp.is_integer
# the denominator cannot be separated from the numerator if
# its sign is unknown unless the exponent is an integer, e.g.
# sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the
# denominator is negative the numerator and denominator can
# be negated and the denominator (now positive) separated.
if not (d.is_real or int_exp):
n = base
d = S.One
dnonpos = d.is_nonpositive
if dnonpos:
n, d = -n, -d
elif dnonpos is None and not int_exp:
n = base
d = S.One
if neg_exp:
n, d = d, n
exp = -exp
return self.func(n, exp), self.func(d, exp)
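    # Doctest-style sketch of the negative-exponent handling above:
    #
    #     >>> from sympy.abc import x
    #     >>> (x**-2).as_numer_denom()
    #     (1, x**2)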
def matches(self, expr, repl_dict={}, old=False):
expr = _sympify(expr)
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = self.exp.matches(S.Zero, d)
if d is not None:
return d
# make sure the expression to be matched is an Expr
if not isinstance(expr, Expr):
return None
b, e = expr.as_base_exp()
# special case number
sb, se = self.as_base_exp()
if sb.is_Symbol and se.is_Integer and expr:
if e.is_rational:
return sb.matches(b**(e/se), repl_dict)
return sb.matches(expr**(1/se), repl_dict)
d = repl_dict.copy()
d = self.base.matches(b, d)
if d is None:
return None
d = self.exp.xreplace(d).matches(e, d)
if d is None:
return Expr.matches(self, expr, repl_dict)
return d
def _eval_nseries(self, x, n, logx):
# NOTE! This function is an important part of the gruntz algorithm
# for computing limits. It has to return a generalized power
# series with coefficients in C(log, log(x)). In more detail:
# It has to return an expression
# c_0*x**e_0 + c_1*x**e_1 + ... (finitely many terms)
# where e_i are numbers (not necessarily integers) and c_i are
# expressions involving only numbers, the log function, and log(x).
from sympy import powsimp, collect, exp, log, O, ceiling
b, e = self.args
if e.is_Integer:
if e > 0:
# positive integer powers are easy to expand, e.g.:
# sin(x)**4 = (x-x**3/3+...)**4 = ...
return expand_multinomial(self.func(b._eval_nseries(x, n=n,
logx=logx), e), deep=False)
elif e is S.NegativeOne:
# this is also easy to expand using the formula:
# 1/(1 + x) = 1 - x + x**2 - x**3 ...
# so we need to rewrite base to the form "1+x"
nuse = n
cf = 1
try:
ord = b.as_leading_term(x)
cf = C.Order(ord, x).getn()
if cf and cf.is_Number:
nuse = n + 2*ceiling(cf)
else:
cf = 1
except NotImplementedError:
pass
b_orig, prefactor = b, O(1, x)
while prefactor.is_Order:
nuse += 1
b = b_orig._eval_nseries(x, n=nuse, logx=logx)
prefactor = b.as_leading_term(x)
# express "rest" as: rest = 1 + k*x**l + ... + O(x**n)
rest = expand_mul((b - prefactor)/prefactor)
if rest.is_Order:
return 1/prefactor + rest/prefactor + O(x**n, x)
k, l = rest.leadterm(x)
if l.is_Rational and l > 0:
pass
elif l.is_number and l > 0:
l = l.evalf()
elif l == 0:
k = k.simplify()
if k == 0:
# if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to
# factor the w**4 out using collect:
return 1/collect(prefactor, x)
else:
raise NotImplementedError()
else:
raise NotImplementedError()
if cf < 0:
cf = S.One/abs(cf)
try:
dn = C.Order(1/prefactor, x).getn()
if dn and dn < 0:
pass
else:
dn = 0
except NotImplementedError:
dn = 0
terms = [1/prefactor]
for m in xrange(1, ceiling((n - dn)/l*cf)):
new_term = terms[-1]*(-rest)
if new_term.is_Pow:
new_term = new_term._eval_expand_multinomial(
deep=False)
else:
new_term = expand_mul(new_term, deep=False)
terms.append(new_term)
terms.append(O(x**n, x))
return powsimp(Add(*terms), deep=True, combine='exp')
else:
# negative powers are rewritten to the cases above, for
# example:
# sin(x)**(-4) = 1/( sin(x)**4) = ...
# and expand the denominator:
nuse, denominator = n, O(1, x)
while denominator.is_Order:
denominator = (b**(-e))._eval_nseries(x, n=nuse, logx=logx)
nuse += 1
if 1/denominator == self:
return self
# now we have a type 1/f(x), that we know how to expand
return (1/denominator)._eval_nseries(x, n=n, logx=logx)
if e.has(Symbol):
return exp(e*log(b))._eval_nseries(x, n=n, logx=logx)
# see if the base is as simple as possible
bx = b
while bx.is_Pow and bx.exp.is_Rational:
bx = bx.base
if bx == x:
return self
# work for b(x)**e where e is not an Integer and does not contain x
# and hopefully has no other symbols
def e2int(e):
"""return the integer value (if possible) of e and a
flag indicating whether it is bounded or not."""
n = e.limit(x, 0)
unbounded = n.is_unbounded
if not unbounded:
# XXX was int or floor intended? int used to behave like floor
# so int(-Rational(1, 2)) returned -1 rather than int's 0
try:
n = int(n)
except TypeError:
# well, n is something more complicated (like 1 + log(2))
try:
n = int(n.evalf()) + 1 # XXX why is 1 being added?
except TypeError:
pass # hope that base allows this to be resolved
n = _sympify(n)
return n, unbounded
order = O(x**n, x)
ei, unbounded = e2int(e)
b0 = b.limit(x, 0)
if unbounded and (b0 is S.One or b0.has(Symbol)):
# XXX what order
if b0 is S.One:
resid = (b - 1)
if resid.is_positive:
return S.Infinity
elif resid.is_negative:
return S.Zero
raise ValueError('cannot determine sign of %s' % resid)
return b0**ei
if (b0 is S.Zero or b0.is_unbounded):
if unbounded is not False:
return b0**e # XXX what order
if not ei.is_number: # if not, how will we proceed?
raise ValueError(
'expecting numerical exponent but got %s' % ei)
nuse = n - ei
if e.is_real and e.is_positive:
lt = b.as_leading_term(x)
# Try to correct nuse (= m) guess from:
# (lt + rest + O(x**m))**e =
# lt**e*(1 + rest/lt + O(x**m)/lt)**e =
# lt**e + ... + O(x**m)*lt**(e - 1) = ... + O(x**n)
try:
cf = C.Order(lt, x).getn()
nuse = ceiling(n - cf*(e - 1))
except NotImplementedError:
pass
bs = b._eval_nseries(x, n=nuse, logx=logx)
terms = bs.removeO()
if terms.is_Add:
bs = terms
lt = terms.as_leading_term(x)
# bs -> lt + rest -> lt*(1 + (bs/lt - 1))
return ((self.func(lt, e) * self.func((bs/lt).expand(), e).nseries(
x, n=nuse, logx=logx)).expand() + order)
if bs.is_Add:
from sympy import O
# So, bs + O() == terms
c = Dummy('c')
res = []
for arg in bs.args:
if arg.is_Order:
arg = c*arg.expr
res.append(arg)
bs = Add(*res)
rv = (bs**e).series(x).subs(c, O(1, x))
rv += order
return rv
rv = bs**e
if terms != bs:
rv += order
return rv
# either b0 is bounded but neither 1 nor 0 or e is unbounded
# b -> b0 + (b-b0) -> b0 * (1 + (b/b0-1))
o2 = order*(b0**-e)
z = (b/b0 - 1)
o = O(z, x)
#r = self._compute_oseries3(z, o2, self.taylor_term)
if o is S.Zero or o2 is S.Zero:
unbounded = True
else:
if o.expr.is_number:
e2 = log(o2.expr*x)/log(x)
else:
e2 = log(o2.expr)/log(o.expr)
n, unbounded = e2int(e2)
if unbounded:
# requested accuracy gives infinite series,
# order is probably non-polynomial e.g. O(exp(-1/x), x).
r = 1 + z
else:
l = []
g = None
for i in xrange(n + 2):
g = self._taylor_term(i, z, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
r = Add(*l)
return expand_mul(r*b0**e) + order
def _eval_as_leading_term(self, x):
if not self.exp.has(x):
return self.func(self.base.as_leading_term(x), self.exp)
return C.exp(self.exp * C.log(self.base)).as_leading_term(x)
@cacheit
def _taylor_term(self, n, x, *previous_terms): # of (1+x)**e
return C.binomial(self.exp, n) * self.func(x, n)
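    # Illustrative term: for (1 + x)**e the n-th term is binomial(e, n)*x**n,
    # e.g. n = 2 gives e*(e - 1)/2 * x**2.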
def _sage_(self):
return self.args[0]._sage_()**self.args[1]._sage_()
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import sqrt
>>> sqrt(4 + 4*sqrt(2)).as_content_primitive()
(2, sqrt(1 + sqrt(2)))
>>> sqrt(3 + 3*sqrt(2)).as_content_primitive()
(1, sqrt(3)*sqrt(1 + sqrt(2)))
>>> from sympy import expand_power_base, powsimp, Mul
>>> from sympy.abc import x, y
>>> ((2*x + 2)**2).as_content_primitive()
(4, (x + 1)**2)
>>> (4**((1 + y)/2)).as_content_primitive()
(2, 4**(y/2))
>>> (3**((1 + y)/2)).as_content_primitive()
(1, 3**((y + 1)/2))
>>> (3**((5 + y)/2)).as_content_primitive()
(9, 3**((y + 1)/2))
>>> eq = 3**(2 + 2*x)
>>> powsimp(eq) == eq
True
>>> eq.as_content_primitive()
(9, 3**(2*x))
>>> powsimp(Mul(*_))
3**(2*x + 2)
>>> eq = (2 + 2*x)**y
>>> s = expand_power_base(eq); s.is_Mul, s
(False, (2*x + 2)**y)
>>> eq.as_content_primitive()
(1, (2*(x + 1))**y)
>>> s = expand_power_base(_[1]); s.is_Mul, s
(True, 2**y*(x + 1)**y)
See docstring of Expr.as_content_primitive for more examples.
"""
b, e = self.as_base_exp()
b = _keep_coeff(*b.as_content_primitive(radical=radical))
ce, pe = e.as_content_primitive(radical=radical)
if b.is_Rational:
#e
#= ce*pe
#= ce*(h + t)
#= ce*h + ce*t
#=> self
#= b**(ce*h)*b**(ce*t)
#= b**(cehp/cehq)*b**(ce*t)
#= b**(iceh+r/cehq)*b**(ce*t)
#= b**(iceh)*b**(r/cehq)*b**(ce*t)
#= b**(iceh)*b**(ce*t + r/cehq)
h, t = pe.as_coeff_Add()
if h.is_Rational:
ceh = ce*h
c = self.func(b, ceh)
r = S.Zero
if not c.is_Rational:
iceh, r = divmod(ceh.p, ceh.q)
c = self.func(b, iceh)
return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q))
e = _keep_coeff(ce, pe)
# b**e = (h*t)**e = h**e*t**e = c*m*t**e
if e.is_Rational and b.is_Mul:
h, t = b.as_content_primitive(radical=radical) # h is positive
c, m = self.func(h, e).as_coeff_Mul() # so c is positive
m, me = m.as_base_exp()
if m is S.One or me == e: # probably always true
# return the following, not return c, m*Pow(t, e)
# which would change Pow into Mul; we let sympy
# decide what to do by using the unevaluated Mul, e.g
# should it stay as sqrt(2 + 2*sqrt(5)) or become
# sqrt(2)*sqrt(1 + sqrt(5))
return c, self.func(_keep_coeff(m, t), e)
return S.One, self.func(b, e)
def is_constant(self, *wrt, **flags):
if flags.get('simplify', True):
self = self.simplify()
b, e = self.as_base_exp()
bz = b.equals(0)
if bz: # recalculate with assumptions in case it's unevaluated
new = b**e
if new != self:
return new.is_constant()
econ = e.is_constant(*wrt)
bcon = b.is_constant(*wrt)
if bcon:
if econ:
return True
bz = b.equals(0)
if bz is False:
return False
elif bcon is None:
return None
return e.equals(0)
from .add import Add
from .numbers import Integer
from .mul import Mul, _keep_coeff
from .symbol import Symbol, Dummy, symbols
''' python modules '''
from sys import _getframe as getframe
from types import IntType
from types import ListType
from types import TupleType
''' own modules '''
from main.io_module import add_Separator
from main.information import Information
from sge.fileclass import Fileclass
from sge.sge import SGE
class Gsnap(SGE):
def __init__(self, split, *args, **kwargs):
super(Gsnap, self).__init__(*args, **kwargs)
self._modulename = 'gsnap'
self._moduledir = add_Separator('{0}{1}'.format(self._maindir, self._modulename))
self._progpath = Information.GSNAP
self.__split = split
self.start_logging('pipeline.sge.aligner.gsnap')
self.set_cpuvalue_line('smp', kwargs['cores'])
if self._with_sgemodule:
self.set_sgemodulevalue_line((self._progpath, Information.SAMTOOLS))
self.load_loption()
self.__refpath = Information.GMAP_DIR
self.__ref = Information.GMAP_REF
self.__transcript = Information.GSNAP_TRANSCRIPT
self.__compressed = ''
self.__rnadna_string = ''
self.__readgroup_gsnap = '--read-group-id='
self.__sample_gsnap = '--read-group-name='
self.__library_gsnap = '--read-group-library='
self.__platform_gsnap = '--read-group-platform='
self.set_readgroup_string()
self.set_compressed()
self.set_dna_rna()
# gsnap -D /projects/seq-work/user/all/reference/gmap/ -d mm10 --gunzip --use-sarray=1 -B 4 -t $NSLOTS -N 1 -s Ensembl-75.ss.mm10.iit -n 1 -A sam $firstinput > $output
# progpath, refpath, files, reference, cpu, rna = False, splicefile = None, iddict = None, optionlist = None
def load_loption(self):
if 'avx' in self._progpath:
self.set_loption(('avx', ))
elif 'sse4.2' in self._progpath:
self.set_loption(('sse42', ))
'''
method sets the parameters for the read group IDs
'''
def set_readgroup_string(self):
lib, sample, platform, ext = [], [], [], []
for inst in self._inputfiles:
if inst.use:
lib.extend(inst.libid)
sample.extend(inst.sample)
platform.extend(inst.platform)
ext.append(inst.ext)
ext = '-'.join(sorted(set(ext)))
lib = '-'.join(sorted(set(lib)))
sample = '-'.join(sorted(set(sample)))
platform = '-'.join(sorted(set(platform)))
readgroup = '-'.join([i for i in (lib, sample, ext) if i != ''])
if readgroup != '':
self.__readgroup_gsnap = '{0}{1}'.format(self.__readgroup_gsnap, readgroup)
if sample != '':
self.__sample_gsnap = '{0}{1}'.format(self.__sample_gsnap, sample)
if lib != '':
self.__library_gsnap = '{0}{1}'.format(self.__library_gsnap, lib)
if platform != '':
self.__platform_gsnap = '{0}{1}'.format(self.__platform_gsnap, platform)
'''
check if the files are compressed or not
'''
def set_compressed(self):
if len(self._inputfiles) == sum([1 for i in self._inputfiles if i.zipped]):
self.__compressed = '--gunzip'
elif sum([1 for i in self._inputfiles if i.zipped]) == 0:
self.__compressed = ''
else:
self.show_log('error', "{0}.{1} - input files are a mix of zipped and un-zipped; only one format is possible".format(self._classname, getframe().f_code.co_name))
exit(2)
def set_dna_rna(self):
if self.__transcript != '':
self.__rnadna_string = '-s {0}'.format(self.__transcript)
else:
self.__rnadna_string = ''
#@DeprecationWarning
# out of support; newer versions of featureCounts support sorted BAM files
# def build_convertline(self, optionlist, sortoptionlist):
# convertline = []
# samline = []
#
# if isinstance(self._cores, ListType) or isinstance(self._cores, TupleType):
# cpu = '{0}'.format(min((self._cores[0], self._cores[1], 4)))
# elif isinstance(self._cores, IntType):
# cpu = '{0}'.format(min((4, self._cores)))
#
# if self._with_sgemodule:
# samtools = 'samtools'
# samsort = 'sam_sort'
# else:
# samtools = Information.SAMTOOLS
# samsort = Information.GSNAP_SORT
#
# if '--force-single-end' in optionlist:
# if self.__split:
# pass
# else:
# convertline.append(samsort)
# convertline.extend(sortoptionlist)
# convertline.extend(('-D', self.__refpath, '-d', self.__ref, self._outputfiles[0].completepath, '|'))
# convertline.extend((samtools, 'view', '-bh', '-@', cpu, '-o', self._outputfiles[1].completepath, '-'))
# samline.extend((samtools, 'index', self._outputfiles[1].completepath, '&&', 'rm', self._outputfiles[0].completepath))
# elif len(self._inputfiles) > 2:
# if self.__split:
# pass
# else:
# convertline.append(samsort)
# convertline.extend(sortoptionlist)
# convertline.extend(('-D', self.__refpath, '-d', self.__ref, self._outputfiles[0].completepath, '|'))
# convertline.extend((samtools, 'view', '-bh', '-@', cpu, '-o', self._outputfiles[1].completepath, '-'))
# samline.extend((samtools, 'index', self._outputfiles[1].completepath, '&&', 'rm', self._outputfiles[0].completepath))
# elif len(self._inputfiles) == 2:
# if self.__split:
# pass
# else:
# if Information.GENECOUNT in ('yes', 'y', '1'):
# convertline.extend((samtools, 'view', '-bh', '-@', cpu, '-o', self._outputfiles[1].completepath, '-', '&&', 'rm', self._outputfiles[0].completepath))
# # convertline.extend((Information.JAVAPATH, Information.JAR, Information.GSNAP_TRANSLOCATION_HELPER, 'I=', self._outputfiles[0].completepath, 'ON=', self._outputfiles[1].completepath, 'OT=', self._outputfiles[2].completepath, 'SO=', 'unsorted'))
# # convertline.extend(('&&', 'rm', self._outputfiles[0].completepath))
# else:
# # convertline.extend((Information.JAVAPATH, Information.JAR, Information.GSNAP_TRANSLOCATION_HELPER, 'I=', self._outputfiles[0].completepath, 'ON=', self._outputfiles[1].completepath, 'OT=', self._outputfiles[2].completepath, 'SO=', 'coordinate'))
# # convertline.extend(('&&', 'rm', self._outputfiles[0].completepath))
# # MIGHT WANNA SWITCH BACK TO THAT
# convertline.append(samsort)
# convertline.extend(sortoptionlist)
# convertline.extend(('-D', self.__refpath, '-d', self.__ref, self._outputfiles[0].completepath, '|'))
# convertline.extend((samtools, 'view', '-bh', '-@', cpu, '-o', self._outputfiles[1].completepath, '-'))
# samline.extend((samtools, 'index', self._outputfiles[1].completepath, '&&', 'rm', self._outputfiles[0].completepath))
# else:
# if self.__split:
# pass
# else:
# convertline.append(samsort)
# convertline.extend(sortoptionlist)
# convertline.extend(('-D', self.__refpath, '-d', self.__ref, self._outputfiles[0].completepath, '|'))
# convertline.extend((samtools, 'view', '-bh', '-@', cpu, '-o', self._outputfiles[1].completepath, '-'))
# samline.extend((samtools, 'index', self._outputfiles[1].completepath, '&&', 'rm', self._outputfiles[0].completepath))
#
# convertline = ' '.join([i for i in convertline if i != ''])
# samline = ' '.join([i for i in samline if i != ''])
#
# self._convertline = '\n'.join([i for i in (convertline, samline) if i != ''])
def build_convertline(self, sortoptionlist):
convertline = []
samline = []
if isinstance(self._cores, ListType) or isinstance(self._cores, TupleType):
cpu = '{0}'.format(min((self._cores[0], self._cores[1], 4)))
elif isinstance(self._cores, IntType):
cpu = '{0}'.format(min((4, self._cores)))
if self._with_sgemodule:
samtools = 'samtools'
samsort = 'sam_sort'
else:
samtools = Information.SAMTOOLS
samsort = Information.GSNAP_SORT
if self.__split:
pass
else:
convertline.append(samsort)
convertline.extend(sortoptionlist)
convertline.extend(('-D', self.__refpath, '-d', self.__ref, self._outputfiles[0].completepath, '|'))
convertline.extend((samtools, 'view', '-bh', '-@', cpu, '-o', self._outputfiles[1].completepath, '-'))
samline.extend((samtools, 'index', self._outputfiles[1].completepath, '&&', 'rm', self._outputfiles[0].completepath))
convertline = ' '.join([i for i in convertline if i != ''])
samline = ' '.join([i for i in samline if i != ''])
self._convertline = '\n'.join([i for i in (convertline, samline) if i != ''])
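# Illustrative result (file names are placeholders): with the SGE module
# loaded and 4 cores, this yields lines of the form
#   sam_sort <sortoptions> -D <gmap_dir> -d <ref> sample.gsnap.sam | samtools view -bh -@ 4 -o sample.gsnap.bam -
#   samtools index sample.gsnap.bam && rm sample.gsnap.sam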
def check_optionlist(self, optionlist):
temp = ''
if '--force-single-end' in optionlist:
self.show_log('info', "{0}.{1} - input files treated as single end".format(self._classname, getframe().f_code.co_name, len(self._inputfiles)))
elif len(self._inputfiles) > 2:
self.show_log('info', "{0}.{1} - more than two input files. files are treated as single end".format(self._classname, getframe().f_code.co_name, len(self._inputfiles)))
self.show_log('info', "{0}.{1} - adding '--force-single-end' option".format(self._classname, getframe().f_code.co_name, len(self._inputfiles)))
temp = '--force-single-end'
elif len(self._inputfiles) == 2:
r1 = [i for i in self._inputfiles if i.read == ['R1']]
r2 = [i for i in self._inputfiles if i.read == ['R2']]
if len(r1) == 1 and len(r2) == 1 and not self._inputfiles[0].read == ['R1']:
self._inputfiles[0] = r1[0]
self._inputfiles[1] = r2[0]
self.show_log('info', "{0}.{1} - change order of inputfiles".format(self._classname, getframe().f_code.co_name))
elif len(r1) == 1 and len(r2) == 1 and self._inputfiles[0].read == ['R1']:
self.show_log('debug', "{0}.{1} - read direction correct".format(self._classname, getframe().f_code.co_name))
else:
self.show_log('info', "{0}.{1} - read direction can't be defined. order of inputfiles remains untouched".format(self._classname, getframe().f_code.co_name))
self.show_log('info', "{0}.{1} - adding '--allow-pe-name-mismatch' option".format(self._classname, getframe().f_code.co_name))
temp = '--allow-pe-name-mismatch'
return temp
def build_commandline(self, optionlist=[]):
commandline = []
if self._with_sgemodule:
commandline.append('gsnap')
else:
commandline.append(self._progpath)
commandline.extend(['--use-sarray=1', '-B 4'])
commandline.append(self.__compressed)
commandline.extend(['-D', self.__refpath, '-d', self.__ref])
commandline.extend(['-t', self.get_cpucommandcall()])
commandline.append(self.__rnadna_string)
commandline.extend(optionlist)
if self.__readgroup_gsnap[-1] != '=':
commandline.append(self.__readgroup_gsnap)
if self.__library_gsnap[-1] != '=':
commandline.append(self.__library_gsnap)
if self.__sample_gsnap[-1] != '=':
commandline.append(self.__sample_gsnap)
if self.__platform_gsnap[-1] != '=':
commandline.append(self.__platform_gsnap)
commandline.append(self.check_optionlist(optionlist))
commandline.extend(('-A', 'sam'))
commandline.extend([i.completepath for i in self._inputfiles])
commandline.append('>')
commandline.append(self._outputfiles[0].completepath)
commandline.append('\n')
commandline = [i for i in commandline if i != '']
self._commandline = ' '.join(commandline)
def build_outputfiles(self, optionlist = []):
ext = 'gsnap'
inputext = '-'.join(sorted(set([i.ext for i in self._inputfiles])))
if sum([1 for i in self._inputfiles if i.use]) == len(self._inputfiles):
purefilename = self.get_standard_filename()
else:
purefilename = sorted(set([i.rg for i in self._inputfiles]))
# samfile write to tmp
if inputext != '':
filename = '{0}{1}.{2}.{3}.sam'.format(self._tmpdir, purefilename, inputext, ext)
else:
filename = '{0}{1}.{2}.sam'.format(self._tmpdir, purefilename, ext)
fileinst = Fileclass(filename, self._log, False)
fileinst.ext = ext
fileinst.zipped = False
fileinst.sortorder = 'unsorted'
self._outputfiles.append(fileinst)
# normal output bam file
if inputext != '':
filename = '{0}{1}.{2}.{3}.bam'.format(self._moduledir, purefilename, inputext, ext)
else:
filename = '{0}{1}.{2}.bam'.format(self._moduledir, purefilename, ext)
fileinst = Fileclass(filename, self._log, False)
fileinst.ext = ext
fileinst.zipped = False
# old model; newer featureCounts can count on sorted BAM files
# if len(self._inputfiles) == 2 and not '--force-single-end' in optionlist and Information.GENECOUNT in ('yes', 'y', '1'):
# fileinst.sortorder = 'unsorted'
# else:
# fileinst.sortorder = 'coordinate'
fileinst.sortorder = 'coordinate'
self._outputfiles.append(fileinst)
# translocation file for pe
# if inputext != '':
# filename = '{0}{1}.{2}.{3}.trans.bam'.format(self._moduledir, purefilename, inputext, ext)
# else:
# filename = '{0}{1}.{2}.trans.bam'.format(self._moduledir, purefilename, ext)
# fileinst = Fileclass(filename, self._log, False)
# fileinst.ext = ext
# fileinst.zipped = False
# fileinst.sortorder = 'coordinate'
# self._outputfiles.append(fileinst)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import codecs
import logging
import multiprocessing
import os
import re
import sys
import threading
import time
import traceback
from logging.handlers import BaseRotatingHandler, _MIDNIGHT
from stat import ST_MTIME
def register_handler(logger=None):
"""Wraps the handlers in the given Logger with an MultiProcHandler.
:param logger: whose handlers to wrap. By default, the root logger.
"""
if logger is None:
logger = logging.getLogger()
for i, orig_handler in enumerate(list(logger.handlers)):
handler = MultiProcHandler(
'mp-handler-{0}'.format(i), sub_handler=orig_handler
)
logger.removeHandler(orig_handler)
logger.addHandler(handler)
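# Usage sketch (assumes logging is already configured in the parent
# process):
#   logging.basicConfig(filename='app.log')
#   register_handler()
# After this, records emitted in child processes are funnelled through a
# multiprocessing queue to the original handlers.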
class MultiProcHandler(logging.Handler):
def __init__(self, name, sub_handler=None):
super(MultiProcHandler, self).__init__()
if sub_handler is None:
sub_handler = logging.StreamHandler()
self.sub_handler = sub_handler
self.setLevel(self.sub_handler.level)
self.setFormatter(self.sub_handler.formatter)
self.queue = multiprocessing.Queue(-1)
self._is_closed = False
# The thread handles receiving records asynchronously.
self._receive_thread = threading.Thread(target=self._receive, name=name)
self._receive_thread.daemon = True
self._receive_thread.start()
def setFormatter(self, fmt):
super(MultiProcHandler, self).setFormatter(fmt)
self.sub_handler.setFormatter(fmt)
def _receive(self):
while not (self._is_closed and self.queue.empty()):
try:
record = self.queue.get()
self.sub_handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
self.queue.close()
self.queue.join_thread()
def _send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
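# Records travel through a multiprocessing.Queue, i.e. they are pickled;
# resolve args and exc_info eagerly because they may hold unpicklable
# objects and tracebacks cannot cross the process boundary.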
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self._send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
if not self._is_closed:
self._is_closed = True
self._receive_thread.join(5.0) # Waits for receive queue to empty.
self.sub_handler.close()
super(MultiProcHandler, self).close()
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the logs file at certain timed
intervals.
If backup_count is > 0, when rollover is done, no more than backup_count
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backup_count=0,
encoding=None, delay=False, utc=False):
self.when = when.upper()
self.backup_count = backup_count
self.utc = utc
self.base_filename = filename
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError(
"You must specify a day for weekly rollover "
"from 0 to 6 (0 is Monday): %s" % self.when
)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError(
"Invalid day specified for weekly rollover: %s" % self.when)
self.day_of_week = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
else:
raise ValueError(
"Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch)
self.current_filename = self._compute_fn()
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rollover_time = self.compute_rollover(t)
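# Usage sketch (file name is a placeholder): a handler that rolls over at
# midnight and keeps a week of logs:
#   TimedRotatingFileHandler('app.log', when='midnight', backup_count=7)
# The active file is written as base_filename + '.' + date suffix, with
# base_filename kept as a symlink to it (see _open below).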
def compute_rollover(self, current_time):
"""
Work out the rollover time based on the specified time.
"""
result = current_time + self.interval
# If we are rolling over at midnight or weekly, then the interval
# is already known. What we need to figure out is WHEN the next
# interval is. In other words, if you are rolling over at midnight,
# then your base interval is 1 day, but you want to start that one day
# clock at midnight, not now. So, we have to fudge the rollover_time
# value in order to trigger the first rollover at the right time.
# After that, the regular interval will take care of the rest.
# Note that this code doesn't care about leap seconds.
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(current_time)
else:
t = time.localtime(current_time)
current_hour = t[3]
current_minute = t[4]
current_second = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - (
(current_hour * 60 + current_minute) * 60 + current_second)
result = current_time + r
# If we are rolling over on a certain day, add in the number of
# days until the next rollover, but offset by 1 since we just
# calculated the time until the next day starts. There are three
# cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e.,
# today is day 2 (Wednesday) and rollover is on
# day 6 (Sunday). Days to next rollover is simply
# 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e.,
# today is day 5 (Saturday) and rollover is on day 3
# (Thursday). Days to rollover is 6 - 5 + 3, or 4.
# In this case, it's the number of days left in the current
# week (1) plus the number of days in the next week until
# the rollover day (3).
# The calculations described in 2) and 3) above need to have a day
# added. This is because the above time calculation takes us to
# midnight on this day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.day_of_week:
if day < self.day_of_week:
daysToWait = self.day_of_week - day
else:
daysToWait = 6 - day + self.day_of_week + 1
new_rollover_time = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dst_now = t[-1]
dst_rollover_time = time.localtime(new_rollover_time)[-1]
if dst_now != dst_rollover_time:
if not dst_now:
addend = -3600
else:
addend = 3600
new_rollover_time += addend
result = new_rollover_time
return result
def should_rollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
if self.current_filename != self._compute_fn():
return True
return False
def _compute_fn(self):
return self.base_filename + '.' + \
time.strftime(self.suffix, time.localtime())
def get_files_to_delete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dir_name, base_name = os.path.split(self.base_filename)
file_names = os.listdir(dir_name)
result = []
prefix = base_name + "."
plen = len(prefix)
for fileName in file_names:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dir_name, fileName))
result.sort()
if len(result) < self.backup_count:
result = []
else:
result = result[:len(result) - self.backup_count]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the
filename when the rollover happens. However, you want the file to be
named for the start of the interval, not the current time.
If there is a backup count, then we have to get a list of matching
filenames, sort them and remove the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
current_time = int(time.time())
dst_now = time.localtime(current_time)[-1]
t = self.rollover_time - self.interval
if self.utc:
time_tuple = time.gmtime(t)
else:
time_tuple = time.localtime(t)
dst_then = time_tuple[-1]
if dst_now != dst_then:
if dst_now:
addend = 3600
else:
addend = -3600
time_tuple = time.localtime(t + addend)
self.current_filename = self._compute_fn()
if self.backup_count > 0:
for s in self.get_files_to_delete():
os.remove(s)
# print "%s -> %s" % (self.base_filename, dfn)
self.stream = self._open()
new_rollover_time = self.compute_rollover(current_time)
while new_rollover_time <= current_time:
new_rollover_time = new_rollover_time + self.interval
# If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith(
'W')) and not self.utc:
dst_rollover_time = time.localtime(new_rollover_time)[-1]
if dst_now != dst_rollover_time:
if not dst_now:
addend = -3600
else:
addend = 3600
new_rollover_time += addend
self.rollover_time = new_rollover_time
def _open(self):
if self.encoding is None:
stream = open(self.current_filename, self.mode)
else:
stream = codecs.open(self.current_filename, self.mode,
self.encoding)
if os.path.exists(self.base_filename):
try:
os.remove(self.base_filename)
except OSError:
pass
try:
os.symlink(self.current_filename, self.base_filename)
except OSError:
pass
return stream
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from dataclasses import dataclass
from textwrap import dedent
import pytest
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.register import rules as python_rules
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.core.goals.publish import (
Publish,
PublishFieldSet,
PublishPackages,
PublishProcesses,
PublishRequest,
rules,
)
from pants.core.register import rules as core_rules
from pants.engine.process import InteractiveProcess
from pants.engine.rules import rule
from pants.engine.target import StringSequenceField
from pants.testutil.rule_runner import RuleRunner
class MockRepositoriesField(StringSequenceField):
alias = "repositories"
@dataclass(frozen=True)
class MockPublishRequest(PublishRequest):
pass
@dataclass(frozen=True)
class PublishTestFieldSet(PublishFieldSet):
publish_request_type = MockPublishRequest
required_fields = (MockRepositoriesField,)
repositories: MockRepositoriesField
@rule
async def mock_publish(request: MockPublishRequest) -> PublishProcesses:
if not request.field_set.repositories.value:
return PublishProcesses()
return PublishProcesses(
PublishPackages(
names=tuple(
artifact.relpath
for pkg in request.packages
for artifact in pkg.artifacts
if artifact.relpath
),
process=None if repo == "skip" else InteractiveProcess(["echo", repo]),
description="(requested)" if repo == "skip" else repo,
)
for repo in request.field_set.repositories.value
)
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*core_rules(),
*python_rules(),
*rules(),
mock_publish,
PythonDistribution.register_plugin_field(MockRepositoriesField),
*PublishTestFieldSet.rules(),
],
target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
objects={"python_artifact": PythonArtifact},
)
def test_noop(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
)
"""
),
}
)
result = rule_runner.run_goal_rule(
Publish,
args=("src:dist",),
env_inherit={"HOME", "PATH", "PYENV_ROOT"},
)
assert result.exit_code == 0
assert "Nothing published." in result.stderr
def test_skipped_publish(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
repositories=["skip"],
)
"""
),
}
)
result = rule_runner.run_goal_rule(
Publish,
args=("src:dist",),
env_inherit={"HOME", "PATH", "PYENV_ROOT"},
)
assert result.exit_code == 0
assert "my-package-0.1.0.tar.gz skipped (requested)." in result.stderr
assert "my_package-0.1.0-py3-none-any.whl skipped (requested)." in result.stderr
def test_structured_output(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
repositories=["skip"],
)
"""
),
}
)
result = rule_runner.run_goal_rule(
Publish,
args=(
"--output=published.json",
"src:dist",
),
env_inherit={"HOME", "PATH", "PYENV_ROOT"},
)
assert result.exit_code == 0
assert "my-package-0.1.0.tar.gz skipped (requested)." in result.stderr
assert "my_package-0.1.0-py3-none-any.whl skipped (requested)." in result.stderr
expected = [
{
"names": [
"my-package-0.1.0.tar.gz",
"my_package-0.1.0-py3-none-any.whl",
],
"published": False,
"status": "skipped (requested)",
"target": "src:dist",
},
]
with open("published.json") as fd:
data = json.load(fd)
assert data == expected
@pytest.mark.skip("Cannot run an interactive process from a test.")
def test_mocked_publish(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/BUILD": dedent(
"""\
python_sources()
python_distribution(
name="dist",
provides=python_artifact(
name="my-package",
version="0.1.0",
),
repositories=["mocked-repo"],
)
"""
),
}
)
result = rule_runner.run_goal_rule(
Publish,
args=("src:dist",),
env_inherit={"HOME", "PATH", "PYENV_ROOT"},
)
assert result.exit_code == 0
assert "my-package-0.1.0.tar.gz published." in result.stderr
assert "my_package-0.1.0-py3-none-any.whl published." in result.stderr
assert "mocked-repo" in result.stdout
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
from dateutil.parser import parse as dateparse
import httplib as http
import logging
from flask import request, make_response
from framework.exceptions import HTTPError
from addons.base import generic_views
from addons.github.api import GitHubClient, ref_to_params
from addons.github.exceptions import NotFoundError, GitHubError
from addons.github.serializer import GitHubSerializer
from addons.github.utils import (
get_refs, check_permissions,
verify_hook_signature, MESSAGES
)
from website.models import NodeLog
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_contributor_or_public, must_be_valid_project,
)
from website.util import rubeus
logger = logging.getLogger(__name__)
logging.getLogger('github3').setLevel(logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
SHORT_NAME = 'github'
FULL_NAME = 'GitHub'
############
# Generics #
############
github_account_list = generic_views.account_list(
SHORT_NAME,
GitHubSerializer
)
github_import_auth = generic_views.import_auth(
SHORT_NAME,
GitHubSerializer
)
def _get_folders(node_addon, folder_id):
pass
github_folder_list = generic_views.folder_list(
SHORT_NAME,
FULL_NAME,
_get_folders
)
github_get_config = generic_views.get_config(
SHORT_NAME,
GitHubSerializer
)
github_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
github_root_folder = generic_views.root_folder(
SHORT_NAME
)
#################
# Special Cased #
#################
@must_not_be_registration
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
@must_have_permission('write')
def github_set_config(auth, **kwargs):
node_settings = kwargs.get('node_addon', None)
node = kwargs.get('node', None)
user_settings = kwargs.get('user_addon', None)
try:
if not node:
node = node_settings.owner
if not user_settings:
user_settings = node_settings.user_settings
except AttributeError:
raise HTTPError(http.BAD_REQUEST)
# Parse request
github_user_name = request.json.get('github_user', '')
github_repo_name = request.json.get('github_repo', '')
if not github_user_name or not github_repo_name:
raise HTTPError(http.BAD_REQUEST)
# Verify that repo exists and that user can access
connection = GitHubClient(external_account=node_settings.external_account)
repo = connection.repo(github_user_name, github_repo_name)
if repo is None:
if user_settings:
message = (
'Cannot access repo. Either the repo does not exist '
'or your account does not have permission to view it.'
)
else:
message = (
'Cannot access repo.'
)
return {'message': message}, http.BAD_REQUEST
changed = (
github_user_name != node_settings.user or
github_repo_name != node_settings.repo
)
# Update hooks
if changed:
# Delete existing hook, if any
node_settings.delete_hook()
# Update node settings
node_settings.user = github_user_name
node_settings.repo = github_repo_name
# Log repo select
node.add_log(
action='github_repo_linked',
params={
'project': node.parent_id,
'node': node._id,
'github': {
'user': github_user_name,
'repo': github_repo_name,
}
},
auth=auth,
)
# Add new hook
if node_settings.user and node_settings.repo:
node_settings.add_hook(save=False)
node_settings.save()
return {}
@must_be_contributor_or_public
@must_have_addon('github', 'node')
def github_download_starball(node_addon, **kwargs):
archive = kwargs.get('archive', 'tar')
ref = request.args.get('sha', 'master')
connection = GitHubClient(external_account=node_addon.external_account)
headers, data = connection.starball(
node_addon.user, node_addon.repo, archive, ref
)
resp = make_response(data)
for key, value in headers.iteritems():
resp.headers[key] = value
return resp
#########
# HGrid #
#########
@must_be_contributor_or_public
@must_have_addon('github', 'node')
def github_root_folder(*args, **kwargs):
"""View function returning the root container for a GitHub repo. In
contrast to other add-ons, this is exposed via the API for GitHub to
accommodate switching between branches and commits.
"""
node_settings = kwargs['node_addon']
auth = kwargs['auth']
data = request.args.to_dict()
return github_hgrid_data(node_settings, auth=auth, **data)
def github_hgrid_data(node_settings, auth, **kwargs):
# Quit if no repo linked
if not node_settings.complete:
return
connection = GitHubClient(external_account=node_settings.external_account)
# Initialize repo here in the event that it is set in the privacy check
# below. This potentially saves an API call in _check_permissions, below.
repo = None
# Quit if privacy mismatch and not contributor
node = node_settings.owner
if node.is_public and not node.is_contributor(auth.user):
try:
repo = connection.repo(node_settings.user, node_settings.repo)
except NotFoundError:
# TODO: Test me @jmcarp
# TODO: Add warning message
logger.error('Could not access GitHub repo')
return None
if repo.private:
return None
try:
branch, sha, branches = get_refs(
node_settings,
branch=kwargs.get('branch'),
sha=kwargs.get('sha'),
connection=connection,
)
except (NotFoundError, GitHubError):
# TODO: Show an alert or change GitHub configuration?
logger.error('GitHub repo not found')
return
if branch is not None:
ref = ref_to_params(branch, sha)
can_edit = check_permissions(
node_settings, auth, connection, branch, sha, repo=repo,
)
else:
ref = None
can_edit = False
name_tpl = '{user}/{repo}'.format(
user=node_settings.user, repo=node_settings.repo
)
permissions = {
'edit': can_edit,
'view': True,
'private': node_settings.is_private
}
urls = {
'upload': node_settings.owner.api_url + 'github/file/' + (ref or ''),
'fetch': node_settings.owner.api_url + 'github/hgrid/' + (ref or ''),
'branch': node_settings.owner.api_url + 'github/hgrid/root/',
'zip': node_settings.owner.api_url + 'github/zipball/' + (ref or ''),
'repo': 'https://github.com/{0}/{1}/tree/{2}'.format(node_settings.user, node_settings.repo, branch)
}
branch_names = [each.name for each in branches]
if not branch_names:
branch_names = [branch]  # if the repo is uninitialized, still add the default branch to the list of branches
return [rubeus.build_addon_root(
node_settings,
name_tpl,
urls=urls,
permissions=permissions,
branches=branch_names,
defaultBranch=branch,
private_key=kwargs.get('view_only', None),
)]
#########
# Repos #
#########
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
@must_have_permission('write')
def github_create_repo(**kwargs):
repo_name = request.json.get('name')
if not repo_name:
raise HTTPError(http.BAD_REQUEST)
node_settings = kwargs['node_addon']
connection = GitHubClient(external_account=node_settings.external_account)
try:
repo = connection.create_repo(repo_name, auto_init=True)
except GitHubError:
# TODO: Check status code
raise HTTPError(http.BAD_REQUEST)
return {
'user': repo.owner.login,
'repo': repo.name,
}
#########
# Hooks #
#########
# TODO: Refactor using NodeLogger
def add_hook_log(node, github, action, path, date, committer, include_urls=False,
sha=None, save=False):
"""Add log event for commit from webhook payload.
:param node: Node to add logs to
:param github: GitHub node settings record
:param path: Path to file
:param date: Date of commit
:param committer: Committer name
:param include_urls: Include URLs in `params`
:param sha: SHA of updated file
:param save: Save changes
"""
github_data = {
'user': github.user,
'repo': github.repo,
}
urls = {}
if include_urls:
# TODO: Move to helper function
url = node.web_url_for('addon_view_or_download_file', path=path, provider=SHORT_NAME)
urls = {
'view': '{0}?ref={1}'.format(url, sha),
'download': '{0}?action=download&ref={1}'.format(url, sha)
}
node.add_log(
action=action,
params={
'project': node.parent_id,
'node': node._id,
'path': path,
'github': github_data,
'urls': urls,
},
auth=None,
foreign_user=committer,
log_date=date,
save=save,
)
@must_be_valid_project
@must_not_be_registration
@must_have_addon('github', 'node')
def github_hook_callback(node_addon, **kwargs):
"""Add logs for commits from outside OSF.
"""
if request.json is None:
return {}
# Fail if hook signature is invalid
verify_hook_signature(
node_addon,
request.data,
request.headers,
)
node = kwargs['node'] or kwargs['project']
payload = request.json
for commit in payload.get('commits', []):
# TODO: Look up OSF user by commit
# Skip if pushed by OSF
if commit['message'] and commit['message'] in MESSAGES.values():
continue
_id = commit['id']
date = dateparse(commit['timestamp'])
committer = commit['committer']['name']
# Add logs
for path in commit.get('added', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_ADDED,
path, date, committer, include_urls=True, sha=_id,
)
for path in commit.get('modified', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_UPDATED,
path, date, committer, include_urls=True, sha=_id,
)
for path in commit.get('removed', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_REMOVED,
path, date, committer,
)
node.save()
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import re
import time
import platform
__author__ = 'Libao Jin'
__create_date__ = '01/13/2017'
__last_update_date__ = '02/07/2018'
__copyright__ = "Copyright (c) 2018 Libao Jin"
__license__ = "MIT"
__version__ = "1.5.1"
__maintainer__ = "Libao Jin"
__email__ = "jinlibao@outlook.com"
__status__ = "Complete"
class Compiler():
'''Compile pandoc files to PDF, MS Word documents, etc.'''
folder = '.'
metadata = ''
filename = ''
source_file_body = 'body.tex'
source_file_main = 'main.tex'
output_file_main = 'main.pdf'
title = ''
last_name = 'Libao'
first_name = 'Jin'
email = 'ljin1@uwyo.edu'
author = r'{0} {1} (\\url{{{2}}})'.format(last_name, first_name, email)
date = '01/01/2017'
platform = ''
def __init__(self):
'''Initialization of class Compiler.'''
self.folder = '.'
self.metadata = [('', '', '', '')]
self.platform = platform.system()
def get_metadata(self):
'''Get information from the folder structure and extract course information, etc.'''
folder = self.folder
pathname = os.path.realpath(folder)
print(pathname)
print()
if self.platform == 'Windows':
string = r'([\w.-]+)\\([\w\d\s.-]+)\\LaTeX'
else:
string = r'([\w.-]+)/([\w\d\s.-]+)/LaTeX'
pattern = re.compile(string)
self.metadata = re.findall(pattern, pathname)
print(self.metadata)
def generate_filename(self):
'''Generate filename for output file.'''
metadata = self.metadata[0]
print(metadata)
book = metadata[0]
doc_type = metadata[1].replace(' ', '.')
self.filename = '{0}.{1}_{2}.{3}.pdf'.format(book, doc_type, self.last_name, self.first_name)
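# Illustrative output (metadata values are hypothetical): for metadata
# ('Calculus', 'Homework 1') this yields 'Calculus.Homework.1_Libao.Jin.pdf'.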
def generate_title(self):
'''Generate title for the article/document.'''
metadata = self.metadata[0]
book = metadata[0].replace('.', ' ')
doc_type = metadata[1].replace('.', ' ')
self.title = '{0} {1}'.format(book, doc_type)
print(self.title)
def update_date(self):
t = time.localtime()
day = str(t.tm_mday).zfill(2)
month = str(t.tm_mon).zfill(2)
year = str(t.tm_year).zfill(4)
self.date = '{0}/{1}/{2}'.format(month, day, year)
def update_author_1(self):
'''Update author information in the source file to be compiled.'''
source_file = self.source_file_main
author = self.author
f = open(source_file, 'r')
content = f.read()
string = r'\\author{[\w\d\s]*}'
p = re.compile(string)
content = p.sub(r'\\author{{{0}}}'.format(author), content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def update_title(self):
'''Update title in the source file to be compiled.'''
source_file = self.source_file_main
title = self.title
f = open(source_file, 'r')
content = f.read()
string = r'\\title{[\w\d\s.-]*}'
p = re.compile(string)
content = p.sub(r'\\title{{{0}}}'.format(title), content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def heading_style_0(self):
'''Change heading style to not numberred heading.'''
source_file = self.source_file_body
f = open(source_file, 'r')
content = f.read()
string = r'\\section'
p = re.compile(string)
content = p.sub(r'\\textbf', content, count=1)
content = p.sub(r'\n\\textbf', content)
string = r'}\\label{[\w\d-]+}\n'
p = re.compile(string)
content = p.sub('.}', content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def heading_style_1(self):
'''Change heading style to non-numbered headings, inserting a page break before each heading.'''
source_file = self.source_file_body
f = open(source_file, 'r')
content = f.read()
string = r'\\section'
p = re.compile(string)
content = p.sub(r'\\textbf', content, count=1)
content = p.sub(r'\\newpage\n\\textbf', content)
string = r'}\\label{[\w\d-]+}\n'
p = re.compile(string)
content = p.sub('.}', content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def update_package(self, option):
'''Toggle the fontspec lines in the main source file: 'p' comments them out for pdflatex, 'x' enables them for xelatex.'''
source_file = self.source_file_main
f = open(source_file, 'r')
content = f.read()
if option == 'p':
string = r'^\\usepackage{fontspec}'
p = re.compile(string, re.MULTILINE)
content = p.sub(r'% \\usepackage{fontspec}', content)
string = r'^\\setmonofont\[Scale=0.8\]{Monaco}'
p = re.compile(string, re.MULTILINE)
content = p.sub(r'% \\setmonofont[Scale=0.8]{Monaco}', content)
elif option == 'x':
string = r'[% ]*\\usepackage{fontspec}'
p = re.compile(string)
content = p.sub(r'\\usepackage{fontspec}', content)
string = r'[% ]*\\setmonofont\[Scale=0.8\]{Monaco}'
p = re.compile(string)
content = p.sub(r'\\setmonofont[Scale=0.8]{Monaco}', content)
f.close()
f = open(source_file, 'w')
f.write(content)
f.close()
def compile_pdflatex(self):
'''Compile files by calling pdflatex, then clean up auxiliary files to keep the file structure organized.'''
if self.platform == 'Windows':
path = '..\\' + self.filename
else:
path = '../' + self.filename
if os.path.exists(path):
os.remove(path)
if self.platform == 'Windows':
os.system('pdflatex -quiet {0}'.format(self.source_file_main))
os.system('pdflatex -quiet {0}'.format(self.source_file_main))
os.system('del *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
else:
os.system('pdflatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('pdflatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('rm *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
def compile_xelatex(self):
'''Compile files by calling xelatex, then clean up auxiliary files to keep the file structure organized.'''
if self.platform == 'Windows':
path = '..\\' + self.filename
else:
path = '../' + self.filename
if os.path.exists(path):
os.remove(path)
if self.platform == 'Windows':
os.system('xelatex -quiet {0}'.format(self.source_file_main))
os.system('xelatex -quiet {0}'.format(self.source_file_main))
os.system('del *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
else:
os.system('xelatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('xelatex -interaction=batchmode {0}'.format(self.source_file_main))
os.system('rm *.log *.aux *.idx *.out *.toc *~')
os.rename('{0}'.format(self.output_file_main), path)
def generate_source_file_body(self):
'''Generate source file body.tex from body.pdc by using pandoc'''
os.system('pandoc -f markdown -o body.tex body.pdc')
def run(self):
'''Run a series of commands to compile the tex file and clean up the unnecessary files.'''
self.get_metadata()
self.generate_filename()
self.generate_title()
self.generate_source_file_body()
if len(sys.argv) == 1:
print('Heading Style: Normal.')
self.update_author_1()
elif sys.argv[1] == '0':
print('Heading Style: Boldface.')
self.heading_style_0()
self.update_author_1()
elif sys.argv[1] == '1':
print('Heading Style: Boldface with page breaks.')
self.heading_style_1()
self.update_author_1()
else:
print('Error.')
self.update_title()
if len(sys.argv) <= 2:
self.update_package('x')
self.compile_xelatex()
self.update_package('p')
elif sys.argv[2] == 'p':
self.compile_pdflatex()
self.update_package('p')
elif sys.argv[2] == 'x':
self.update_package('x')
self.compile_xelatex()
self.update_package('p')
if __name__ == '__main__':
compiler = Compiler()
compiler.run()
#!/usr/bin/env python
# Author: Ryan Myers
# Models: Jeff Styers, Reagan Heller
#
# Last Updated: 2015-03-13
#
# This tutorial provides an example of creating a character
# and having it walk around on uneven terrain, as well
# as implementing a fully rotatable camera.
from direct.showbase.ShowBase import ShowBase
from panda3d.core import CollisionTraverser, CollisionNode
from panda3d.core import CollisionHandlerQueue, CollisionRay
from panda3d.core import Filename, AmbientLight, DirectionalLight
from panda3d.core import PandaNode, NodePath, Camera, TextNode
from panda3d.core import CollideMask
from direct.gui.OnscreenText import OnscreenText
from direct.actor.Actor import Actor
import random
import sys
import os
import math
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.05,
shadow=(0, 0, 0, 1), parent=base.a2dTopLeft,
pos=(0.08, -pos - 0.04), align=TextNode.ALeft)
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), scale=.07,
parent=base.a2dBottomRight, align=TextNode.ARight,
pos=(-0.1, 0.09), shadow=(0, 0, 0, 1))
class RoamingRalphDemo(ShowBase):
def __init__(self):
# Set up the window, camera, etc.
ShowBase.__init__(self)
# Set the background color to black
self.win.setClearColor((0, 0, 0, 1))
# This is used to store which keys are currently pressed.
self.keyMap = {
"left": 0, "right": 0, "forward": 0, "cam-left": 0, "cam-right": 0}
# Post the instructions
self.title = addTitle(
"Panda3D Tutorial: Roaming Ralph (Walking on Uneven Terrain)")
self.inst1 = addInstructions(0.06, "[ESC]: Quit")
self.inst2 = addInstructions(0.12, "[Left Arrow]: Rotate Ralph Left")
self.inst3 = addInstructions(0.18, "[Right Arrow]: Rotate Ralph Right")
self.inst4 = addInstructions(0.24, "[Up Arrow]: Run Ralph Forward")
self.inst6 = addInstructions(0.30, "[A]: Rotate Camera Left")
self.inst7 = addInstructions(0.36, "[S]: Rotate Camera Right")
# Set up the environment
#
# This environment model contains collision meshes. If you look
# in the egg file, you will see the following:
#
# <Collide> { Polyset keep descend }
#
# This tag causes the following mesh to be converted to a collision
# mesh -- a mesh which is optimized for collision, not rendering.
# It also keeps the original mesh, so there are now two copies ---
# one optimized for rendering, one for collisions.
self.environ = loader.loadModel("models/world")
self.environ.reparentTo(render)
# Create the main character, Ralph
ralphStartPos = self.environ.find("**/start_point").getPos()
self.ralph = Actor("models/ralph",
{"run": "models/ralph-run",
"walk": "models/ralph-walk"})
self.ralph.reparentTo(render)
self.ralph.setScale(.2)
self.ralph.setPos(ralphStartPos + (0, 0, 0.5))
# Create a floater object, which floats 2 units above ralph. We
# use this as a target for the camera to look at.
self.floater = NodePath(PandaNode("floater"))
self.floater.reparentTo(self.ralph)
self.floater.setZ(2.0)
# Accept the control keys for movement and rotation
self.accept("escape", sys.exit)
self.accept("arrow_left", self.setKey, ["left", True])
self.accept("arrow_right", self.setKey, ["right", True])
self.accept("arrow_up", self.setKey, ["forward", True])
self.accept("a", self.setKey, ["cam-left", True])
self.accept("s", self.setKey, ["cam-right", True])
self.accept("arrow_left-up", self.setKey, ["left", False])
self.accept("arrow_right-up", self.setKey, ["right", False])
self.accept("arrow_up-up", self.setKey, ["forward", False])
self.accept("a-up", self.setKey, ["cam-left", False])
self.accept("s-up", self.setKey, ["cam-right", False])
taskMgr.add(self.move, "moveTask")
# Game state variables
self.isMoving = False
# Set up the camera
self.disableMouse()
self.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2)
# We will detect the height of the terrain by creating a collision
# ray and casting it downward toward the terrain. One ray will
# start above ralph's head, and the other will start above the camera.
# A ray may hit the terrain, or it may hit a rock or a tree. If it
# hits the terrain, we can detect the height. If it hits anything
# else, we rule that the move is illegal.
self.cTrav = CollisionTraverser()
self.ralphGroundRay = CollisionRay()
self.ralphGroundRay.setOrigin(0, 0, 9)
self.ralphGroundRay.setDirection(0, 0, -1)
self.ralphGroundCol = CollisionNode('ralphRay')
self.ralphGroundCol.addSolid(self.ralphGroundRay)
self.ralphGroundCol.setFromCollideMask(CollideMask.bit(0))
self.ralphGroundCol.setIntoCollideMask(CollideMask.allOff())
self.ralphGroundColNp = self.ralph.attachNewNode(self.ralphGroundCol)
self.ralphGroundHandler = CollisionHandlerQueue()
self.cTrav.addCollider(self.ralphGroundColNp, self.ralphGroundHandler)
self.camGroundRay = CollisionRay()
self.camGroundRay.setOrigin(0, 0, 9)
self.camGroundRay.setDirection(0, 0, -1)
self.camGroundCol = CollisionNode('camRay')
self.camGroundCol.addSolid(self.camGroundRay)
self.camGroundCol.setFromCollideMask(CollideMask.bit(0))
self.camGroundCol.setIntoCollideMask(CollideMask.allOff())
self.camGroundColNp = self.camera.attachNewNode(self.camGroundCol)
self.camGroundHandler = CollisionHandlerQueue()
self.cTrav.addCollider(self.camGroundColNp, self.camGroundHandler)
# Uncomment these lines to see the collision rays
#self.ralphGroundColNp.show()
#self.camGroundColNp.show()
# Uncomment this line to show a visual representation of the
# collisions occurring
#self.cTrav.showCollisions(render)
# Create some lighting
ambientLight = AmbientLight("ambientLight")
ambientLight.setColor((.3, .3, .3, 1))
directionalLight = DirectionalLight("directionalLight")
directionalLight.setDirection((-5, -5, -5))
directionalLight.setColor((1, 1, 1, 1))
directionalLight.setSpecularColor((1, 1, 1, 1))
render.setLight(render.attachNewNode(ambientLight))
render.setLight(render.attachNewNode(directionalLight))
# Records the state of the arrow keys
def setKey(self, key, value):
self.keyMap[key] = value
# Accepts arrow keys to move either the player or the menu cursor.
# Also deals with grid checking and collision detection.
def move(self, task):
# Get the time that elapsed since last frame. We multiply this with
# the desired speed in order to find out with which distance to move
# in order to achieve that desired speed.
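        # For example, at 60 frames per second dt is roughly 0.0167 s, so the
        # 300 degrees-per-second turn rate below rotates ralph about 5 degrees
        # per frame, independent of frame rate.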
dt = globalClock.getDt()
# If the camera-left key is pressed, move camera left.
# If the camera-right key is pressed, move camera right.
if self.keyMap["cam-left"]:
self.camera.setX(self.camera, -20 * dt)
if self.keyMap["cam-right"]:
self.camera.setX(self.camera, +20 * dt)
# save ralph's initial position so that we can restore it,
# in case he falls off the map or runs into something.
startpos = self.ralph.getPos()
# If a move-key is pressed, move ralph in the specified direction.
if self.keyMap["left"]:
self.ralph.setH(self.ralph.getH() + 300 * dt)
if self.keyMap["right"]:
self.ralph.setH(self.ralph.getH() - 300 * dt)
if self.keyMap["forward"]:
self.ralph.setY(self.ralph, -25 * dt)
# If ralph is moving, loop the run animation.
# If he is standing still, stop the animation.
if self.keyMap["forward"] or self.keyMap["left"] or self.keyMap["right"]:
if self.isMoving is False:
self.ralph.loop("run")
self.isMoving = True
else:
if self.isMoving:
self.ralph.stop()
self.ralph.pose("walk", 5)
self.isMoving = False
# If the camera is too far from ralph, move it closer.
# If the camera is too close to ralph, move it farther.
camvec = self.ralph.getPos() - self.camera.getPos()
camvec.setZ(0)
camdist = camvec.length()
camvec.normalize()
if camdist > 10.0:
self.camera.setPos(self.camera.getPos() + camvec * (camdist - 10))
camdist = 10.0
if camdist < 5.0:
self.camera.setPos(self.camera.getPos() - camvec * (5 - camdist))
camdist = 5.0
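        # For example, if ralph outruns the camera to 14 units away, the camera
        # is pulled 4 units toward him; if he backs into it at 3 units, the
        # camera is pushed back out to a distance of 5.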
# Normally, we would have to call traverse() to check for collisions.
# However, the class ShowBase that we inherit from has a task to do
# this for us, if we assign a CollisionTraverser to self.cTrav.
#self.cTrav.traverse(render)
# Adjust ralph's Z coordinate. If ralph's ray hit terrain,
# update his Z. If it hit anything else, or didn't hit anything, put
# him back where he was last frame.
entries = list(self.ralphGroundHandler.getEntries())
entries.sort(key=lambda x: x.getSurfacePoint(render).getZ())
if len(entries) > 0 and entries[0].getIntoNode().getName() == "terrain":
self.ralph.setZ(entries[0].getSurfacePoint(render).getZ())
else:
self.ralph.setPos(startpos)
# Keep the camera at one foot above the terrain,
# or two feet above ralph, whichever is greater.
entries = list(self.camGroundHandler.getEntries())
entries.sort(key=lambda x: x.getSurfacePoint(render).getZ())
if len(entries) > 0 and entries[0].getIntoNode().getName() == "terrain":
self.camera.setZ(entries[0].getSurfacePoint(render).getZ() + 1.0)
if self.camera.getZ() < self.ralph.getZ() + 2.0:
self.camera.setZ(self.ralph.getZ() + 2.0)
# The camera should look in ralph's direction,
# but it should also try to stay horizontal, so look at
# a floater which hovers above ralph's head.
self.camera.lookAt(self.floater)
return task.cont
demo = RoamingRalphDemo()
demo.run()
#!/usr/bin/env python
"""
API v1.0 Command and Control (C2) direct access routes:
Enter direct access mode: /c2/instrument/<string:reference_designator>/direct_access/start
Execute direct access command /c2/instrument/<string:reference_designator>/direct_access/execute
Exit direct access mode: /c2/instrument/<string:reference_designator>/direct_access/exit
Get sniffer data from instrument /c2/instrument/<string:reference_designator>/direct_access/sniffer
"""
__author__ = 'Edna Donoughe'
from flask import jsonify, current_app, request
from ooiservices.app.decorators import scope_required
from ooiservices.app.main import api
from ooiservices.app.main.errors import bad_request
from ooiservices.app.main.authentication import auth
from ooiservices.app.main.c2 import _c2_get_instrument_driver_status, uframe_post_instrument_driver_command
from requests.exceptions import ConnectionError, Timeout
import socket as sock
import ast
import json
# Direct Access start.
# todo deprecate 'GET'?
@api.route('/c2/instrument/<string:reference_designator>/direct_access/start', methods=['POST', 'GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_direct_access_start(reference_designator):
""" Start direct access. (when button 'Start Direct' is selected.)
(Transition from 'DRIVER_STATE_COMMAND' to 'DRIVER_STATE_DIRECT_ACCESS'.)
POST Sample:
http://uft21.ooi.rutgers.edu:12572/instrument/api/RS10ENGC-XX00X-00-FLORDD001/start
Command: "DRIVER_EVENT_START_DIRECT"
"""
rd = reference_designator
NOT_NONE = 'NOT_NONE'
state_DRIVER_STATE_COMMAND = 'DRIVER_STATE_COMMAND'
capability_DRIVER_EVENT_START_DIRECT = 'DRIVER_EVENT_START_DIRECT'
target_state = 'DRIVER_STATE_DIRECT_ACCESS'
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prepare to execute - direct access start command
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate reference_designator
_state, _capabilities, result = direct_access_get_state_and_capabilities(rd)
if _state == target_state:
return jsonify(result)
# Verify _state and _capabilities match expected state and capabilities
verify_state_and_capabilities(rd, _state, _capabilities,
expected_state=state_DRIVER_STATE_COMMAND,
expected_capability=capability_DRIVER_EVENT_START_DIRECT)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Execute driver command 'DRIVER_EVENT_START_DIRECT' on upstream server
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Execute driver command
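        # '%22' is a URL-encoded double quote around the command name; the
        # timeout value is presumably in milliseconds (60000 = 60 s).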
suffix = 'command=%22DRIVER_EVENT_START_DIRECT%22&timeout=60000'
response = uframe_post_instrument_driver_command(reference_designator, 'execute', suffix)
if response.status_code != 200:
message = '(%s) execute %s failed.' % (str(response.status_code), capability_DRIVER_EVENT_START_DIRECT)
if response.content:
message = '(%s) %s' % (str(response.status_code), str(response.content))
raise Exception(message)
# Validate reference_designator
_state, _capabilities, result = direct_access_get_state_and_capabilities(rd)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Final - direct access response final checks for success or failure
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify _state and _capabilities match expected state and capabilities
verify_state_and_capabilities(rd, _state, _capabilities,
expected_state=target_state,
expected_capability=NOT_NONE)
return jsonify(result)
except Exception as err:
message = '(%s) exception: %s' % (rd, err.message)
current_app.logger.info(message)
return bad_request(err.message)
# Direct Access execute command.
@api.route('/c2/instrument/<string:reference_designator>/direct_access/execute', methods=['POST'])
@auth.login_required
@scope_required(u'command_control')
def c2_direct_access_execute(reference_designator):
""" Execute direct access command.
While in 'DRIVER_STATE_DIRECT_ACCESS', execute commands sent from direct access terminal window.
Process direct access terminal commands:
Receive content, send to instrument driver.
[Upon receipt of response from instrument, forward response to UI.] Use sniffer.
Note valid commands in direct_access_buttons list:
"direct_access_buttons": [
"Interrupt",
"Print Menu",
"Print Metadata",
"Read Data",
"Restore Factory Defaults",
"Restore Settings",
"Run Settings",
"Run Wiper",
"Save Settings",
"Set Clock>",
"Set Date>",
"Set>"
],
"input_dict": {
"Interrupt": "!!!!!",
"Print Menu": "$mnu\r\n",
"Print Metadata": "$met\r\n",
"Read Data": "$get\r\n",
"Restore Factory Defaults": "$rfd\r\n",
"Restore Settings": "$rls\r\n",
"Run Settings": "$run\r\n",
"Run Wiper": "$mvs\r\n",
"Save Settings": "$sto\r\n",
"Set Clock>": "$clk ",
"Set Date>": "$date \r\n",
"Set>": "set "
},
POST request.data shall provide attribute 'command' or 'command_text':
{
"command": "Print Metadata"
"title": "FLOR"
}
where valid command value is one of items in direct_access_buttons dictionary (key for input_config).
OR
{
"command_text": "$mnu\r\n"
"title": "FLOR"
}
"""
rd = reference_designator
TRIPS = '"""'
NOT_NONE = 'NOT_NONE'
state_DRIVER_STATE_DIRECT_ACCESS = 'DRIVER_STATE_DIRECT_ACCESS'
target_state = state_DRIVER_STATE_DIRECT_ACCESS
try:
command_request = None
command_text = None
command_request_value = None
using_command_request = True
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get request data, process required items.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if not request.data:
message = 'Direct access execute command requires request.data for POST.'
raise Exception(message)
# Get request data and process
request_data = json.loads(request.data)
if request_data is None:
message = 'Direct access execute command did not receive request data (%s).' % rd
raise Exception(message)
if 'title' not in request_data:
message = 'Malformed direct access execute command, missing title (%s).' % rd
raise Exception(message)
if ('command' not in request_data) and ('command_text' not in request_data):
message = 'Malformed direct access execute command, missing command or command text (%s).' % rd
raise Exception(message)
# Get title, and command_request or command_text.
title = request_data['title']
if 'command' in request_data:
command_request = request_data['command']
command_text = None
elif 'command_text' in request_data:
command_text = request_data['command_text']
command_request = None
using_command_request = False
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify required fields are not None.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if title is None:
message = 'No direct access title data provided for instrument %s.' % rd
raise Exception(message)
if using_command_request:
if command_request is None:
message = 'No direct access command data provided for instrument %s.' % rd
raise Exception(message)
else:
if command_text is None:
message = 'No direct access command_text data provided for instrument %s.' % rd
raise Exception(message)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prepare to execute - get state, capabilities and status.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
_state, _capabilities, result = direct_access_get_state_and_capabilities(rd)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify current _state and _capabilities match expected state and capabilities
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verify_state_and_capabilities(rd, _state, _capabilities,
expected_state=state_DRIVER_STATE_DIRECT_ACCESS,
expected_capability=NOT_NONE)
if using_command_request:
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get valid direct access commands from direct_access_buttons
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
valid_commands = []
if result:
if 'direct_access_buttons' in result:
valid_commands = result['direct_access_buttons']
else:
message = 'Instrument %s missing direct_access_buttons dictionary.' % rd
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify there are valid commands; otherwise error.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if not valid_commands:
message = 'Instrument %s direct_access_buttons list is empty.' % rd
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify command_request from request data is a valid command; otherwise error.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if command_request not in valid_commands:
message = 'Instrument %s command received \'%s\' not in list of available commands.' % \
(rd, command_request)
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify direct_config available; otherwise error.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if 'direct_config' not in result['value']:
message = 'Instrument %s has missing direct access direct_config list.' % rd
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify direct_config is not empty; otherwise error.
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if not result['value']['direct_config']:
message = 'Instrument %s has empty direct access direct_config list.' % rd
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# If direct_config has contents, process list of dictionaries
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ip = None
data = None
eol = None
located_requested_item = False
for direct_config in result['value']['direct_config']:
# Get and check title; if title in dictionary does not match requested title; go to next item.
_title = None
if 'title' in direct_config:
_title = direct_config['title']
if _title != title:
continue
# Identified item in direct_config; process item
located_requested_item = True
# Get and check ip from direct_config dictionary
ip = None
if 'ip' in direct_config:
ip = direct_config['ip']
            if not ip:
message = 'Instrument %s has invalid ip: \'%r\'.' % (rd, ip)
raise Exception(message)
# Get and check data from direct_config dictionary
data = None
if 'data' in direct_config:
data = direct_config['data']
            if not data:
message = 'Instrument %s has invalid data: \'%r\'.' % (rd, data)
raise Exception(message)
# Get and check eol from direct_config dictionary
eol = None
if 'eol' in direct_config:
eol = direct_config['eol']
            if not eol:
message = 'Instrument %s has invalid or empty eol: \'%r\'.' % (rd, eol)
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# If processing a command_request, get remaining items for processing
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if using_command_request:
# Verify input_dict is in direct_config
if 'input_dict' not in direct_config:
message = 'Instrument %s has missing direct access input_dict dictionary.' % rd
raise Exception(message)
# Get command_request_values; verify command_request in list and therefore valid.
command_request_values = direct_config['input_dict']
if command_request not in command_request_values:
                    message = 'Instrument %s direct access command %s not found in direct_config.' % \
                              (rd, command_request)
raise Exception(message)
# Get command_request_value from input_dict provided.
command_request_value = command_request_values[command_request]
# Was the requested title located in the direct_config? If not, error.
if not located_requested_item:
message = 'Instrument %s did not have a matching title \'%s\' in direct access direct_config.' % (rd, title)
raise Exception(message)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prepare command value to send to instrument.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if using_command_request:
# Using command button value
command_value = command_request_value
else:
# Using command_text, prepare command_value
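            # Wrapping the text in triple quotes and using ast.literal_eval
            # turns escape sequences in the posted text into real control
            # characters, e.g. '$mnu\r\n' (with literal backslashes) becomes
            # '$mnu' followed by an actual carriage return and newline.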
try:
command_value = ast.literal_eval(TRIPS + command_text + TRIPS)
if eol:
command_value += eol
except Exception as err:
message = 'Exception processing command value (literal_eval): %s' % str(err)
raise Exception(message)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Execute - direct access command.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
try:
send_command(rd, command_value, ip, data)
except Exception as err:
message = 'Exception processing command: %s' % str(err)
raise Exception(message)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Final - Verify _state and _capabilities match expected state and capabilities.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verify_state_and_capabilities(rd, _state, _capabilities,
expected_state=target_state,
expected_capability=NOT_NONE)
return jsonify(result)
except Exception as err:
message = '(%s) exception: %s' % (rd, err.message)
current_app.logger.info(message)
return bad_request(err.message)
# Direct Access exit
@api.route('/c2/instrument/<string:reference_designator>/direct_access/exit', methods=['POST', 'GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_direct_access_exit(reference_designator):
""" Exit direct access. (when button 'Exit Direct' is selected.)
Transition from 'DRIVER_STATE_DIRECT_ACCESS' to 'DRIVER_STATE_COMMAND' (execute 'DRIVER_EVENT_STOP_DIRECT')
"""
rd = reference_designator
NOT_NONE = 'NOT_NONE'
state_DRIVER_STATE_DIRECT_ACCESS = 'DRIVER_STATE_DIRECT_ACCESS'
capability_DRIVER_EVENT_STOP_DIRECT = 'DRIVER_EVENT_STOP_DIRECT'
target_state = 'DRIVER_STATE_COMMAND'
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Prepare to execute - direct access start command
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate reference_designator
_state, _capabilities, result = direct_access_get_state_and_capabilities(rd)
# If current state is the same as target state, return status result
if _state == target_state:
return jsonify(result)
# Verify current _state and _capabilities match expected state and capabilities
verify_state_and_capabilities(rd, _state, _capabilities,
expected_state=state_DRIVER_STATE_DIRECT_ACCESS,
expected_capability=capability_DRIVER_EVENT_STOP_DIRECT)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Execute driver command 'DRIVER_EVENT_STOP_DIRECT' on upstream server
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
suffix = 'command=%22DRIVER_EVENT_STOP_DIRECT%22&timeout=60000'
# Execute driver command
response = uframe_post_instrument_driver_command(reference_designator, 'execute', suffix)
if response.status_code != 200:
message = '(%s) execute %s failed.' % (str(response.status_code), capability_DRIVER_EVENT_STOP_DIRECT)
if response.content:
message = '(%s) %s' % (str(response.status_code), str(response.content))
raise Exception(message)
# Validate reference_designator
_state, _capabilities, result = direct_access_get_state_and_capabilities(rd)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Final - direct access response final checks for success or failure
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify _state and _capabilities match expected state and capabilities
verify_state_and_capabilities(rd, _state, _capabilities,
expected_state=target_state,
expected_capability=NOT_NONE)
return jsonify(result)
except Exception as err:
message = '(%s) exception: %s' % (rd, err.message)
current_app.logger.info(message)
return bad_request(err.message)
# Direct Access sniffer
@api.route('/c2/instrument/<string:reference_designator>/direct_access/sniffer', methods=['POST', 'GET'])
@auth.login_required
@scope_required(u'command_control')
def c2_direct_access_sniffer(reference_designator):
""" Sniff port/ip/title for data, return data
Sample request:
http://localhost:4000/c2/instrument/RS10ENGC-XX00X-00-FLORDD001/direct_access/sniffer
(using hardcoded message: message = '{"ip": "128.6.240.37", "port": 54366}' )
Sample response:
{
"msg": "3671820966.2507 : PA_HEARTBEAT : CRC OK : 'HB'\n"
}
curl -H "Content-Type: application/json" -X POST --upload-file post_sniff_flord.txt http://localhost:4000/c2/instrument/RS10ENGC-XX00X-00-FLORDD001/direct_access/sniffer
curl -H "Content-Type: application/json" -X POST --upload-file post_sniff_vadcp_1.txt http://localhost:4000/c2/instrument/RS10ENGC-XX00X-00-VADCPA011/direct_access/sniffer
"""
# VADCP
#message = '{"ip": "128.6.240.37", "port": 34868, "title": "Beams 1-4"}'
#message = '{"ip": "128.6.240.37", "port": 48989, "title": "5th Beam"}'
# FLORD
#message = '{"ip": "128.6.240.37", "port": 54366, "title": "FLOR"}'
#message = '{"ip": "128.6.240.37", "port": 54366}'
# {"ip": "128.6.240.37", "port": 54366}
_data = None
rd = reference_designator
required_variables = ['ip', 'port', 'title']
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get request data, process required items.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if not request.data:
message = 'Direct access sniffer requires request.data for POST.'
raise Exception(message)
# Get request data and process
request_data = json.loads(request.data)
if request_data is None:
            message = 'Direct access sniffer request data is None (%s).' % rd
raise Exception(message)
# Verify required items are provided in request.data and not empty
for item in required_variables:
if item not in request_data:
message = 'Malformed direct access sniffer request, missing %s (%s).' % (item, rd)
raise Exception(message)
else:
                if not request_data[item]:
message = 'Malformed direct access sniffer request, %s is empty (%s).' % (item, rd)
raise Exception(message)
# Get ip, port and title
ip = request_data['ip']
port = request_data['port']
title = request_data['title']
# Issue request to sniffer process
s = None
try:
s = sock.socket(sock.AF_INET, sock.SOCK_STREAM)
s.connect((ip, port))
_data = None
try:
_data = s.recv(4096)
except Exception:
pass
if s is not None:
s.close()
#if _data:
# if debug: print 'Received: ', repr(_data)
except Exception:
if s is not None:
s.close()
return jsonify(msg=_data)
except Exception as err:
message = '(%s) exception: %s' % (reference_designator, err.message)
current_app.logger.info(message)
return bad_request(err.message)
#==================================================================
# SUPPORTING FUNCTIONS...
#==================================================================
def direct_access_get_state_and_capabilities(reference_designator):
""" Get current state and capabilities information for an instrument.
Overview:
Get instrument status
        Get state from resulting status
Get capabilities from resulting status
Add 'direct_access_buttons' dict to _status
Return state, capabilities and _status
"""
state = None
capabilities = []
try:
# Get instrument status.
try:
_status = _c2_get_instrument_driver_status(reference_designator)
except Exception as err:
message = err.message
raise Exception(message)
if _status is None:
            message = 'Instrument (%s) status request failed.' % reference_designator
raise Exception(message)
        # Get state from resulting status, if present
if 'value' in _status:
if 'state' in _status['value']:
state = _status['value']['state']
        # Get capabilities from resulting status, if present
if 'value' in _status:
if 'capabilities' in _status['value']:
capabilities = _status['value']['capabilities'][0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get 'direct_access_buttons' (list of button names for direct access)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
try:
direct_config = None
if _status['value']['direct_config']:
direct_config = _status['value']['direct_config']
temp = {}
if direct_config:
temp = get_direct_access_buttons(direct_config)
_status['direct_access_buttons'] = temp
except Exception:
_status['direct_access_buttons'] = {}
return state, capabilities, _status
except Exception as err:
message = err.message
raise Exception(message)
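# Example usage (a sketch; reference designator taken from the samples above):
#   state, capabilities, status = \
#       direct_access_get_state_and_capabilities('RS10ENGC-XX00X-00-FLORDD001')
#   # state is e.g. 'DRIVER_STATE_COMMAND'; status carries 'direct_access_buttons'.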
def verify_state_and_capabilities(reference_designator, state, capabilities, expected_state, expected_capability):
""" Verify current state and capabilities match expected state and capability. Raise if not.
"""
NOT_NONE = 'NOT_NONE'
try:
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify state
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # If expected_state is None, raise exception
        if expected_state is None:
            message = 'Instrument (%s) expected state not provided; current state is %s.' % \
                      (reference_designator, state)
            raise Exception(message)
# If current state is not the state expected, raise exception
if state != expected_state:
message = 'Instrument (%s) not in %s state, current state is %s.' % \
(reference_designator, expected_state, state)
raise Exception(message)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Verify capability
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # If no capabilities were returned for the current state, raise exception
if not capabilities:
message = 'Instrument (%s) did not return capabilities; current state %s.' % \
(reference_designator, state)
raise Exception(message)
        # If a capability is expected but unspecified (NOT_NONE) and there are none, raise exception
if expected_capability == NOT_NONE:
if not capabilities:
message = 'Instrument (%s) does not have any capabilities; current state %s.' % \
(reference_designator, state)
raise Exception(message)
# If expected capability not provided, raise exception
elif expected_capability not in capabilities:
message = 'Instrument (%s) does not have %s capability; current state %s.' % \
(reference_designator, expected_capability, state)
raise Exception(message)
except Exception:
raise
def send_command(rd, command, ip, data):
""" Send command to rd using ip and data [port]. Sample command: '$met\r\n'
"""
try:
c = sock.socket(sock.AF_INET, sock.SOCK_STREAM)
c.connect((ip, data))
content = command
c.sendall(content)
c.shutdown(sock.SHUT_WR)
c.close()
return
except ConnectionError:
message = 'ConnectionError for direct access during send_command.'
current_app.logger.info(message)
raise Exception(message)
except Timeout:
message = 'Timeout for direct access during send_command.'
current_app.logger.info(message)
raise Exception(message)
except Exception as err:
message = 'Instrument %s exception during send command %s. Error: %s' % (rd, command, str(err))
current_app.logger.info(message)
raise
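# Example usage (a sketch; ip and data (the command port) taken from the
# direct_config sample in get_direct_access_buttons below):
#   send_command('RS10ENGC-XX00X-00-FLORDD001', '$met\r\n', 'uft20', 40291)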
def get_direct_access_buttons(direct_config):
""" Get READ_ONLY and IMMUTABLE display values for UI from instrument 'parameters' dictionary.
Sample Input:
"direct_config": [
{
"character_delay": 0.0,
"data": 40291,
"eol": "\r\n",
"input_dict": {
"Interrupt": "!!!!!",
"Print Menu": "$mnu\r\n",
"Print Metadata": "$met\r\n",
"Read Data": "$get\r\n",
"Restore Factory Defaults": "$rfd\r\n",
"Restore Settings": "$rls\r\n",
"Run Settings": "$run\r\n",
"Run Wiper": "$mvs\r\n",
"Save Settings": "$sto\r\n",
"Set Clock>": "$clk ",
"Set Date>": "$date \r\n",
"Set>": "set "
},
"ip": "uft20",
"sniffer": 60641,
"title": "FLOR"
}
],
. . .
Sample Output:
['Interrupt', 'Print Menu', 'Print Metadata', 'Read Data', 'Restore Factory Defaults',
'Restore Settings', 'Run Settings', 'Run Wiper', 'Save Settings', 'Set Clock>', 'Set Date>', 'Set>']
. . .
"""
result = []
try:
        # If no direct_config, then return empty list.
        if not direct_config:
            return result
        # If direct_config does not have attribute 'input_dict', return empty list.
        if 'input_dict' not in direct_config[0]:
            return result
        # If direct_config attribute 'input_dict' is empty, return empty list.
        if not direct_config[0]['input_dict']:
            return result
# Create list of direct access buttons
input_dict = direct_config[0]['input_dict']
result = input_dict.keys()
result.sort()
return result
except Exception as err:
current_app.logger.info(err.message)
raise
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format."""
import cStringIO
import re
from collections import deque
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
'PrintFieldValue', 'Merge' ]
# Infinity and NaN are not explicitly supported by Python pre-2.6, and
# float('inf') does not work on Windows (pre-2.6).
_INFINITY = 1e10000 # overflows, thus will actually be infinity.
_NAN = _INFINITY * 0
class ParseError(Exception):
"""Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
out = cStringIO.StringIO()
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
for field, value in message.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
PrintField(field, element, out, indent, as_utf8, as_one_line)
else:
PrintField(field, value, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
"""Print a single field name/value pair. For repeated fields, the value
should be a single element."""
  out.write(' ' * indent)
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
if as_one_line:
out.write(' ')
else:
out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
as_utf8=False, as_one_line=False):
"""Print a single field value (not including name). For repeated fields,
the value should be a single element."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if as_one_line:
out.write(' { ')
PrintMessage(value, out, indent, as_utf8, as_one_line)
out.write('}')
else:
out.write(' {\n')
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
out.write(' ' * indent + '}')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
out.write(field.enum_type.values_by_number[value].name)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if type(value) is unicode:
out.write(_CEscape(value.encode('utf-8'), as_utf8))
else:
out.write(_CEscape(value, as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write("true")
else:
out.write("false")
else:
out.write(str(value))
def Merge(text, message):
"""Merges an ASCII representation of a protocol message into a message.
Args:
text: Message ASCII representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On ASCII parsing problems.
"""
tokenizer = _Tokenizer(text)
while not tokenizer.AtEnd():
_MergeField(tokenizer, message)
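# Example round trip (a sketch; `person_pb2.Person` is a hypothetical
# generated message class with fields `name` (string) and `id` (int32)):
#   person = person_pb2.Person()
#   Merge('name: "Alice" id: 42', person)
#   assert MessageToString(person, as_one_line=True) == 'name: "Alice" id: 42'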
def _MergeField(tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of ASCII parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
field = message.Extensions._FindExtensionByName(name)
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' % (
name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifier()
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' % (
message_descriptor.full_name, name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
_MergeField(tokenizer, sub_message)
else:
_MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
"""Merges a single protocol message scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of ASCII parsing problems.
RuntimeError: On runtime errors.
"""
tokenizer.Consume(':')
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = tokenizer.ConsumeInt32()
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = tokenizer.ConsumeInt64()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = tokenizer.ConsumeUint32()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = tokenizer.ConsumeUint64()
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
# Enum can be specified by a number (the enum value), or by
# a string literal (the enum name).
enum_descriptor = field.enum_type
if tokenizer.LookingAtInteger():
number = tokenizer.ConsumeInt32()
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value with number %d.' % (
enum_descriptor.full_name, number))
else:
identifier = tokenizer.ConsumeIdentifier()
enum_value = enum_descriptor.values_by_name.get(identifier, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value named %s.' % (
enum_descriptor.full_name, identifier))
value = enum_value.number
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
class _Tokenizer(object):
"""Protocol buffer ASCII representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile(
'[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
'[0-9+-][0-9a-zA-Z_.+-]*|' # a number
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
_IDENTIFIER = re.compile('\w+')
_INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker()]
_FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile("nanf?", re.IGNORECASE)
def __init__(self, text_message):
self._text_message = text_message
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = deque(text_message.split('\n'))
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._SkipWhitespace()
self.NextToken()
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return self.token == ''
def _PopLine(self):
while len(self._current_line) <= self._column:
if not self._lines:
self._current_line = ''
return
self._line += 1
self._column = 0
self._current_line = self._lines.popleft()
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._WHITESPACE.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self._ParseError('Expected "%s".' % token)
def LookingAtInteger(self):
"""Checks if the current token is an integer.
Returns:
True iff the current token is an integer.
"""
if not self.token:
return False
c = self.token[0]
return (c >= '0' and c <= '9') or c == '-' or c == '+'
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self._ParseError('Expected identifier.')
self.NextToken()
return result
def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint32(self):
"""Consumes an unsigned 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeInt64(self):
"""Consumes a signed 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint64(self):
"""Consumes an unsigned 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
text = self.token
if self._FLOAT_INFINITY.match(text):
self.NextToken()
if text.startswith('-'):
return -_INFINITY
return _INFINITY
if self._FLOAT_NAN.match(text):
self.NextToken()
return _NAN
try:
result = float(text)
except ValueError, e:
raise self._FloatParseError(e)
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if self.token in ('true', 't', '1'):
self.NextToken()
return True
elif self.token in ('false', 'f', '0'):
self.NextToken()
return False
else:
raise self._ParseError('Expected "true" or "false".')
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
    parts = [self._ConsumeSingleByteString()]
    while len(self.token) > 0 and self.token[0] in ('\'', '"'):
      parts.append(self._ConsumeSingleByteString())
    return "".join(parts)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
"""
text = self.token
if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string.')
if len(text) < 2 or text[-1] != text[0]:
raise self._ParseError('String missing ending quote.')
try:
result = _CUnescape(text[1:-1])
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def _ParseInteger(self, text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
      ValueError: Thrown iff the text is not a valid integer.
"""
pos = 0
if text.startswith('-'):
pos += 1
base = 10
if text.startswith('0x', pos) or text.startswith('0X', pos):
base = 16
elif text.startswith('0', pos):
base = 8
# Do the actual parsing. Exception handling is propagated to caller.
result = int(text, base)
# Check if the integer is sane. Exceptions handled by callers.
checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
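    # For example (a sketch): _ParseInteger('0x1f') == 31 (hex base), and
    # _ParseInteger('-010', is_signed=True) == -8 (octal base).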
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError('%d:%d : %s' % (
self._previous_line + 1, self._previous_column + 1, message))
def _ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError('%d:%d : %s' % (
self._line + 1, self._column - len(self.token) + 1, message))
def _IntegerParseError(self, e):
return self._ParseError('Couldn\'t parse integer: ' + str(e))
def _FloatParseError(self, e):
return self._ParseError('Couldn\'t parse number: ' + str(e))
def _StringParseError(self, e):
return self._ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._lines and len(self._current_line) <= self._column:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
# text.encode('string_escape') does not seem to satisfy our needs as it
# encodes unprintable characters using two-digit hex escapes whereas our
# C++ unescaping function allows hex escapes to be any length. So,
# "\0011".encode('string_escape') ends up being "\\x011", which will be
# decoded in C++ as a single-character string with char code 0x11.
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n" # optional escape
if o == 13: return r"\r" # optional escape
if o == 9: return r"\t" # optional escape
if o == 39: return r"\'" # optional escape
if o == 34: return r'\"' # necessary escape
if o == 92: return r"\\" # necessary escape
# necessary escapes
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
def _CUnescape(text):
def ReplaceHex(m):
return chr(int(m.group(0)[2:], 16))
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
return result.decode('string_escape')
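# For example (a sketch): _CEscape('\x11"', as_utf8=False) yields r'\021\"',
# and _CUnescape(r'\x11\n') yields chr(0x11) followed by a real newline.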
"""
Platform that supports scanning iCloud devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.icloud/
"""
import logging
import random
import os
import voluptuous as vol
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, DOMAIN, ATTR_ATTRIBUTES, ENTITY_ID_FORMAT)
from homeassistant.components.zone import active_zone
from homeassistant.helpers.event import track_utc_time_change
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from homeassistant.util.location import distance
from homeassistant.loader import get_component
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pyicloud==0.9.1']
CONF_IGNORED_DEVICES = 'ignored_devices'
CONF_ACCOUNTNAME = 'account_name'
# entity attributes
ATTR_ACCOUNTNAME = 'account_name'
ATTR_INTERVAL = 'interval'
ATTR_DEVICENAME = 'device_name'
ATTR_BATTERY = 'battery'
ATTR_DISTANCE = 'distance'
ATTR_DEVICESTATUS = 'device_status'
ATTR_LOWPOWERMODE = 'low_power_mode'
ATTR_BATTERYSTATUS = 'battery_status'
ICLOUDTRACKERS = {}
_CONFIGURING = {}
DEVICESTATUSSET = ['features', 'maxMsgChar', 'darkWake', 'fmlyShare',
'deviceStatus', 'remoteLock', 'activationLocked',
'deviceClass', 'id', 'deviceModel', 'rawDeviceModel',
'passcodeLength', 'canWipeAfterLock', 'trackingInfo',
'location', 'msg', 'batteryLevel', 'remoteWipe',
'thisDevice', 'snd', 'prsId', 'wipeInProgress',
'lowPowerMode', 'lostModeEnabled', 'isLocating',
'lostModeCapable', 'mesg', 'name', 'batteryStatus',
'lockedTimestamp', 'lostTimestamp', 'locationCapable',
'deviceDisplayName', 'lostDevice', 'deviceColor',
'wipedTimestamp', 'modelDisplayName', 'locationEnabled',
'isMac', 'locFoundEnabled']
DEVICESTATUSCODES = {'200': 'online', '201': 'offline', '203': 'pending',
'204': 'unregistered'}
SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ACCOUNTNAME): vol.All(cv.ensure_list, [cv.slugify]),
vol.Optional(ATTR_DEVICENAME): cv.slugify,
vol.Optional(ATTR_INTERVAL): cv.positive_int,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(ATTR_ACCOUNTNAME): cv.slugify,
})
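# Example configuration.yaml entry (a sketch; values are placeholders):
#
# device_tracker:
#   - platform: icloud
#     username: user@example.com
#     password: YOUR_PASSWORD
#     account_name: user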
def setup_scanner(hass, config: dict, see):
"""Set up the iCloud Scanner."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
account = config.get(CONF_ACCOUNTNAME, slugify(username.partition('@')[0]))
icloudaccount = Icloud(hass, username, password, account, see)
if icloudaccount.api is not None:
ICLOUDTRACKERS[account] = icloudaccount
else:
_LOGGER.error("No ICLOUDTRACKERS added")
return False
def lost_iphone(call):
"""Call the lost iphone function if the device is found."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].lost_iphone(devicename)
hass.services.register(DOMAIN, 'icloud_lost_iphone', lost_iphone,
schema=SERVICE_SCHEMA)
def update_icloud(call):
"""Call the update function of an icloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].update_icloud(devicename)
hass.services.register(DOMAIN, 'icloud_update', update_icloud,
schema=SERVICE_SCHEMA)
def reset_account_icloud(call):
"""Reset an icloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].reset_account_icloud()
hass.services.register(DOMAIN, 'icloud_reset_account',
reset_account_icloud, schema=SERVICE_SCHEMA)
def setinterval(call):
"""Call the update function of an icloud account."""
accounts = call.data.get(ATTR_ACCOUNTNAME, ICLOUDTRACKERS)
interval = call.data.get(ATTR_INTERVAL)
devicename = call.data.get(ATTR_DEVICENAME)
for account in accounts:
if account in ICLOUDTRACKERS:
ICLOUDTRACKERS[account].setinterval(interval, devicename)
hass.services.register(DOMAIN, 'icloud_set_interval', setinterval,
schema=SERVICE_SCHEMA)
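    # Example service call data for 'device_tracker.icloud_set_interval'
    # (a sketch): {"account_name": ["user"], "device_name": "iphone", "interval": 5}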
# Tells the bootstrapper that the component was successfully initialized
return True
class Icloud(object):
"""Represent an icloud account in Home Assistant."""
def __init__(self, hass, username, password, name, see):
"""Initialize an iCloud account."""
self.hass = hass
self.username = username
self.password = password
self.api = None
self.accountname = name
self.devices = {}
self.seen_devices = {}
self._overridestates = {}
self._intervals = {}
self.see = see
self._trusted_device = None
self._verification_code = None
self._attrs = {}
self._attrs[ATTR_ACCOUNTNAME] = name
self.reset_account_icloud()
randomseconds = random.randint(10, 59)
track_utc_time_change(
self.hass, self.keep_alive,
second=randomseconds
)
def reset_account_icloud(self):
"""Reset an icloud account."""
from pyicloud import PyiCloudService
from pyicloud.exceptions import (
PyiCloudFailedLoginException, PyiCloudNoDevicesException)
icloud_dir = self.hass.config.path('icloud')
if not os.path.exists(icloud_dir):
os.makedirs(icloud_dir)
try:
self.api = PyiCloudService(
self.username, self.password,
cookie_directory=icloud_dir,
verify=True)
except PyiCloudFailedLoginException as error:
self.api = None
_LOGGER.error('Error logging into iCloud Service: %s', error)
return
try:
self.devices = {}
self._overridestates = {}
self._intervals = {}
for device in self.api.devices:
status = device.status(DEVICESTATUSSET)
devicename = slugify(status['name'].replace(' ', '', 99))
if devicename not in self.devices:
self.devices[devicename] = device
self._intervals[devicename] = 1
self._overridestates[devicename] = None
except PyiCloudNoDevicesException:
_LOGGER.error('No iCloud Devices found!')
def icloud_trusted_device_callback(self, callback_data):
"""The trusted device is chosen."""
self._trusted_device = int(callback_data.get('0', '0'))
self._trusted_device = self.api.trusted_devices[self._trusted_device]
if self.accountname in _CONFIGURING:
request_id = _CONFIGURING.pop(self.accountname)
configurator = get_component('configurator')
configurator.request_done(request_id)
def icloud_need_trusted_device(self):
"""We need a trusted device."""
configurator = get_component('configurator')
if self.accountname in _CONFIGURING:
return
devicesstring = ''
devices = self.api.trusted_devices
for i, device in enumerate(devices):
devicesstring += "{}: {};".format(i, device.get('deviceName'))
_CONFIGURING[self.accountname] = configurator.request_config(
self.hass, 'iCloud {}'.format(self.accountname),
self.icloud_trusted_device_callback,
description=(
'Please choose your trusted device by entering'
' the index from this list: ' + devicesstring),
entity_picture="/static/images/config_icloud.png",
submit_caption='Confirm',
fields=[{'id': '0'}]
)
def icloud_verification_callback(self, callback_data):
"""The trusted device is chosen."""
self._verification_code = callback_data.get('0')
if self.accountname in _CONFIGURING:
request_id = _CONFIGURING.pop(self.accountname)
configurator = get_component('configurator')
configurator.request_done(request_id)
def icloud_need_verification_code(self):
"""We need a verification code."""
configurator = get_component('configurator')
if self.accountname in _CONFIGURING:
return
if self.api.send_verification_code(self._trusted_device):
self._verification_code = 'waiting'
_CONFIGURING[self.accountname] = configurator.request_config(
self.hass, 'iCloud {}'.format(self.accountname),
self.icloud_verification_callback,
description=('Please enter the validation code:'),
entity_picture="/static/images/config_icloud.png",
submit_caption='Confirm',
fields=[{'code': '0'}]
)
def keep_alive(self, now):
"""Keep the api alive."""
from pyicloud.exceptions import PyiCloud2FARequiredError
if self.api is None:
self.reset_account_icloud()
if self.api is None:
return
if self.api.requires_2fa:
try:
self.api.authenticate()
except PyiCloud2FARequiredError:
if self._trusted_device is None:
self.icloud_need_trusted_device()
return
if self._verification_code is None:
self.icloud_need_verification_code()
return
if self._verification_code == 'waiting':
return
if self.api.validate_verification_code(
self._trusted_device, self._verification_code):
self._verification_code = None
else:
self.api.authenticate()
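        # Poll each device whenever the minute-of-day is a multiple of its
        # interval; for long intervals (> 10 minutes) also poll 2 and 4
        # minutes past the mark as a retry window.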
currentminutes = dt_util.now().hour * 60 + dt_util.now().minute
for devicename in self.devices:
interval = self._intervals.get(devicename, 1)
if ((currentminutes % interval == 0) or
(interval > 10 and
currentminutes % interval in [2, 4])):
self.update_device(devicename)
def determine_interval(self, devicename, latitude, longitude, battery):
"""Calculate new interval."""
distancefromhome = None
zone_state = self.hass.states.get('zone.home')
zone_state_lat = zone_state.attributes['latitude']
zone_state_long = zone_state.attributes['longitude']
distancefromhome = distance(latitude, longitude, zone_state_lat,
zone_state_long)
distancefromhome = round(distancefromhome / 1000, 1)
currentzone = active_zone(self.hass, latitude, longitude)
if ((currentzone is not None and
currentzone == self._overridestates.get(devicename)) or
(currentzone is None and
self._overridestates.get(devicename) == 'away')):
return
self._overridestates[devicename] = None
if currentzone is not None:
self._intervals[devicename] = 30
return
if distancefromhome is None:
return
if distancefromhome > 25:
self._intervals[devicename] = round(distancefromhome / 2, 0)
elif distancefromhome > 10:
self._intervals[devicename] = 5
else:
self._intervals[devicename] = 1
if battery is not None and battery <= 33 and distancefromhome > 3:
self._intervals[devicename] = self._intervals[devicename] * 2
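        # Worked example: 30 km from home gives a 15-minute interval
        # (round(30 / 2)); 12 km gives 5 minutes; under 10 km, 1 minute.
        # At or below 33% battery and more than 3 km out, the interval doubles.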
def update_device(self, devicename):
"""Update the device_tracker entity."""
from pyicloud.exceptions import PyiCloudNoDevicesException
# An entity will not be created by see() when track=false in
# 'known_devices.yaml', but we need to see() it at least once
entity = self.hass.states.get(ENTITY_ID_FORMAT.format(devicename))
if entity is None and devicename in self.seen_devices:
return
attrs = {}
kwargs = {}
if self.api is None:
return
try:
for device in self.api.devices:
if str(device) != str(self.devices[devicename]):
continue
status = device.status(DEVICESTATUSSET)
dev_id = status['name'].replace(' ', '', 99)
dev_id = slugify(dev_id)
attrs[ATTR_DEVICESTATUS] = DEVICESTATUSCODES.get(
status['deviceStatus'], 'error')
attrs[ATTR_LOWPOWERMODE] = status['lowPowerMode']
attrs[ATTR_BATTERYSTATUS] = status['batteryStatus']
attrs[ATTR_ACCOUNTNAME] = self.accountname
battery = status.get('batteryLevel', 0) * 100
location = status['location']
if location:
self.determine_interval(
devicename, location['latitude'],
location['longitude'], battery)
interval = self._intervals.get(devicename, 1)
attrs[ATTR_INTERVAL] = interval
accuracy = location['horizontalAccuracy']
kwargs['dev_id'] = dev_id
kwargs['host_name'] = status['name']
kwargs['gps'] = (location['latitude'],
location['longitude'])
kwargs['battery'] = battery
kwargs['gps_accuracy'] = accuracy
kwargs[ATTR_ATTRIBUTES] = attrs
self.see(**kwargs)
self.seen_devices[devicename] = True
except PyiCloudNoDevicesException:
_LOGGER.error('No iCloud Devices found!')
def lost_iphone(self, devicename):
"""Call the lost iphone function if the device is found."""
if self.api is None:
return
self.api.authenticate()
for device in self.api.devices:
if devicename is None or device == self.devices[devicename]:
device.play_sound()
def update_icloud(self, devicename=None):
"""Authenticate against iCloud and scan for devices."""
from pyicloud.exceptions import PyiCloudNoDevicesException
if self.api is None:
return
try:
if devicename is not None:
if devicename in self.devices:
self.devices[devicename].update_icloud()
else:
_LOGGER.error("devicename %s unknown for account %s",
devicename, self._attrs[ATTR_ACCOUNTNAME])
else:
for device in self.devices:
self.devices[device].update_icloud()
except PyiCloudNoDevicesException:
_LOGGER.error('No iCloud Devices found!')
def setinterval(self, interval=None, devicename=None):
"""Set the interval of the given devices."""
devs = [devicename] if devicename else self.devices
for device in devs:
devid = DOMAIN + '.' + device
devicestate = self.hass.states.get(devid)
if interval is not None:
if devicestate is not None:
self._overridestates[device] = active_zone(
self.hass,
float(devicestate.attributes.get('latitude', 0)),
float(devicestate.attributes.get('longitude', 0)))
if self._overridestates[device] is None:
self._overridestates[device] = 'away'
self._intervals[device] = interval
else:
self._overridestates[device] = None
self.update_device(device)
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Layer that transforms VNC config objects to ifmap representation
"""
from cfgm_common.zkclient import ZookeeperClient, IndexAllocator
from gevent import ssl, monkey
monkey.patch_all()
import gevent
import gevent.event
import sys
import time
from pprint import pformat
from lxml import etree, objectify
import StringIO
import re
import socket
import errno
import subprocess
import netaddr
from bitarray import bitarray
from cfgm_common.ifmap.client import client, namespaces
from cfgm_common.ifmap.request import NewSessionRequest, RenewSessionRequest,\
EndSessionRequest, PublishRequest, SearchRequest, SubscribeRequest,\
PurgeRequest, PollRequest
from cfgm_common.ifmap.id import IPAddress, MACAddress, Device,\
AccessRequest, Identity, CustomIdentity
from cfgm_common.ifmap.operations import PublishUpdateOperation,\
PublishNotifyOperation, PublishDeleteOperation, SubscribeUpdateOperation,\
SubscribeDeleteOperation
from cfgm_common.ifmap.util import attr, link_ids
from cfgm_common.ifmap.response import Response, newSessionResult
from cfgm_common.ifmap.metadata import Metadata
from cfgm_common import obj_to_json
from cfgm_common.exceptions import ResourceExhaustionError, ResourceExistsError
import copy
import json
import uuid
import datetime
import pycassa
import pycassa.util
import pycassa.cassandra.ttypes
from pycassa.system_manager import *
from pycassa.util import *
import kombu
#from cfgm_common import vnc_type_conv
from provision_defaults import *
import cfgm_common.imid
from cfgm_common.exceptions import *
from gen.vnc_ifmap_client_gen import *
from gen.vnc_cassandra_client_gen import *
import logging
logger = logging.getLogger(__name__)
class VncIfmapClient(VncIfmapClientGen):
def __init__(self, db_client_mgr, ifmap_srv_ip, ifmap_srv_port,
uname, passwd, ssl_options, ifmap_srv_loc=None):
super(VncIfmapClient, self).__init__()
# TODO username/passwd from right place
self._CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd"
self._IPERMS_NAME = "id-perms"
self._IPERMS_FQ_NAME = "contrail:" + self._IPERMS_NAME
self._SUBNETS_NAME = "contrail:subnets"
self._IPAMS_NAME = "contrail:ipams"
self._SG_RULE_NAME = "contrail:sg_rules"
self._POLICY_ENTRY_NAME = "contrail:policy_entry"
self._NAMESPACES = {
'env': "http://www.w3.org/2003/05/soap-envelope",
'ifmap': "http://www.trustedcomputinggroup.org/2010/IFMAP/2",
'meta':
"http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2",
'contrail': self._CONTRAIL_XSD
}
self._db_client_mgr = db_client_mgr
# launch mapserver
if ifmap_srv_loc:
self._launch_mapserver(ifmap_srv_ip, ifmap_srv_port, ifmap_srv_loc)
mapclient = client(("%s" % (ifmap_srv_ip), "%s" % (ifmap_srv_port)),
uname, passwd, self._NAMESPACES, ssl_options)
self._mapclient = mapclient
connected = False
while not connected:
try:
result = mapclient.call('newSession', NewSessionRequest())
connected = True
except socket.error as e:
time.sleep(3)
mapclient.set_session_id(newSessionResult(result).get_session_id())
mapclient.set_publisher_id(newSessionResult(result).get_publisher_id())
# Initialize ifmap-id handler (alloc|convert|parse etc.)
self._imid_handler = Imid()
imid = self._imid_handler
# Publish init config (TODO this should come from api-server init)
# config-root
        buf = StringIO.StringIO()
perms = Provision.defaults.perms['config-root']
perms.exportChildren(buf, level=1, pretty_print=False)
id_perms_xml = buf.getvalue()
buf.close()
meta = str(Metadata(self._IPERMS_NAME, '',
{'ifmap-cardinality': 'singleValue'},
ns_prefix='contrail', elements=id_perms_xml))
self._publish_id_self_meta("contrail:config-root:root", meta)
# end __init__
def get_imid_handler(self):
return self._imid_handler
# end get_imid_handler
# Parse ifmap-server returned search results and create list of tuples
# of (ident-1, ident-2, link-attribs)
def parse_result_items(self, srch_result, my_imid):
xpath_expr = \
'/env:Envelope/env:Body/ifmap:response/searchResult/resultItem'
result_items = self._parse(srch_result, xpath_expr)
return cfgm_common.imid.parse_result_items(result_items, my_imid)
# end parse_result_items
# In list of (ident-1, ident-2, link-attribs) tuples, return list of
# ifmap-ids of other idents
def get_others_in_result_list(self, result_list, my_imid):
other_imid_list = []
for result_elem in result_list:
ident_1, ident_2, meta = result_elem
if (ident_1 is None) or (ident_2 is None):
continue
other_imid = None
if ident_1.attrib['name'] == my_imid:
other_imid = ident_2.attrib['name']
elif ident_2.attrib['name'] == my_imid:
other_imid = ident_1.attrib['name']
other_imid_list.append(other_imid)
return other_imid_list
# end get_others_in_result_list
def _ensure_port_not_listened(self, server_ip, server_port):
try:
s = socket.create_connection((server_ip, server_port))
s.close()
print "IP %s port %s already listened on"\
% (server_ip, server_port)
except Exception as err:
if err.errno == errno.ECONNREFUSED:
return # all is well
# end _ensure_port_not_listened
def _block_till_port_listened(self, server_name, server_ip, server_port):
svr_running = False
while not svr_running:
try:
s = socket.create_connection((server_ip, server_port))
s.close()
svr_running = True
except Exception as err:
if err.errno == errno.ECONNREFUSED:
print "%s not up, retrying in 2 secs" % (server_name)
time.sleep(2)
else:
raise err
# end _block_till_port_listened
# launch ifmap server
def _launch_mapserver(self, ifmap_srv_ip, ifmap_srv_port, ifmap_srv_loc):
print 'Starting IFMAP server ...'
self._ensure_port_not_listened(ifmap_srv_ip, ifmap_srv_port)
logf_out = open('ifmap-server.out', 'w')
logf_err = open('ifmap-server.err', 'w')
self._mapserver = subprocess.Popen(['java', '-jar', 'build/irond.jar'],
cwd=ifmap_srv_loc, stdout=logf_out,
stderr=logf_err)
self._block_till_port_listened(
'ifmap-server', ifmap_srv_ip, ifmap_srv_port)
# end _launch_mapserver
# Helper routines for IFMAP
def _publish_id_self_meta(self, self_imid, meta):
mapclient = self._mapclient
pubreq = PublishRequest(mapclient.get_session_id(),
str(PublishUpdateOperation(
id1=str(Identity(
name=self_imid,
type="other",
other_type="extended")),
metadata=meta,
lifetime='forever')))
result = mapclient.call('publish', pubreq)
# end _publish_id_self_meta
def _delete_id_self_meta(self, self_imid, meta_name):
mapclient = self._mapclient
pubreq = PublishRequest(mapclient.get_session_id(),
str(PublishDeleteOperation(
id1=str(Identity(
name=self_imid,
type="other",
other_type="extended")),
filter=meta_name)))
result = mapclient.call('publish', pubreq)
# end _delete_id_self_meta
def _publish_id_pair_meta(self, id1, id2, metadata):
mapclient = self._mapclient
pubreq = PublishRequest(mapclient.get_session_id(),
str(PublishUpdateOperation(
id1=str(Identity(name=id1,
type="other",
other_type="extended")),
id2=str(Identity(name=id2,
type="other",
other_type="extended")),
metadata=metadata,
lifetime='forever')))
result = mapclient.call('publish', pubreq)
# end _publish_id_pair_meta
def _delete_id_pair_meta(self, id1, id2, metadata):
mapclient = self._mapclient
pubreq = PublishRequest(mapclient.get_session_id(),
str(PublishDeleteOperation(
id1=str(Identity(
name=id1,
type="other",
other_type="extended")),
id2=str(Identity(
name=id2,
type="other",
other_type="extended")),
filter=metadata)))
result = mapclient.call('publish', pubreq)
# end _delete_id_pair_meta
def _search(self, start_id, match_meta=None, result_meta=None,
max_depth=1):
        # set ifmap search parameters
srch_params = {}
srch_params['max-depth'] = str(max_depth)
if match_meta is not None:
srch_params['match-links'] = match_meta
if result_meta is not None:
# all => don't set result-filter, so server returns all id + meta
if result_meta == "all":
pass
else:
srch_params['result-filter'] = result_meta
else:
# default to return match_meta metadata types only
srch_params['result-filter'] = match_meta
mapclient = self._mapclient
srch_req = SearchRequest(mapclient.get_session_id(), start_id,
search_parameters=srch_params
)
result = mapclient.call('search', srch_req)
return result
# end _search
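    # Illustrative example (metadata name hypothetical): calling
    #   self._search(start_id, match_meta='contrail:virtual-network-network-ipam')
    # sends a SearchRequest with parameters
    #   {'max-depth': '1',
    #    'match-links': 'contrail:virtual-network-network-ipam',
    #    'result-filter': 'contrail:virtual-network-network-ipam'}
    # since result_meta defaults to the match_meta metadata types.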
def _parse(self, srch_result, xpath_expr):
soap_doc = etree.parse(StringIO.StringIO(srch_result))
result_items = soap_doc.xpath(xpath_expr,
namespaces=self._NAMESPACES)
return result_items
# end _parse
def _search_and_parse(self, start_id, xpath_expr,
match_meta=None, result_meta=None, max_depth=0):
result = self._search(start_id, match_meta, result_meta, max_depth)
result_items = self._parse(result, xpath_expr)
return result_items
# end _search_and_parse
def _get_id_meta_refs(self, result_items, self_type, parent_type):
# Given parsed result items from search, returns # of idents + metadata
# referring to this ident (incl self + parent). In addition, parent's
# name and names of non-parent, non-self idents referring to this ident
# are returned. TODO should this be moved to cfgm/common
ref_cnt = 0
ref_set = set()
ref_names = ""
parent_imid = ""
imid = self._imid_handler
for r_item in result_items:
if r_item.tag == 'identity':
ident_name = r_item.attrib['name']
ident_type = cfgm_common.imid.ifmap_id_to_type(ident_name)
# No action if already encountered
if ident_name in ref_set:
continue
ref_cnt = ref_cnt + 1
ref_set.add(ident_name)
if (ident_type == self_type):
continue
if (ident_type == parent_type):
parent_imid = r_item.attrib['name']
continue
# non-parent, non-self refs
ref_names = "%s %s" % (ref_names, ident_name)
elif r_item.tag == 'metadata':
# TBI figure out meta only belonging to self
ref_cnt = ref_cnt + 1
meta_elem = r_item.getchildren()[0]
meta_name = re.sub("{.*}", "", meta_elem.tag)
ref_names = "%s %s" % (ref_names, meta_name)
return ref_cnt, parent_imid, ref_names
# end _get_id_meta_refs
def fq_name_to_ifmap_id(self, obj_type, fq_name):
return cfgm_common.imid.get_ifmap_id_from_fq_name(obj_type, fq_name)
# end fq_name_to_ifmap_id
def ifmap_id_to_fq_name(self, ifmap_id):
return cfgm_common.imid.get_fq_name_from_ifmap_id(ifmap_id)
# end ifmap_id_to_fq_name
# end class VncIfmapClient
class Imid(ImidGen):
pass
# end class Imid
class VncCassandraClient(VncCassandraClientGen):
# Name to ID mapping keyspace + tables
_UUID_KEYSPACE_NAME = 'config_db_uuid'
# TODO describe layout
_OBJ_UUID_CF_NAME = 'obj_uuid_table'
# TODO describe layout
_OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
# has obj uuid as rowkey; ascii as column type; <fq_name>, <ifmap_id>
# <obj_json> <child_cf_col_name> as column values
_UUID_CF_NAME = 'uuid_table'
# has type:fq_name as rowkey; ascii as column type; <obj uuid> <ifmap_id>
# as column values
_FQ_NAME_CF_NAME = 'fq_name_table'
# has ifmap_id as rowkey; ascii as column type
# <obj uuid>, <fq_name> as column values
# ifmap_id itself is contrail:<type>:<fq-name delimited by ':'>
_IFMAP_ID_CF_NAME = 'ifmap_id_table'
# has obj uuid:<child-type> as rowkey; timeuuid column type; <child obj
# uuid> as column values
_CHILDREN_CF_NAME = 'children_table'
_SUBNET_CF_NAME = 'subnet_bitmask_table'
# Useragent datastore keyspace + tables (used by quantum plugin currently)
_USERAGENT_KEYSPACE_NAME = 'useragent'
_USERAGENT_KV_CF_NAME = 'useragent_keyval_table'
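    # Illustrative sketch of the obj_uuid_table layout implied by the helper
    # routines below (column names inferred from the prefixes used in this
    # class, not an exhaustive schema):
    #   row key: <obj uuid>
    #   columns: 'type', 'fq_name', 'prop:<prop-name>',
    #            'parent:<parent-type>:<parent-uuid>',
    #            'children:<child-type>:<child-uuid>',
    #            'ref:<ref-type>:<ref-uuid>',
    #            'backref:<obj-type>:<obj-uuid>'
    # with all column values JSON-encoded (json.dumps on write,
    # json.loads on read).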
def __init__(self, db_client_mgr, cass_srv_list, reset_config):
super(VncCassandraClient, self).__init__()
self._db_client_mgr = db_client_mgr
self._reset_config = reset_config
self._cache_uuid_to_fq_name = {}
self._cassandra_init(cass_srv_list)
# end __init__
# Helper routines for cassandra
def _cassandra_init(self, server_list):
# 1. Ensure keyspace and schema/CFs exist
# 2. Read in persisted data and publish to ifmap server
uuid_ks_name = VncCassandraClient._UUID_KEYSPACE_NAME
obj_uuid_cf_info = (VncCassandraClient._OBJ_UUID_CF_NAME, None)
obj_fq_name_cf_info = (VncCassandraClient._OBJ_FQ_NAME_CF_NAME, None)
uuid_cf_info = (VncCassandraClient._UUID_CF_NAME, None)
fq_name_cf_info = (VncCassandraClient._FQ_NAME_CF_NAME, None)
ifmap_id_cf_info = (VncCassandraClient._IFMAP_ID_CF_NAME, None)
subnet_cf_info = (VncCassandraClient._SUBNET_CF_NAME, None)
children_cf_info = (
VncCassandraClient._CHILDREN_CF_NAME, TIME_UUID_TYPE)
self._cassandra_ensure_keyspace(
server_list, uuid_ks_name,
[obj_uuid_cf_info, obj_fq_name_cf_info,
uuid_cf_info, fq_name_cf_info, ifmap_id_cf_info,
subnet_cf_info, children_cf_info])
useragent_ks_name = VncCassandraClient._USERAGENT_KEYSPACE_NAME
useragent_kv_cf_info = (VncCassandraClient._USERAGENT_KV_CF_NAME, None)
self._cassandra_ensure_keyspace(server_list, useragent_ks_name,
[useragent_kv_cf_info])
uuid_pool = pycassa.ConnectionPool(
uuid_ks_name, server_list, max_overflow=-1,
pool_timeout=300, max_retries=100, timeout=300)
useragent_pool = pycassa.ConnectionPool(
useragent_ks_name, server_list, max_overflow=-1,
pool_timeout=300, max_retries=100, timeout=300)
rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM
        self._obj_uuid_cf = pycassa.ColumnFamily(
            uuid_pool, VncCassandraClient._OBJ_UUID_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._obj_fq_name_cf = pycassa.ColumnFamily(
            uuid_pool, VncCassandraClient._OBJ_FQ_NAME_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._useragent_kv_cf = pycassa.ColumnFamily(
            useragent_pool, VncCassandraClient._USERAGENT_KV_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
        self._subnet_cf = pycassa.ColumnFamily(
            uuid_pool, VncCassandraClient._SUBNET_CF_NAME,
            read_consistency_level=rd_consistency,
            write_consistency_level=wr_consistency)
# end _cassandra_init
def _cassandra_ensure_keyspace(self, server_list,
keyspace_name, cf_info_list):
# Retry till cassandra is up
server_idx = 0
num_dbnodes = len(server_list)
connected = False
while not connected:
try:
cass_server = server_list[server_idx]
sys_mgr = SystemManager(cass_server)
connected = True
except Exception as e:
# TODO do only for
# thrift.transport.TTransport.TTransportException
server_idx = (server_idx + 1) % num_dbnodes
time.sleep(3)
if self._reset_config:
try:
sys_mgr.drop_keyspace(keyspace_name)
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
try:
sys_mgr.create_keyspace(keyspace_name, SIMPLE_STRATEGY,
{'replication_factor': str(num_dbnodes)})
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
for cf_info in cf_info_list:
try:
(cf_name, comparator_type) = cf_info
if comparator_type:
sys_mgr.create_column_family(
keyspace_name, cf_name,
comparator_type=comparator_type)
else:
sys_mgr.create_column_family(keyspace_name, cf_name)
except pycassa.cassandra.ttypes.InvalidRequestException as e:
# TODO verify only EEXISTS
print "Warning! " + str(e)
# end _cassandra_ensure_keyspace
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
bch.insert(obj_uuid, {'prop:%s' % (prop_name): json.dumps(prop_val)})
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
bch.remove(obj_uuid, columns=['prop:' + prop_name])
else:
bch.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])})
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): json.dumps(None)}
bch.insert(parent_uuid, child_col)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): json.dumps(None)}
bch.insert(child_uuid, parent_col)
# end _create_child
def _read_child(self, result, obj_uuid, child_type,
child_uuid, child_tstamp):
if '%ss' % (child_type) not in result:
result['%ss' % (child_type)] = []
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['href'] = self._db_client_mgr.generate_url(
child_type, child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_type)].append(child_info)
# end _read_child
def _delete_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
        bch.remove(parent_uuid, columns=[
            'children:%s:%s' % (child_type, child_uuid)])
# end _delete_child
def _create_ref(self, bch, obj_type, obj_uuid, ref_type,
ref_uuid, ref_data):
bch.insert(
obj_uuid, {'ref:%s:%s' %
(ref_type, ref_uuid): json.dumps(ref_data)})
if obj_type == ref_type:
bch.insert(
ref_uuid, {'ref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
else:
bch.insert(
ref_uuid, {'backref:%s:%s' %
(obj_type, obj_uuid): json.dumps(ref_data)})
# end _create_ref
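    # Illustrative example of the writes above: a ref from object O (row
    # O-uuid) to object R (row R-uuid) inserts column 'ref:<ref_type>:R-uuid'
    # on O's row and 'backref:<obj_type>:O-uuid' on R's row; when obj_type
    # equals ref_type (a same-type ref), both rows get 'ref:' columns so
    # either side can reach the other symmetrically.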
def _read_ref(self, result, obj_uuid, ref_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_type) not in result:
result['%s_refs' % (ref_type)] = []
ref_data = json.loads(ref_data_json)
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError as e:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
ref_info['attr'] = ref_data
ref_info['href'] = self._db_client_mgr.generate_url(
ref_type, ref_uuid)
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_type,
back_ref_uuid, back_ref_data_json):
if '%s_back_refs' % (back_ref_type) not in result:
result['%s_back_refs' % (back_ref_type)] = []
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = json.loads(back_ref_data_json)
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['href'] = self._db_client_mgr.generate_url(
back_ref_type, back_ref_uuid)
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_type)].append(back_ref_info)
# end _read_back_ref
def _update_ref(self, bch, obj_type, obj_uuid, ref_type,
old_ref_uuid, new_ref_infos):
if ref_type not in new_ref_infos:
# update body didn't touch this type, nop
return
if old_ref_uuid not in new_ref_infos[ref_type]:
# remove old ref
bch.remove(obj_uuid, columns=[
'ref:%s:%s' % (ref_type, old_ref_uuid)])
if obj_type == ref_type:
bch.remove(old_ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)])
else:
bch.remove(old_ref_uuid, columns=[
'backref:%s:%s' % (obj_type, obj_uuid)])
self._db_client_mgr.dbe_cache_invalidate({'uuid':
old_ref_uuid})
else:
# retain old ref with new ref attr
new_ref_data = new_ref_infos[ref_type][old_ref_uuid]
bch.insert(
obj_uuid,
{'ref:%s:%s' %
(ref_type, old_ref_uuid): json.dumps(new_ref_data)})
if obj_type == ref_type:
bch.insert(
old_ref_uuid,
{'ref:%s:%s' %
(obj_type, obj_uuid): json.dumps(new_ref_data)})
else:
bch.insert(
old_ref_uuid,
{'backref:%s:%s' %
(obj_type, obj_uuid): json.dumps(new_ref_data)})
self._db_client_mgr.dbe_cache_invalidate({'uuid':
old_ref_uuid})
# uuid has been accounted for, remove so only new ones remain
del new_ref_infos[ref_type][old_ref_uuid]
# end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_type, ref_uuid):
send = False
if bch is None:
send = True
            bch = self._obj_uuid_cf.batch()
bch.remove(obj_uuid, columns=['ref:%s:%s' % (ref_type, ref_uuid)])
if obj_type == ref_type:
bch.remove(ref_uuid, columns=[
'ref:%s:%s' % (obj_type, obj_uuid)])
else:
bch.remove(ref_uuid, columns=[
'backref:%s:%s' % (obj_type, obj_uuid)])
if send:
bch.send()
# end _delete_ref
def is_latest(self, id, tstamp):
id_perms_json = self._obj_uuid_cf.get(
id, columns=['prop:id_perms'])['prop:id_perms']
id_perms = json.loads(id_perms_json)
        return id_perms['last_modified'] == tstamp
# end is_latest
def cache_uuid_to_fq_name_add(self, id, fq_name):
self._cache_uuid_to_fq_name[id] = fq_name
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
try:
del self._cache_uuid_to_fq_name[id]
except KeyError:
pass
# end cache_uuid_to_fq_name_del
def update_last_modified(self, bch, obj_uuid, id_perms=None):
if id_perms is None:
id_perms = json.loads(self._obj_uuid_cf.get(obj_uuid, ['prop:id_perms'])['prop:id_perms'])
id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
# end update_last_modified
def uuid_to_fq_name(self, id):
try:
#TODO remove from cache on delete_notify
return self._cache_uuid_to_fq_name[id]
except KeyError:
try:
fq_name_json = self._obj_uuid_cf.get(
id, columns=['fq_name'])['fq_name']
except pycassa.NotFoundException:
raise NoIdError(id)
fq_name = json.loads(fq_name_json)
self.cache_uuid_to_fq_name_add(id, fq_name)
return fq_name
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
try:
type_json = self._obj_uuid_cf.get(id, columns=['type'])['type']
except pycassa.NotFoundException:
raise NoIdError(id)
return json.loads(type_json)
    # end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
method_name = obj_type.replace('-', '_')
fq_name_str = ':'.join(fq_name)
col_start = '%s:' % (fq_name_str)
col_fin = '%s;' % (fq_name_str)
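        # ';' is the ASCII character immediately after ':', so this
        # (column_start, column_finish) pair bounds the slice to exactly
        # the columns named '<fq_name_str>:<uuid>' in this row.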
try:
col_info_iter = self._obj_fq_name_cf.xget(
method_name, column_start=col_start, column_finish=col_fin)
except pycassa.NotFoundException:
raise NoIdError('%s %s' % (obj_type, fq_name))
col_infos = list(col_info_iter)
if len(col_infos) == 0:
raise NoIdError('%s %s' % (obj_type, fq_name))
for (col_name, col_val) in col_infos:
obj_uuid = col_name.split(':')[-1]
return obj_uuid
# end fq_name_to_uuid
def uuid_to_obj_dict(self, id):
try:
obj_cols = self._obj_uuid_cf.get(id)
except pycassa.NotFoundException:
raise NoIdError(id)
return obj_cols
# end uuid_to_obj_dict
def uuid_to_obj_perms(self, id):
try:
id_perms_json = self._obj_uuid_cf.get(
id, columns=['prop:id_perms'])['prop:id_perms']
id_perms = json.loads(id_perms_json)
except pycassa.NotFoundException:
raise NoIdError(id)
return id_perms
# end uuid_to_obj_perms
def useragent_kv_store(self, key, value):
columns = {'value': value}
self._useragent_kv_cf.insert(key, columns)
# end useragent_kv_store
def useragent_kv_retrieve(self, key):
if key:
try:
columns = self._useragent_kv_cf.get(key)
except pycassa.NotFoundException:
raise NoUserAgentKey
return columns['value']
else: # no key specified, return entire contents
kv_list = []
for ua_key, ua_cols in self._useragent_kv_cf.get_range():
kv_list.append({'key': ua_key, 'value': ua_cols['value']})
return kv_list
# end useragent_kv_retrieve
def useragent_kv_delete(self, key):
self._useragent_kv_cf.remove(key)
# end useragent_kv_delete
def subnet_add_cols(self, subnet_fq_name, col_dict):
self._subnet_cf.insert(subnet_fq_name, col_dict)
# end subnet_add_cols
def subnet_delete_cols(self, subnet_fq_name, col_names):
self._subnet_cf.remove(subnet_fq_name, col_names)
# end subnet_delete_cols
def subnet_retrieve(self, subnet_fq_name):
try:
cols_iter = self._subnet_cf.xget(subnet_fq_name)
except pycassa.NotFoundException:
# ok to fail as not all subnets will have in-use addresses
return None
cols_dict = dict((k, v) for k, v in cols_iter)
return cols_dict
# end subnet_retrieve
def subnet_delete(self, subnet_fq_name):
try:
self._subnet_cf.remove(subnet_fq_name)
except pycassa.NotFoundException:
# ok to fail as not all subnets will have bitmask allocated
return None
# end subnet_delete
def walk(self, fn):
walk_results = []
for obj_uuid, _ in self._obj_uuid_cf.get_range():
obj_cols_iter = self._obj_uuid_cf.xget(obj_uuid)
obj_cols = dict((k, v) for k, v in obj_cols_iter)
result = fn(obj_uuid, obj_cols)
if result:
walk_results.append(result)
return walk_results
# end walk
# end class VncCassandraClient
class VncKombuClient(object):
def _init_server_conn(self, rabbit_ip, rabbit_user, rabbit_password, rabbit_vhost):
while True:
try:
self._conn = kombu.Connection(hostname=rabbit_ip,
userid=rabbit_user,
password=rabbit_password,
virtual_host=rabbit_vhost)
self._obj_update_q = self._conn.SimpleQueue(self._update_queue_obj)
old_subscribe_greenlet = self._dbe_oper_subscribe_greenlet
self._dbe_oper_subscribe_greenlet = gevent.spawn(self._dbe_oper_subscribe)
if old_subscribe_greenlet:
old_subscribe_greenlet.kill()
break
except Exception as e:
print "Exception in _init_server_conn: %s" %(str(e))
time.sleep(2)
# end _init_server_conn
def __init__(self, db_client_mgr, rabbit_ip, ifmap_db, rabbit_user, rabbit_password, rabbit_vhost):
self._db_client_mgr = db_client_mgr
self._ifmap_db = ifmap_db
self._rabbit_ip = rabbit_ip
self._rabbit_user = rabbit_user
self._rabbit_password = rabbit_password
self._rabbit_vhost = rabbit_vhost
obj_upd_exchange = kombu.Exchange('vnc_config.object-update', 'fanout',
durable=False)
listen_port = self._db_client_mgr.get_server_port()
q_name = 'vnc_config.%s-%s' %(socket.gethostname(), listen_port)
self._update_queue_obj = kombu.Queue(q_name, obj_upd_exchange)
self._dbe_oper_subscribe_greenlet = None
if self._rabbit_vhost == "__NONE__":
return
self._init_server_conn(self._rabbit_ip, self._rabbit_user, self._rabbit_password, self._rabbit_vhost)
# end __init__
def _obj_update_q_put(self, *args, **kwargs):
if self._rabbit_vhost == "__NONE__":
return
while True:
try:
self._obj_update_q.put(*args, **kwargs)
break
except socket.error as e:
time.sleep(1)
self._init_server_conn(self._rabbit_ip, self._rabbit_user, self._rabbit_password, self._rabbit_vhost)
# end _obj_update_q_put
def _dbe_oper_subscribe(self):
if self._rabbit_vhost == "__NONE__":
return
self._db_client_mgr.wait_for_resync_done()
with self._conn.SimpleQueue(self._update_queue_obj) as queue:
while True:
try:
message = queue.get()
except socket.error as e:
self._init_server_conn(self._rabbit_ip, self._rabbit_user, self._rabbit_password, self._rabbit_vhost)
# never reached
continue
try:
oper_info = message.payload
print "\nNotification Message: %s\n" %(pformat(oper_info))
                    if oper_info['oper'] == 'CREATE':
                        self._dbe_create_notification(oper_info)
                    elif oper_info['oper'] == 'UPDATE':
                        self._dbe_update_notification(oper_info)
                    elif oper_info['oper'] == 'DELETE':
                        self._dbe_delete_notification(oper_info)
except Exception as e:
print "Exception in _dbe_oper_subscribe: " + str(e)
finally:
try:
message.ack()
except socket.error as e:
self._init_server_conn(self._rabbit_ip, self._rabbit_user, self._rabbit_password, self._rabbit_vhost)
# never reached
#end _dbe_oper_subscribe
def dbe_create_publish(self, obj_type, obj_ids, obj_dict):
oper_info = {'oper': 'CREATE', 'type': obj_type, 'obj_dict': obj_dict}
oper_info.update(obj_ids)
self._obj_update_q_put(oper_info, serializer='json')
# end dbe_create_publish
def _dbe_create_notification(self, obj_info):
obj_dict = obj_info['obj_dict']
r_class = self._db_client_mgr.get_resource_class(obj_info['type'])
if r_class:
r_class.dbe_create_notification(obj_info, obj_dict)
method_name = obj_info['type'].replace('-', '_')
method = getattr(self._ifmap_db, "_ifmap_%s_create" % (method_name))
(ok, result) = method(obj_info, obj_dict)
if not ok:
raise Exception(result)
#end _dbe_create_notification
def dbe_update_publish(self, obj_type, obj_ids):
oper_info = {'oper': 'UPDATE', 'type': obj_type}
oper_info.update(obj_ids)
self._obj_update_q_put(oper_info, serializer='json')
# end dbe_update_publish
def _dbe_update_notification(self, obj_info):
r_class = self._db_client_mgr.get_resource_class(obj_info['type'])
if r_class:
r_class.dbe_update_notification(obj_info)
ifmap_id = self._db_client_mgr.uuid_to_ifmap_id(obj_info['type'],
obj_info['uuid'])
(ok, result) = self._db_client_mgr.dbe_read(obj_info['type'], obj_info)
if not ok:
raise Exception(result)
new_obj_dict = result
method_name = obj_info['type'].replace('-', '_')
method = getattr(self._ifmap_db, "_ifmap_%s_update" % (method_name))
(ok, ifmap_result) = method(ifmap_id, new_obj_dict)
if not ok:
raise Exception(ifmap_result)
#end _dbe_update_notification
def dbe_delete_publish(self, obj_type, obj_ids, obj_dict):
oper_info = {'oper': 'DELETE', 'type': obj_type, 'obj_dict': obj_dict}
oper_info.update(obj_ids)
self._obj_update_q_put(oper_info, serializer='json')
# end dbe_delete_publish
def _dbe_delete_notification(self, obj_info):
obj_dict = obj_info['obj_dict']
db_client_mgr = self._db_client_mgr
db_client_mgr._cassandra_db.cache_uuid_to_fq_name_del(obj_dict['uuid'])
r_class = self._db_client_mgr.get_resource_class(obj_info['type'])
if r_class:
r_class.dbe_delete_notification(obj_info, obj_dict)
method_name = obj_info['type'].replace('-', '_')
method = getattr(self._ifmap_db, "_ifmap_%s_delete" % (method_name))
(ok, ifmap_result) = method(obj_info)
if not ok:
raise Exception(ifmap_result)
#end _dbe_delete_notification
# end class VncKombuClient
class VncZkClient(object):
_SUBNET_PATH = "/api-server/subnets"
_FQ_NAME_TO_UUID_PATH = "/fq-name-to-uuid"
def __init__(self, instance_id, zk_server_ip, reset_config):
while True:
try:
self._zk_client = ZookeeperClient("api-" + instance_id, zk_server_ip)
break
except gevent.event.Timeout as e:
pass
if reset_config:
            self._zk_client.delete_node(self._SUBNET_PATH, True)
            self._zk_client.delete_node(self._FQ_NAME_TO_UUID_PATH, True)
self._subnet_allocators = {}
# end __init__
def create_subnet_allocator(self, subnet, first, last):
# TODO handle subnet resizing change, ignore for now
if subnet not in self._subnet_allocators:
self._subnet_allocators[subnet] = IndexAllocator(
self._zk_client, self._SUBNET_PATH+'/'+subnet+'/',
size=last-first, start_idx=first, reverse=True)
# end create_subnet_allocator
def delete_subnet_allocator(self, subnet):
IndexAllocator.delete_all(self._zk_client,
self._SUBNET_PATH+'/'+subnet+'/')
# end delete_subnet_allocator
def _get_subnet_allocator(self, subnet):
return self._subnet_allocators.get(subnet)
# end _get_subnet_allocator
def subnet_alloc_req(self, subnet, addr=None):
allocator = self._get_subnet_allocator(subnet)
try:
if addr is not None:
if allocator.read(addr) is not None:
return addr
else:
return allocator.reserve(addr)
else:
return allocator.alloc()
except ResourceExhaustionError:
return None
# end subnet_alloc_req
def subnet_free_req(self, subnet, addr):
allocator = self._get_subnet_allocator(subnet)
if allocator:
allocator.delete(addr)
# end subnet_free_req
def create_fq_name_to_uuid_mapping(self, obj_type, fq_name, id):
fq_name_str = ':'.join(fq_name)
zk_path = self._FQ_NAME_TO_UUID_PATH+'/%s:%s' %(obj_type.replace('-', '_'),
fq_name_str)
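        # e.g. (hypothetical values) obj_type 'virtual-network' with fq_name
        # ['default-domain', 'default-project', 'vn1'] maps to the znode
        # '/fq-name-to-uuid/virtual_network:default-domain:default-project:vn1'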
self._zk_client.create_node(zk_path, id)
# end create_fq_name_to_uuid_mapping
def delete_fq_name_to_uuid_mapping(self, obj_type, fq_name):
fq_name_str = ':'.join(fq_name)
zk_path = self._FQ_NAME_TO_UUID_PATH+'/%s:%s' %(obj_type.replace('-', '_'),
fq_name_str)
self._zk_client.delete_node(zk_path)
# end delete_fq_name_to_uuid_mapping
# end VncZkClient
class VncDbClient(object):
def __init__(self, api_svr_mgr, ifmap_srv_ip, ifmap_srv_port, uname,
passwd, cass_srv_list,
rabbit_server, rabbit_user, rabbit_password, rabbit_vhost,
reset_config=False, ifmap_srv_loc=None,
zk_server_ip=None):
self._api_svr_mgr = api_svr_mgr
# certificate auth
ssl_options = None
if api_svr_mgr._args.use_certs:
ssl_options = {
'keyfile': api_svr_mgr._args.keyfile,
'certfile': api_svr_mgr._args.certfile,
'ca_certs': api_svr_mgr._args.ca_certs,
'cert_reqs': ssl.CERT_REQUIRED,
'ciphers': 'ALL'
}
self._db_resync_done = gevent.event.Event()
logger.info("connecting to ifmap on %s:%s as %s" % (ifmap_srv_ip, ifmap_srv_port, uname))
self._ifmap_db = VncIfmapClient(
self, ifmap_srv_ip, ifmap_srv_port,
uname, passwd, ssl_options, ifmap_srv_loc)
logger.info("connecting to cassandra on %s" % (cass_srv_list,))
self._cassandra_db = VncCassandraClient(
self, cass_srv_list, reset_config)
self._msgbus = VncKombuClient(self, rabbit_server, self._ifmap_db,
rabbit_user, rabbit_password,
rabbit_vhost)
self._zk_db = VncZkClient(api_svr_mgr._args.worker_id, zk_server_ip,
reset_config)
# end __init__
def db_resync(self):
# Read contents from cassandra and publish to ifmap
self._cassandra_db.walk(self._dbe_resync)
self._db_resync_done.set()
# end db_resync
def wait_for_resync_done(self):
self._db_resync_done.wait()
    # end wait_for_resync_done
def db_check(self):
# Read contents from cassandra and report any read exceptions
check_results = self._cassandra_db.walk(self._dbe_check)
return check_results
# end db_check
def db_read(self):
# Read contents from cassandra
read_results = self._cassandra_db.walk(self._dbe_read)
return read_results
    # end db_read
def _uuid_to_longs(self, id):
msb_id = id.int >> 64
lsb_id = id.int & ((1 << 64) - 1)
return msb_id, lsb_id
# end _uuid_to_longs
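    # Worked example: for uuid.UUID('00000000-0000-0001-0000-000000000002'),
    # id.int == (1 << 64) + 2, so msb_id == 1 and lsb_id == 2; these two
    # 64-bit halves are what set_uuid() stores as uuid_mslong/uuid_lslong.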
def set_uuid(self, obj_type, obj_dict, id, persist=True):
if persist:
# set the mapping from name to uuid in zk to ensure single creator
fq_name = obj_dict['fq_name']
self._zk_db.create_fq_name_to_uuid_mapping(obj_type, fq_name, str(id))
# set uuid in the perms meta
mslong, lslong = self._uuid_to_longs(id)
obj_dict['id_perms']['uuid'] = {}
obj_dict['id_perms']['uuid']['uuid_mslong'] = mslong
obj_dict['id_perms']['uuid']['uuid_lslong'] = lslong
obj_dict['uuid'] = str(id)
return True
# end set_uuid
def _alloc_set_uuid(self, obj_type, obj_dict):
id = uuid.uuid4()
ok = self.set_uuid(obj_type, obj_dict, id)
return (ok, obj_dict['uuid'])
# end _alloc_set_uuid
def match_uuid(self, obj_dict, obj_uuid):
new_mslong, new_lslong = self._uuid_to_longs(uuid.UUID(obj_uuid))
old_mslong = obj_dict['id_perms']['uuid']['uuid_mslong']
old_lslong = obj_dict['id_perms']['uuid']['uuid_lslong']
if new_mslong == old_mslong and new_lslong == old_lslong:
return True
return False
    # end match_uuid
def _dbe_resync(self, obj_uuid, obj_cols):
obj_type = None
try:
obj_type = json.loads(obj_cols['type'])
method = getattr(self._cassandra_db,
"_cassandra_%s_read" % (obj_type))
(ok, obj_dicts) = method([obj_uuid])
obj_dict = obj_dicts[0]
# TODO remove backward compat create mapping in zk
try:
self._zk_db.create_fq_name_to_uuid_mapping(obj_type,
obj_dict['fq_name'], obj_uuid)
except ResourceExistsError:
pass
if (obj_type == 'virtual_network' and
'logical_router_refs' in obj_dict):
for router in obj_dict['logical_router_refs']:
self._cassandra_db._delete_ref(None, obj_type, obj_uuid,
'logical_router',
router['uuid'])
except Exception as e:
self.config_object_error(
obj_uuid, None, obj_type, 'dbe_resync:cassandra_read', str(e))
return
try:
parent_type = obj_dict.get('parent_type', None)
method = getattr(self._ifmap_db, "_ifmap_%s_alloc" % (obj_type))
(ok, result) = method(parent_type, obj_dict['fq_name'])
(my_imid, parent_imid) = result
except Exception as e:
self.config_object_error(
obj_uuid, None, obj_type, 'dbe_resync:ifmap_alloc', str(e))
return
try:
obj_ids = {'uuid': obj_uuid, 'imid': my_imid,
'parent_imid': parent_imid}
method = getattr(self._ifmap_db, "_ifmap_%s_create" % (obj_type))
(ok, result) = method(obj_ids, obj_dict)
except Exception as e:
self.config_object_error(
obj_uuid, None, obj_type, 'dbe_resync:ifmap_create', str(e))
return
# end _dbe_resync
def _dbe_check(self, obj_uuid, obj_cols):
obj_type = None
try:
obj_type = json.loads(obj_cols['type'])
method = getattr(self._cassandra_db,
"_cassandra_%s_read" % (obj_type))
(ok, obj_dict) = method([obj_uuid])
except Exception as e:
return {'uuid': obj_uuid, 'type': obj_type, 'error': str(e)}
# end _dbe_check
def _dbe_read(self, obj_uuid, obj_cols):
obj_type = None
try:
obj_type = json.loads(obj_cols['type'])
method = getattr(self._cassandra_db,
"_cassandra_%s_read" % (obj_type))
(ok, obj_dict) = method([obj_uuid])
result_dict = obj_dict[0]
result_dict['type'] = obj_type
result_dict['uuid'] = obj_uuid
return result_dict
except Exception as e:
return {'uuid': obj_uuid, 'type': obj_type, 'error': str(e)}
# end _dbe_read
# Public Methods
# Returns created ifmap_id
def dbe_alloc(self, obj_type, obj_dict, uuid_requested=None):
try:
if uuid_requested:
obj_uuid = uuid_requested
ok = self.set_uuid(obj_type, obj_dict, uuid.UUID(uuid_requested), False)
else:
(ok, obj_uuid) = self._alloc_set_uuid(obj_type, obj_dict)
except ResourceExistsError:
return (409, '' + pformat(obj_dict['fq_name']) +
' already exists with uuid: ' + obj_dict['uuid'])
parent_type = obj_dict.get('parent_type', None)
method_name = obj_type.replace('-', '_')
method = getattr(self._ifmap_db, "_ifmap_%s_alloc" % (method_name))
(ok, result) = method(parent_type, obj_dict['fq_name'])
if not ok:
self.dbe_release(obj_type, obj_dict['fq_name'])
return False, result
(my_imid, parent_imid) = result
obj_ids = {
'uuid': obj_dict['uuid'],
'imid': my_imid, 'parent_imid': parent_imid}
return (True, obj_ids)
# end dbe_alloc
def dbe_create(self, obj_type, obj_ids, obj_dict):
#self._cassandra_db.uuid_create(obj_type, obj_ids, obj_dict)
method_name = obj_type.replace('-', '_')
method = getattr(
self._cassandra_db, "_cassandra_%s_create" % (method_name))
(ok, result) = method(obj_ids, obj_dict)
# publish to ifmap via msgbus
self._msgbus.dbe_create_publish(obj_type, obj_ids, obj_dict)
return (ok, result)
# end dbe_create
# input id is ifmap-id + uuid
def dbe_read(self, obj_type, obj_ids, obj_fields=None):
method_name = obj_type.replace('-', '_')
method = getattr(
self._cassandra_db, "_cassandra_%s_read" % (method_name))
try:
(ok, cassandra_result) = method([obj_ids['uuid']], obj_fields)
except NoIdError as e:
return (False, str(e))
return (ok, cassandra_result[0])
# end dbe_read
def dbe_read_multi(self, obj_type, obj_ids_list, obj_fields=None):
method_name = obj_type.replace('-', '_')
method = getattr(
self._cassandra_db, "_cassandra_%s_read" % (method_name))
try:
(ok, cassandra_result) = method([obj_id['uuid']
for obj_id in obj_ids_list],
obj_fields)
except NoIdError as e:
return (False, str(e))
return (ok, cassandra_result)
# end dbe_read_multi
def dbe_is_latest(self, obj_ids, tstamp):
try:
is_latest = self._cassandra_db.is_latest(obj_ids['uuid'], tstamp)
return (True, is_latest)
except Exception as e:
return (False, str(e))
# end dbe_is_latest
def dbe_update(self, obj_type, obj_ids, new_obj_dict):
method_name = obj_type.replace('-', '_')
method = getattr(self._cassandra_db,
"_cassandra_%s_update" % (method_name))
(ok, cassandra_result) = method(obj_ids['uuid'], new_obj_dict)
        # publish to ifmap via msgbus
self._msgbus.dbe_update_publish(obj_type, obj_ids)
return (ok, cassandra_result)
# end dbe_update
def dbe_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
obj_uuids=None, count=False):
method_name = obj_type.replace('-', '_')
method = getattr(
self._cassandra_db, "_cassandra_%s_list" % (method_name))
(ok, cassandra_result) = method(parent_uuids=parent_uuids,
back_ref_uuids=back_ref_uuids,
obj_uuids=obj_uuids,
count=count)
return (ok, cassandra_result)
# end dbe_list
def dbe_delete(self, obj_type, obj_ids, obj_dict):
method_name = obj_type.replace('-', '_')
method = getattr(
self._cassandra_db, "_cassandra_%s_delete" % (method_name))
(ok, cassandra_result) = method(obj_ids['uuid'])
        # publish to ifmap via msgbus
self._msgbus.dbe_delete_publish(obj_type, obj_ids, obj_dict)
# finally remove mapping in zk
fq_name = cfgm_common.imid.get_fq_name_from_ifmap_id(obj_ids['imid'])
self.dbe_release(obj_type, fq_name)
return ok, cassandra_result
# end dbe_delete
def dbe_release(self, obj_type, obj_fq_name):
self._zk_db.delete_fq_name_to_uuid_mapping(obj_type, obj_fq_name)
# end dbe_release
def dbe_cache_invalidate(self, obj_ids):
pass
# end dbe_cache_invalidate
def useragent_kv_store(self, key, value):
self._cassandra_db.useragent_kv_store(key, value)
# end useragent_kv_store
def useragent_kv_retrieve(self, key):
return self._cassandra_db.useragent_kv_retrieve(key)
# end useragent_kv_retrieve
def useragent_kv_delete(self, key):
return self._cassandra_db.useragent_kv_delete(key)
# end useragent_kv_delete
def subnet_alloc_req(self, subnet, addr=None):
return self._zk_db.subnet_alloc_req(subnet, addr)
# end subnet_alloc_req
def subnet_free_req(self, subnet, addr):
self._zk_db.subnet_free_req(subnet, addr)
# end subnet_free_req
def subnet_create_allocator(self, subnet, first, last):
self._zk_db.create_subnet_allocator(subnet, first, last)
# end subnet_create_allocator
def subnet_delete_allocator(self, subnet):
self._zk_db.delete_subnet_allocator(subnet)
# end subnet_delete_allocator
def uuid_vnlist(self):
return self._cassandra_db.uuid_vnlist()
# end uuid_vnlist
def uuid_to_ifmap_id(self, obj_type, id):
fq_name = self.uuid_to_fq_name(id)
return self.fq_name_to_ifmap_id(obj_type, fq_name)
# end uuid_to_ifmap_id
def fq_name_to_uuid(self, obj_type, fq_name):
obj_uuid = self._cassandra_db.fq_name_to_uuid(obj_type, fq_name)
return obj_uuid
# end fq_name_to_uuid
def uuid_to_fq_name(self, obj_uuid):
return self._cassandra_db.uuid_to_fq_name(obj_uuid)
# end uuid_to_fq_name
def uuid_to_obj_type(self, obj_uuid):
return self._cassandra_db.uuid_to_obj_type(obj_uuid)
# end uuid_to_obj_type
def ifmap_id_to_fq_name(self, ifmap_id):
return self._ifmap_db.ifmap_id_to_fq_name(ifmap_id)
# end ifmap_id_to_fq_name
def fq_name_to_ifmap_id(self, obj_type, fq_name):
return self._ifmap_db.fq_name_to_ifmap_id(obj_type, fq_name)
# end fq_name_to_ifmap_id
def uuid_to_obj_dict(self, obj_uuid):
return self._cassandra_db.uuid_to_obj_dict(obj_uuid)
# end uuid_to_obj_dict
def uuid_to_obj_perms(self, obj_uuid):
return self._cassandra_db.uuid_to_obj_perms(obj_uuid)
# end uuid_to_obj_perms
def ref_update(self, obj_type, obj_uuid, ref_type, ref_uuid, ref_data, operation):
obj_uuid_cf = self._cassandra_db._obj_uuid_cf
bch = obj_uuid_cf.batch()
if operation == 'ADD':
self._cassandra_db._create_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid, ref_data)
elif operation == 'DELETE':
self._cassandra_db._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
else:
pass
self._cassandra_db.update_last_modified(bch, obj_uuid)
bch.send()
self._msgbus.dbe_update_publish(obj_type.replace('_', '-'), {'uuid':obj_uuid})
return obj_uuid
    # end ref_update
def get_resource_class(self, resource_type):
return self._api_svr_mgr.get_resource_class(resource_type)
# end get_resource_class
# Helper routines for REST
def generate_url(self, obj_type, obj_uuid):
return self._api_svr_mgr.generate_url(obj_type, obj_uuid)
# end generate_url
def config_object_error(self, id, fq_name_str, obj_type,
operation, err_str):
self._api_svr_mgr.config_object_error(
id, fq_name_str, obj_type, operation, err_str)
# end config_object_error
def get_server_port(self):
return self._api_svr_mgr.get_server_port()
# end get_server_port
# end class VncDbClient
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from google.appengine.ext import db
from google.appengine.api import users
from soc.models.user import User
from soc.logic import accounts
from soc.logic import cleaning
from soc.logic import site
from soc.logic import user
from soc.models import user
from soc.models import group
from soc.models import school
from soc.models import student
from soc.models import organization
from tests.test_utils import GSoCDjangoTestCase
class Form(object):
"""A dummy form class for CleaningTest.
"""
def __init__(self):
"""Initialization.
"""
self.cleaned_data = {}
self._errors = {}
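# Note on the pattern under test (a sketch of the contract, inferred from how
# the tests below invoke the cleaners): each cleaning.clean_*(field_name) call
# is a factory returning a callable that Django would bind as a clean_<field>()
# form method; it reads self.cleaned_data[field_name] and either returns the
# cleaned value or raises forms.ValidationError. The dummy Form above supplies
# only the attributes such a callable touches: cleaned_data and _errors.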
class CleaningTest(GSoCDjangoTestCase):
"""Tests related to cleaning logic.
"""
def setUp(self):
"""Set up required for the cleaning logic tests.
"""
self.init()
# Ensure that current user is created
user_properties = {
'account': users.get_current_user(),
'link_id': 'current_user',
'key_name': 'current_user',
'name': 'Current User',
}
self.user = User(**user_properties)
self.user.put()
# Create another user
another_user_properties = {
'account': users.User(email="another_user@email.com"),
'link_id': 'another_user',
'key_name': 'another_user',
'name': 'Another User',
}
self.another_user = User(**another_user_properties)
self.another_user.put()
# Create a dummy form object
self.form = Form()
def testCleanEmptyField(self):
"""Tests that empty field can be cleaned.
"""
field_name = 'field'
clean_field = cleaning.clean_empty_field(field_name)
# Test that the same value will be returned, the cleaned_data of form
# does not change and there is no error message if the value of field
# is not empty
field_value = 'v1_@?'
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertEqual(clean_field(self.form), field_value)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
# Test that None will be returned, the cleaned_data of form does not change
# and there is no error message if the value of field is empty
field_value = ''
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertEqual(clean_field(self.form), None)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
def testCleanEmail(self):
"""Tests that an email is cleaned.
"""
field_name = 'email'
clean_field = cleaning.clean_email(field_name)
    # Test that the same value is returned, the cleaned_data of the form does
    # not change and there is no error message if the value of the field is a
    # valid email address
field_value = 'test@example.com'
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertEqual(clean_field(self.form), field_value)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
    # Test that forms.ValidationError is raised if the email is not valid.
field_value = '#$test&*('
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertRaises(forms.ValidationError, clean_field, self.form)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
def testCleanLinkId(self):
"""Tests that link_id field can be cleaned.
"""
field_name = 'link_id'
clean_field = cleaning.clean_link_id(field_name)
# Test that the value will be returned, the cleaned_data of form does not
# change and there is no error message if the value of field has a valid
# link_id format
field_value = 'valid_link_id'
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertEqual(clean_field(self.form), field_value)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
    # Test that forms.ValidationError will be raised, the cleaned_data of form
    # does not change and there is no error message if the value of field does
    # not have a valid link_id format
field_value = 'v1_@?'
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertRaises(forms.ValidationError, clean_field, self.form)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
def testCleanScopePath(self):
"""Tests that scope_path field can be cleaned.
"""
field_name = 'scope_path'
clean_field = cleaning.clean_scope_path(field_name)
# Test that the value will be returned if the value of field
# has a valid scope_path format
field_value = 'valid_scope_path'
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertEqual(clean_field(self.form), field_value)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
    # Test that forms.ValidationError will be raised if the value of field
    # does not have a valid scope_path format
field_value = 'v1_@?'
cleaned_data_before = {field_name: field_value}
self.form.cleaned_data = cleaned_data_before.copy()
self.assertRaises(forms.ValidationError, clean_field, self.form)
self.assertEqual(self.form.cleaned_data, cleaned_data_before)
self.assertEqual(self.form._errors, {})
def testCleanExistingUser(self):
"""Tests that the user field can be cleaned for existing users.
"""
field_name = 'user'
clean_field = cleaning.clean_existing_user(field_name)
# Test that the user will be returned if the value of field
# is an existent user's link_id
field_value = self.user.link_id
self.form.cleaned_data = {field_name: field_value}
cleaned_data_after = clean_field(self.form)
self.assertEqual(cleaned_data_after.link_id, self.user.link_id)
# Test that forms.ValidationError will be raised if the value of field
# is not an existent user's link_id
field_value = 'non_existent_user'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanUserIsCurrent(self):
"""Tests that the user field can be cleaned for current users.
"""
field_name = 'user'
clean_field = cleaning.clean_user_is_current(field_name)
# Test that the user will be returned if the value of field is
# an existent user's link_id
field_value = self.user.link_id
self.form.cleaned_data = {field_name: field_value}
cleaned_data_after = clean_field(self.form)
self.assertEqual(cleaned_data_after.link_id, self.user.link_id)
# Test that forms.ValidationError will be raised if the value of field
# is a user's link_id other than the current user's
field_value = self.another_user.link_id
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that forms.ValidationError will be raised if the value of field
# is not an existent user's link_id
field_value = 'non_existent_user'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanUserNotExist(self):
"""Tests that the user field can be cleaned for non-existent users.
"""
field_name = 'user'
clean_field = cleaning.clean_user_not_exist(field_name)
# Test that the value will be returned if the value of field
# is not an existent user's link_id
field_value = 'non_existent_user'
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), field_value)
# Test that forms.ValidationError will be raised if the value of field
# is an existent user's link_id
field_value = self.user.link_id
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanUsersNotSame(self):
"""Tests that the user field can be cleaned for non current users.
"""
field_name = 'not_current_user'
clean_field = cleaning.clean_users_not_same(field_name)
# Test that forms.ValidationError will be raised if the value of field
# is the current user's link_id
field_value = self.user.link_id
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that the user will be returned if the value of field is
# a user's link_id other than the current user
field_value = self.another_user.link_id
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form).link_id, self.another_user.link_id)
# Test that forms.ValidationError will be raised if the value of field
# is not an existent user's link_id
field_value = 'non_existent_user'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanUserAccount(self):
"""Test that user account can be cleaned.
"""
field_name = 'user_account'
clean_field = cleaning.clean_user_account(field_name)
# Test that a new account will be returned if the value of field is
# a valid new email address
field_value = 'user_name@email.com'
self.form.cleaned_data = {field_name: field_value}
cleaned_data_after = clean_field(self.form)
self.assertEqual(cleaned_data_after.email(), field_value)
# Test that the existing account will be returned if the value of field is
# an existent user's email address
field_value = self.user.account.email()
self.form.cleaned_data = {field_name: field_value}
cleaned_data_after = clean_field(self.form)
self.assertEqual(cleaned_data_after.email(), field_value)
self.assertEqual(cleaned_data_after, self.user.account)
# Test that a new account will be returned even if the value of field is
# an invalid email address
field_value = 'invalid_*mail'
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form).email(), field_value)
def testCleanUserAccountNotInUse(self):
"""Tests that user account can be cleaned for non-existent user accounts.
"""
field_name = 'user_account_not_in_use'
clean_field = cleaning.clean_user_account_not_in_use(field_name)
# Test that a new account will be created and returned
# if the value of field is a valid new email address
field_value = 'user_name@email.com'
self.form.cleaned_data = {field_name: field_value}
cleaned_data_after = clean_field(self.form)
self.assertEqual(cleaned_data_after.email(), field_value)
# Test that forms.ValidationError will be raised if the value of field is
# an existent user's email address
field_value = self.user.account.email()
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that a new account will be created and returned
# even if the value of field is an invalid email address
field_value = 'invalid_*mail'
self.form.cleaned_data = {field_name: field_value}
cleaned_data_after = clean_field(self.form)
self.assertEqual(cleaned_data_after.email(), field_value)
def testCleanValidShippingChars(self):
"""Tests that the shipping fields can be cleaned.
"""
field_name = 'ascii'
clean_field = cleaning.clean_valid_shipping_chars(field_name)
# Test that the value will be returned if the value of field is valid
field_value = 'ab12'
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), field_value)
# Test that forms.ValidationError will be raised if the value of field
# is not valid ascii
field_value = u'\ua000'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanContentLength(self):
"""Tests that content length can be cleaned.
"""
field_name = 'content_length'
clean_field = cleaning.clean_content_length(field_name, 3, 5)
# Test that the value will be returned if the length of the value of field
# is within min_length and max_length
field_value = 'a1&'
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), field_value)
# Test that forms.ValidationError will be raised if the length of the value
# of field is less than min_length
field_value = 'ab'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that forms.ValidationError will be raised if the length of the value
# of field is more than max_length
field_value = 'ab12&*'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanPhoneNumber(self):
"""Tests that phone number can be cleaned.
"""
field_name = 'phone'
clean_field = cleaning.clean_phone_number(field_name)
# Test that the phone number will be returned if it contains digits only
field_value = '0010208636479'
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), field_value)
# Test that forms.ValidationError will be raised if
# the phone number contains non digits (except '+')
field_value = '001-020-8636479'
self.form.cleaned_data[field_name] = field_value
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that the '+' will be replaced with 00 and then the modified number
# will be returned if the phone number starts with a '+'
field_value = '+10208636479'
self.form.cleaned_data[field_name] = field_value
expected = '00' + field_value[1:]
self.assertEqual(clean_field(self.form), expected)
# Test that forms.ValidationError will be raised if
# a '+' is in the middle of the phone number
field_value = '1+0208636479'
self.form.cleaned_data[field_name] = field_value
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that forms.ValidationError will be raised if
# a '+' is at the end of the phone number
field_value = '10208636479+'
self.form.cleaned_data[field_name] = field_value
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanFeedUrl(self):
"""Tests that feed url can be cleaned.
    Note: unlike the other cleaning functions, this one does not use a
    decorator, so the field name 'feed_url' is hardwired in the code.
"""
field_name = 'feed_url'
clean_field = cleaning.clean_feed_url(field_name)
# Test that the value of the feed url field will be returned if
# the value of the feed url field is an existent feed url
field_value = 'http://rss.cnn.com/rss/edition.rss'
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), field_value)
# Test that None will be returned if the value of the feed url field is
# an empty string
field_value = ''
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), None)
# Test that forms.ValidationError error will be raised if the value of
# the feed url field is not an existent feed url
field_value = 'http://example.com/invalidfeed/'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testCleanHtmlContent(self):
"""Tests that html content can be cleaned.
"""
field_name = 'html'
clean_field = cleaning.clean_html_content(field_name)
# Test that normal html can be cleaned
expected = html = '<div>f9-+@4</div>'
self.form.cleaned_data = {field_name: html}
self.assertEqual(clean_field(self.form), expected)
    # Test that the <html> wrapper tags are stripped from the content
html = '<html>f9-+@4</html>'
self.form.cleaned_data = {field_name: html}
expected = html[6:-7]
self.assertEqual(clean_field(self.form), expected)
# Test that unicode is also supported
expected = html = u'\ua000'
self.form.cleaned_data = {field_name: html}
self.assertEqual(clean_field(self.form), expected)
# Test that input with scripts will raise forms.ValidationError
html = '<script></script>'
self.form.cleaned_data = {field_name: html}
self.assertRaises(forms.ValidationError, clean_field, self.form)
# Test that input can contain scripts when the current user is a developer
self.user.is_developer = True
self.user.put()
expected = html = '<script></script>'
self.form.cleaned_data = {field_name: html}
self.assertEqual(clean_field(self.form), expected)
def testCleanUrl(self):
"""Tests that url can be cleaned.
"""
field_name = 'url'
clean_field = cleaning.clean_url(field_name)
# Test that the value of the url field will be returned
# if it is a valid url
field_value = 'http://exampleabc.com/'
self.form.cleaned_data = {field_name: field_value}
self.form.fields = {field_name: forms.URLField()}
self.assertEqual(clean_field(self.form), field_value)
# Test that None will be returned if the value of the url field
# is an empty string
field_value = ''
self.form.cleaned_data = {field_name: field_value}
self.assertEqual(clean_field(self.form), None)
# Test that forms.ValidationError error will be raised
# if the value of the url field is not a valid url
field_value = 'exampleabc'
self.form.cleaned_data = {field_name: field_value}
self.assertRaises(forms.ValidationError, clean_field, self.form)
def testStr2Set(self):
"""Tests if symbol separated strings are cleaned.
"""
string_field = 'string_field'
clean_field = cleaning.str2set(string_field, separator=',')
string_field_value = "a,b,c"
cleaned_data_before = {string_field: string_field_value}
self.form.cleaned_data = cleaned_data_before
expected = string_field_value.split(',')
self.assertEqual(clean_field(self.form), expected)
string_field_value = "a"
cleaned_data_before = {string_field: string_field_value}
self.form.cleaned_data = cleaned_data_before
expected = string_field_value.split()
self.assertEqual(clean_field(self.form), expected)
string_field_value = "a b c"
clean_field = cleaning.str2set(string_field, separator=' ')
cleaned_data_before = {string_field: string_field_value}
self.form.cleaned_data = cleaned_data_before
expected = string_field_value.split()
self.assertEqual(clean_field(self.form), expected)
string_field_value = "a, b, c, a"
clean_field = cleaning.str2set(string_field, separator=',')
cleaned_data_before = {string_field: string_field_value}
self.form.cleaned_data = cleaned_data_before
temp = string_field_value.split(',')
expected = set([char.strip() for char in temp])
actual = set(clean_field(self.form))
self.assertEqual(expected, actual)
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for rally data.
"""
import uuid
from oslo_db.sqlalchemy.compat import utils as compat_utils
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import schema
from sqlalchemy import types
from rally.common.db.sqlalchemy import types as sa_types
from rally import consts
BASE = declarative_base()
def UUID():
return str(uuid.uuid4())
class RallyBase(models.ModelBase):
metadata = None
created_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow())
updated_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(),
onupdate=lambda: timeutils.utcnow())
def save(self, session=None):
        # NOTE(LimingWu): We can't directly import the api module; that would
        # result in a cyclic import, since the api module imports this
        # module.
from rally.common.db.sqlalchemy import api as sa_api
if session is None:
session = sa_api.get_session()
super(RallyBase, self).save(session=session)
class Deployment(BASE, RallyBase):
"""Represent a deployment of OpenStack."""
__tablename__ = "deployments"
__table_args__ = (
sa.Index("deployment_uuid", "uuid", unique=True),
sa.Index("deployment_parent_uuid", "parent_uuid"),
)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
parent_uuid = sa.Column(
sa.String(36),
sa.ForeignKey(uuid, use_alter=True, name="fk_parent_uuid"),
default=None,
)
name = sa.Column(sa.String(255), unique=True)
started_at = sa.Column(sa.DateTime)
completed_at = sa.Column(sa.DateTime)
# XXX(akscram): Do we need to explicitly store a name of the
# deployment engine?
# engine_name = sa.Column(sa.String(36))
config = sa.Column(
sa_types.MutableJSONEncodedDict,
default={},
nullable=False,
)
credentials = sa.Column(types.PickleType, default=[], nullable=False)
    status = sa.Column(
        sa.Enum(*consts.DeployStatus, name="enum_deployments_status"),
        default=consts.DeployStatus.DEPLOY_INIT,
        nullable=False,
    )
parent = sa.orm.relationship(
"Deployment",
backref=sa.orm.backref("subdeploys"),
remote_side=[uuid],
foreign_keys=parent_uuid,
)
# TODO(rpromyshlennikov): remove admin after credentials refactoring
@property
def admin(self):
return self.credentials[0][1]["admin"]
@admin.setter
def admin(self, value):
pass
# TODO(rpromyshlennikov): remove users after credentials refactoring
@property
def users(self):
return self.credentials[0][1]["users"]
@users.setter
def users(self, value):
pass
class Resource(BASE, RallyBase):
"""Represent a resource of a deployment."""
__tablename__ = "resources"
__table_args__ = (
sa.Index("resource_deployment_uuid", "deployment_uuid"),
sa.Index("resource_provider_name", "deployment_uuid", "provider_name"),
sa.Index("resource_type", "deployment_uuid", "type"),
sa.Index("resource_provider_name_and_type", "deployment_uuid",
"provider_name", "type"),
)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
provider_name = sa.Column(sa.String(255))
type = sa.Column(sa.String(255))
info = sa.Column(
sa_types.MutableJSONEncodedDict,
default={},
nullable=False,
)
deployment_uuid = sa.Column(
sa.String(36),
sa.ForeignKey(Deployment.uuid),
nullable=False,
)
deployment = sa.orm.relationship(
Deployment,
backref=sa.orm.backref("resources"),
foreign_keys=deployment_uuid,
primaryjoin=(deployment_uuid == Deployment.uuid),
)
class Task(BASE, RallyBase):
"""Represents a Benchmark task."""
__tablename__ = "tasks"
__table_args__ = (
sa.Index("task_uuid", "uuid", unique=True),
sa.Index("task_status", "status"),
sa.Index("task_deployment", "deployment_uuid"),
)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
status = sa.Column(sa.Enum(*list(consts.TaskStatus),
name="enum_tasks_status"),
default=consts.TaskStatus.INIT,
nullable=False)
verification_log = sa.Column(sa.Text, default="")
tag = sa.Column(sa.String(64), default="")
deployment_uuid = sa.Column(
sa.String(36),
sa.ForeignKey(Deployment.uuid),
nullable=False,
)
deployment = sa.orm.relationship(
Deployment,
backref=sa.orm.backref("tasks"),
foreign_keys=deployment_uuid,
primaryjoin=(deployment_uuid == Deployment.uuid),
)
class TaskResult(BASE, RallyBase):
__tablename__ = "task_results"
__table_args__ = ()
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
key = sa.Column(sa_types.MutableJSONEncodedDict, nullable=False)
data = sa.Column(sa_types.BigMutableJSONEncodedDict, nullable=False)
task_uuid = sa.Column(sa.String(36), sa.ForeignKey("tasks.uuid"))
task = sa.orm.relationship(Task,
backref=sa.orm.backref("results"),
foreign_keys=task_uuid,
primaryjoin="TaskResult.task_uuid == Task.uuid")
class Verification(BASE, RallyBase):
"""Represents a verifier result."""
__tablename__ = "verifications"
__table_args__ = (
sa.Index("verification_uuid", "uuid", unique=True),
)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
deployment_uuid = sa.Column(
sa.String(36),
sa.ForeignKey(Deployment.uuid),
nullable=False,
)
status = sa.Column(sa.Enum(*list(consts.TaskStatus),
name="enum_tasks_status"),
default=consts.TaskStatus.INIT,
nullable=False)
set_name = sa.Column(sa.String(20))
tests = sa.Column(sa.Integer, default=0)
    # TODO(andreykurilin): remove this column once rally supports db
    # migrations; it is not used anywhere.
errors = sa.Column(sa.Integer, default=0)
failures = sa.Column(sa.Integer, default=0)
time = sa.Column(sa.Float, default=0.0)
class VerificationResult(BASE, RallyBase):
__tablename__ = "verification_results"
__table_args__ = ()
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
verification_uuid = sa.Column(sa.String(36),
sa.ForeignKey("verifications.uuid"))
data = sa.Column(sa_types.BigMutableJSONEncodedDict, nullable=False)
class Worker(BASE, RallyBase):
__tablename__ = "workers"
__table_args__ = (
schema.UniqueConstraint("hostname", name="uniq_worker@hostname"),
)
id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
hostname = sa.Column(sa.String(255))
# TODO(boris-42): Remove this after oslo.db > 1.4.1 is released.
def drop_all_objects(engine):
"""Drop all database objects.
Drops all database objects remaining on the default schema of the given
engine. Per-db implementations will also need to drop items specific to
those systems, such as sequences, custom types (e.g. pg ENUM), etc.
"""
with engine.begin() as conn:
inspector = sa.inspect(engine)
metadata = schema.MetaData()
tbs = []
all_fks = []
for table_name in inspector.get_table_names():
fks = []
for fk in inspector.get_foreign_keys(table_name):
if not fk["name"]:
continue
fks.append(
schema.ForeignKeyConstraint((), (), name=fk["name"]))
table = schema.Table(table_name, metadata, *fks)
tbs.append(table)
all_fks.extend(fks)
if engine.name != "sqlite":
for fkc in all_fks:
conn.execute(schema.DropConstraint(fkc))
for table in tbs:
conn.execute(schema.DropTable(table))
if engine.name == "postgresql":
if compat_utils.sqla_100:
enums = [e["name"] for e in sa.inspect(conn).get_enums()]
else:
enums = conn.dialect._load_enums(conn).keys()
for e in enums:
conn.execute("DROP TYPE %s" % e)
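# A minimal usage sketch (illustrative only; assumes a throwaway in-memory
# SQLite engine rather than anything rally configures itself):
#
#     engine = sa.create_engine("sqlite://")
#     BASE.metadata.create_all(engine)   # create the model tables
#     drop_all_objects(engine)           # drop them again, constraints first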
def drop_db():
    # NOTE(LimingWu): We can't directly import the api module; that would
    # result in a cyclic import, since the api module imports this module.
from rally.common.db.sqlalchemy import api as sa_api
drop_all_objects(sa_api.get_engine())
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with the
"--update_goldens" flag set to "True" to update the goldens when making changes
to the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import tensorflow as tf
from tensorflow._api.v2 import v2 as tf_v2
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, only_test_core_api, default False:
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no guarantee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER_V1 = 'tensorflow/tools/api/golden/v1'
_API_GOLDEN_FOLDER_V2 = 'tensorflow/tools/api/golden/v2'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
_NON_CORE_PACKAGES = ['estimator']
def _KeyToFilePath(key, api_version):
"""From a given key, construct a filepath.
Filepath will be inside golden folder for api_version.
"""
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
api_folder = (
_API_GOLDEN_FOLDER_V2 if api_version == 2 else _API_GOLDEN_FOLDER_V1)
return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub('((-[a-z]){1})', _ReplaceDashWithCaps,
base_filename_without_ext)
return api_object_key
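# An illustrative round trip between keys and golden file paths (the key used
# here is made up for the example):
#
#     _KeyToFilePath('tensorflow.train.AdamOptimizer', 1)
#     # -> 'tensorflow/tools/api/golden/v1/tensorflow.train.-adam-optimizer.pbtxt'
#     _FileNameToKey('tensorflow.train.-adam-optimizer.pbtxt')
#     # -> 'tensorflow.train.AdamOptimizer'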
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
"""A Visitor that crashes on subclasses of generated proto classes."""
# If the traversed object is a proto Message class
if not (isinstance(parent, type) and issubclass(parent, message.Message)):
return
if parent is message.Message:
return
# Check that it is a direct subclass of Message.
if message.Message not in parent.__bases__:
raise NotImplementedError(
'Object tf.%s is a subclass of a generated proto Message. '
'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
"""Filter out non-core API pbtxt files."""
filtered_file_list = []
filtered_package_prefixes = ['tensorflow.%s.' % p for p in _NON_CORE_PACKAGES]
for f in golden_file_list:
if any(
f.rsplit('/')[-1].startswith(pre) for pre in filtered_package_prefixes
):
continue
filtered_file_list.append(f)
return filtered_file_list
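# Illustrative behavior: with _NON_CORE_PACKAGES = ['estimator'], a golden file
# whose basename starts with 'tensorflow.estimator.' (e.g.
# 'v1/tensorflow.estimator.-linear-regressor.pbtxt') is filtered out, while a
# core file such as 'v1/tensorflow.train.-adam-optimizer.pbtxt' is kept.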
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False,
additional_missing_object_message='',
api_version=2):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed from golden files.
actual_dict: a ict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
update_goldens: Whether to update goldens when there are diffs found.
additional_missing_object_message: Message to print when a symbol is
missing.
api_version: TensorFlow API version to test.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed). %s' % (
key, additional_missing_object_message)
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Do not truncate diff
self.maxDiff = None # pylint: disable=invalid-name
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
messages = verbose_diffs if verbose else diffs
for i in range(diff_count):
print('Issue %d\t: %s' % (i + 1, messages[i]), file=sys.stderr)
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key, api_version)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key, api_version)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Fail if we cannot fix the test by updating goldens.
self.fail('%d differences found between API and golden.' % diff_count)
else:
logging.info('No differences found between API and golden.')
def testNoSubclassOfMessage(self):
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
# Skip compat.v1 and compat.v2 since they are validated in separate tests.
visitor.private_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
def testNoSubclassOfMessageV1(self):
if not hasattr(tf.compat, 'v1'):
return
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
if FLAGS.only_test_core_api:
visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
traverse.traverse(tf_v2.compat.v1, visitor)
def testNoSubclassOfMessageV2(self):
if not hasattr(tf.compat, 'v2'):
return
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
if FLAGS.only_test_core_api:
visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
traverse.traverse(tf.compat.v2, visitor)
def _checkBackwardsCompatibility(self,
root,
golden_file_pattern,
api_version,
additional_private_map=None):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.private_map['tf'] = ['contrib']
if api_version == 2:
public_api_visitor.private_map['tf'].append('enable_v2_behavior')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
if FLAGS.only_test_core_api:
public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
if additional_private_map:
public_api_visitor.private_map.update(additional_private_map)
traverse.traverse(root, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
golden_file_list = file_io.get_matching_files(golden_file_pattern)
if FLAGS.only_test_core_api:
golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens,
api_version=api_version)
@test_util.run_v1_only('b/120545219')
def testAPIBackwardsCompatibility(self):
api_version = 1
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
self._checkBackwardsCompatibility(
tf,
golden_file_pattern,
api_version,
# Skip compat.v1 and compat.v2 since they are validated
# in separate tests.
additional_private_map={'tf.compat': ['v1', 'v2']})
# Also check that V1 API has contrib
self.assertTrue(
'tensorflow.python.util.lazy_loader.LazyLoader'
in str(type(tf.contrib)))
@test_util.run_v1_only('b/120545219')
def testAPIBackwardsCompatibilityV1(self):
api_version = 1
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
self._checkBackwardsCompatibility(tf_v2.compat.v1, golden_file_pattern,
api_version)
def testAPIBackwardsCompatibilityV2(self):
api_version = 2
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
self._checkBackwardsCompatibility(
tf_v2,
golden_file_pattern,
api_version,
additional_private_map={'tf.compat': ['v1', 'v2']})
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
  # TODO(mikecase): Create Estimator's own API compatibility test or
  # a more general API compatibility test for use by TF components.
parser.add_argument(
'--only_test_core_api',
type=bool,
default=False,
help=_ONLY_TEST_CORE_API_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import re
from textwrap import dedent
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v1_library = gapic.ruby_library(
'language', 'v1',
artman_output_name='google-cloud-ruby/google-cloud-language'
)
s.copy(v1_library / 'acceptance')
s.copy(v1_library / 'lib/google/cloud/language/v1')
s.copy(v1_library / 'lib/google/cloud/language/v1.rb')
s.copy(v1_library / 'lib/google/cloud/language.rb')
s.copy(v1_library / 'test/google/cloud/language/v1')
s.copy(v1_library / 'README.md')
s.copy(v1_library / 'LICENSE')
s.copy(v1_library / '.gitignore')
s.copy(v1_library / '.yardopts')
s.copy(v1_library / 'google-cloud-language.gemspec', merge=ruby.merge_gemspec)
v1beta2_library = gapic.ruby_library(
'language', 'v1beta2',
artman_output_name='google-cloud-ruby/google-cloud-language'
)
s.copy(v1beta2_library / 'lib/google/cloud/language/v1beta2')
s.copy(v1beta2_library / 'lib/google/cloud/language/v1beta2.rb')
s.copy(v1beta2_library / 'test/google/cloud/language/v1beta2')
# Copy common templates
templates = gcp.CommonTemplates().ruby_library()
s.copy(templates)
# Support for service_address
s.replace(
[
'lib/google/cloud/language.rb',
'lib/google/cloud/language/v*.rb',
'lib/google/cloud/language/v*/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
s.replace(
[
'lib/google/cloud/language/v*.rb',
'lib/google/cloud/language/v*/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/language/v*.rb',
'lib/google/cloud/language/v*/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
'lib/google/cloud/language/v*/*_client.rb',
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
'lib/google/cloud/language/v*/*_client.rb',
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
# https://github.com/googleapis/gapic-generator/issues/2196
s.replace(
[
'README.md',
'lib/google/cloud/language.rb',
'lib/google/cloud/language/v1.rb',
'lib/google/cloud/language/v1beta2.rb'
],
'\\[Product Documentation\\]: https://cloud\\.google\\.com/language\n',
'[Product Documentation]: https://cloud.google.com/natural-language\n')
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/language/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
# https://github.com/googleapis/gapic-generator/issues/2393
s.replace(
'google-cloud-language.gemspec',
'gem.add_development_dependency "rubocop".*$',
'gem.add_development_dependency "rubocop", "~> 0.64.0"'
)
s.replace(
'lib/google/cloud/language/**/credentials.rb',
'\n'.join([
'"https://www.googleapis.com/auth/cloud-language",',
' "https://www.googleapis.com/auth/cloud-platform"'
]),
'"https://www.googleapis.com/auth/cloud-platform"'
)
s.replace(
'google-cloud-language.gemspec',
'gem.add_dependency "google-gax", "~> 1\\.[\\d\\.]+"',
"\n".join([
'gem.add_dependency "google-gax", "~> 1.7"',
' gem.add_dependency "googleapis-common-protos", ">= 1.3.9", "< 2.0"'
])
)
s.replace(
'google-cloud-language.gemspec',
'"README.md", "LICENSE"',
'"README.md", "AUTHENTICATION.md", "LICENSE"'
)
s.replace(
'.yardopts',
'README.md\n',
'README.md\nAUTHENTICATION.md\nLICENSE\n'
)
# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
'google-cloud-language.gemspec',
'\nGem::Specification.new do',
'require File.expand_path("../lib/google/cloud/language/version", __FILE__)\n\nGem::Specification.new do'
)
s.replace(
'google-cloud-language.gemspec',
'(gem.version\s+=\s+).\d+.\d+.\d.*$',
'\\1Google::Cloud::Language::VERSION'
)
for version in ['v1', 'v1beta2']:
s.replace(
f'lib/google/cloud/language/{version}/*_client.rb',
f'(require \".*credentials\"\n)\n',
f'\\1require "google/cloud/language/version"\n\n'
)
s.replace(
f'lib/google/cloud/language/{version}/*_client.rb',
'Gem.loaded_specs\[.*\]\.version\.version',
'Google::Cloud::Language::VERSION'
)
# Fix links for devsite migration
for file in ['lib/**/*.rb', '*.md']:
s.replace(
file,
'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
'*.md',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-language/latest/file.AUTHENTICATION.html'
)
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-language/latest/file.AUTHENTICATION.html'
)
s.replace(
'README.md',
'github.io/google-cloud-ruby/#/docs/google-cloud-language/latest/.*$',
'dev/ruby/google-cloud-language/latest'
)
from __future__ import division, print_function
from collections import defaultdict
import itertools
import operator
import re
import os
import subprocess
import tempfile
import warnings
import time
import sys
import functools
import array
try:
from scipy import sparse
except ImportError:
sparse = None
try:
import numpy as np
from .munkres import linear_assignment
except ImportError:
np = None
try:  # Python 2: alias the iterator-based builtins
range = xrange
zip = itertools.izip
values = dict.viewvalues
keys = dict.viewkeys
except NameError:
values = dict.values
keys = dict.keys
# TODO: Blanc and standard clustering metrics (e.g. http://scikit-learn.org/stable/modules/clustering.html)
# TODO: cite originating papers
# XXX: perhaps use set (or list) of sets rather than dict of sets
######## Debug mode comparison to reference implementation ########
def _get_reference_coref_scorer_path():
path = os.environ.get('COREFSCORER', None)
if path is None:
return None
if os.path.isdir(path):
path = os.path.join(path, 'scorer.pl')
if not os.path.isfile(path):
        warnings.warn('Not using coreference metric debug mode: '
                      '{} is not a file'.format(path))
return path
REFERENCE_COREF_SCORER_PATH = _get_reference_coref_scorer_path()
def _parse_reference_coref_scorer(output):
sections = output.split('\nMETRIC ')
if len(sections) > 1:
sections = sections[1:] # strip preamble
one_metric = False
else:
one_metric = True
res = {}
for section in sections:
match = re.match(r'''
.*
\s
Recall:\s
\(([^/]+)/([^)]+)\)
.*?
Precision:\s
\(([^/]+)/([^)]+)\)
''',
section,
re.DOTALL | re.VERBOSE)
r_num, r_den, p_num, p_den = map(float, match.groups())
stats = p_num, p_den, r_num, r_den
if one_metric:
return stats
else:
metric = section[:section.index(':')]
res[metric] = stats
return res
def _run_reference_coref_scorer(true, pred, metric='all'):
true_file = tempfile.NamedTemporaryFile(prefix='coreftrue', delete=False)
pred_file = tempfile.NamedTemporaryFile(prefix='corefpred', delete=False)
write_conll_coref(true, pred, true_file, pred_file)
true_file.close()
pred_file.close()
start = time.time()
output = subprocess.check_output([REFERENCE_COREF_SCORER_PATH,
metric, true_file.name,
pred_file.name])
their_time = time.time() - start
#print('Ran perl scorer', metric, 'in ', their_time, file=sys.stderr)
#print(output[-400:], file=sys.stderr)
os.unlink(true_file.name)
os.unlink(pred_file.name)
return _parse_reference_coref_scorer(output)
def _cross_check(metric):
"""A wrapper that will assert our output matches reference implementation
Applies only if the environment variable COREFSCORER points to the
reference implementation.
"""
def decorator(fn):
if REFERENCE_COREF_SCORER_PATH is None:
return fn
@functools.wraps(fn)
def wrapper(true, pred):
start = time.time()
our_results = fn(true, pred)
our_time = time.time() - start
#print('Ran our', metric, 'in ', our_time, file=sys.stderr)
ref_results = _prf(*_run_reference_coref_scorer(true, pred, metric))
for our_val, ref_val, name in zip(_prf(*our_results), ref_results, 'PRF'):
if abs(our_val - ref_val) > 1e-3:
msg = 'Our {} {}={}; reference {}={}'.format(metric,
name, our_val,
name, ref_val)
raise AssertionError(msg)
return our_results
return wrapper
return decorator
######## Utilities ########
def mapping_to_sets(mapping):
"""
Input: {cluster_item: cluster_name} dictionary
Output: {cluster_name: set([cluster_items])} dictionary
"""
s = defaultdict(set)
for m, k in mapping.items():
s[k].add(m)
s.default_factory = None # disable defaulting
return s
def sets_to_mapping(s):
"""
Input: {cluster_name: set([cluster_items])} dictionary
Output: {cluster_item: cluster_name} dictionary
"""
return {m: k for k, ms in s.items() for m in ms}
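# An illustrative round trip (mention and cluster names are made up; note that
# mapping_to_sets actually returns a defaultdict with defaulting disabled):
#
#     mapping_to_sets({'m1': 'A', 'm2': 'A', 'm3': 'B'})
#     # -> {'A': {'m1', 'm2'}, 'B': {'m3'}}
#     sets_to_mapping({'A': {'m1', 'm2'}, 'B': {'m3'}})
#     # -> {'m1': 'A', 'm2': 'A', 'm3': 'B'}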
def read_conll_coref(f):
res = defaultdict(set)
# TODO: handle annotations over document boundary
i = 0
stack = []
for l in f:
if l.startswith('#'):
continue
l = l.split()
if not l:
assert not stack
continue
i += 1
tag = l[-1]
closed_here = []
for match in re.finditer(r'\(?[0-9]+\)?', tag):
match = match.group()
cid = match.strip('()')
if match.startswith('('):
stack.append((cid, i))
if match.endswith(')'):
start_cid, start = stack.pop()
assert start_cid == cid
closed_here.append((cid, start))
# keep only one mention of those with identical spans
for _, mentions in itertools.groupby(closed_here,
operator.itemgetter(1)):
cid, start = list(mentions)[-1] # keep the outermost
res[cid].add((start, i))
res.default_factory = None # disable defaulting
return res
def write_conll_coref(true, pred, true_file, pred_file):
"""Artificially aligns mentions as CoNLL coreference data
"""
# relabel clusters
true = {'({})'.format(i + 1): s for i, s in enumerate(values(true))}
pred = {'({})'.format(i + 1): s for i, s in enumerate(values(pred))}
# make lookups
true_mapping = sets_to_mapping(true)
pred_mapping = sets_to_mapping(pred)
# headers
print('#begin document (XX); part 000', file=true_file)
print('#begin document (XX); part 000', file=pred_file)
# print all mentions
for mention in set(true_mapping).union(pred_mapping):
print('XX', true_mapping.get(mention, '-'), file=true_file)
print('XX', pred_mapping.get(mention, '-'), file=pred_file)
# footers
print('#end document', file=true_file)
print('#end document', file=pred_file)
def _f1(a, b):
if a + b:
return 2 * a * b / (a + b)
return 0.
def _prf(p_num, p_den, r_num, r_den):
p = p_num / p_den if p_den > 0 else 0.
r = r_num / r_den if r_den > 0 else 0.
return p, r, _f1(p, r)
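# Illustrative: _prf(3, 4, 3, 5) gives precision 0.75, recall 0.6 and
# F1 = 2 * 0.75 * 0.6 / (0.75 + 0.6) ~= 0.667.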
def twinless_adjustment(true, pred):
"""Adjusts predictions for differences in mentions
Following Cai and Strube's (SIGDIAL'10) `sys` variants on B-cubed and CEAF.
This produces a different true, pred pair for each of precision and recall
calculation.
Thus for precision:
* twinless true mentions -> pred singletons
* twinless pred singletons -> discard
* twinless pred non-singletons -> true singletons
For recall:
* twinless true -> pred singletons
* twinless pred -> discard
Returns : p_true, p_pred, r_true, r_pred
"""
true_mapping = sets_to_mapping(true)
pred_mapping = sets_to_mapping(pred)
# common: twinless true -> pred singletons
twinless_true = set(true_mapping) - set(pred_mapping)
for i, mention in enumerate(twinless_true):
pred_mapping[mention] = ('twinless_true', i)
# recall: twinless pred -> discard
r_pred = mapping_to_sets({m: k
for m, k in pred_mapping.items()
if m in true_mapping})
# precision: twinless pred singletons -> discard; non-singletons -> true
for i, (m, k) in enumerate(list(pred_mapping.items())):
if m in true_mapping:
continue
if len(pred[k]) > 1:
true_mapping[m] = ('twinless_pred', i)
else:
del pred_mapping[m]
p_true = mapping_to_sets(true_mapping)
p_pred = mapping_to_sets(pred_mapping)
return p_true, p_pred, true, r_pred
def sets_to_matrices(true, pred):
if sparse is None:
raise RuntimeError('Cannot vectorize without scipy')
# TODO: perhaps cache vectorized `true`
vocabulary = defaultdict(None)
vocabulary.default_factory = vocabulary.__len__
true_indptr = array.array('i', [0])
true_indices = array.array('i')
for true_cluster in values(true):
for item in true_cluster:
true_indices.append(vocabulary[item])
true_indptr.append(len(vocabulary))
pred_indptr = array.array('i', [0])
pred_indices = array.array('i')
for pred_cluster in values(pred):
for item in pred_cluster:
pred_indices.append(vocabulary[item])
pred_indptr.append(len(pred_indices))
true_data = np.ones(len(true_indices), dtype=int)
true_matrix = sparse.csr_matrix((true_data, true_indices, true_indptr),
shape=(len(true), len(vocabulary)))
pred_data = np.ones(len(pred_indices), dtype=int)
pred_matrix = sparse.csr_matrix((pred_data, pred_indices, pred_indptr),
shape=(len(pred), len(vocabulary)))
#true_matrix.check_format(full_check=True)
#pred_matrix.check_format(full_check=True)
return true_matrix, pred_matrix, vocabulary
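# The returned matrices are cluster-by-mention incidence matrices over a shared
# mention vocabulary, so true_matrix * pred_matrix.T yields a matrix of
# pairwise cluster overlap counts (used by the vectorized similarities below).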
######## Cluster comparison ########
def dice(a, b):
"""
"Entity-based" measure in CoNLL; #4 in CEAF paper
"""
if a and b:
return len(a & b) / (len(a) + len(b))
return 0.
def _vectorized_dice(true_matrix, pred_matrix):
overlap = _vectorized_overlap(true_matrix, pred_matrix).astype(float)
# The following should be no-ops
assert overlap.format == true_matrix.format == pred_matrix.format == 'csr'
true_sizes = np.diff(true_matrix.indptr)
pred_sizes = np.diff(pred_matrix.indptr)
denom = np.repeat(true_sizes, np.diff(overlap.indptr))
denom += pred_sizes.take(overlap.indices)
overlap.data /= denom
return overlap
dice.vectorized = _vectorized_dice
def overlap(a, b):
"""Intersection of sets
"Mention-based" measure in CoNLL; #3 in CEAF paper
"""
return len(a & b)
def _vectorized_overlap(true_matrix, pred_matrix):
return true_matrix * pred_matrix.T
overlap.vectorized = _vectorized_overlap
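# Illustrative values for a = {1, 2, 3} and b = {2, 3, 4}:
#
#     overlap(a, b)  # -> 2
#     dice(a, b)     # -> 2 / 6 ~= 0.333 (note: no factor of 2, unlike the
#                    # textbook Dice coefficient; the scaling cancels in CEAF)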
######## Coreference metrics ########
class OptionalDependencyWarning(Warning):
pass
def _disjoint_max_assignment(similarities):
global sparse
if sparse is None:
if hasattr(similarities, 'toarray'):
            # scipy may be installed but too old for connected_components, in
            # which case we fall back to dense computation
similarities = similarities.toarray()
start = time.time()
indices = linear_assignment(-similarities)
runtime = time.time() - start
if runtime > 1:
warnings.warn('The assignment step in CEAF took a long time. '
'We may be able to calculate it faster if you '
'install scipy.', OptionalDependencyWarning)
return similarities[indices[:, 0], indices[:, 1]].sum()
# form n*n adjacency matrix
where_true, where_pred = similarities.nonzero()
where_pred = where_pred + similarities.shape[0]
n = sum(similarities.shape)
A = sparse.coo_matrix((np.ones(len(where_true)), (where_true, where_pred)),
shape=(n, n))
try:
n_components, components = sparse.csgraph.connected_components(A, directed=False)
except (AttributeError, TypeError):
        warnings.warn('Could not use scipy.sparse.csgraph.connected_components. '
                      'Please update your scipy installation. '
                      'Calculating max-score assignment the slow way.')
# HACK!
sparse = None
return _disjoint_max_assignment(similarities)
if hasattr(similarities, 'toarray'):
# faster to work in dense
similarities = similarities.toarray()
total = 0
for i in range(n_components):
mask = components == i
component_true = np.flatnonzero(mask[:similarities.shape[0]])
component_pred = np.flatnonzero(mask[similarities.shape[0]:])
component_sim = similarities[component_true, :][:, component_pred]
if component_sim.shape == (1, 1):
total += component_sim[0, 0]
else:
indices = linear_assignment(-component_sim)
total += component_sim[indices[:, 0], indices[:, 1]].sum()
#assert total == similarities[tuple(linear_assignment(-similarities).T)].sum()
return total
def ceaf(true, pred, similarity=dice):
"Luo (2005). On coreference resolution performance metrics. In EMNLP."
if np is None:
warnings.warn('numpy is required to calculate CEAF. '
'Returning 0', OptionalDependencyWarning)
return 0, 0, 0, 0
if sparse is None or not hasattr(similarity, 'vectorized'):
X = np.empty((len(true), len(pred)))
pred = list(values(pred))
for R, Xrow in zip(values(true), X):
Xrow[:] = [similarity(R, S) for S in pred]
p_num = r_num = _disjoint_max_assignment(X)
r_den = sum(similarity(R, R) for R in values(true))
p_den = sum(similarity(S, S) for S in pred)
else:
true, pred, _ = sets_to_matrices(true, pred)
X = similarity.vectorized(true, pred)
p_num = r_num = _disjoint_max_assignment(X)
r_den = similarity.vectorized(true, true).sum()
p_den = similarity.vectorized(pred, pred).sum()
return p_num, p_den, r_num, r_den
def cs_ceaf(true, pred, similarity=dice):
"""CEAF with twinless adjustment from Cai and Strube (2010)"""
p_true, p_pred, r_true, r_pred = twinless_adjustment(true, pred)
# XXX: there is probably a better way to do this
p_num, p_den, _, _ = ceaf(p_true, p_pred, similarity)
_, _, r_num, r_den = ceaf(r_true, r_pred, similarity)
return p_num, p_den, r_num, r_den
@_cross_check('ceafm')
def mention_ceaf(true, pred):
"Luo (2005) phi_3"
return ceaf(true, pred, similarity=overlap)
@_cross_check('ceafe')
def entity_ceaf(true, pred):
"Luo (2005) phi_4"
return ceaf(true, pred, similarity=dice)
def mention_cs_ceaf(true, pred):
return cs_ceaf(true, pred, similarity=overlap)
def entity_cs_ceaf(true, pred):
return cs_ceaf(true, pred, similarity=dice)
def _b_cubed(A, B, EMPTY=frozenset([])):
A_mapping = sets_to_mapping(A)
B_mapping = sets_to_mapping(B)
res = 0.
for m, k in A_mapping.items():
A_cluster = A.get(k, EMPTY)
res += len(A_cluster & B.get(B_mapping.get(m), EMPTY)) / len(A_cluster)
return res, len(A_mapping)
@_cross_check('bcub')
def b_cubed(true, pred):
"""
Bagga and Baldwin (1998). Algorithms for scoring coreference chains.
In LREC Linguistic Coreference Workshop.
TODO: tests
"""
p_num, p_den = _b_cubed(pred, true)
r_num, r_den = _b_cubed(true, pred)
return p_num, p_den, r_num, r_den
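# A small worked example (hand-computed for this sketch): with
# true = {1: {'a', 'b'}, 2: {'c'}} and pred = {1: {'a', 'b', 'c'}},
# b_cubed(true, pred) returns (5/3, 3, 3, 3), i.e. precision 5/9 and recall 1.0.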
def cs_b_cubed(true, pred):
"""b_cubed with twinless adjustment from Cai and Strube (2010)"""
p_true, p_pred, r_true, r_pred = twinless_adjustment(true, pred)
p_num, p_den = _b_cubed(p_pred, p_true)
r_num, r_den = _b_cubed(r_true, r_pred)
return p_num, p_den, r_num, r_den
def _positive_pairs(C):
"Return pairs of instances across all clusters in C"
return frozenset(itertools.chain(
*[itertools.combinations(sorted(c), 2) for c in C]))
def _negative_pairs(C):
return frozenset(tuple(sorted(item_pair))
for cluster_pair in itertools.combinations(C, 2)
for item_pair in itertools.product(*cluster_pair))
def _pairwise(true, pred):
"""Return numerators and denominators for precision and recall,
as well as size of symmetric difference, used in negative pairwise."""
p_num = r_num = len(true & pred)
p_den = len(pred)
r_den = len(true)
return p_num, p_den, r_num, r_den
def pairwise(true, pred):
"""Return p_num, p_den, r_num, r_den over item pairs
    As used in calculating BLANC (see Luo, Pradhan, Recasens and Hovy (2014)).
>>> pairwise({1: {'a', 'b', 'c'}, 2: {'d'}},
... {1: {'b', 'c'}, 2: {'d', 'e'}})
(1, 2, 1, 3)
"""
return _pairwise(_positive_pairs(values(true)),
_positive_pairs(values(pred)))
def _triangle(n):
return n * (n - 1) // 2
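# _triangle(n) is the number of unordered pairs among n items, e.g.
# _triangle(4) == 6.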
def pairwise_negative(true, pred):
"""Return p_num, p_den, r_num, r_den over noncoreferent item pairs
    As used in calculating BLANC (see Luo, Pradhan, Recasens and Hovy (2014)).
>>> pairwise_negative({1: {'a', 'b', 'c'}, 2: {'d'}},
... {1: {'b', 'c'}, 2: {'d', 'e'}})
(2, 4, 2, 3)
"""
true_pairs = _positive_pairs(values(true))
pred_pairs = _positive_pairs(values(pred))
n_pos_agreements = len(true_pairs & pred_pairs)
true_mapping = sets_to_mapping(true)
pred_mapping = sets_to_mapping(pred)
extra_mentions = keys(true_mapping) ^ keys(pred_mapping)
disagreements = {p for p in true_pairs ^ pred_pairs
if p[0] not in extra_mentions
and p[1] not in extra_mentions}
n_common_mentions = len(keys(true_mapping) & keys(pred_mapping))
n_neg_agreements = (_triangle(n_common_mentions) - n_pos_agreements -
len(disagreements))
# Total number of negatives in each of pred and true:
p_den = _triangle(len(pred_mapping)) - len(pred_pairs)
r_den = _triangle(len(true_mapping)) - len(true_pairs)
return n_neg_agreements, p_den, n_neg_agreements, r_den
def _slow_pairwise_negative(true, pred):
"""For testing comparison"""
return _pairwise(_negative_pairs(values(true)),
_negative_pairs(values(pred)))
def _vilain(A, B_mapping):
numerator = 0
denominator = 0
for cluster in A.values():
corresponding = set()
n_unaligned = 0
for m in cluster:
if m not in B_mapping:
n_unaligned += 1
else:
corresponding.add(B_mapping[m])
numerator += len(cluster) - n_unaligned - len(corresponding)
denominator += len(cluster) - 1
return numerator, denominator
@_cross_check('muc')
def muc(true, pred):
"""The MUC evaluation metric defined in Vilain et al. (1995)
This calculates recall error for each true cluster C as the number of
response clusters that would need to be merged in order to produce a
superset of C.
"""
p_num, p_den = _vilain(pred, sets_to_mapping(true))
r_num, r_den = _vilain(true, sets_to_mapping(pred))
return p_num, p_den, r_num, r_den
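# A small worked example (hand-computed for this sketch): with
# true = {1: {'a', 'b', 'c'}} and pred = {1: {'a', 'b'}, 2: {'c'}},
# muc(true, pred) returns (1, 1, 1, 2): precision 1.0, and recall 0.5 because
# one merge of the two response clusters would recover the true cluster.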
if REFERENCE_COREF_SCORER_PATH is not None:
if _run_reference_coref_scorer({}, {}).get('bcub') != (0., 0., 0., 0.):
        warnings.warn('Not using coreference metric debug mode: '
                      'The script is producing invalid output')
REFERENCE_COREF_SCORER_PATH = None
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser(description='CoNLL2011-2 coreference evaluator')
ap.add_argument('key_file', type=argparse.FileType('r'))
ap.add_argument('response_file', type=argparse.FileType('r'))
args = ap.parse_args()
METRICS = {
'bcubed': b_cubed,
'ceafe': entity_ceaf,
'ceafm': mention_ceaf,
'muc': muc,
'pairs': pairwise,
'negpairs': pairwise_negative,
}
key = read_conll_coref(args.key_file)
response = read_conll_coref(args.response_file)
print('Metric', 'P', 'R', 'F1', sep='\t')
for name, fn in sorted(METRICS.items()):
print(name, *('{:0.2f}'.format(100 * x) for x in _prf(*fn(key, response))), sep='\t')
from functools import partial
import logging
import six
import simplejson as json
from bravado_core import schema
from bravado_core.content_type import APP_JSON
from bravado_core.exception import SwaggerMappingError
from bravado_core.marshal import marshal_schema_object
from bravado_core.unmarshal import unmarshal_schema_object
from bravado_core.validate import validate_schema_object
log = logging.getLogger(__name__)
# 'multi' left out intentionally - http client lib should handle it
COLLECTION_FORMATS = {
'csv': ',',
'ssv': ' ',
'tsv': '\t',
'pipes': '|'
}
def stringify_body(value):
"""Json dump the value to string if not already in string
"""
if not value or isinstance(value, six.string_types):
return value
return json.dumps(value)
class Param(object):
"""Thin wrapper around a param_spec dict that provides convenience functions
for commonly requested parameter information.
"""
def __init__(self, swagger_spec, op, param_spec):
"""
:type swagger_spec: :class:`bravado_core.spec.Spec`
:type op: :class:`bravado_core.operation.Operation`
:type param_spec: parameter specification in dict form
"""
self.op = op
self.swagger_spec = swagger_spec
self.param_spec = param_spec
@property
def name(self):
return self.param_spec['name']
@property
def location(self):
# not using 'in' as the name since it is a keyword in python
return self.param_spec['in']
@property
def description(self):
return self.param_spec.get('description', None)
@property
def required(self):
return self.param_spec.get('required', False)
def has_default(self):
return 'default' in self.param_spec
@property
def default(self):
return self.param_spec['default']
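# A minimal usage sketch (the param_spec dict here is made up; swagger_spec and
# op come from wherever your application already builds them):
#
#     param_spec = {'name': 'status', 'in': 'query', 'type': 'string',
#                   'required': False, 'default': 'pending'}
#     param = Param(swagger_spec, op, param_spec)
#     param.name           # -> 'status'
#     param.location       # -> 'query'
#     param.has_default()  # -> True
#     param.default        # -> 'pending'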
def get_param_type_spec(param):
"""
The spec for the parameter 'type' is not always in the same place for a
parameter. The notable exception is when the location is 'body' and the
schema for the type is in param_spec['schema']
:rtype: dict
:return: the param spec that contains 'type'
:raises: SwaggerMappingError when param location is not valid
"""
location = param.location
if location in ('path', 'query', 'header', 'formData'):
return param.param_spec
if location == 'body':
return param.param_spec['schema']
raise SwaggerMappingError(
"Don't know how to handle location {0}".format(location))
def marshal_param(param, value, request):
"""
Given an operation parameter and its value, marshal the value and place it
in the proper request destination.
Destination is one of:
- path - can accept primitive and array of primitive types
- query - can accept primitive and array of primitive types
- header - can accept primitive and array of primitive types
- body - can accept any type
- formData - can accept primitive and array of primitive types
:type param: :class:`bravado_core.param.Param`
:param value: The value to assign to the parameter
:type request: dict
"""
spec = get_param_type_spec(param)
location = param.location
value = marshal_schema_object(param.swagger_spec, spec, value)
if param.swagger_spec.config['validate_requests']:
validate_schema_object(spec, value)
if spec['type'] == 'array' and location != 'body':
value = marshal_collection_format(spec, value)
if location == 'path':
token = u'{%s}' % param.name
# Don't do any escaping/encoding - http_client will take care of it
request['url'] = request['url'].replace(token, six.text_type(value))
elif location == 'query':
request['params'][param.name] = value
elif location == 'header':
request['headers'][param.name] = value
elif location == 'formData':
if spec['type'] == 'file':
add_file(param, value, request)
else:
request.setdefault('data', {})[param.name] = value
elif location == 'body':
request['headers']['Content-Type'] = APP_JSON
request['data'] = json.dumps(value)
else:
raise SwaggerMappingError(
"Don't know how to marshal_param with location {0}".
format(location))
def unmarshal_param(param, request):
"""Unmarshal the given parameter from the passed in request like object.
:type param: :class:`bravado_core.param.Param`
:type request: :class:`bravado_core.request.RequestLike`
"""
param_spec = get_param_type_spec(param)
location = param.location
cast_param = partial(cast_request_param, param_spec['type'], param.name)
default_value = schema.get_default(param_spec)
if location == 'path':
raw_value = cast_param(request.path.get(param.name, None))
elif location == 'query':
raw_value = cast_param(request.query.get(param.name, default_value))
elif location == 'header':
raw_value = cast_param(request.headers.get(param.name, default_value))
elif location == 'formData':
if param_spec['type'] == 'file':
raw_value = request.files.get(param.name, None)
else:
raw_value = cast_param(request.form.get(param.name, default_value))
elif location == 'body':
# TODO: verify content-type header
raw_value = request.json()
else:
raise SwaggerMappingError(
"Don't know how to unmarshal_param with location {0}".
format(location))
if param_spec['type'] == 'array' and location != 'body':
raw_value = unmarshal_collection_format(param_spec, raw_value)
if param.swagger_spec.config['validate_requests']:
validate_schema_object(param_spec, raw_value)
value = unmarshal_schema_object(param.swagger_spec, param_spec, raw_value)
return value
CAST_TYPE_TO_FUNC = {
'integer': int,
'number': float,
'boolean': bool
}
def cast_request_param(param_type, param_name, param_value):
"""Try to cast a request param (e.g. query arg, POST data) from a string to
its specified type in the schema. This allows validating non-string params.
    :param param_type: name of the type to cast to
:type param_type: string
:param param_name: param name
:type param_name: string
:param param_value: param value
:type param_value: string
"""
if param_value is None:
return None
try:
return CAST_TYPE_TO_FUNC.get(param_type, lambda x: x)(param_value)
except ValueError:
        log.warning("Failed to cast %s value of %s to %s",
                    param_name, param_value, param_type)
# Ignore type error, let jsonschema validation handle incorrect types
return param_value
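# Illustrative behavior:
#
#     cast_request_param('integer', 'limit', '10')   # -> 10
#     cast_request_param('integer', 'limit', 'ten')  # -> 'ten' (logged; left
#                                                    #    for jsonschema)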
def add_file(param, value, request):
"""Add a parameter of type 'file' to the given request.
:type param: :class;`bravado_core.param.Param`
:param value: The raw content of the file to be uploaded
:type request: dict
"""
if request.get('files') is None:
# support multiple files by default by setting to an empty array
request['files'] = []
# The http client should take care of setting the content-type header
# to 'multipart/form-data'. Just verify that the swagger spec is
# conformant
expected_mime_type = 'multipart/form-data'
# TODO: Remove after https://github.com/Yelp/swagger_spec_validator/issues/22 is implemented # noqa
if expected_mime_type not in param.op.consumes:
raise SwaggerMappingError((
"Mime-type '{0}' not found in list of supported mime-types for "
"parameter '{1}' on operation '{2}': {3}").format(
expected_mime_type,
param.name,
param.op.operation_id,
param.op.consumes
))
file_tuple = (param.name, (param.name, value))
request['files'].append(file_tuple)
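# Resulting entry sketch: for a file param named 'report', request['files']
# ends up containing ('report', ('report', <raw content>)), i.e. the
# requests-style (field, (filename, content)) tuple, with the param name
# reused as the filename because the raw upload carries no filename of its own.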
def marshal_collection_format(spec, value):
"""
For an array, apply the collection format and return the result.
:param spec: spec of the parameter with 'type': 'array'
    :param value: array value of the parameter
:return: transformed value as a string
"""
collection_format = spec.get('collectionFormat', 'csv')
if collection_format == 'multi':
# http client lib should handle this
return value
sep = COLLECTION_FORMATS[collection_format]
return sep.join(str(element) for element in value)
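# Example (sketch, assuming COLLECTION_FORMATS maps 'pipes' to '|'):
#     spec = {'type': 'array', 'collectionFormat': 'pipes'}
#     marshal_collection_format(spec, ['a', 'b', 'c'])  ->  'a|b|c'
# 'multi' is passed through untouched so the http client can repeat the key.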
def unmarshal_collection_format(spec, value):
"""
For a non-body parameter of type array, unmarshal the value into an array
of elements.
Input:
spec = {
            'name': 'status',
            'in': 'query',
            'collectionFormat': 'pipes',  # pipe-separated values
'type': 'array',
'items': {
'type': 'string',
}
}
value="pending|completed|started"
Output:
['pending', 'completed', 'started']
:param spec: spec of the parameter with 'type': 'array'
:type spec: dict
:param value: parameter value
:type value: string
:rtype: list
"""
collection_format = spec.get('collectionFormat', 'csv')
if collection_format == 'multi':
# http client lib should have already unmarshaled to an array
return value
sep = COLLECTION_FORMATS[collection_format]
return [
cast_request_param(spec['items']['type'], spec['name'], item)
for item in value.split(sep)
]
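# Example (sketch): the item type drives casting, so for
#     spec = {'name': 'ids', 'type': 'array', 'collectionFormat': 'csv',
#             'items': {'type': 'integer'}}
#     unmarshal_collection_format(spec, '1,2,3')  ->  [1, 2, 3]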
# ============================== next file ==============================
from pymeta.bootbase import BootBase as GrammarBase
import string
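# NOTE (added commentary): everything below is machine-generated bootstrap
# output from PyMeta; each rule_<name> method encodes one OMeta grammar rule
# as nested closures over the GrammarBase combinators (exactly, many, _or,
# _not, pred, listpattern, ...), threading (result, error) pairs throughout.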
class BootOMetaGrammar(GrammarBase):
globals = globals()
def rule_hspace(self):
_locals = {'self': self}
self.locals['hspace'] = _locals
def _G_or_1():
_G_exactly_1, lastError = self.exactly(' ')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('\t')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_vspace(self):
_locals = {'self': self}
self.locals['vspace'] = _locals
def _G_or_1():
_G_match_string_1, lastError = self.match_string('\r\n')
self.considerError(lastError)
return (_G_match_string_1, self.currentError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('\r')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_or_3():
_G_exactly_1, lastError = self.exactly('\n')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_or_4, lastError = self._or([_G_or_1, _G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def rule_emptyline(self):
_locals = {'self': self}
self.locals['emptyline'] = _locals
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_hspace, "hspace", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_vspace, "vspace", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
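    # Grammar sketch of the three rules above (reconstructed from the code):
    #     hspace    ::= ' ' | '\t'
    #     vspace    ::= "\r\n" | '\r' | '\n'
    #     emptyline ::= hspace* vspace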
def rule_indentation(self):
_locals = {'self': self}
self.locals['indentation'] = _locals
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_emptyline, "emptyline", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
def _G_many1_3():
_G_apply_1, lastError = self._apply(self.rule_hspace, "hspace", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many1_4, lastError = self.many(_G_many1_3, _G_many1_3())
self.considerError(lastError)
return (_G_many1_4, self.currentError)
def rule_noindentation(self):
_locals = {'self': self}
self.locals['noindentation'] = _locals
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_emptyline, "emptyline", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
def _G_not_3():
_G_apply_1, lastError = self._apply(self.rule_hspace, "hspace", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_not_4, lastError = self._not(_G_not_3)
self.considerError(lastError)
return (_G_not_4, self.currentError)
def rule_number(self):
_locals = {'self': self}
self.locals['number'] = _locals
_G_apply_1, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('-')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_barenumber, "barenumber", [])
self.considerError(lastError)
_locals['x'] = _G_apply_2
_G_python_3, lastError = eval('self.builder.exactly(-x)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_3():
_G_apply_1, lastError = self._apply(self.rule_barenumber, "barenumber", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
_G_python_2, lastError = eval('self.builder.exactly(x)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_4, lastError = self._or([_G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def rule_barenumber(self):
_locals = {'self': self}
self.locals['barenumber'] = _locals
def _G_or_1():
_G_exactly_1, lastError = self.exactly('0')
self.considerError(lastError)
def _G_or_2():
def _G_or_1():
_G_exactly_1, lastError = self.exactly('x')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('X')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
def _G_many_4():
_G_apply_1, lastError = self._apply(self.rule_hexdigit, "hexdigit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_5, lastError = self.many(_G_many_4)
self.considerError(lastError)
_locals['hs'] = _G_many_5
_G_python_6, lastError = eval("int(''.join(hs), 16)", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_3():
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_octaldigit, "octaldigit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_locals['ds'] = _G_many_2
_G_python_3, lastError = eval("int('0'+''.join(ds), 8)", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
_G_or_4, lastError = self._or([_G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def _G_or_2():
def _G_many1_1():
_G_apply_1, lastError = self._apply(self.rule_digit, "digit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many1_2, lastError = self.many(_G_many1_1, _G_many1_1())
self.considerError(lastError)
_locals['ds'] = _G_many1_2
_G_python_3, lastError = eval("int(''.join(ds))", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
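    # Grammar sketch (reconstructed): barenumber matches '0' followed by a hex
    # run ('x'/'X' hexdigit*) or an octal digit run, otherwise a plain decimal
    # digit run, and evaluates to the corresponding int.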
def rule_octaldigit(self):
_locals = {'self': self}
self.locals['octaldigit'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
def _G_pred_2():
_G_python_1, lastError = eval('x in string.octdigits', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_3, lastError = self.pred(_G_pred_2)
self.considerError(lastError)
_G_python_4, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def rule_hexdigit(self):
_locals = {'self': self}
self.locals['hexdigit'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
def _G_pred_2():
_G_python_1, lastError = eval('x in string.hexdigits', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_3, lastError = self.pred(_G_pred_2)
self.considerError(lastError)
_G_python_4, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def rule_escapedChar(self):
_locals = {'self': self}
self.locals['escapedChar'] = _locals
_G_exactly_1, lastError = self.exactly('\\')
self.considerError(lastError)
def _G_or_2():
_G_exactly_1, lastError = self.exactly('n')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\n"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_3():
_G_exactly_1, lastError = self.exactly('r')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\r"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_4():
_G_exactly_1, lastError = self.exactly('t')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\t"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_5():
_G_exactly_1, lastError = self.exactly('b')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\b"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_6():
_G_exactly_1, lastError = self.exactly('f')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\f"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_7():
_G_exactly_1, lastError = self.exactly('"')
self.considerError(lastError)
_G_python_2, lastError = eval('\'"\'', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_8():
_G_exactly_1, lastError = self.exactly("'")
self.considerError(lastError)
_G_python_2, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_9():
_G_exactly_1, lastError = self.exactly('\\')
self.considerError(lastError)
_G_python_2, lastError = eval('"\\\\"', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_10, lastError = self._or([_G_or_2, _G_or_3, _G_or_4, _G_or_5, _G_or_6, _G_or_7, _G_or_8, _G_or_9])
self.considerError(lastError)
return (_G_or_10, self.currentError)
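    # Escape mapping encoded above: \n \r \t \b \f plus literal ", ' and \\;
    # each alternative consumes the escape letter and yields the real character.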
def rule_character(self):
_locals = {'self': self}
self.locals['character'] = _locals
_G_python_1, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_many_3():
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_2():
def _G_not_1():
_G_exactly_1, lastError = self.exactly("'")
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_not_2, lastError = self._not(_G_not_1)
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
_G_many_4, lastError = self.many(_G_many_3)
self.considerError(lastError)
_locals['c'] = _G_many_4
_G_python_5, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_6, lastError = self._apply(self.rule_token, "token", [_G_python_5])
self.considerError(lastError)
_G_python_7, lastError = eval("self.builder.exactly(''.join(c))", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_7, self.currentError)
def rule_character2(self):
_locals = {'self': self}
self.locals['character2'] = _locals
_G_python_1, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_consumed_by_3():
def _G_many_1():
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_2():
def _G_not_1():
_G_exactly_1, lastError = self.exactly("'")
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_not_2, lastError = self._not(_G_not_1)
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
return (_G_many_2, self.currentError)
_G_consumed_by_4, lastError = self.consumed_by(_G_consumed_by_3)
self.considerError(lastError)
_locals['c'] = _G_consumed_by_4
_G_python_5, lastError = eval('"\'"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_6, lastError = self._apply(self.rule_token, "token", [_G_python_5])
self.considerError(lastError)
_G_python_7, lastError = eval('c', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_7, self.currentError)
def rule_range(self):
_locals = {'self': self}
self.locals['range'] = _locals
_G_apply_1, lastError = self._apply(self.rule_character2, "character2", [])
self.considerError(lastError)
_locals['c1'] = _G_apply_1
_G_python_2, lastError = eval('".."', self.globals, _locals), None
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_token, "token", [_G_python_2])
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_character2, "character2", [])
self.considerError(lastError)
_locals['c2'] = _G_apply_4
def _G_pred_5():
_G_python_1, lastError = eval('c1 < c2', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_6, lastError = self.pred(_G_pred_5)
self.considerError(lastError)
_G_python_7, lastError = eval('self.builder.range(c1, c2)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_7, self.currentError)
def rule_string(self):
_locals = {'self': self}
self.locals['string'] = _locals
_G_python_1, lastError = eval('\'"\'', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_many_3():
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_escapedChar, "escapedChar", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_2():
def _G_not_1():
_G_exactly_1, lastError = self.exactly('"')
self.considerError(lastError)
return (_G_exactly_1, self.currentError)
_G_not_2, lastError = self._not(_G_not_1)
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
_G_many_4, lastError = self.many(_G_many_3)
self.considerError(lastError)
_locals['c'] = _G_many_4
_G_python_5, lastError = eval('\'"\'', self.globals, _locals), None
self.considerError(lastError)
_G_apply_6, lastError = self._apply(self.rule_token, "token", [_G_python_5])
self.considerError(lastError)
_G_python_7, lastError = eval("self.builder.match_string(''.join(c))", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_7, self.currentError)
def rule_name(self):
_locals = {'self': self}
self.locals['name'] = _locals
_G_apply_1, lastError = self._apply(self.rule_letter, "letter", [])
self.considerError(lastError)
_locals['x'] = _G_apply_1
def _G_many_2():
_G_apply_1, lastError = self._apply(self.rule_letterOrDigit, "letterOrDigit", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_3, lastError = self.many(_G_many_2)
self.considerError(lastError)
_locals['xs'] = _G_many_3
_G_python_4, lastError = eval('xs.insert(0, x)', self.globals, _locals), None
self.considerError(lastError)
_G_python_5, lastError = eval("''.join(xs)", self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def rule_application(self):
_locals = {'self': self}
self.locals['application'] = _locals
def _G_optional_1():
_G_apply_1, lastError = self._apply(self.rule_indentation, "indentation", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_optional_2():
return (None, self.input.nullError())
_G_or_3, lastError = self._or([_G_optional_1, _G_optional_2])
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['name'] = _G_apply_4
def _G_or_5():
_G_exactly_1, lastError = self.exactly('(')
self.considerError(lastError)
_G_python_2, lastError = eval('self.applicationArgs()', self.globals, _locals), None
self.considerError(lastError)
_locals['args'] = _G_python_2
_G_python_3, lastError = eval('self.builder.apply(name, self.name, *args)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_6():
_G_python_1, lastError = eval('self.builder.apply(name, self.name)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_7, lastError = self._or([_G_or_5, _G_or_6])
self.considerError(lastError)
return (_G_or_7, self.currentError)
def rule_expr1(self):
_locals = {'self': self}
self.locals['expr1'] = _locals
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_application, "application", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_2():
_G_apply_1, lastError = self._apply(self.rule_ruleValue, "ruleValue", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_3():
_G_apply_1, lastError = self._apply(self.rule_semanticPredicate, "semanticPredicate", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_4():
_G_apply_1, lastError = self._apply(self.rule_semanticAction, "semanticAction", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_5():
_G_apply_1, lastError = self._apply(self.rule_number, "number", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_6():
_G_apply_1, lastError = self._apply(self.rule_range, "range", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_7():
_G_apply_1, lastError = self._apply(self.rule_character, "character", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_8():
_G_apply_1, lastError = self._apply(self.rule_string, "string", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
def _G_or_9():
_G_python_1, lastError = eval("'('", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval("')'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_token, "token", [_G_python_4])
self.considerError(lastError)
_G_python_6, lastError = eval('e', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_10():
_G_python_1, lastError = eval("'['", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval("']'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_token, "token", [_G_python_4])
self.considerError(lastError)
_G_python_6, lastError = eval('self.builder.listpattern(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_11():
_G_python_1, lastError = eval("'<'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval("'>'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_token, "token", [_G_python_4])
self.considerError(lastError)
_G_python_6, lastError = eval('self.builder.consumedby(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_12():
_G_python_1, lastError = eval("'@<'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval("'>'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_token, "token", [_G_python_4])
self.considerError(lastError)
_G_python_6, lastError = eval('self.builder.index_consumedby(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
_G_or_13, lastError = self._or([_G_or_1, _G_or_2, _G_or_3, _G_or_4, _G_or_5, _G_or_6, _G_or_7, _G_or_8, _G_or_9, _G_or_10, _G_or_11, _G_or_12])
self.considerError(lastError)
return (_G_or_13, self.currentError)
def rule_expr2(self):
_locals = {'self': self}
self.locals['expr2'] = _locals
def _G_or_1():
_G_python_1, lastError = eval("'~'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
def _G_or_3():
_G_python_1, lastError = eval("'~'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr2, "expr2", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval('self.builder.lookahead(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def _G_or_4():
_G_apply_1, lastError = self._apply(self.rule_expr2, "expr2", [])
self.considerError(lastError)
_locals['e'] = _G_apply_1
_G_python_2, lastError = eval('self.builder._not(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_5, lastError = self._or([_G_or_3, _G_or_4])
self.considerError(lastError)
return (_G_or_5, self.currentError)
def _G_or_2():
_G_apply_1, lastError = self._apply(self.rule_expr1, "expr1", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_expr3(self):
_locals = {'self': self}
self.locals['expr3'] = _locals
def _G_or_1():
_G_apply_1, lastError = self._apply(self.rule_expr2, "expr2", [])
self.considerError(lastError)
_locals['e'] = _G_apply_1
def _G_or_2():
_G_exactly_1, lastError = self.exactly('*')
self.considerError(lastError)
_G_python_2, lastError = eval('self.builder.many(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_3():
_G_exactly_1, lastError = self.exactly('+')
self.considerError(lastError)
_G_python_2, lastError = eval('self.builder.many1(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_4():
_G_exactly_1, lastError = self.exactly('?')
self.considerError(lastError)
_G_python_2, lastError = eval('self.builder.optional(e)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
def _G_or_5():
_G_python_1, lastError = eval('e', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_6, lastError = self._or([_G_or_2, _G_or_3, _G_or_4, _G_or_5])
self.considerError(lastError)
_locals['r'] = _G_or_6
def _G_or_7():
_G_exactly_1, lastError = self.exactly(':')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_2
_G_python_3, lastError = eval('self.builder.bind(r, n)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_8():
_G_python_1, lastError = eval('r', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_9, lastError = self._or([_G_or_7, _G_or_8])
self.considerError(lastError)
return (_G_or_9, self.currentError)
def _G_or_2():
_G_python_1, lastError = eval("':'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_3
_G_python_4, lastError = eval('self.builder.bind(self.builder.apply("anything", self.name), n)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_expr4(self):
_locals = {'self': self}
self.locals['expr4'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['ne'] = _G_apply_1
def _G_or_2():
def _G_pred_1():
_G_python_1, lastError = eval('ne', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_2, lastError = self.pred(_G_pred_1)
self.considerError(lastError)
def _G_many1_3():
_G_apply_1, lastError = self._apply(self.rule_expr3, "expr3", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many1_4, lastError = self.many(_G_many1_3, _G_many1_3())
self.considerError(lastError)
_locals['es'] = _G_many1_4
_G_python_5, lastError = eval('self.builder.sequence(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def _G_or_3():
def _G_pred_1():
_G_python_1, lastError = eval('not ne', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_2, lastError = self.pred(_G_pred_1)
self.considerError(lastError)
def _G_many_3():
_G_apply_1, lastError = self._apply(self.rule_expr3, "expr3", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_4, lastError = self.many(_G_many_3)
self.considerError(lastError)
_locals['es'] = _G_many_4
_G_python_5, lastError = eval('self.builder.sequence(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
_G_or_4, lastError = self._or([_G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def rule_expr5(self):
_locals = {'self': self}
self.locals['expr5'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['ne'] = _G_apply_1
def _G_or_2():
_G_apply_1, lastError = self._apply(self.rule_interleavePart, "interleavePart", [])
self.considerError(lastError)
_locals['e'] = _G_apply_1
def _G_many1_2():
_G_python_1, lastError = eval('"&&"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_interleavePart, "interleavePart", [])
self.considerError(lastError)
return (_G_apply_3, self.currentError)
_G_many1_3, lastError = self.many(_G_many1_2, _G_many1_2())
self.considerError(lastError)
_locals['es'] = _G_many1_3
_G_python_4, lastError = eval('es.insert(0, e)', self.globals, _locals), None
self.considerError(lastError)
_G_python_5, lastError = eval('self.builder.interleave(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def _G_or_3():
_G_python_1, lastError = eval('ne', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_expr4, "expr4", [_G_python_1])
self.considerError(lastError)
return (_G_apply_2, self.currentError)
_G_or_4, lastError = self._or([_G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def rule_interleavePart(self):
_locals = {'self': self}
self.locals['interleavePart'] = _locals
def _G_or_1():
_G_python_1, lastError = eval('"("', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('True', self.globals, _locals), None
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_expr4, "expr4", [_G_python_3])
self.considerError(lastError)
_locals['e'] = _G_apply_4
_G_python_5, lastError = eval('")"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_6, lastError = self._apply(self.rule_token, "token", [_G_python_5])
self.considerError(lastError)
_G_python_7, lastError = eval('["1", e]', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_7, self.currentError)
def _G_or_2():
_G_python_1, lastError = eval('True', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_expr4, "expr4", [_G_python_1])
self.considerError(lastError)
_locals['part'] = _G_apply_2
_G_python_3, lastError = eval('part', self.globals, _locals), None
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_modedIPart, "modedIPart", [_G_python_3])
self.considerError(lastError)
_locals['x'] = _G_apply_4
_G_python_5, lastError = eval('x', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
_G_or_3, lastError = self._or([_G_or_1, _G_or_2])
self.considerError(lastError)
return (_G_or_3, self.currentError)
def rule_modedIPart(self):
_locals = {'self': self}
self.locals['modedIPart'] = _locals
def _G_or_1():
def _G_listpattern_1():
_G_exactly_1, lastError = self.exactly('Many')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['part'] = _G_apply_2
return (_locals['part'], self.currentError)
_G_listpattern_2, lastError = self.listpattern(_G_listpattern_1)
self.considerError(lastError)
_G_python_3, lastError = eval('["*", part, None]', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_2():
def _G_listpattern_1():
_G_exactly_1, lastError = self.exactly('Many1')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['part'] = _G_apply_2
return (_locals['part'], self.currentError)
_G_listpattern_2, lastError = self.listpattern(_G_listpattern_1)
self.considerError(lastError)
_G_python_3, lastError = eval('["+", part, None]', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_3():
def _G_listpattern_1():
_G_exactly_1, lastError = self.exactly('Optional')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['part'] = _G_apply_2
return (_locals['part'], self.currentError)
_G_listpattern_2, lastError = self.listpattern(_G_listpattern_1)
self.considerError(lastError)
_G_python_3, lastError = eval('["?", part, None]', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_4():
def _G_listpattern_1():
_G_exactly_1, lastError = self.exactly('Bind')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['name'] = _G_apply_2
_G_apply_3, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['part'] = _G_apply_3
return (_locals['part'], self.currentError)
_G_listpattern_2, lastError = self.listpattern(_G_listpattern_1)
self.considerError(lastError)
_locals['e'] = _G_listpattern_2
_G_python_3, lastError = eval('part', self.globals, _locals), None
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_modedIPart, "modedIPart", [_G_python_3])
self.considerError(lastError)
_locals['newpart'] = _G_apply_4
_G_python_5, lastError = eval('newpart[:2] + [name]', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def _G_or_5():
def _G_listpattern_1():
_G_exactly_1, lastError = self.exactly('And')
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['part'] = _G_apply_2
return (_locals['part'], self.currentError)
_G_listpattern_2, lastError = self.listpattern(_G_listpattern_1)
self.considerError(lastError)
_G_python_3, lastError = eval('part', self.globals, _locals), None
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_modedIPart, "modedIPart", [_G_python_3])
self.considerError(lastError)
_locals['newpart'] = _G_apply_4
_G_python_5, lastError = eval('newpart', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_5, self.currentError)
def _G_or_6():
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['part'] = _G_apply_1
_G_python_2, lastError = eval('["1", part, None]', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_2, self.currentError)
_G_or_7, lastError = self._or([_G_or_1, _G_or_2, _G_or_3, _G_or_4, _G_or_5, _G_or_6])
self.considerError(lastError)
return (_G_or_7, self.currentError)
def rule_expr(self):
_locals = {'self': self}
self.locals['expr'] = _locals
def _G_or_1():
_G_python_1, lastError = eval('True', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_expr5, "expr5", [_G_python_1])
self.considerError(lastError)
_locals['e'] = _G_apply_2
def _G_many1_3():
_G_python_1, lastError = eval("'|'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('True', self.globals, _locals), None
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_expr5, "expr5", [_G_python_3])
self.considerError(lastError)
return (_G_apply_4, self.currentError)
_G_many1_4, lastError = self.many(_G_many1_3, _G_many1_3())
self.considerError(lastError)
_locals['es'] = _G_many1_4
_G_python_5, lastError = eval('es.insert(0, e)', self.globals, _locals), None
self.considerError(lastError)
_G_python_6, lastError = eval('self.builder._or(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_2():
_G_python_1, lastError = eval('True', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_expr5, "expr5", [_G_python_1])
self.considerError(lastError)
_locals['e'] = _G_apply_2
def _G_many1_3():
_G_python_1, lastError = eval("'||'", self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('True', self.globals, _locals), None
self.considerError(lastError)
_G_apply_4, lastError = self._apply(self.rule_expr5, "expr5", [_G_python_3])
self.considerError(lastError)
return (_G_apply_4, self.currentError)
_G_many1_4, lastError = self.many(_G_many1_3, _G_many1_3())
self.considerError(lastError)
_locals['es'] = _G_many1_4
_G_python_5, lastError = eval('es.insert(0, e)', self.globals, _locals), None
self.considerError(lastError)
_G_python_6, lastError = eval('self.builder._xor(es)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_6, self.currentError)
def _G_or_3():
_G_python_1, lastError = eval('False', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_expr5, "expr5", [_G_python_1])
self.considerError(lastError)
return (_G_apply_2, self.currentError)
_G_or_4, lastError = self._or([_G_or_1, _G_or_2, _G_or_3])
self.considerError(lastError)
return (_G_or_4, self.currentError)
def rule_ruleValue(self):
_locals = {'self': self}
self.locals['ruleValue'] = _locals
_G_python_1, lastError = eval('"->"', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.ruleValueExpr()', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_semanticPredicate(self):
_locals = {'self': self}
self.locals['semanticPredicate'] = _locals
_G_python_1, lastError = eval('"?("', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.semanticPredicateExpr()', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_semanticAction(self):
_locals = {'self': self}
self.locals['semanticAction'] = _locals
_G_python_1, lastError = eval('"!("', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_python_3, lastError = eval('self.semanticActionExpr()', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def rule_rulePart(self):
_locals = {'self': self}
self.locals['rulePart'] = _locals
_G_apply_1, lastError = self._apply(self.rule_anything, "anything", [])
self.considerError(lastError)
_locals['requiredName'] = _G_apply_1
_G_apply_2, lastError = self._apply(self.rule_noindentation, "noindentation", [])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_3
def _G_pred_4():
_G_python_1, lastError = eval('n == requiredName', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_pred_5, lastError = self.pred(_G_pred_4)
self.considerError(lastError)
_G_python_6, lastError = eval('setattr(self, "name", n)', self.globals, _locals), None
self.considerError(lastError)
_G_python_7, lastError = eval('False', self.globals, _locals), None
self.considerError(lastError)
_G_apply_8, lastError = self._apply(self.rule_expr5, "expr5", [_G_python_7])
self.considerError(lastError)
_locals['args'] = _G_apply_8
def _G_or_9():
_G_python_1, lastError = eval('"="', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_token, "token", [_G_python_1])
self.considerError(lastError)
_G_apply_3, lastError = self._apply(self.rule_expr, "expr", [])
self.considerError(lastError)
_locals['e'] = _G_apply_3
_G_python_4, lastError = eval('self.builder.sequence([args, e])', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
def _G_or_10():
_G_python_1, lastError = eval('args', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_11, lastError = self._or([_G_or_9, _G_or_10])
self.considerError(lastError)
return (_G_or_11, self.currentError)
def rule_rule(self):
_locals = {'self': self}
self.locals['rule'] = _locals
_G_apply_1, lastError = self._apply(self.rule_noindentation, "noindentation", [])
self.considerError(lastError)
def _G_lookahead_2():
_G_apply_1, lastError = self._apply(self.rule_name, "name", [])
self.considerError(lastError)
_locals['n'] = _G_apply_1
return (_locals['n'], self.currentError)
_G_lookahead_3, lastError = self.lookahead(_G_lookahead_2)
self.considerError(lastError)
_G_python_4, lastError = eval('n', self.globals, _locals), None
self.considerError(lastError)
_G_apply_5, lastError = self._apply(self.rule_rulePart, "rulePart", [_G_python_4])
self.considerError(lastError)
_locals['r'] = _G_apply_5
def _G_or_6():
def _G_many1_1():
_G_python_1, lastError = eval('n', self.globals, _locals), None
self.considerError(lastError)
_G_apply_2, lastError = self._apply(self.rule_rulePart, "rulePart", [_G_python_1])
self.considerError(lastError)
return (_G_apply_2, self.currentError)
_G_many1_2, lastError = self.many(_G_many1_1, _G_many1_1())
self.considerError(lastError)
_locals['rs'] = _G_many1_2
_G_python_3, lastError = eval('self.builder.rule(n, self.builder._or([r] + rs))', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_3, self.currentError)
def _G_or_7():
_G_python_1, lastError = eval('self.builder.rule(n, r)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_1, self.currentError)
_G_or_8, lastError = self._or([_G_or_6, _G_or_7])
self.considerError(lastError)
return (_G_or_8, self.currentError)
def rule_grammar(self):
_locals = {'self': self}
self.locals['grammar'] = _locals
def _G_many_1():
_G_apply_1, lastError = self._apply(self.rule_rule, "rule", [])
self.considerError(lastError)
return (_G_apply_1, self.currentError)
_G_many_2, lastError = self.many(_G_many_1)
self.considerError(lastError)
_locals['rs'] = _G_many_2
_G_apply_3, lastError = self._apply(self.rule_spaces, "spaces", [])
self.considerError(lastError)
_G_python_4, lastError = eval('self.builder.makeGrammar(rs)', self.globals, _locals), None
self.considerError(lastError)
return (_G_python_4, self.currentError)
# ============================== next file ==============================
from jsonrpc import ServerProxy as ServiceProxy
import sys
import string
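# NOTE (added): this is a Python 2 helper (print statements, raw_input) built
# on the jsonrpc module's ServerProxy client; each branch below prompts for a
# command's arguments interactively and forwards the call to the daemon.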
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = "gwangcoinrpc"
rpcpass = "BFZmHDbXwyjrwZeoHNJqLYPwS4LABVT8L84UzBNxAFWF"
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9776")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9776")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Gwangcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Gwangcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
        try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
        gen = raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional): ")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
# ============================== next file ==============================
"""The tests for Tasmota device triggers."""
import copy
import json
from unittest.mock import Mock, patch
from hatasmota.switch import TasmotaSwitchTriggerConfig
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.tasmota import _LOGGER
from homeassistant.components.tasmota.const import DEFAULT_PREFIX, DOMAIN
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.trigger import async_initialize_triggers
from homeassistant.setup import async_setup_component
from .test_common import DEFAULT_CONFIG
from tests.common import (
assert_lists_same,
async_fire_mqtt_message,
async_get_device_automations,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
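# The hass, device_reg, entity_reg, calls, mqtt_mock and setup_tasmota
# arguments used below are pytest fixtures from Home Assistant's test harness,
# supplying a running core, the device/entity registries, captured service
# calls, a mocked MQTT broker and a set-up tasmota integration.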
async def test_get_triggers_btn(hass, device_reg, entity_reg, mqtt_mock, setup_tasmota):
"""Test we get the expected triggers from a discovered mqtt device."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["btn"][0] = 1
config["btn"][1] = 1
config["so"]["13"] = 1
config["so"]["73"] = 1
mac = config["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_button_1_SINGLE",
"type": "button_short_press",
"subtype": "button_1",
},
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_button_2_SINGLE",
"type": "button_short_press",
"subtype": "button_2",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_get_triggers_swc(hass, device_reg, entity_reg, mqtt_mock, setup_tasmota):
"""Test we get the expected triggers from a discovered mqtt device."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_get_unknown_triggers(
hass, device_reg, entity_reg, mqtt_mock, setup_tasmota
):
"""Test we don't get unknown triggers."""
# Discover a device without device triggers
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = -1
mac = config["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_0_2",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, [])
async def test_get_non_existing_triggers(
hass, device_reg, entity_reg, mqtt_mock, setup_tasmota
):
"""Test getting non existing triggers."""
# Discover a device without device triggers
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = -1
mac = config1["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, [])
@pytest.mark.no_fail_on_log_exception
async def test_discover_bad_triggers(
hass, device_reg, entity_reg, mqtt_mock, setup_tasmota
):
"""Test exception handling when discovering trigger."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
# Trigger an exception when the entity is discovered
with patch(
"hatasmota.discovery.get_switch_triggers",
return_value=[object()],
):
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config)
)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, [])
# Trigger an exception when the entity is discovered
class FakeTrigger(TasmotaSwitchTriggerConfig):
"""Bad TasmotaSwitchTriggerConfig to cause exceptions."""
@property
def is_active(self):
return True
with patch(
"hatasmota.discovery.get_switch_triggers",
return_value=[
FakeTrigger(
event=None,
idx=1,
mac=None,
source=None,
subtype=None,
switchname=None,
trigger_topic=None,
type=None,
)
],
):
async_fire_mqtt_message(
hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config)
)
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, [])
# Rediscover without exception
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_update_remove_triggers(
hass, device_reg, entity_reg, mqtt_mock, setup_tasmota
):
"""Test triggers can be updated and removed."""
# Discover a device with toggle + hold trigger
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = 5
mac = config1["mac"]
# Discover a device with toggle + double press trigger
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["swc"][0] = 8
# Discover a device with no trigger
config3 = copy.deepcopy(DEFAULT_CONFIG)
config3["swc"][0] = -1
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
expected_triggers1 = [
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_HOLD",
"type": "button_long_press",
"subtype": "switch_1",
},
]
expected_triggers2 = copy.deepcopy(expected_triggers1)
expected_triggers2[1]["type"] = "button_double_press"
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
for expected in expected_triggers1:
assert expected in triggers
# Update trigger
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config2))
await hass.async_block_till_done()
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
for expected in expected_triggers2:
assert expected in triggers
# Remove trigger
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config3))
await hass.async_block_till_done()
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert triggers == []
async def test_if_fires_on_mqtt_message_btn(
hass, device_reg, calls, mqtt_mock, setup_tasmota
):
"""Test button triggers firing."""
# Discover a device with 2 device triggers
config = copy.deepcopy(DEFAULT_CONFIG)
config["btn"][0] = 1
config["btn"][2] = 1
config["so"]["73"] = 1
mac = config["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_button_1_SINGLE",
"type": "button_short_press",
"subtype": "button_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press_1")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_button_3_SINGLE",
"subtype": "button_3",
"type": "button_short_press",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press_3")},
},
},
]
},
)
# Fake button 1 single press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Button1":{"Action":"SINGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "short_press_1"
# Fake button 3 single press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Button3":{"Action":"SINGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "short_press_3"
async def test_if_fires_on_mqtt_message_swc(
hass, device_reg, calls, mqtt_mock, setup_tasmota
):
"""Test switch triggers firing."""
# Discover a device with 2 device triggers
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
config["swc"][1] = 0
config["swc"][2] = 9
config["swn"][2] = "custom_switch"
mac = config["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press_1")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_2_TOGGLE",
"type": "button_short_press",
"subtype": "switch_2",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press_2")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_3_HOLD",
"subtype": "switch_3",
"type": "button_double_press",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("long_press_3")},
},
},
]
},
)
# Fake switch 1 short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "short_press_1"
# Fake switch 2 short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch2":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "short_press_2"
# Fake switch 3 long press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"custom_switch":{"Action":"HOLD"}}'
)
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].data["some"] == "long_press_3"
async def test_if_fires_on_mqtt_message_late_discover(
hass, device_reg, calls, mqtt_mock, setup_tasmota
):
"""Test triggers firing of MQTT device triggers discovered after setup."""
# Discover a device without device triggers
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = -1
mac = config1["mac"]
# Discover a device with 2 device triggers
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["swc"][0] = 0
config2["swc"][3] = 9
config2["swn"][3] = "custom_switch"
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_4_HOLD",
"type": "switch_4",
"subtype": "button_double_press",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("double_press")},
},
},
]
},
)
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config2))
await hass.async_block_till_done()
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "short_press"
# Fake long press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"custom_switch":{"Action":"HOLD"}}'
)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "double_press"
async def test_if_fires_on_mqtt_message_after_update(
hass, device_reg, calls, mqtt_mock, setup_tasmota
):
"""Test triggers firing after update."""
# Discover a device with device trigger
config1 = copy.deepcopy(DEFAULT_CONFIG)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = 0
config2["swc"][0] = 0
config2["tp"][1] = "status"
mac = config1["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
# Update the trigger with different topic
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config2))
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
async_fire_mqtt_message(
hass, "tasmota_49A3BC/status/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 2
# Update the trigger with same topic
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config2))
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 2
async_fire_mqtt_message(
hass, "tasmota_49A3BC/status/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 3
async def test_no_resubscribe_same_topic(hass, device_reg, mqtt_mock, setup_tasmota):
"""Test subscription to topics without change."""
# Discover a device with device trigger
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
call_count = mqtt_mock.async_subscribe.call_count
assert call_count == 1
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
assert mqtt_mock.async_subscribe.call_count == call_count
async def test_not_fires_on_mqtt_message_after_remove_by_mqtt(
hass, device_reg, calls, mqtt_mock, setup_tasmota
):
"""Test triggers not firing after removal."""
# Discover a device with device trigger
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
# Remove the trigger
config["swc"][0] = -1
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
# Rediscover the trigger
config["swc"][0] = 0
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 2
async def test_not_fires_on_mqtt_message_after_remove_from_registry(
hass, device_reg, calls, mqtt_mock, setup_tasmota
):
"""Test triggers not firing after removal."""
# Discover a device with device trigger
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
"action": {
"service": "test.automation",
"data_template": {"some": ("short_press")},
},
},
]
},
)
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
# Remove the device
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attach_remove(hass, device_reg, mqtt_mock, setup_tasmota):
"""Test attach and removal of trigger."""
# Discover a device with device trigger
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
calls = []
def callback(trigger, context):
calls.append(trigger["trigger"]["description"])
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0] == "event 'tasmota_event'"
# Remove the trigger
remove()
await hass.async_block_till_done()
# Verify the triggers are no longer active
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attach_remove_late(hass, device_reg, mqtt_mock, setup_tasmota):
"""Test attach and removal of trigger."""
# Discover a device without device triggers
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = -1
mac = config1["mac"]
# Discover a device with device triggers
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["swc"][0] = 0
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
calls = []
def callback(trigger, context):
calls.append(trigger["trigger"]["description"])
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 0
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config2))
await hass.async_block_till_done()
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0] == "event 'tasmota_event'"
# Remove the trigger
remove()
await hass.async_block_till_done()
# Verify the triggers are no longer active
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_attach_remove_late2(hass, device_reg, mqtt_mock, setup_tasmota):
"""Test attach and removal of trigger."""
# Discover a device without device triggers
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = -1
mac = config1["mac"]
# Discover a device with device triggers
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["swc"][0] = 0
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
calls = []
def callback(trigger, context):
calls.append(trigger["trigger"]["description"])
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Remove the trigger
remove()
await hass.async_block_till_done()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
# Verify the trigger is not active
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_attach_remove_unknown1(hass, device_reg, mqtt_mock, setup_tasmota):
"""Test attach and removal of unknown trigger."""
# Discover a device without device triggers
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = -1
mac = config1["mac"]
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
remove = await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
],
Mock(),
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Remove the trigger
remove()
await hass.async_block_till_done()
async def test_attach_unknown_remove_device_from_registry(
hass, device_reg, mqtt_mock, setup_tasmota
):
"""Test attach and removal of device with unknown trigger."""
# Discover a device without device triggers
config1 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = -1
mac = config1["mac"]
# Discover a device with device triggers
config2 = copy.deepcopy(DEFAULT_CONFIG)
config2["swc"][0] = 0
# Discover a device with device triggers to load the Tasmota device trigger integration
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config2))
await hass.async_block_till_done()
# Forget the trigger
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config1))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
],
Mock(),
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Remove the device
device_reg.async_remove_device(device_entry.id)
await hass.async_block_till_done()
async def test_attach_remove_config_entry(hass, device_reg, mqtt_mock, setup_tasmota):
"""Test trigger cleanup when removing a Tasmota config entry."""
# Discover a device with device trigger
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 0
mac = config["mac"]
mqtt_mock.async_subscribe.reset_mock()
async_fire_mqtt_message(hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config))
await hass.async_block_till_done()
device_entry = device_reg.async_get_device(
set(), {(dr.CONNECTION_NETWORK_MAC, mac)}
)
calls = []
def callback(trigger, context):
calls.append(trigger["trigger"]["description"])
await async_initialize_triggers(
hass,
[
{
"platform": "device",
"domain": DOMAIN,
"device_id": device_entry.id,
"discovery_id": "00000049A3BC_switch_1_TOGGLE",
"type": "button_short_press",
"subtype": "switch_1",
},
],
callback,
DOMAIN,
"mock-name",
_LOGGER.log,
)
# Fake short press.
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0] == "event 'tasmota_event'"
# Remove the Tasmota config entry
config_entries = hass.config_entries.async_entries("tasmota")
await hass.config_entries.async_remove(config_entries[0].entry_id)
await hass.async_block_till_done()
# Verify the triggers are no longer active
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"TOGGLE"}}'
)
await hass.async_block_till_done()
assert len(calls) == 1
| |
"""
homeassistant.components.http
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides an API and an HTTP interface for debug purposes.
For more details about the RESTful API, please refer to the documentation at
https://home-assistant.io/developers/api/
"""
from datetime import timedelta
import gzip
from http import cookies
from http.server import SimpleHTTPRequestHandler, HTTPServer
import json
import logging
import os
from socketserver import ThreadingMixIn
import ssl
import threading
import time
from urllib.parse import urlparse, parse_qs
import homeassistant.core as ha
from homeassistant.const import (
SERVER_PORT, CONTENT_TYPE_JSON, CONTENT_TYPE_TEXT_PLAIN,
HTTP_HEADER_HA_AUTH, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_ACCEPT_ENCODING,
HTTP_HEADER_CONTENT_ENCODING, HTTP_HEADER_VARY, HTTP_HEADER_CONTENT_LENGTH,
HTTP_HEADER_CACHE_CONTROL, HTTP_HEADER_EXPIRES, HTTP_OK, HTTP_UNAUTHORIZED,
HTTP_NOT_FOUND, HTTP_METHOD_NOT_ALLOWED, HTTP_UNPROCESSABLE_ENTITY)
import homeassistant.remote as rem
import homeassistant.util as util
import homeassistant.util.dt as date_util
import homeassistant.bootstrap as bootstrap
DOMAIN = "http"
CONF_API_PASSWORD = "api_password"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_DEVELOPMENT = "development"
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_KEY = 'ssl_key'
DATA_API_PASSWORD = 'api_password'
# Throttling time in seconds for expired sessions check
SESSION_CLEAR_INTERVAL = timedelta(seconds=20)
SESSION_TIMEOUT_SECONDS = 1800
SESSION_KEY = 'sessionId'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
""" Sets up the HTTP API and debug interface. """
conf = config.get(DOMAIN, {})
api_password = util.convert(conf.get(CONF_API_PASSWORD), str)
# If no server host is given, accept all incoming requests
server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
server_port = conf.get(CONF_SERVER_PORT, SERVER_PORT)
development = str(conf.get(CONF_DEVELOPMENT, "")) == "1"
ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
ssl_key = conf.get(CONF_SSL_KEY)
try:
server = HomeAssistantHTTPServer(
(server_host, server_port), RequestHandler, hass, api_password,
development, ssl_certificate, ssl_key)
except OSError:
# If address already in use
_LOGGER.exception("Error setting up HTTP server")
return False
hass.bus.listen_once(
ha.EVENT_HOMEASSISTANT_START,
lambda event:
threading.Thread(target=server.start, daemon=True).start())
hass.http = server
hass.config.api = rem.API(util.get_local_ip(), api_password, server_port,
ssl_certificate is not None)
return True
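# Illustrative configuration.yaml snippet consumed by setup() above (a sketch,
# not taken from the docs; key names mirror the CONF_* constants defined in
# this module):
#
#   http:
#     api_password: mypassword
#     server_port: 8123
#     development: "1"
#     ssl_certificate: /path/to/cert.pem
#     ssl_key: /path/to/key.pem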
# pylint: disable=too-many-instance-attributes
class HomeAssistantHTTPServer(ThreadingMixIn, HTTPServer):
""" Handle HTTP requests in a threaded fashion. """
# pylint: disable=too-few-public-methods
allow_reuse_address = True
daemon_threads = True
# pylint: disable=too-many-arguments
def __init__(self, server_address, request_handler_class,
hass, api_password, development, ssl_certificate, ssl_key):
super().__init__(server_address, request_handler_class)
self.server_address = server_address
self.hass = hass
self.api_password = api_password
self.development = development
self.paths = []
self.sessions = SessionStore()
self.use_ssl = ssl_certificate is not None
# We will lazy init this one if needed
self.event_forwarder = None
if development:
_LOGGER.info("running http in development mode")
if ssl_certificate is not None:
wrap_kwargs = {'certfile': ssl_certificate}
if ssl_key is not None:
wrap_kwargs['keyfile'] = ssl_key
self.socket = ssl.wrap_socket(self.socket, **wrap_kwargs)
def start(self):
""" Starts the HTTP server. """
def stop_http(event):
""" Stops the HTTP server. """
self.shutdown()
self.hass.bus.listen_once(ha.EVENT_HOMEASSISTANT_STOP, stop_http)
protocol = 'https' if self.use_ssl else 'http'
_LOGGER.info(
"Starting web interface at %s://%s:%d",
protocol, self.server_address[0], self.server_address[1])
# 31-1-2015: Refactored frontend/api components out of this component
# To prevent stuff from breaking, load the two extracted components
bootstrap.setup_component(self.hass, 'api')
bootstrap.setup_component(self.hass, 'frontend')
self.serve_forever()
def register_path(self, method, url, callback, require_auth=True):
""" Registers a path with the server. """
self.paths.append((method, url, callback, require_auth))
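# Illustrative registration (hypothetical handler name), matching the tuple
# layout stored in self.paths above; the url may be a plain string or a
# compiled regular expression, as _handle_request checks both:
#
#   server.register_path(
#       'GET', re.compile(r'/api/states/(?P<entity_id>.+)'),
#       handle_get_state, require_auth=True)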
def log_message(self, fmt, *args):
""" Redirect built-in log to HA logging """
# pylint: disable=no-self-use
_LOGGER.info(fmt, *args)
# pylint: disable=too-many-public-methods,too-many-locals
class RequestHandler(SimpleHTTPRequestHandler):
"""
Handles incoming HTTP requests
We extend from SimpleHTTPRequestHandler instead of Base so we
can use the guess content type methods.
"""
server_version = "HomeAssistant/1.0"
def __init__(self, req, client_addr, server):
""" Contructor, call the base constructor and set up session """
# Track if this was an authenticated request
self.authenticated = False
SimpleHTTPRequestHandler.__init__(self, req, client_addr, server)
def log_message(self, fmt, *arguments):
""" Redirect built-in log to HA logging """
if self.server.api_password is None:
_LOGGER.info(fmt, *arguments)
else:
_LOGGER.info(
fmt, *(arg.replace(self.server.api_password, '*******')
if isinstance(arg, str) else arg for arg in arguments))
def _handle_request(self, method): # pylint: disable=too-many-branches
""" Does some common checks and calls appropriate method. """
url = urlparse(self.path)
# Read query input. parse_qs gives a list for each value, we want last
data = {key: data[-1] for key, data in parse_qs(url.query).items()}
# Did we get POST input?
content_length = int(self.headers.get(HTTP_HEADER_CONTENT_LENGTH, 0))
if content_length:
body_content = self.rfile.read(content_length).decode("UTF-8")
try:
data.update(json.loads(body_content))
except (TypeError, ValueError):
# TypeError if JSON object is not a dict
# ValueError if we could not parse JSON
_LOGGER.exception(
"Exception parsing JSON: %s", body_content)
self.write_json_message(
"Error parsing JSON", HTTP_UNPROCESSABLE_ENTITY)
return
self.authenticated = (self.server.api_password is None or
self.headers.get(HTTP_HEADER_HA_AUTH) ==
self.server.api_password or
data.get(DATA_API_PASSWORD) ==
self.server.api_password or
self.verify_session())
if '_METHOD' in data:
method = data.pop('_METHOD')
# Var to keep track if we found a path that matched a handler but
# the method was different
path_matched_but_not_method = False
# Var to hold the handler for this path and method if found
handle_request_method = False
require_auth = True
# Check every handler to find matching result
for t_method, t_path, t_handler, t_auth in self.server.paths:
# we either do string-comparison or regular expression matching
# pylint: disable=maybe-no-member
if isinstance(t_path, str):
path_match = url.path == t_path
else:
path_match = t_path.match(url.path)
if path_match and method == t_method:
# Call the method
handle_request_method = t_handler
require_auth = t_auth
break
elif path_match:
path_matched_but_not_method = True
# Did we find a handler for the incoming request?
if handle_request_method:
# For some calls we need a valid password
if require_auth and not self.authenticated:
self.write_json_message(
"API password missing or incorrect.", HTTP_UNAUTHORIZED)
return
handle_request_method(self, path_match, data)
elif path_matched_but_not_method:
self.send_response(HTTP_METHOD_NOT_ALLOWED)
self.end_headers()
else:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
def do_HEAD(self): # pylint: disable=invalid-name
""" HEAD request handler. """
self._handle_request('HEAD')
def do_GET(self): # pylint: disable=invalid-name
""" GET request handler. """
self._handle_request('GET')
def do_POST(self): # pylint: disable=invalid-name
""" POST request handler. """
self._handle_request('POST')
def do_PUT(self): # pylint: disable=invalid-name
""" PUT request handler. """
self._handle_request('PUT')
def do_DELETE(self): # pylint: disable=invalid-name
""" DELETE request handler. """
self._handle_request('DELETE')
def write_json_message(self, message, status_code=HTTP_OK):
""" Helper method to return a message to the caller. """
self.write_json({'message': message}, status_code=status_code)
def write_json(self, data=None, status_code=HTTP_OK, location=None):
""" Helper method to return JSON to the caller. """
self.send_response(status_code)
self.send_header(HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_JSON)
if location:
self.send_header('Location', location)
self.set_session_cookie_header()
self.end_headers()
if data is not None:
self.wfile.write(
json.dumps(data, indent=4, sort_keys=True,
cls=rem.JSONEncoder).encode("UTF-8"))
def write_text(self, message, status_code=HTTP_OK):
""" Helper method to return a text message to the caller. """
self.send_response(status_code)
self.send_header(HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_TEXT_PLAIN)
self.set_session_cookie_header()
self.end_headers()
self.wfile.write(message.encode("UTF-8"))
def write_file(self, path, cache_headers=True):
""" Returns a file to the user. """
try:
with open(path, 'rb') as inp:
self.write_file_pointer(self.guess_type(path), inp,
cache_headers)
except IOError:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
_LOGGER.exception("Unable to serve %s", path)
def write_file_pointer(self, content_type, inp, cache_headers=True):
"""
Helper function to write a file pointer to the user.
Does not do error handling.
"""
do_gzip = 'gzip' in self.headers.get(HTTP_HEADER_ACCEPT_ENCODING, '')
self.send_response(HTTP_OK)
self.send_header(HTTP_HEADER_CONTENT_TYPE, content_type)
if cache_headers:
self.set_cache_header()
self.set_session_cookie_header()
if do_gzip:
gzip_data = gzip.compress(inp.read())
self.send_header(HTTP_HEADER_CONTENT_ENCODING, "gzip")
self.send_header(HTTP_HEADER_VARY, HTTP_HEADER_ACCEPT_ENCODING)
self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(len(gzip_data)))
else:
fst = os.fstat(inp.fileno())
self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(fst[6]))
self.end_headers()
if self.command == 'HEAD':
return
elif do_gzip:
self.wfile.write(gzip_data)
else:
self.copyfile(inp, self.wfile)
def set_cache_header(self):
""" Add cache headers if not in development """
if self.server.development:
return
# 1 year in seconds
cache_time = 365 * 86400
self.send_header(
HTTP_HEADER_CACHE_CONTROL,
"public, max-age={}".format(cache_time))
self.send_header(
HTTP_HEADER_EXPIRES,
self.date_time_string(time.time()+cache_time))
def set_session_cookie_header(self):
""" Add the header for the session cookie and return session id. """
if not self.authenticated:
return None
session_id = self.get_cookie_session_id()
if session_id is not None:
self.server.sessions.extend_validation(session_id)
return session_id
session_id = self.server.sessions.create()
self.send_header(
'Set-Cookie',
'{}={}'.format(SESSION_KEY, session_id)
)
return session_id
def verify_session(self):
""" Verify that we are in a valid session. """
return self.get_cookie_session_id() is not None
def get_cookie_session_id(self):
"""
Extracts the current session id from the
cookie or returns None if not set or invalid
"""
if 'Cookie' not in self.headers:
return None
cookie = cookies.SimpleCookie()
try:
cookie.load(self.headers["Cookie"])
except cookies.CookieError:
return None
morsel = cookie.get(SESSION_KEY)
if morsel is None:
return None
session_id = cookie[SESSION_KEY].value
if self.server.sessions.is_valid(session_id):
return session_id
return None
def destroy_session(self):
""" Destroys session. """
session_id = self.get_cookie_session_id()
if session_id is None:
return
self.send_header('Set-Cookie', '')
self.server.sessions.destroy(session_id)
def session_valid_time():
""" Time till when a session will be valid. """
return date_util.utcnow() + timedelta(seconds=SESSION_TIMEOUT_SECONDS)
class SessionStore(object):
""" Responsible for storing and retrieving http sessions """
def __init__(self):
""" Set up the session store """
self._sessions = {}
self._lock = threading.RLock()
@util.Throttle(SESSION_CLEAR_INTERVAL)
def _remove_expired(self):
""" Remove any expired sessions. """
now = date_util.utcnow()
for key in [key for key, valid_time in self._sessions.items()
if valid_time < now]:
self._sessions.pop(key)
def is_valid(self, key):
""" Return True if a valid session is given. """
with self._lock:
self._remove_expired()
return (key in self._sessions and
self._sessions[key] > date_util.utcnow())
def extend_validation(self, key):
""" Extend a session validation time. """
with self._lock:
if key not in self._sessions:
return
self._sessions[key] = session_valid_time()
def destroy(self, key):
""" Destroy a session by key. """
with self._lock:
self._sessions.pop(key, None)
def create(self):
""" Creates a new session. """
with self._lock:
session_id = util.get_random_string(20)
while session_id in self._sessions:
session_id = util.get_random_string(20)
self._sessions[session_id] = session_valid_time()
return session_id
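# Minimal usage sketch of SessionStore (hypothetical, for clarity):
#
#   store = SessionStore()
#   session_id = store.create()
#   assert store.is_valid(session_id)
#   store.destroy(session_id)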
| |
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import json
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
GroupAction,
GroupEntry,
GroupBucket,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import (OFPGT_SELECT, ETH_TYPE_IPv4)
def delete_flows(ofswitch, table_id, flow_ids):
for flow_id in flow_ids:
result = ofswitch.delete_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow with id of '%s' successfully removed "
"from the Controller" % flow_id)
else:
print ("!!!Flow '%s' removal error, reason: %s" %
(flow_id, status.brief()))
def delete_groups(ofswitch, group_ids):
for group_id in group_ids:
result = ofswitch.delete_group(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Group '%s' successfully removed from the Controller" %
group_id)
else:
print ("!!!Group '%s' removal error, reason: %s" %
(group_id, status.brief()))
def print_groups(lcfg, loper):
q = 10 # number of list items to be in a single chunk (output string)
print "\n".strip()
s = 'Configured Groups IDs'
if lcfg:
chunks = [lcfg[x:x + q] for x in xrange(0, len(lcfg), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 18
print "%s%s" % (" " * n, ", ".join(map(str, chunks[i])))
else:
print " %s : %s" % (s, "none")
s = 'Operational Groups IDs'
if loper:
chunks = [loper[x:x + q] for x in xrange(0, len(loper), q)]
print " %s :" % s,
for i in range(0, len(chunks)):
n = 0 if i == 0 else len(s) + 18
print "%s%s" % (" " * n, ", ".join(map(str, chunks[i])))
else:
print " %s : %s" % (s, "none")
def of_demo_34():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 34 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print "\n".strip()
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
grp_ids_cfg = []
grp_ids_oper = []
print "\n".strip()
print ("<<< Get OpenFlow Groups Information")
time.sleep(rundelay)
result = ofswitch.get_configured_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_cfg = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_cfg = []
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
result = ofswitch.get_operational_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_oper = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_oper = []
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
# Show current state of the Group Table in the Controller's
# configuration and operational data stores
print_groups(grp_ids_cfg, grp_ids_oper)
# Create new group
group_id = 13
group_type = OFPGT_SELECT
group_name = "Example of 'load balancing' group"
weight1 = 60
weight2 = 30
weight3 = 10
out_port1 = 110
out_port2 = 111
out_port3 = 112
print "\n".strip()
print ("<<< Create Group")
print "\n".strip()
print (" Group Type : %s\n"
" Group ID : %s\n"
" Group Name : \"%s\"" %
(group_type.strip('group-').upper(),
group_id, group_name))
print (" Buckets :")
print (" [0] weight : %s" %
weight1)
print (" actions: Output (%s)" %
out_port1)
print (" [1] weight : %s" %
weight2)
print (" actions: Output (%s)" %
out_port2)
print (" [2] weight : %s" %
weight3)
print (" actions: Output (%s)" %
out_port3)
time.sleep(rundelay)
# Allocate a placeholder for the group entry
group_entry = GroupEntry(group_id, group_type)
group_entry.set_group_name(group_name)
# Fill in group entry with action buckets
# ---------
bucket_id = 0
bucket1 = GroupBucket(bucket_id)
bucket1.set_weight(weight1)
action = OutputAction(order=0, port=out_port1)
bucket1.add_action(action)
group_entry.add_bucket(bucket1)
# ---------
bucket_id += 1
bucket2 = GroupBucket(bucket_id)
bucket2.set_weight(weight2)
action = OutputAction(order=0, port=out_port2)
bucket2.add_action(action)
group_entry.add_bucket(bucket2)
# ---------
bucket_id += 1
bucket3 = GroupBucket(bucket_id)
bucket3.set_weight(weight3)
action = OutputAction(order=0, port=out_port3)
bucket3.add_action(action)
group_entry.add_bucket(bucket3)
# Request Controller to create the group
print "\n".strip()
print ("<<< Group to create:")
print group_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_group(group_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Group successfully added")
grp_ids_oper = result.get_data()
else:
print ("\n").strip()
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n").strip()
print ("<<< Get group '%s' configuration status") % group_id
time.sleep(rundelay)
result = ofswitch.get_configured_group(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Group configuration info:")
group = result.get_data()
print json.dumps(group, indent=4)
else:
print ("\n").strip()
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n").strip()
print ("<<< Get group '%s' operational status") % group_id
time.sleep(rundelay)
result = ofswitch.get_group_description(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Group operational info:")
group = result.get_data()
print json.dumps(group, indent=4)
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
print ("\n").strip()
print ("<<< Get group '%s' statistics information") % group_id
time.sleep(rundelay)
result = ofswitch.get_group_statistics(group_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("Group statistics info:")
group = result.get_data()
print json.dumps(group, indent=4)
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
print ("\n").strip()
print ("<<< Get OpenFlow Groups Information")
time.sleep(rundelay)
result = ofswitch.get_configured_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_cfg = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_cfg = []
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
result = ofswitch.get_operational_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_oper = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_oper = []
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
# Show current state of the Group Table in the Controller's
# configuration and operational data stores
print_groups(grp_ids_cfg, grp_ids_oper)
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Group action example"
priority = 1000
cookie = 1400
match_in_port = 109
match_eth_type = ETH_TYPE_IPv4
print "\n".strip()
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)" %
(match_in_port, hex(match_eth_type)))
print (" Actions: Apply Group (%s)\n" % group_id)
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = GroupAction(action_order)
action.set_group_id(group_id)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
flow_entry1.add_match(match)
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_groups(ofswitch, grp_ids_cfg)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print "\n".strip()
print ("<<< Remove all flows from the Controller")
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print "\n".strip()
print ("<<< Remove all groups from the Controller")
time.sleep(rundelay)
delete_groups(ofswitch, grp_ids_cfg)
print ("\n").strip()
print ("<<< Get OpenFlow Groups Information")
time.sleep(rundelay)
result = ofswitch.get_configured_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_cfg = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_cfg = []
else:
print ("\n").strip()
print ("!!!Error, reason: %s" % status.detailed())
result = ofswitch.get_operational_group_ids()
status = result.get_status()
if(status.eq(STATUS.OK)):
grp_ids_oper = result.get_data()
elif(status.eq(STATUS.DATA_NOT_FOUND)):
grp_ids_oper = []
else:
print ("\n")
print ("!!!Error, reason: %s" % status.detailed())
# Show current state of the Group Table in the Controller's
# configuration and operational data stores
print_groups(grp_ids_cfg, grp_ids_oper)
print ("\n").strip()
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_34()
| |
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import tempfile
import time
from djblets.util.filesystem import is_exe_in_path
try:
from P4 import P4Exception
except ImportError:
pass
from reviewboard.diffviewer.parser import DiffParser
from reviewboard.scmtools.core import SCMTool, ChangeSet, \
HEAD, PRE_CREATION
from reviewboard.scmtools.errors import SCMError, EmptyChangeSetError, \
AuthenticationError, \
RepositoryNotFoundError
STUNNEL_SERVER, STUNNEL_CLIENT = (0, 1)
class STunnelProxy(object):
def __init__(self, mode, target):
if not is_exe_in_path('stunnel'):
raise OSError('stunnel was not found in the exec path')
if mode not in (STUNNEL_SERVER, STUNNEL_CLIENT):
raise AttributeError('mode must be STUNNEL_SERVER or STUNNEL_CLIENT')
self.mode = mode
self.target = target
self.pid = None
def start_server(self, certfile):
self._start(['-p', certfile])
def start_client(self):
self._start(['-c'])
def _start(self, additional_args):
self.port = self._find_port()
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, 'stunnel.pid')
args = ['stunnel', '-P', filename,
'-d', '127.0.0.1:%d' % self.port,
'-r', self.target] + additional_args
subprocess.check_call(args)
# It can sometimes be racy to immediately open the file. We therefore
# have to wait a fraction of a second =/
time.sleep(0.1)
f = open(filename)
self.pid = int(f.read())
f.close()
shutil.rmtree(tempdir)
def shutdown(self):
if self.pid:
os.kill(self.pid, signal.SIGTERM)
self.pid = None
def _find_port(self):
"""Find an available port."""
# This is slightly racy but shouldn't be too bad.
while True:
port = random.randint(30000, 60000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('127.0.0.1', port))
s.listen(1)
s.shutdown(socket.SHUT_RDWR)
return port
except:
pass
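# Client-side usage sketch (hypothetical target address), mirroring what
# PerforceClient._connect() below does when use_stunnel is set:
#
#   proxy = STunnelProxy(STUNNEL_CLIENT, 'perforce.example.com:1667')
#   proxy.start_client()
#   # ... connect to the server through 127.0.0.1:<proxy.port> ...
#   proxy.shutdown()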
class PerforceClient(object):
def __init__(self, p4port, username, password, encoding, use_stunnel=False):
self.p4port = p4port
self.username = username
self.password = password
self.encoding = encoding
self.use_stunnel = use_stunnel
self.proxy = None
import P4
self.p4 = P4.P4()
if use_stunnel and not is_exe_in_path('stunnel'):
raise AttributeError('stunnel proxy was requested, but stunnel '
'binary is not in the exec path.')
def _connect(self):
"""
Connect to the perforce server.
This connects p4python to the remote server, optionally using a stunnel
proxy.
"""
self.p4.user = self.username
self.p4.password = self.password
if self.encoding:
self.p4.charset = self.encoding
self.p4.exception_level = 1
if self.use_stunnel:
# Spin up an stunnel client and then redirect through that
self.proxy = STunnelProxy(STUNNEL_CLIENT, self.p4port)
self.proxy.start_client()
self.p4.port = '127.0.0.1:%d' % self.proxy.port
else:
self.p4.port = self.p4port
self.p4.connect()
def _disconnect(self):
"""
Disconnect from the perforce server, and also shut down the stunnel
proxy (if it exists).
"""
try:
if self.p4.connected():
self.p4.disconnect()
except AttributeError:
pass
if self.proxy:
try:
self.proxy.shutdown()
except:
pass
self.proxy = None
@staticmethod
def _convert_p4exception_to_scmexception(e):
error = str(e)
if 'Perforce password' in error or 'Password must be set' in error:
raise AuthenticationError(msg=error)
elif 'check $P4PORT' in error:
raise RepositoryNotFoundError
else:
raise SCMError(error)
def _run_worker(self, worker):
result = None
# TODO: Move to using with: when we require a minimum of Python 2.5.
# We should make it auto-disconnect.
try:
self._connect()
result = worker()
self._disconnect()
except P4Exception, e:
self._disconnect()
self._convert_p4exception_to_scmexception(e)
except:
self._disconnect()
raise
return result
def _get_changeset(self, changesetid):
return self.p4.run_describe('-s', str(changesetid))
def get_changeset(self, changesetid):
"""
Get the contents of a changeset description.
"""
return self._run_worker(lambda: self._get_changeset(changesetid))
def _get_pending_changesets(self, userid):
changesets = self.p4.run_changes('-s', 'pending', '-u', userid)
return map(self._get_changeset, [x.split()[1] for x in changesets])
def get_pending_changesets(self, userid):
"""
Get a list of changeset descriptions for all pending changesets for a
given user.
"""
return self._run_worker(lambda: self._get_pending_changesets(userid))
def _get_file(self, path, revision):
if revision == PRE_CREATION:
return ''
elif revision == HEAD:
depot_path = path
else:
depot_path = '%s#%s' % (path, revision)
args = ['p4', '-p', self.p4.port]
if self.p4.user:
args.extend(['-u', self.p4.user])
if self.p4.password:
args.extend(['-P', self.p4.password])
if self.p4.charset:
args.extend(['-C', self.p4.charset])
args.extend(['print', '-q', depot_path])
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result, errdata = p.communicate()
failure = p.poll()
if failure:
error = errdata.splitlines()
# The command-line output is the same as the contents of a
# P4Exception except they're prefixed with a line that says
# "Perforce client error:", and the lines of the error are indented
# with tabs.
if error[0].startswith('Perforce client error:'):
error = error[1:]
text = '\n'.join(line.lstrip('\t') for line in error)
self._convert_p4exception_to_scmexception(Exception(text))
else:
return result
def get_file(self, path, revision):
"""
Get the contents of a file, at a specific revision.
"""
return self._run_worker(lambda: self._get_file(path, revision))
def _get_files_at_revision(self, revision_str):
return self.p4.run_files(revision_str)
def get_files_at_revision(self, revision_str):
"""
Get a list of files at a specific revision. This is a simple interface
to 'p4 files'.
"""
return self._run_worker(
lambda: self._get_files_at_revision(revision_str))
class PerforceTool(SCMTool):
name = "Perforce"
uses_atomic_revisions = True
supports_authentication = True
dependencies = {
'modules': ['P4'],
}
def __init__(self, repository):
SCMTool.__init__(self, repository)
self.client = self._create_client(
str(repository.mirror_path or repository.path),
str(repository.username),
str(repository.password),
str(repository.encoding))
@staticmethod
def _create_client(path, username, password, encoding=''):
if path.startswith('stunnel:'):
path = path[8:]
use_stunnel = True
else:
use_stunnel = False
return PerforceClient(path, username, password, encoding, use_stunnel)
@staticmethod
def _convert_p4exception_to_scmexception(e):
error = str(e)
if 'Perforce password' in error or 'Password must be set' in error:
raise AuthenticationError(msg=error)
elif 'check $P4PORT' in error:
raise RepositoryNotFoundError
else:
raise SCMError(error)
@classmethod
def check_repository(cls, path, username=None, password=None,
local_site_name=None):
"""
Performs checks on a repository to test its validity.
This should check if a repository exists and can be connected to.
Problems are reported by raising an exception, which may contain extra
information such as a human-readable description of the problem. If the
repository is valid and can be connected to, no exception is raised.
"""
super(PerforceTool, cls).check_repository(path, username, password,
local_site_name)
client = cls._create_client(str(path), str(username), str(password))
client.get_changeset(1)
def get_pending_changesets(self, userid):
return self.client.get_pending_changesets(userid)
def get_changeset(self, changesetid, allow_empty=False):
changeset = self.client.get_changeset(changesetid)
if changeset:
return self.parse_change_desc(changeset[0], changesetid, allow_empty)
else:
return None
def get_diffs_use_absolute_paths(self):
return True
def get_file(self, path, revision=HEAD):
return self.client.get_file(path, revision)
def parse_diff_revision(self, file_str, revision_str, *args, **kwargs):
# Perforce has this lovely idiosyncrasy that diffs show revision #1 both
# for pre-creation and when there's an actual revision.
filename, revision = revision_str.rsplit('#', 1)
if len(self.client.get_files_at_revision(revision_str)) == 0:
revision = PRE_CREATION
return filename, revision
def get_filenames_in_revision(self, revision):
return self.get_changeset(revision).files
@staticmethod
def parse_change_desc(changedesc, changenum, allow_empty=False):
if not changedesc:
return None
changeset = ChangeSet()
changeset.changenum = changenum
# At its most basic, a Perforce changeset description has three
# sections.
#
# ---------------------------------------------------------
# Change <num> by <user>@<client> on <timestamp> *pending*
#
# description...
# this can be any number of lines
#
# Affected files ...
#
# //depot/branch/etc/file.cc#<revision> branch
# //depot/branch/etc/file.hh#<revision> delete
# ---------------------------------------------------------
#
# At the moment, we only care about the description and the list of
# files. We take the first line of the description as the summary.
#
# We parse the username out of the first line to check that one user
# isn't attempting to "claim" another's changelist. We then split
# everything around the 'Affected files ...' line, and process the
# results.
changeset.username = changedesc['user']
changeset.description = changedesc['desc']
if changedesc['status'] == "pending":
changeset.pending = True
try:
changeset.files = changedesc['depotFile']
except KeyError:
if not allow_empty:
raise EmptyChangeSetError(changenum)
split = changeset.description.find('\n\n')
if split >= 0 and split < 100:
changeset.summary = \
changeset.description.split('\n\n', 1)[0].replace('\n', ' ')
else:
changeset.summary = changeset.description.split('\n', 1)[0]
return changeset
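# Illustrative result of parse_change_desc for a simple pending change
# (hypothetical values, for clarity):
#
#   changeset.changenum = 12345
#   changeset.username  = 'jdoe'
#   changeset.summary   = 'Fix crash in foo()'
#   changeset.pending   = True
#   changeset.files     = ['//depot/branch/etc/file.cc']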
def get_fields(self):
return ['changenum', 'diff_path']
def get_parser(self, data):
return PerforceDiffParser(data)
class PerforceDiffParser(DiffParser):
SPECIAL_REGEX = re.compile("^==== ([^#]+)#(\d+) ==([AMD])== (.*) ====$")
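# Example of a diff header line this regex matches (illustrative path names);
# the groups capture the depot path, revision, change type and local path:
#
#   ==== //depot/main/foo.cc#4 ==M== /home/user/src/foo.cc ====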
def __init__(self, data):
DiffParser.__init__(self, data)
def parse_diff_header(self, linenum, info):
m = self.SPECIAL_REGEX.match(self.lines[linenum])
if m:
info['origFile'] = m.group(1)
info['origInfo'] = "%s#%s" % (m.group(1), m.group(2))
info['newFile'] = m.group(4)
info['newInfo'] = ""
linenum += 1
if linenum < len(self.lines) and \
(self.lines[linenum].startswith("Binary files ") or
self.lines[linenum].startswith("Files ")):
info['binary'] = True
linenum += 1
if m.group(3) == 'D':
info['deleted'] = True
# In this case, this *is* our diff header. We don't want to
# let the next line's real diff header be a part of this one,
# so return early and don't invoke the next.
return linenum
return super(PerforceDiffParser, self).parse_diff_header(linenum, info)
| |
"""
**REP** wrappers are derived from :class:`Classifier` and :class:`Regressor`
depending on the problem of interest.
Below you can see the standard methods available in the wrappers.
"""
from __future__ import division, print_function, absolute_import
from abc import ABCMeta, abstractmethod
import numpy
import pandas
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import _get_features
__author__ = 'Tatiana Likhomanenko, Alex Rogozhnikov'
_docs = \
"""
Interface to train **{}** models from different machine learning libraries, such as **sklearn, TMVA, XGBoost**, ...
:param features: features used to train a model
:type features: list[str] or None
.. note::
* if `features` aren't set (**None**), then all features in the training dataset will be used
* Datasets should be `pandas.DataFrame`, not `numpy.array`.
Provided this, you'll be able to choose features used in training by setting e.g.
`features=['mass', 'momentum']` in the constructor.
* It works fine with `numpy.array` as well, but in this case all the features will be used.
"""
class Classifier(BaseEstimator, ClassifierMixin):
__doc__ = _docs.format('classification') + \
"""
    * Class values must be from 0 to n_classes-1!
"""
__metaclass__ = ABCMeta
def __init__(self, features=None):
self.features = list(features) if features is not None else features
def _get_features(self, X, allow_nans=False):
"""
Return data with the necessary features.
:param pandas.DataFrame X: training data
:return: pandas.DataFrame with necessary features
"""
X_prepared, self.features = _get_features(self.features, X, allow_nans=allow_nans)
return X_prepared
def _set_classes(self, y):
self.classes_, indices = numpy.unique(y, return_index=True)
self.n_classes_ = len(self.classes_)
        assert self.n_classes_ >= 2, "Number of labels must be >= 2 (data contains {})".format(self.n_classes_)
        assert numpy.all(self.classes_ == numpy.arange(self.n_classes_)), \
            'Labels must be from 0 to n_classes-1, got {}'.format(self.classes_)
return indices
@abstractmethod
def fit(self, X, y, sample_weight=None):
"""
Train a classification model on the data.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:param y: labels of samples, array-like of shape [n_samples]
:param sample_weight: weight of samples,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
pass
def predict(self, X):
"""
Predict labels for all samples in the dataset.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples] with integer labels
"""
proba = self.predict_proba(X)
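        # argmax picks the most probable column for each sample, and take()
        # maps those column indices back to labels (an identity mapping here,
        # since labels are required to be 0..n_classes-1).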
return self.classes_.take(numpy.argmax(proba, axis=1), axis=0)
@abstractmethod
def predict_proba(self, X):
"""
Predict probabilities for each class label for samples.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples, n_classes] with probabilities
"""
pass
@abstractmethod
def staged_predict_proba(self, X):
"""
        Predict probabilities for each class label at each stage (i.e. for boosting algorithms).
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:rtype: iterator
"""
pass
def get_feature_importances(self):
"""
        Return feature importances.
:rtype: pandas.DataFrame with `index=self.features`
"""
try:
return pandas.DataFrame({"effect": self.feature_importances_}, index=self.features)
except AttributeError:
raise AttributeError("Haven't feature_importances_ property")
def fit_lds(self, lds):
"""
        Train a classification model on a LabeledDataStorage dataset.
:param LabeledDataStorage lds: data
:return: self
"""
X, y, sample_weight = lds.get_data(self.features), lds.get_targets(), lds.get_weights(allow_nones=True)
return self.fit(X, y, sample_weight=sample_weight)
def test_on_lds(self, lds):
"""
Prepare a classification report for a single classifier.
:param LabeledDataStorage lds: data
:return: ClassificationReport
"""
from ..report import ClassificationReport
return ClassificationReport(classifiers={'clf': self}, lds=lds)
def test_on(self, X, y, sample_weight=None):
"""
        Prepare a classification report for a single classifier.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:param y: labels of samples --- array-like of shape [n_samples]
:param sample_weight: weight of samples,
array-like of shape [n_samples] or None if all weights are equal
:return: ClassificationReport
"""
from ..data import LabeledDataStorage
lds = LabeledDataStorage(data=X, target=y, sample_weight=sample_weight)
return self.test_on_lds(lds=lds)
class Regressor(BaseEstimator, RegressorMixin):
__doc__ = _docs.format('regression')
__metaclass__ = ABCMeta
def __init__(self, features=None):
self.features = list(features) if features is not None else features
def _get_features(self, X, allow_nans=False):
"""
Return data with the necessary features.
:param pandas.DataFrame X: training data
:return: pandas.DataFrame with necessary features
"""
X_prepared, self.features = _get_features(self.features, X, allow_nans=allow_nans)
return X_prepared
@abstractmethod
def fit(self, X, y, sample_weight=None):
"""
Train a regression model on the data.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:param y: values for samples, array-like of shape [n_samples]
:param sample_weight: weight of samples,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
pass
@abstractmethod
def predict(self, X):
"""
Predict values for data.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples] with predicted values
"""
pass
@abstractmethod
def staged_predict(self, X):
"""
        Predict values for data at each stage (i.e. for boosting algorithms).
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:rtype: iterator
"""
pass
def fit_lds(self, lds):
"""
        Train a regression model on a LabeledDataStorage dataset.
:param LabeledDataStorage lds: data
:return: self
"""
X, y, sample_weight = lds.get_data(self.features), lds.get_targets(), lds.get_weights()
if sample_weight is None:
return self.fit(X, y)
else:
return self.fit(X, y, sample_weight=sample_weight)
def get_feature_importances(self):
"""
        Get feature importances.
:rtype: pandas.DataFrame with `index=self.features`
"""
try:
return pandas.DataFrame({"effect": self.feature_importances_}, index=self.features)
except AttributeError:
raise AttributeError("Classifier doesn't provide feature_importances_ property")
def test_on_lds(self, lds):
"""
Prepare a regression report for a single regressor.
:param LabeledDataStorage lds: data
:return: RegressionReport
"""
from ..report import RegressionReport
return RegressionReport(regressors={'clf': self}, lds=lds)
def test_on(self, X, y, sample_weight=None):
"""
        Prepare a regression report for a single regressor.
:param pandas.DataFrame X: data of shape [n_samples, n_features]
:param y: values of samples --- array-like of shape [n_samples]
:param sample_weight: weight of samples,
array-like of shape [n_samples] or None if all weights are equal
:return: RegressionReport
"""
from ..data import LabeledDataStorage
lds = LabeledDataStorage(data=X, target=y, sample_weight=sample_weight)
return self.test_on_lds(lds=lds)
| |
#################################################################
# seTree.py
# Originally from Tree.py
# Altered by Yi-Hong Lin, yihhongl@andrew.cmu.edu, 2004
#
# This class actually decides the behavior of the sceneGraphExplorer.
# It may look much like the original, but it has been changed substantially:
# it now handles selections that happen elsewhere (such as picking directly
# inside the scene) and objects removed by hot key.
# The rename process has also been changed. Renaming no longer happens here;
# instead, a message is sent to the sceneEditor, which renames the target.
#
#################################################################
import os, sys, string, Pmw, Tkinter
from direct.showbase.DirectObject import DirectObject
from Tkinter import IntVar, Menu, PhotoImage, Label, Frame, Entry
from pandac.PandaModules import *
# Initialize icon directory
ICONDIR = getModelPath().findFile(Filename('icons')).toOsSpecific()
if not os.path.isdir(ICONDIR):
raise RuntimeError, "can't find DIRECT icon directory (%s)" % `ICONDIR`
class TreeNode:
def __init__(self, canvas, parent, item, menuList = []):
self.canvas = canvas
self.parent = parent
self.item = item
self.state = 'collapsed'
self.selected = 0
self.children = {}
self.kidKeys = []
self.x = self.y = None
self.iconimages = {} # cache of PhotoImage instances for icons
self.menuList = menuList
self.menuVar = IntVar()
self.menuVar.set(0)
self._popupMenu = None
self.image_id = None
if self.menuList:
if self.menuList[-1] == 'Separator':
self.menuList = self.menuList[:-1]
self._popupMenu = Menu(self.canvas, tearoff = 0)
for i in range(len(self.menuList)):
item = self.menuList[i]
if item == 'Separator':
self._popupMenu.add_separator()
else:
self._popupMenu.add_radiobutton(
label = item,
variable = self.menuVar,
value = i,
indicatoron = 0,
command = self.popupMenuCommand)
def destroy(self):
for key in self.kidKeys:
c = self.children[key]
del self.children[key]
c.destroy()
self.parent = None
def geticonimage(self, name):
try:
return self.iconimages[name]
except KeyError:
pass
file, ext = os.path.splitext(name)
ext = ext or ".gif"
fullname = os.path.join(ICONDIR, file + ext)
image = PhotoImage(master=self.canvas, file=fullname)
self.iconimages[name] = image
return image
def select(self, event=None):
if self.selected:
return
self.deselectall()
self.selected = 1
if self.parent != None:
if self.parent.state == 'expanded':
self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
self.item.OnSelect(event)
def deselect(self, event=None):
if not self.selected:
return
self.selected = 0
if self.parent != None:
if self.parent.state == 'expanded':
self.canvas.delete(self.image_id)
self.drawicon()
self.drawtext()
def deselectall(self):
if self.parent:
self.parent.deselectall()
else:
self.deselecttree()
def deselecttree(self):
if self.selected:
self.deselect()
for key in self.kidKeys:
child = self.children[key]
child.deselecttree()
def flip(self, event=None):
if self.state == 'expanded':
self.collapse()
else:
self.expand()
self.item.OnDoubleClick()
return "break"
def popupMenu(self, event=None):
if self._popupMenu:
self._popupMenu.post(event.widget.winfo_pointerx(),
event.widget.winfo_pointery())
return "break"
def popupMenuCommand(self):
command = self.menuList[self.menuVar.get()]
self.item.MenuCommand(command)
if self.parent and (command != 'Update Explorer'):
# Update parent to try to keep explorer up to date
self.parent.update()
def expand(self, event=None):
if not self.item.IsExpandable():
return
if self.state != 'expanded':
self.state = 'expanded'
self.update()
self.view()
def collapse(self, event=None):
if self.state != 'collapsed':
self.state = 'collapsed'
self.update()
def view(self):
top = self.y - 2
bottom = self.lastvisiblechild().y + 17
height = bottom - top
visible_top = self.canvas.canvasy(0)
visible_height = self.canvas.winfo_height()
visible_bottom = self.canvas.canvasy(visible_height)
if visible_top <= top and bottom <= visible_bottom:
return
x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
if top >= visible_top and height <= visible_height:
fraction = top + height - visible_height
else:
fraction = top
fraction = float(fraction) / y1
self.canvas.yview_moveto(fraction)
def reveal(self):
# Make sure all parent nodes are marked as expanded
parent = self.parent
while parent:
if parent.state == 'collapsed':
parent.state = 'expanded'
parent = parent.parent
else:
break
# Redraw tree accordingly
self.update()
# Bring this item into view
self.view()
def lastvisiblechild(self):
if self.kidKeys and self.state == 'expanded':
return self.children[self.kidKeys[-1]].lastvisiblechild()
else:
return self
def update(self):
if self.parent:
self.parent.update()
else:
oldcursor = self.canvas['cursor']
self.canvas['cursor'] = "watch"
self.canvas.update()
self.canvas.delete(Tkinter.ALL) # XXX could be more subtle
self.draw(7, 2)
x0, y0, x1, y1 = self.canvas.bbox(Tkinter.ALL)
self.canvas.configure(scrollregion=(0, 0, x1, y1))
self.canvas['cursor'] = oldcursor
def draw(self, x, y):
# XXX This hard-codes too many geometry constants!
self.x, self.y = x, y
self.drawicon()
self.drawtext()
if self.state != 'expanded':
return y+17
# draw children
sublist = self.item._GetSubList()
if not sublist:
# IsExpandable() was mistaken; that's allowed
return y+17
self.kidKeys = []
for item in sublist:
key = item.GetKey()
if self.children.has_key(key):
child = self.children[key]
else:
child = TreeNode(self.canvas, self, item, self.menuList)
self.children[key] = child
self.kidKeys.append(key)
# Remove unused children
for key in self.children.keys():
if key not in self.kidKeys:
                del self.children[key]
cx = x+20
cy = y+17
cylast = 0
for key in self.kidKeys:
child = self.children[key]
cylast = cy
self.canvas.create_line(x+9, cy+7, cx, cy+7, fill="gray50")
cy = child.draw(cx, cy)
if child.item.IsExpandable():
if child.state == 'expanded':
iconname = "minusnode"
callback = child.collapse
else:
iconname = "plusnode"
callback = child.expand
image = self.geticonimage(iconname)
id = self.canvas.create_image(x+9, cylast+7, image=image)
# XXX This leaks bindings until canvas is deleted:
self.canvas.tag_bind(id, "<1>", callback)
self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
id = self.canvas.create_line(x+9, y+10, x+9, cylast+7,
##stipple="gray50", # XXX Seems broken in Tk 8.0.x
fill="gray50")
self.canvas.tag_lower(id) # XXX .lower(id) before Python 1.5.2
return cy
def drawicon(self):
if self.selected:
imagename = (self.item.GetSelectedIconName() or
self.item.GetIconName() or
"openfolder")
else:
imagename = self.item.GetIconName() or "folder"
image = self.geticonimage(imagename)
id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
self.image_id = id
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
self.canvas.tag_bind(id, "<3>", self.popupMenu)
def drawtext(self, text=None):
textx = self.x+20-1
texty = self.y-1
labeltext = self.item.GetLabelText()
if labeltext:
id = self.canvas.create_text(textx, texty, anchor="nw",
text=labeltext)
self.canvas.tag_bind(id, "<1>", self.select)
self.canvas.tag_bind(id, "<Double-1>", self.flip)
x0, y0, x1, y1 = self.canvas.bbox(id)
textx = max(x1, 200) + 10
if text==None:
text = self.item.GetText() or "<no text>"
try:
self.entry
except AttributeError:
pass
else:
self.edit_finish()
try:
label = self.label
except AttributeError:
# padding carefully selected (on Windows) to match Entry widget:
self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
if self.selected:
self.label.configure(fg="white", bg="darkblue")
else:
fg = self.item.GetTextFg()
self.label.configure(fg=fg, bg="white")
id = self.canvas.create_window(textx, texty,
anchor="nw", window=self.label)
self.label.bind("<1>", self.select_or_edit)
self.label.bind("<Double-1>", self.flip)
self.label.bind("<3>", self.popupMenu)
# Update text if necessary
if text != self.label['text']:
self.label['text'] = text
self.text_id = id
def select_or_edit(self, event=None):
if self.selected and self.item.IsEditable():
text = self.item.GetTextForEdit()
self.label['text'] = text
self.drawtext(text)
self.edit(event)
else:
self.select(event)
def edit(self, event=None):
self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
self.entry.insert(0, self.label['text'])
self.entry.selection_range(0, Tkinter.END)
self.entry.pack(ipadx=5)
self.entry.focus_set()
self.entry.bind("<Return>", self.edit_finish)
self.entry.bind("<Escape>", self.edit_cancel)
def edit_finish(self, event=None):
try:
entry = self.entry
del self.entry
except AttributeError:
return
text = entry.get()
entry.destroy()
if text and text != self.item.GetText():
self.item.SetText(text)
text = self.item.GetText()
self.label['text'] = text
self.drawtext()
self.canvas.focus_set()
def edit_cancel(self, event=None):
self.drawtext()
self.canvas.focus_set()
def find(self, searchKey):
        # Search for a node whose key matches the given key.
        # Is it this node?
if searchKey == self.item.GetKey():
return self
# Nope, check the children
sublist = self.item._GetSubList()
for item in sublist:
key = item.GetKey()
# Use existing child or create new TreeNode if none exists
if self.children.has_key(key):
child = self.children[key]
else:
child = TreeNode(self.canvas, self, item, self.menuList)
# Update local list of children and keys
self.children[key] = child
self.kidKeys.append(key)
# See if node is child (or one of child's descendants)
retVal = child.find(searchKey)
if retVal:
return retVal
# Not here
return None
class TreeItem:
"""Abstract class representing tree items.
Methods should typically be overridden, otherwise a default action
is used.
"""
def __init__(self):
"""Constructor. Do whatever you need to do."""
def GetText(self):
"""Return text string to display."""
def GetTextFg(self):
return "black"
def GetLabelText(self):
"""Return label text string to display in front of text (if any)."""
def IsExpandable(self):
"""Return whether there are subitems."""
return 1
def _GetSubList(self):
"""Do not override! Called by TreeNode."""
if not self.IsExpandable():
return []
sublist = self.GetSubList()
return sublist
def IsEditable(self):
"""Return whether the item's text may be edited."""
def SetText(self, text):
"""Change the item's text (if it is editable)."""
def GetIconName(self):
"""Return name of icon to be displayed normally."""
def GetSelectedIconName(self):
"""Return name of icon to be displayed when selected."""
def GetSubList(self):
"""Return list of items forming sublist."""
def OnDoubleClick(self):
"""Called on a double-click on the item."""
    def OnSelect(self, event):
        """Called when the item is selected."""
def GetTextForEdit(self):
"""Called before editting the item."""
| |
#!/usr/bin/env python
"""Alerta WatchDog script to monitor Alerta service"""
__author__ = "Mark Bradley (mbrad@github)"
__version__ = "0.8.1"
import urllib, urllib2, sys, os, smtplib, time, socket
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Monitor options
alertaURL = 'http://alerta.example.com:8080/alerta/management/healthcheck'
httptimeout = 5
tko = 3
retrytime = 10
checkperiod = 600
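# `tko` is the number of consecutive failed probes before a hard event is
# raised and notifications go out; failed probes are retried every `retrytime`
# seconds, healthy ones every `checkperiod` seconds, and each HTTP probe
# times out after `httptimeout` seconds.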
# Email Options
fromaddr = 'Alerta Watchdog <alerta@example.com>'
toaddr = ['admins@example.com']
smtphost = 'localhost'
# PushOver.net Options
pushenable = False
pushurl = 'https://api.pushover.net/1/messages.json'
pushtoken = 'obtain-app-token-from-pushover'
pushuser = 'this-is-your-pushover-token'
class Event:
"Event class to construct an event object"
def __init__(self):
self.epoch = int(time.time())
tt = time.gmtime(self.epoch)
self.human = '%02d/%02d/%02d %02d:%02d:%02d' % \
(tt[2], tt[1], tt[0], tt[3], tt[4], tt[5])
try:
request = urllib2.Request(alertaURL, None, { 'User-Agent' : 'python' })
socket.setdefaulttimeout(httptimeout)
httpresponse = urllib2.urlopen(request, None, timeout = httptimeout)
self.code = httpresponse.getcode()
self.body = httpresponse.read()
httpresponse.close()
except urllib2.HTTPError, e:
self.code = e.code
self.body = e.read()
except urllib2.URLError, e:
self.code = 0
self.body = str(e.reason)
        except socket.timeout, e:
            self.code = 0
            self.body = str(e)
def _argvchk():
if len(sys.argv) == 1:
return True
    elif sys.argv[1] in ('-h', '--help'):
print "\nThis daemon polls Alerta's Management URI remotely and determines if there is a problem.\n\t\
Usage: %s (-f = foreground)\n" % ((sys.argv[0]).split('/')[-1])
sys.exit(0)
elif '-f' in sys.argv[1]:
print "Debug Mode: Staying in foreground...\n"
return False
else:
print >>sys.stderr, "Error: Unknown Option"
sys.exit(1)
def dprint(msg):
    if not isdaemon: print msg
def mailalert(last, cache, typestr):
if pushcount > 3:
dprint('Info: Not sending Email notification - too many messages for event')
return
elif pushcount == 3:
suppnote = 'Further notifications will be suppressed.'
else:
suppnote = ' '
msg = MIMEMultipart('alternative')
msg['Subject'] = "Alerta Service Notification"
msg['From'] = fromaddr
msg['To'] = ', '.join(toaddr)
if typestr == 'WARNING':
colour = 'ff8c00'
elif typestr == 'CRITICAL':
colour = 'ff0000'
elif typestr == 'OK':
colour = '00ff00'
text = """\
Alerta state change: %s - Response: %s (Code: %d)
TKO: %d
Soft Event logged at %s
Hard Event logged at %s
Service Probed: %s
%s
Auto-generated by Alerta-WatchDog
--
""" % (typestr, last.body, last.code, tko, cache.human, \
last.human, alertaURL, suppnote)
html = """\
<html>
<head></head>
<body>
<p>
Alerta state change: <span style="background-color:#%s;">%s</span> - Response: <b>%s</b> (Code: %d)<br />
<br />
<I>TKO</I>: %d<br />
<I>Soft Event triggered at</I> %s<br />
<I>Hard Event triggered at</I> %s<br />
<br />
<I>Service Probed:</I> %s</p>
    <p><B><I>%s</I></B></p>
    <p><I>Auto-generated by Alerta-WatchDog</I></p>
--
</body>
</html>
""" % (colour, typestr, last.body, last.code, tko, cache.human, \
last.human, alertaURL, suppnote)
textpart = MIMEText(text, 'plain')
htmlpart = MIMEText(html, 'html')
msg.attach(textpart)
msg.attach(htmlpart)
s=smtplib.SMTP(smtphost)
s.sendmail(fromaddr, toaddr, msg.as_string())
dprint('\033[92mMAIL SENT\033[0m\n' + text)
def pushover(last, typestr):
if pushcount > 3:
dprint('Info: Not sending PushOver notification - too many messages for event')
return
elif pushenable == False:
dprint('Info: PushOver notifications disabled.')
return
pushmessage = 'State: %s Response: %s (%s)' % (typestr, last.body, last.code)
pushdata = urllib.urlencode({"token": pushtoken, "user": pushuser, "message": pushmessage})
try:
pushrequest = urllib2.Request(pushurl, pushdata, { "Content-type": "application/x-www-form-urlencoded" })
socket.setdefaulttimeout(httptimeout)
pushresponse = urllib2.urlopen(pushrequest, timeout = httptimeout)
    except Exception:
dprint('Error: Could not send data to Pushover')
def main():
"""Looping main function"""
global pushcount
pushcount = 1
count = 1
state = 'OK'
sleeptime = checkperiod
while 1:
dprint('Probing Alerta URI: %s' % (alertaURL))
response = Event()
dprint('Probe Response: [ Code: %s | Body: %s | Epoch: %d ]' % \
(response.code, response.body, response.epoch))
dprint('\nCOUNT: %d' % (count))
if response.code != 200:
dprint('Event Triggered')
if count == 1:
cache = response
dprint('Caching first event: [ Code: %d | Body: %s | Epoch: %d ]' % \
(cache.code, cache.body, cache.epoch))
if count == tko:
                # Warnings
                if response.body == 'HEARTBEAT_STALE':
state = 'WARNING'
dprint('State: \033[93mWARNING\033[0m')
mailalert(response, cache, state)
pushover(response, state)
pushcount += 1
count = 0 # Reset for tko more tries.
sleeptime = checkperiod
dprint('\nRe-Probing in %d seconds...' % (sleeptime))
time.sleep(sleeptime)
continue
else:
                # Criticals
state = 'CRITICAL'
dprint('State: \033[91mCRITICAL\033[0m')
mailalert(response, cache, state)
pushover(response, state)
pushcount += 1
count = 0 # Reset for tko more tries.
sleeptime = checkperiod
dprint('\nRe-Probing in %d seconds...' % (sleeptime))
time.sleep(sleeptime)
continue
count += 1
sleeptime = retrytime
elif response.code == 200:
dprint('State: \033[92mOK\033[0m')
if state != 'OK':
state = 'OK'
pushcount = 0
mailalert(response, cache, state)
pushover(response, state)
sleeptime = checkperiod
count = 1
dprint('\nRe-Probing in %d seconds...' % (sleeptime))
time.sleep(sleeptime)
if __name__ == '__main__':
isdaemon = _argvchk()
    if isdaemon:
pid = os.fork()
if pid:
sys.exit(0)
else:
print 'Alerta WatchDog process started... (PID: %d)' % (os.getpid())
os.close(2)
os.close(0)
os.close(1)
main()
sys.exit(0)
else:
print 'PID: %d' % (os.getpid())
main()
# vim: set ts=4 sw=4 et :
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
t = orm.StateType.objects.get_or_create(slug="draft-iana-action", label="IANA Action state")[0]
orm.State.objects.get_or_create(type=t, slug='newdoc', name='New Document', desc="A new document has been received by IANA, but no actions have been taken", order=1)
orm.State.objects.get_or_create(type=t, slug='inprog', name='In Progress', desc="IANA is currently processing the actions for this document", order=2)
orm.State.objects.get_or_create(type=t, slug='waitauth', name='Waiting on Authors', desc="IANA is waiting on the document's authors to respond", order=3)
orm.State.objects.get_or_create(type=t, slug='waitad', name='Waiting on ADs', desc="IANA is waiting on the IETF Area Directors to respond", order=4)
orm.State.objects.get_or_create(type=t, slug='waitwgc', name='Waiting on WGC', desc="IANA is waiting on the IETF Working Group Chairs to respond", order=5)
orm.State.objects.get_or_create(type=t, slug='waitrfc', name='Waiting on RFC Editor', desc="IANA has notified the RFC Editor that the actions have been completed", order=6)
orm.State.objects.get_or_create(type=t, slug='rfcedack', name='RFC-Ed-Ack', desc="Request completed. The RFC Editor has acknowledged receipt of IANA's message that the actions have been completed", order=7)
orm.State.objects.get_or_create(type=t, slug='onhold', name='On Hold', desc="IANA has suspended work on the document", order=8)
orm.State.objects.get_or_create(type=t, slug='noic', name='No IC', desc="Request completed. There were no IANA actions for this document", order=9)
t = orm.StateType.objects.get_or_create(slug="draft-iana-review", label="IANA Review state")[0]
orm.State.objects.get_or_create(type=t, slug="need-rev", name='IANA Review Needed', desc="Document has not yet been reviewed by IANA.", order=1)
orm.State.objects.get_or_create(type=t, slug="ok-act", name='IANA OK - Actions Needed', desc="Document requires IANA actions, and the IANA Considerations section indicates the details of the actions correctly.", order=2)
orm.State.objects.get_or_create(type=t, slug="ok-noact", name='IANA OK - No Actions Needed', desc="Document requires no IANA action, and the IANA Considerations section indicates this correctly.", order=3)
orm.State.objects.get_or_create(type=t, slug="not-ok", name='IANA Not OK', desc="IANA has issues with the text of the IANA Considerations section of the document.", order=4)
orm.State.objects.get_or_create(type=t, slug="changed", name='Version Changed - Review Needed', desc="Document revision has changed after review by IANA.", order=5)
# fixup RFC Editor states/tags
orm.State.objects.filter(type="draft-rfceditor", slug="edit").update(desc="Awaiting editing or being edited")
orm.State.objects.filter(type="draft-rfceditor", slug="iesg").update(desc="Awaiting IESG action")
orm.State.objects.filter(type="draft-rfceditor", slug="isr-auth").update(desc="Independent submission awaiting author action, or in discussion between author and ISE")
orm.State.objects.filter(type="draft-rfceditor", slug="iana-crd").update(slug="iana", desc="Document has been edited, but is holding for completion of IANA actions")
orm.State.objects.get_or_create(type_id="draft-rfceditor", slug="auth48-done", defaults=dict(name="AUTH48-DONE", desc="Final approvals are complete"))
orm["name.DocTagName"].objects.get_or_create(slug="iana", name="IANA", desc="The document has IANA actions that are not yet completed.")
for d in orm.Document.objects.filter(type="draft", tags="iana-crd"):
d.tags.remove("iana-crd")
d.tags.add("iana")
orm["name.DocTagName"].objects.filter(slug="iana-crd").delete()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'doc.ballotdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'BallotDocEvent', '_ormbases': ['doc.DocEvent']},
'ballot_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.BallotType']"}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'})
},
'doc.ballotpositiondocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'BallotPositionDocEvent', '_ormbases': ['doc.DocEvent']},
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['doc.BallotDocEvent']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'comment_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'discuss': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'discuss_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'pos': ('django.db.models.fields.related.ForeignKey', [], {'default': "'norecord'", 'to': "orm['name.BallotPositionName']"})
},
'doc.ballottype': {
'Meta': {'ordering': "['order']", 'object_name': 'BallotType'},
'doc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'positions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.BallotPositionName']", 'symmetrical': 'False', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.consensusdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'ConsensusDocEvent', '_ormbases': ['doc.DocEvent']},
'consensus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'})
},
'doc.docalias': {
'Meta': {'object_name': 'DocAlias'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'doc.docevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'DocEvent'},
'by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'desc': ('django.db.models.fields.TextField', [], {}),
'doc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'doc.dochistory': {
'Meta': {'object_name': 'DocHistory'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_dochistory_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocHistoryAuthor']", 'blank': 'True'}),
'doc': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history_set'", 'to': "orm['doc.Document']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.DocAlias']", 'symmetrical': 'False', 'through': "orm['doc.RelatedDocHistory']", 'blank': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_dochistory_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.dochistoryauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocHistoryAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocHistory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {})
},
'doc.docreminder': {
'Meta': {'object_name': 'DocReminder'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'due': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocReminderTypeName']"})
},
'doc.document': {
'Meta': {'object_name': 'Document'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocumentAuthor']", 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'reversely_related_document_set'", 'blank': 'True', 'through': "orm['doc.RelatedDocument']", 'to': "orm['doc.DocAlias']"}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.documentauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocumentAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'doc.initialreviewdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'InitialReviewDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'doc.lastcalldocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'LastCallDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'doc.newrevisiondocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'NewRevisionDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'doc.relateddochistory': {
'Meta': {'object_name': 'RelatedDocHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocHistory']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reversely_related_document_history_set'", 'to': "orm['doc.DocAlias']"})
},
'doc.relateddocument': {
'Meta': {'object_name': 'RelatedDocument'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocAlias']"})
},
'doc.state': {
'Meta': {'ordering': "['type', 'order']", 'object_name': 'State'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'next_states': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'previous_states'", 'symmetrical': 'False', 'to': "orm['doc.State']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.statedocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'StateDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.State']", 'null': 'True', 'blank': 'True'}),
'state_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"})
},
'doc.statetype': {
'Meta': {'object_name': 'StateType'},
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'})
},
'doc.telechatdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'TelechatDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'returning_item': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'telechat_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'doc.writeupdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'WriteupDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'group.group': {
'Meta': {'object_name': 'Group'},
'acronym': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'charter': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'chartered_group'", 'unique': 'True', 'null': 'True', 'to': "orm['doc.Document']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'}),
'unused_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'unused_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocTagName']", 'symmetrical': 'False', 'blank': 'True'})
},
'name.ballotpositionname': {
'Meta': {'ordering': "['order']", 'object_name': 'BallotPositionName'},
'blocking': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docrelationshipname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocRelationshipName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docremindertypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocReminderTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctagname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTagName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.groupstatename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupStateName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.grouptypename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.intendedstdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'IntendedStdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.stdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'StdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.streamname': {
'Meta': {'ordering': "['order']", 'object_name': 'StreamName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'person.email': {
'Meta': {'object_name': 'Email'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ascii': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ascii_short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['doc']
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Regression model tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import LinearRegression
from tensorflow_probability.python.sts import SparseLinearRegression
from tensorflow_probability.python.sts import Sum
tfl = tf.linalg
@test_util.test_all_tf_execution_regimes
class _LinearRegressionTest(test_util.TestCase):
def test_basic_statistics(self):
# Verify that this model constructs a distribution with mean
# `matmul(design_matrix, weights)` and stddev 0.
batch_shape = [4, 3]
num_timesteps = 10
num_features = 2
design_matrix = self._build_placeholder(
np.random.randn(*(batch_shape + [num_timesteps, num_features])))
linear_regression = LinearRegression(design_matrix=design_matrix)
true_weights = self._build_placeholder(
np.random.randn(*(batch_shape + [num_features])))
predicted_time_series = tf.linalg.matmul(
design_matrix, true_weights[..., tf.newaxis])
ssm = linear_regression.make_state_space_model(
num_timesteps=num_timesteps,
param_vals={"weights": true_weights})
self.assertAllEqual(self.evaluate(ssm.mean()), predicted_time_series)
self.assertAllEqual(*self.evaluate((ssm.stddev(),
tf.zeros_like(predicted_time_series))))
@test_util.disable_test_for_backend(
disable_numpy=True, disable_jax=True,
reason="Uses TF stateful optimizers")
def test_simple_regression_correctness(self):
# Verify that optimizing a simple linear regression by gradient descent
# recovers the known-correct weights.
batch_shape = [4, 3]
num_timesteps = 10
num_features = 2
design_matrix = self._build_placeholder(
np.random.randn(*(batch_shape + [num_timesteps, num_features])))
true_weights = self._build_placeholder([4., -3.])
predicted_time_series = tf.linalg.matmul(
design_matrix, true_weights[..., tf.newaxis])
linear_regression = LinearRegression(
design_matrix=design_matrix,
weights_prior=tfd.Independent(
tfd.Cauchy(loc=self._build_placeholder(np.zeros([num_features])),
scale=self._build_placeholder(np.ones([num_features]))),
reinterpreted_batch_ndims=1))
observation_noise_scale_prior = tfd.LogNormal(
loc=self._build_placeholder(-2), scale=self._build_placeholder(0.1))
model = Sum(components=[linear_regression],
observation_noise_scale_prior=observation_noise_scale_prior)
learnable_weights = tf.Variable(
tf.zeros([num_features], dtype=true_weights.dtype))
def build_loss():
learnable_ssm = model.make_state_space_model(
num_timesteps=num_timesteps,
param_vals={
"LinearRegression/_weights": learnable_weights,
"observation_noise_scale": observation_noise_scale_prior.mode()})
return -learnable_ssm.log_prob(predicted_time_series)
# We provide graph- and eager-mode optimization for TF 2.0 compatibility.
num_train_steps = 80
optimizer = tf1.train.AdamOptimizer(learning_rate=0.1)
if tf.executing_eagerly():
for _ in range(num_train_steps):
optimizer.minimize(build_loss)
else:
train_op = optimizer.minimize(build_loss())
self.evaluate(tf1.global_variables_initializer())
for _ in range(num_train_steps):
_ = self.evaluate(train_op)
self.assertAllClose(*self.evaluate((true_weights, learnable_weights)),
atol=0.2)
def test_custom_weights_prior(self):
batch_shape = [4, 3]
num_timesteps = 10
num_features = 2
design_matrix = self._build_placeholder(
np.random.randn(*(batch_shape + [num_timesteps, num_features])))
# Build a model with scalar Exponential(1.) prior.
linear_regression = LinearRegression(
design_matrix=design_matrix,
weights_prior=tfd.Exponential(
rate=self._build_placeholder(np.ones(batch_shape))))
# Check that the prior is broadcast to match the shape of the weights.
weights = linear_regression.parameters[0]
self.assertAllEqual([num_features],
self.evaluate(weights.prior.event_shape_tensor()))
self.assertAllEqual(batch_shape,
self.evaluate(weights.prior.batch_shape_tensor()))
seed = test_util.test_seed(sampler_type="stateless")
weights_seed, ssm_seed, shape_seed, sample_seed = tfp.random.split_seed(
seed, n=4)
prior_sampled_weights = weights.prior.sample(seed=weights_seed)
ssm = linear_regression.make_state_space_model(
num_timesteps=num_timesteps,
param_vals={"weights": prior_sampled_weights})
lp = ssm.log_prob(ssm.sample(seed=ssm_seed))
self.assertAllEqual(batch_shape,
self.evaluate(lp).shape)
# Verify that the bijector enforces the prior constraint that
# weights must be nonnegative.
self.assertAllFinite(
self.evaluate(
weights.prior.log_prob(
weights.bijector(
tf.random.stateless_normal(
tf.shape(weights.prior.sample(64, seed=shape_seed)),
seed=sample_seed,
dtype=self.dtype)))))
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
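# Illustrative note on the helper above (only shape visibility changes):
# assuming dtype np.float32,
#
#   tf1.placeholder_with_default(np.zeros([2, 3], np.float32), shape=[2, 3])
#     # -> static shape [2, 3], visible at graph-construction time
#   tf1.placeholder_with_default(np.zeros([2, 3], np.float32), shape=None)
#     # -> static shape unknown; read it at runtime with tf.shape(...)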
@test_util.test_all_tf_execution_regimes
class _SparseLinearRegressionTest(test_util.TestCase):
def test_builds_without_errors(self):
batch_shape = [4, 3]
num_timesteps = 10
num_features = 2
design_matrix = self._build_placeholder(
np.random.randn(*(batch_shape + [num_timesteps, num_features])))
seed = test_util.test_seed(sampler_type="stateless")
prior_seed, ssm_seed = tfp.random.split_seed(seed, n=2)
weights_batch_shape = []
if not self.use_static_shape:
weights_batch_shape = tf1.placeholder_with_default(
np.array(weights_batch_shape, dtype=np.int32), shape=None)
sparse_regression = SparseLinearRegression(
design_matrix=design_matrix,
weights_batch_shape=weights_batch_shape)
prior_params = [param.prior.sample(seed=prior_seed)
for param in sparse_regression.parameters]
ssm = sparse_regression.make_state_space_model(
num_timesteps=num_timesteps,
param_vals=prior_params)
if self.use_static_shape:
output_shape = tensorshape_util.as_list(ssm.sample(seed=ssm_seed).shape)
else:
output_shape = self.evaluate(tf.shape(ssm.sample(seed=ssm_seed)))
self.assertAllEqual(output_shape, batch_shape + [num_timesteps, 1])
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
class LinearRegressionTestStaticShape64(_LinearRegressionTest):
dtype = np.float64
use_static_shape = True
class LinearRegressionTestDynamicShape32(_LinearRegressionTest):
dtype = np.float32
use_static_shape = False
class SparseLinearRegressionTestStaticShape64(_SparseLinearRegressionTest):
dtype = np.float64
use_static_shape = True
class SparseLinearRegressionTestDynamicShape32(_SparseLinearRegressionTest):
dtype = np.float32
use_static_shape = False
del _LinearRegressionTest # Don't try to run base class tests.
del _SparseLinearRegressionTest # Don't try to run base class tests.
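# Minimal usage sketch (illustrative; not exercised by the test classes above,
# and the weights and seed values are arbitrary assumptions): build a
# LinearRegression component, fix its weights, and sample from the induced
# state space model.
def _linear_regression_demo():
  design_matrix = np.random.randn(10, 2).astype(np.float32)  # [timesteps, features]
  component = LinearRegression(design_matrix=design_matrix)
  ssm = component.make_state_space_model(
      num_timesteps=10,
      param_vals={"weights": np.array([4., -3.], dtype=np.float32)})
  # With no observation noise component, the sample is deterministic and
  # equals matmul(design_matrix, weights), with shape [10, 1].
  return ssm.sample(seed=42)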
if __name__ == "__main__":
test_util.main()
# Authors: Lars Buitinck
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from array import array
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import _Mapping as Mapping
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
However, note that this transformer will only do a binary one-hot encoding
when feature values are of type string. If categorical features are
represented as numeric values such as int, the DictVectorizer can be
followed by :class:`sklearn.preprocessing.OneHotEncoder` to complete
binary one-hot encoding.
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort : boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[2., 0., 1.],
[0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OrdinalEncoder : handles nominal/categorical
features encoded as columns of arbitrary data types.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = np.frombuffer(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
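    # Worked example (illustrative): for X = [{'f': 1, 'g': 2}, {'f': 3}] with
    # fitting=True, the loop above produces
    #     values  = [1.0, 2.0, 3.0]
    #     indices = [0, 1, 0]     # column index of each value
    #     indptr  = [0, 2, 3]     # row i occupies values[indptr[i]:indptr[i+1]]
    # and csr_matrix assembles these into the 2x2 matrix [[1., 2.], [3., 0.]].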
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
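# Round-trip sketch (illustrative; mirrors the doctests above):
#
#   v = DictVectorizer(sparse=False)
#   X = v.fit_transform([{'f': 'ham', 'n': 1}, {'f': 'spam'}])
#   # feature_names_: ['f=ham', 'f=spam', 'n']; X == [[1., 0., 1.], [0., 1., 0.]]
#   v.inverse_transform(X)      # [{'f=ham': 1.0, 'n': 1.0}, {'f=spam': 1.0}]
#   v.transform({'f': 'eggs'})  # unseen one-hot value is ignored: [[0., 0., 0.]]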
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import os.path
import typing as T
from .. import coredata
from .. import mlog
from ..mesonlib import MesonException, MachineChoice, version_compare, OptionKey
from .compilers import (
gnu_winlibs,
msvc_winlibs,
Compiler,
CompileCheckMode,
)
from .c_function_attributes import CXX_FUNC_ATTRIBUTES, C_FUNC_ATTRIBUTES
from .mixins.clike import CLikeCompiler
from .mixins.ccrx import CcrxCompiler
from .mixins.c2000 import C2000Compiler
from .mixins.arm import ArmCompiler, ArmclangCompiler
from .mixins.visualstudio import MSVCCompiler, ClangClCompiler
from .mixins.gnu import GnuCompiler
from .mixins.intel import IntelGnuLikeCompiler, IntelVisualStudioLikeCompiler
from .mixins.clang import ClangCompiler
from .mixins.elbrus import ElbrusCompiler
from .mixins.pgi import PGICompiler
from .mixins.emscripten import EmscriptenMixin
if T.TYPE_CHECKING:
from ..coredata import KeyedOptionDictType
from ..dependencies import Dependency
from ..envconfig import MachineInfo
from ..environment import Environment
from ..linkers import DynamicLinker
from ..programs import ExternalProgram
from .mixins.clike import CLikeCompiler as CompilerMixinBase
else:
CompilerMixinBase = object
def non_msvc_eh_options(eh: str, args: T.List[str]) -> None:
if eh == 'none':
args.append('-fno-exceptions')
elif eh == 's' or eh == 'c':
        mlog.warning('non-MSVC compilers do not support ' + eh + ' exception handling. '
                     'You may want to set eh to \'default\'.')
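# Illustrative behaviour of the helper above:
#
#   args = ['-O2']
#   non_msvc_eh_options('none', args)     # args == ['-O2', '-fno-exceptions']
#   non_msvc_eh_options('default', args)  # args unchanged
#   non_msvc_eh_options('s', args)        # warns; args unchanged
#
# 'a' and 'sc' likewise fall through with args unchanged.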
class CPPCompiler(CLikeCompiler, Compiler):
@classmethod
def attribute_check_func(cls, name: str) -> str:
try:
return CXX_FUNC_ATTRIBUTES.get(name, C_FUNC_ATTRIBUTES[name])
except KeyError:
raise MesonException(f'Unknown function attribute "{name}"')
language = 'cpp'
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
# If a child ObjCPP class has already set it, don't set it ourselves
Compiler.__init__(self, exelist, version, for_machine, info,
is_cross=is_cross, linker=linker,
full_version=full_version)
CLikeCompiler.__init__(self, exe_wrapper)
@staticmethod
def get_display_language() -> str:
return 'C++'
def get_no_stdinc_args(self) -> T.List[str]:
return ['-nostdinc++']
def sanity_check(self, work_dir: str, environment: 'Environment') -> None:
code = 'class breakCCompiler;int main(void) { return 0; }\n'
return self._sanity_check_impl(work_dir, environment, 'sanitycheckcpp.cc', code)
def get_compiler_check_args(self, mode: CompileCheckMode) -> T.List[str]:
# -fpermissive allows non-conforming code to compile which is necessary
# for many C++ checks. Particularly, the has_header_symbol check is
# too strict without this and always fails.
return super().get_compiler_check_args(mode) + ['-fpermissive']
def has_header_symbol(self, hname: str, symbol: str, prefix: str,
env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
# Check if it's a C-like symbol
found, cached = super().has_header_symbol(hname, symbol, prefix, env,
extra_args=extra_args,
dependencies=dependencies)
if found:
return True, cached
# Check if it's a class or a template
if extra_args is None:
extra_args = []
t = f'''{prefix}
#include <{hname}>
using {symbol};
int main(void) {{ return 0; }}'''
return self.compiles(t, env, extra_args=extra_args,
dependencies=dependencies)
def _test_cpp_std_arg(self, cpp_std_value: str) -> bool:
# Test whether the compiler understands a -std=XY argument
        assert cpp_std_value.startswith('-std=')
# This test does not use has_multi_arguments() for two reasons:
# 1. has_multi_arguments() requires an env argument, which the compiler
# object does not have at this point.
# 2. even if it did have an env object, that might contain another more
# recent -std= argument, which might lead to a cascaded failure.
CPP_TEST = 'int i = static_cast<int>(0);'
with self.compile(CPP_TEST, extra_args=[cpp_std_value], mode='compile') as p:
if p.returncode == 0:
mlog.debug(f'Compiler accepts {cpp_std_value}:', 'YES')
return True
else:
mlog.debug(f'Compiler accepts {cpp_std_value}:', 'NO')
return False
@functools.lru_cache()
def _find_best_cpp_std(self, cpp_std: str) -> str:
# The initial version mapping approach to make falling back
# from '-std=c++14' to '-std=c++1y' was too brittle. For instance,
# Apple's Clang uses a different versioning scheme to upstream LLVM,
# making the whole detection logic awfully brittle. Instead, let's
# just see if feeding GCC or Clang our '-std=' setting works, and
# if not, try the fallback argument.
CPP_FALLBACKS = {
'c++11': 'c++0x',
'gnu++11': 'gnu++0x',
'c++14': 'c++1y',
'gnu++14': 'gnu++1y',
'c++17': 'c++1z',
'gnu++17': 'gnu++1z',
'c++20': 'c++2a',
'gnu++20': 'gnu++2a',
}
# Currently, remapping is only supported for Clang, Elbrus and GCC
        assert self.id in frozenset(['clang', 'lcc', 'gcc', 'emscripten'])
if cpp_std not in CPP_FALLBACKS:
# 'c++03' and 'c++98' don't have fallback types
return '-std=' + cpp_std
for i in (cpp_std, CPP_FALLBACKS[cpp_std]):
cpp_std_value = '-std=' + i
if self._test_cpp_std_arg(cpp_std_value):
return cpp_std_value
raise MesonException(f'C++ Compiler does not support -std={cpp_std}')
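    # Illustrative: on a GCC too old for -std=c++14, _find_best_cpp_std('c++14')
    # probes '-std=c++14' first, then falls back to '-std=c++1y' via
    # CPP_FALLBACKS, and raises MesonException only if both probes fail.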
def get_options(self) -> 'KeyedOptionDictType':
opts = super().get_options()
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts.update({
key: coredata.UserComboOption(
'C++ language standard to use',
['none'],
'none',
),
})
return opts
class ClangCPPCompiler(ClangCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
ClangCompiler.__init__(self, defines)
default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self) -> 'KeyedOptionDictType':
opts = CPPCompiler.get_options(self)
key = OptionKey('key', machine=self.for_machine, lang=self.language)
opts.update({
key.evolve('eh'): coredata.UserComboOption(
'C++ exception handling type.',
['none', 'default', 'a', 's', 'sc'],
'default',
),
key.evolve('rtti'): coredata.UserBooleanOption('Enable RTTI', True),
})
opts[key.evolve('std')].choices = [
'none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z',
'c++2a', 'c++20', 'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z',
'gnu++2a', 'gnu++20',
]
if self.info.is_windows() or self.info.is_cygwin():
opts.update({
key.evolve('winlibs'): coredata.UserArrayOption(
'Standard Win libraries to link against',
gnu_winlibs,
),
})
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
args.append(self._find_best_cpp_std(std.value))
non_msvc_eh_options(options[key.evolve('eh')].value, args)
if not options[key.evolve('rtti')].value:
args.append('-fno-rtti')
return args
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
if self.info.is_windows() or self.info.is_cygwin():
# without a typedict mypy can't understand this.
key = OptionKey('winlibs', machine=self.for_machine, lang=self.language)
libs = options[key].value.copy()
assert isinstance(libs, list)
for l in libs:
assert isinstance(l, str)
return libs
return []
def language_stdlib_only_link_flags(self) -> T.List[str]:
return ['-lstdc++']
class AppleClangCPPCompiler(ClangCPPCompiler):
def language_stdlib_only_link_flags(self) -> T.List[str]:
return ['-lc++']
class EmscriptenCPPCompiler(EmscriptenMixin, ClangCPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
full_version: T.Optional[str] = None):
if not is_cross:
raise MesonException('Emscripten compiler can only be used for cross compilation.')
ClangCPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper=exe_wrapper, linker=linker,
defines=defines, full_version=full_version)
self.id = 'emscripten'
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
args.append(self._find_best_cpp_std(std.value))
return args
class ArmclangCPPCompiler(ArmclangCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
ArmclangCompiler.__init__(self)
default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self) -> 'KeyedOptionDictType':
opts = CPPCompiler.get_options(self)
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts.update({
key.evolve('eh'): coredata.UserComboOption(
'C++ exception handling type.',
['none', 'default', 'a', 's', 'sc'],
'default',
),
})
opts[key].choices = [
'none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'gnu++98',
'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17',
]
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
args.append('-std=' + std.value)
non_msvc_eh_options(options[key.evolve('eh')].value, args)
return args
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
class GnuCPPCompiler(GnuCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
GnuCompiler.__init__(self, defines)
default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self) -> 'KeyedOptionDictType':
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts = CPPCompiler.get_options(self)
opts.update({
key.evolve('eh'): coredata.UserComboOption(
'C++ exception handling type.',
['none', 'default', 'a', 's', 'sc'],
'default',
),
key.evolve('rtti'): coredata.UserBooleanOption('Enable RTTI', True),
key.evolve('debugstl'): coredata.UserBooleanOption(
'STL debug mode',
False,
)
})
opts[key].choices = [
'none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z',
'c++2a', 'c++20', 'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17',
'gnu++1z', 'gnu++2a', 'gnu++20',
]
if self.info.is_windows() or self.info.is_cygwin():
opts.update({
key.evolve('winlibs'): coredata.UserArrayOption(
'Standard Win libraries to link against',
gnu_winlibs,
),
})
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
args.append(self._find_best_cpp_std(std.value))
non_msvc_eh_options(options[key.evolve('eh')].value, args)
if not options[key.evolve('rtti')].value:
args.append('-fno-rtti')
if options[key.evolve('debugstl')].value:
args.append('-D_GLIBCXX_DEBUG=1')
return args
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
if self.info.is_windows() or self.info.is_cygwin():
# without a typedict mypy can't understand this.
key = OptionKey('winlibs', machine=self.for_machine, lang=self.language)
libs = options[key].value.copy()
assert isinstance(libs, list)
for l in libs:
assert isinstance(l, str)
return libs
return []
def get_pch_use_args(self, pch_dir: str, header: str) -> T.List[str]:
return ['-fpch-preprocess', '-include', os.path.basename(header)]
def language_stdlib_only_link_flags(self) -> T.List[str]:
return ['-lstdc++']
class PGICPPCompiler(PGICompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
PGICompiler.__init__(self)
class NvidiaHPC_CPPCompiler(PGICompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
PGICompiler.__init__(self)
self.id = 'nvidia_hpc'
class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
defines: T.Optional[T.Dict[str, str]] = None,
full_version: T.Optional[str] = None):
GnuCPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker,
full_version=full_version, defines=defines)
ElbrusCompiler.__init__(self)
def get_options(self) -> 'KeyedOptionDictType':
opts = CPPCompiler.get_options(self)
cpp_stds = [
'none', 'c++98', 'c++03', 'c++0x', 'c++11', 'c++14', 'c++1y',
'gnu++98', 'gnu++03', 'gnu++0x', 'gnu++11', 'gnu++14', 'gnu++1y',
]
if version_compare(self.version, '>=1.24.00'):
cpp_stds += [ 'c++1z', 'c++17', 'gnu++1z', 'gnu++17' ]
if version_compare(self.version, '>=1.25.00'):
cpp_stds += [ 'c++2a', 'gnu++2a' ]
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts.update({
key.evolve('eh'): coredata.UserComboOption(
'C++ exception handling type.',
['none', 'default', 'a', 's', 'sc'],
'default',
),
key.evolve('debugstl'): coredata.UserBooleanOption(
'STL debug mode',
False,
),
})
opts[key].choices = cpp_stds
return opts
    # The Elbrus C++ compiler does not have lchmod; its absence surfaces only as
    # a linker warning rather than a compiler error, so we explicitly fail in
    # this case.
def has_function(self, funcname: str, prefix: str, env: 'Environment', *,
extra_args: T.Optional[T.List[str]] = None,
dependencies: T.Optional[T.List['Dependency']] = None) -> T.Tuple[bool, bool]:
if funcname == 'lchmod':
return False, False
else:
return super().has_function(funcname, prefix, env,
extra_args=extra_args,
dependencies=dependencies)
# Elbrus C++ compiler does not support RTTI, so don't check for it.
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
args.append(self._find_best_cpp_std(std.value))
non_msvc_eh_options(options[key.evolve('eh')].value, args)
if options[key.evolve('debugstl')].value:
args.append('-D_GLIBCXX_DEBUG=1')
return args
class IntelCPPCompiler(IntelGnuLikeCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
IntelGnuLikeCompiler.__init__(self)
self.lang_header = 'c++-header'
default_warn_args = ['-Wall', '-w3', '-diag-disable:remark',
'-Wpch-messages', '-Wnon-virtual-dtor']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra']}
def get_options(self) -> 'KeyedOptionDictType':
opts = CPPCompiler.get_options(self)
# Every Unix compiler under the sun seems to accept -std=c++03,
# with the exception of ICC. Instead of preventing the user from
# globally requesting C++03, we transparently remap it to C++98
c_stds = ['c++98', 'c++03']
g_stds = ['gnu++98', 'gnu++03']
if version_compare(self.version, '>=15.0.0'):
c_stds += ['c++11', 'c++14']
g_stds += ['gnu++11']
if version_compare(self.version, '>=16.0.0'):
c_stds += ['c++17']
if version_compare(self.version, '>=17.0.0'):
g_stds += ['gnu++14']
if version_compare(self.version, '>=19.1.0'):
c_stds += ['c++2a']
g_stds += ['gnu++2a']
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts.update({
key.evolve('eh'): coredata.UserComboOption(
'C++ exception handling type.',
['none', 'default', 'a', 's', 'sc'],
'default',
),
key.evolve('rtti'): coredata.UserBooleanOption('Enable RTTI', True),
key.evolve('debugstl'): coredata.UserBooleanOption('STL debug mode', False),
})
opts[key].choices = ['none'] + c_stds + g_stds
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value != 'none':
remap_cpp03 = {
'c++03': 'c++98',
'gnu++03': 'gnu++98'
}
args.append('-std=' + remap_cpp03.get(std.value, std.value))
if options[key.evolve('eh')].value == 'none':
args.append('-fno-exceptions')
if not options[key.evolve('rtti')].value:
args.append('-fno-rtti')
if options[key.evolve('debugstl')].value:
args.append('-D_GLIBCXX_DEBUG=1')
return args
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
class VisualStudioLikeCPPCompilerMixin(CompilerMixinBase):
"""Mixin for C++ specific method overrides in MSVC-like compilers."""
VC_VERSION_MAP = {
'none': (True, None),
'vc++11': (True, 11),
'vc++14': (True, 14),
'vc++17': (True, 17),
'vc++latest': (True, "latest"),
'c++11': (False, 11),
'c++14': (False, 14),
'c++17': (False, 17),
'c++latest': (False, "latest"),
}
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
# need a typeddict for this
key = OptionKey('winlibs', machine=self.for_machine, lang=self.language)
return T.cast(T.List[str], options[key].value[:])
def _get_options_impl(self, opts: 'KeyedOptionDictType', cpp_stds: T.List[str]) -> 'KeyedOptionDictType':
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts.update({
key.evolve('eh'): coredata.UserComboOption(
'C++ exception handling type.',
['none', 'default', 'a', 's', 'sc'],
'default',
),
key.evolve('rtti'): coredata.UserBooleanOption('Enable RTTI', True),
key.evolve('winlibs'): coredata.UserArrayOption(
'Windows libs to link against.',
msvc_winlibs,
),
})
opts[key.evolve('std')].choices = cpp_stds
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
eh = options[key.evolve('eh')]
if eh.value == 'default':
args.append('/EHsc')
elif eh.value == 'none':
args.append('/EHs-c-')
else:
args.append('/EH' + eh.value)
if not options[key.evolve('rtti')].value:
args.append('/GR-')
permissive, ver = self.VC_VERSION_MAP[options[key].value]
if ver is not None:
args.append(f'/std:c++{ver}')
if not permissive:
args.append('/permissive-')
return args
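    # Illustrative: with std='c++17', eh='default' and rtti enabled, this method
    # returns ['/EHsc', '/std:c++17', '/permissive-'] ('c++17' maps to a
    # non-permissive entry in VC_VERSION_MAP, so '/permissive-' is appended).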
def get_compiler_check_args(self, mode: CompileCheckMode) -> T.List[str]:
# XXX: this is a hack because so much GnuLike stuff is in the base CPPCompiler class.
return Compiler.get_compiler_check_args(self, mode)
class CPP11AsCPP14Mixin(CompilerMixinBase):
"""Mixin class for VisualStudio and ClangCl to replace C++11 std with C++14.
This is a limitation of Clang and MSVC that ICL doesn't share.
"""
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
# Note: there is no explicit flag for supporting C++11; we attempt to do the best we can
# which means setting the C++ standard version to C++14, in compilers that support it
# (i.e., after VS2015U3)
# if one is using anything before that point, one cannot set the standard.
key = OptionKey('std', machine=self.for_machine, lang=self.language)
if options[key].value in {'vc++11', 'c++11'}:
mlog.warning(self.id, 'does not support C++11;',
'attempting best effort; setting the standard to C++14', once=True)
# Don't mutate anything we're going to change, we need to use
# deepcopy since we're messing with members, and we can't simply
# copy the members because the option proxy doesn't support it.
options = copy.deepcopy(options)
if options[key].value == 'vc++11':
options[key].value = 'vc++14'
else:
options[key].value = 'c++14'
return super().get_option_compile_args(options)
class VisualStudioCPPCompiler(CPP11AsCPP14Mixin, VisualStudioLikeCPPCompilerMixin, MSVCCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo', target: str,
exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
MSVCCompiler.__init__(self, target)
self.id = 'msvc'
def get_options(self) -> 'KeyedOptionDictType':
cpp_stds = ['none', 'c++11', 'vc++11']
# Visual Studio 2015 and later
if version_compare(self.version, '>=19'):
cpp_stds.extend(['c++14', 'c++latest', 'vc++latest'])
# Visual Studio 2017 and later
if version_compare(self.version, '>=19.11'):
cpp_stds.extend(['vc++14', 'c++17', 'vc++17'])
return self._get_options_impl(super().get_options(), cpp_stds)
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
key = OptionKey('std', machine=self.for_machine, lang=self.language)
if options[key].value != 'none' and version_compare(self.version, '<19.00.24210'):
mlog.warning('This version of MSVC does not support cpp_std arguments')
options = copy.copy(options)
options[key].value = 'none'
args = super().get_option_compile_args(options)
if version_compare(self.version, '<19.11'):
try:
i = args.index('/permissive-')
except ValueError:
return args
del args[i]
return args
def get_always_args(self) -> T.List[str]:
args = super().get_always_args()
# update the __cplusplus #define to match the version given on the
# command line with /std:NNN, but only for versions above 15.7 (2017)
# https://docs.microsoft.com/en-us/cpp/build/reference/zc-cplusplus?view=msvc-160
if version_compare(self.version, '>= 15.7'):
args.append('/Zc:__cplusplus')
return args
class ClangClCPPCompiler(CPP11AsCPP14Mixin, VisualStudioLikeCPPCompilerMixin, ClangClCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo', target: str,
exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
ClangClCompiler.__init__(self, target)
self.id = 'clang-cl'
def get_options(self) -> 'KeyedOptionDictType':
cpp_stds = ['none', 'c++11', 'vc++11', 'c++14', 'vc++14', 'c++17', 'vc++17', 'c++latest']
return self._get_options_impl(super().get_options(), cpp_stds)
class IntelClCPPCompiler(VisualStudioLikeCPPCompilerMixin, IntelVisualStudioLikeCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice,
is_cross: bool, info: 'MachineInfo', target: str,
exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
IntelVisualStudioLikeCompiler.__init__(self, target)
def get_options(self) -> 'KeyedOptionDictType':
        # This has only been tested with version 19.0.
cpp_stds = ['none', 'c++11', 'vc++11', 'c++14', 'vc++14', 'c++17', 'vc++17', 'c++latest']
return self._get_options_impl(super().get_options(), cpp_stds)
def get_compiler_check_args(self, mode: CompileCheckMode) -> T.List[str]:
# XXX: this is a hack because so much GnuLike stuff is in the base CPPCompiler class.
return IntelVisualStudioLikeCompiler.get_compiler_check_args(self, mode)
class ArmCPPCompiler(ArmCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
ArmCompiler.__init__(self)
def get_options(self) -> 'KeyedOptionDictType':
opts = CPPCompiler.get_options(self)
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts[key].choices = ['none', 'c++03', 'c++11']
return opts
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
args = []
key = OptionKey('std', machine=self.for_machine, lang=self.language)
std = options[key]
if std.value == 'c++11':
args.append('--cpp11')
elif std.value == 'c++03':
args.append('--cpp')
return args
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
def get_compiler_check_args(self, mode: CompileCheckMode) -> T.List[str]:
return []
class CcrxCPPCompiler(CcrxCompiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
CcrxCompiler.__init__(self)
# Override CCompiler.get_always_args
def get_always_args(self) -> T.List[str]:
return ['-nologo', '-lang=cpp']
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
def get_compile_only_args(self) -> T.List[str]:
return []
def get_output_args(self, target: str) -> T.List[str]:
return ['-output=obj=%s' % target]
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
def get_compiler_check_args(self, mode: CompileCheckMode) -> T.List[str]:
return []
class C2000CPPCompiler(C2000Compiler, CPPCompiler):
def __init__(self, exelist: T.List[str], version: str, for_machine: MachineChoice, is_cross: bool,
info: 'MachineInfo', exe_wrapper: T.Optional['ExternalProgram'] = None,
linker: T.Optional['DynamicLinker'] = None,
full_version: T.Optional[str] = None):
CPPCompiler.__init__(self, exelist, version, for_machine, is_cross,
info, exe_wrapper, linker=linker, full_version=full_version)
C2000Compiler.__init__(self)
def get_options(self) -> 'KeyedOptionDictType':
opts = CPPCompiler.get_options(self)
key = OptionKey('std', machine=self.for_machine, lang=self.language)
opts[key].choices = ['none', 'c++03']
return opts
def get_always_args(self) -> T.List[str]:
return ['-nologo', '-lang=cpp']
def get_option_compile_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
def get_compile_only_args(self) -> T.List[str]:
return []
def get_output_args(self, target: str) -> T.List[str]:
return ['-output=obj=%s' % target]
def get_option_link_args(self, options: 'KeyedOptionDictType') -> T.List[str]:
return []
def get_compiler_check_args(self, mode: CompileCheckMode) -> T.List[str]:
return []
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class IpAllocationsOperations(object):
"""IpAllocationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified IpAllocation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ip_allocation_name=ip_allocation_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
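    # Hypothetical usage sketch (assumes an authenticated
    # NetworkManagementClient bound to the name `client`; not part of this
    # module):
    #
    #   poller = client.ip_allocations.begin_delete(
    #       resource_group_name='my-rg', ip_allocation_name='my-ip-alloc')
    #   poller.result()  # block until the long-running delete completes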
def get(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
"""Gets the specified IpAllocation by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpAllocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.IpAllocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.IpAllocation"
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'IpAllocation')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IpAllocation', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.IpAllocation"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.IpAllocation"]
"""Creates or updates an IpAllocation in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
        :param parameters: Parameters supplied to the create or update IpAllocation operation.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.IpAllocation
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either IpAllocation or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.IpAllocation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ip_allocation_name=ip_allocation_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
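# Example usage (an illustrative sketch, not part of this module; assumes a
# NetworkManagementClient instance named `client` and a populated IpAllocation
# model built elsewhere):
#
#     poller = client.ip_allocations.begin_create_or_update(
#         resource_group_name="example-rg",
#         ip_allocation_name="example-ipalloc",
#         parameters=ip_allocation,
#     )
#     result = poller.result()  # block until the LRO reaches a terminal state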
def update_tags(
self,
resource_group_name, # type: str
ip_allocation_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.IpAllocation"
"""Updates a IpAllocation tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ip_allocation_name: The name of the IpAllocation.
:type ip_allocation_name: str
:param parameters: Parameters supplied to update IpAllocation tags.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IpAllocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.IpAllocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ipAllocationName': self._serialize.url("ip_allocation_name", ip_allocation_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IpAllocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations/{ipAllocationName}'} # type: ignore
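# Example usage (sketch; `client` is assumed as above and TagsObject is the
# model this method serializes):
#
#     updated = client.ip_allocations.update_tags(
#         resource_group_name="example-rg",
#         ip_allocation_name="example-ipalloc",
#         parameters=TagsObject(tags={"env": "test"}),
#     )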
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpAllocationListResult"]
"""Gets all IpAllocations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpAllocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.IpAllocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpAllocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/IpAllocations'} # type: ignore
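# Example usage (sketch): the returned ItemPaged is lazy -- iterating it
# drives get_next/extract_data above, fetching one page of results at a time
# and following next_link until the collection is exhausted:
#
#     for ip_allocation in client.ip_allocations.list():
#         print(ip_allocation.name)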
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IpAllocationListResult"]
"""Gets all IpAllocations in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IpAllocationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.IpAllocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IpAllocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('IpAllocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/IpAllocations'} # type: ignore
''' @Summary: API endpoints for accessing company data from the MySQL database '''
from flask import jsonify, request
from flask_restful import Resource, reqparse
from api.sql.models import * #import all of the models from models.py
from api.util.parse_json import json_decode, json_encode #for json request parsing
from api import login_required, db
class companyUtils:
''' Reusable helper methods decoupled into a static context.
Note: jsonify does not handle bare lists, so it isn't used here '''
@staticmethod
def get_company_poc_list(_company_name_):
try:
x = company_data.query.filter_by(company_name=_company_name_).first()
if x is not None:
return {
'status' : 200,
'poc' : x.poc
}
else:
return {
'status' : 400,
'message' : 'Company POCs for specified company are not available'
}
except Exception as e:
return {'status' : 400}
@staticmethod
def get_all_poc_list():
try: # load_only("company_name")
x = company_data.query.all()
if x is not None:
co_poc_list = []
for co in x:
co_poc_list.append({
'company_name' : co.company_name,
'poc' : co.poc
})
return {
'status' : 200,
'all_company_poc' : co_poc_list
}
else:
return {
'status' : 400,
'message' : 'Company POCs are not available'
}
except Exception as e:
return {'status' : 400}
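# Example usage (sketch; assumes a populated company_data table and an active
# application context -- the company name is illustrative):
#
#     result = companyUtils.get_company_poc_list("Acme Corp")
#     if result['status'] == 200:
#         print(result['poc'])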
class manageCompany(Resource):
@login_required
def get(self, _company_name_): # get all info about a company #
try:
x = company_data.query.filter_by(company_name=_company_name_).first()
if x is not None: # check first; attribute access on a missing company would raise AttributeError
_company_id = x.company_id
_company_name = x.company_name
_street = x.street
_city = x.city
_state = x.state
_zip = x.zip
_phone_number = x.phone_number
_poc = x.poc
_authinfo = x.authinfo
_sites = x.sites
return jsonify(
status = 200,
message = 'Company search success',
company_id = _company_id,
company_name = _company_name,
street = _street,
city = _city,
state = _state,
zip = _zip,
phone_number = _phone_number,
poc = _poc,
authinfo = _authinfo,
sites = _sites
)
else:
return {
'status' : 400,
'message' : 'Company search failure'
}
except Exception as e:
return {'status' : 400}
@login_required
def put(self, _company_name_): # update a company's info #
try:
parser = reqparse.RequestParser()
parser.add_argument('company_id', type=int, help='Company_id for account', location='json')
parser.add_argument('company_name', type=str, help='Name of company for account', location='json')
parser.add_argument('street', type=str, help='street for account', location='json')
parser.add_argument('city', type=str, help='City location for account', location='json')
parser.add_argument('state', type=str, help='State location for account', location='json')
parser.add_argument('zip', type=str, help='Zip code for account', location='json')
parser.add_argument('phone_number', type=str, help='Phone_number for account', location='json')
parser.add_argument('poc', type=json_encode, help='Point Of Contact List for account', location='json')
parser.add_argument('authinfo', type=json_encode, help='Authentication info for account', location='json')
parser.add_argument('sites', type=json_encode, help='List of divisions for account', location='json')
args = parser.parse_args()
curr_session = db.session # open database session
try:
x = company_data.query.filter_by(company_name=_company_name_).first() #fetch the name to be updated
if args['company_id'] is not None:
x.company_id = args['company_id']
if args['company_name'] is not None:
x.company_name = args['company_name']
if args['street'] is not None:
x.street = args['street']
if args['city'] is not None:
x.city = args['city']
if args['state'] is not None:
x.state = args['state']
if args['zip'] is not None:
x.zip = args['zip']
if args['phone_number'] is not None:
x.phone_number = args['phone_number']
if args['poc'] is not None:
x.poc = json_decode(args['poc'])
if args['authinfo'] is not None:
x.authinfo = json_decode(args['authinfo'])
if args['sites'] is not None:
x.sites = json_decode(args['sites'])
curr_session.commit() #commit changes
return {
'status' : 200,
'message' : 'Company update successful'
}
except Exception:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Company update failure'
}
except Exception as e:
return {'status' : 400}
@login_required
def delete(self, _company_name_): # delete a company #
try:
curr_session = db.session #open database session
x = company_data.query.filter_by(company_name=_company_name_).first()
try:
db.session.delete(x)
db.session.commit()
return {
'status' : 200,
'message' : 'Company delete successful'
}
except Exception:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Company delete failure'
}
except Exception as e:
return {'status' : 400}
class manageCompanyList(Resource):
'''contains methods for accessing resource lists from company table in mysql'''
@login_required
def get(self, _company_name_=None): # _company_name_ is optional param #
URL = request.url
# get a list of sites for specified company #
if URL.find("api/company") > 0 and URL.find("sites") > 0 and _company_name_ != None:
try:
x = company_data.query.filter_by(company_name=_company_name_).first()
if x is not None:
return jsonify(
status = 200,
sites = x.sites
)
else:
return {
'status' : 400,
'message' : 'Company sites for specified company are not available'
}
except Exception as e:
return {'status' : 400}
# get list of all sites #
elif URL.find("api/company/sites") > 0 and _company_name_ == None:
try: # .load_only("company_name")
x = company_data.query.all()
if x is not None:
co_site_dict = {}
for co in x:
co_site_dict[co.company_name] = co.sites
return jsonify(
status = 200,
company_sites = co_site_dict
)
else:
return {
'status' : 400,
'message' : 'Company sites are not available'
}
except Exception as e:
return {'status' : 400}
# get a list of poc's for specified company #
if URL.find("api/company") > 0 and URL.find("poc") > 0 and _company_name_ != None:
return companyUtils.get_company_poc_list(_company_name_)
# get list of all poc's for all companies #
elif URL.find("api/company/poc") > 0 and _company_name_ == None:
return companyUtils.get_all_poc_list()
# get a list of all companies #
elif URL.find("api/company") > 0 and _company_name_ == None:
try:
x = company_data.query.with_entities(company_data.company_name).all()
if x is not None:
companyList = []
for co in x:
companyList.append(co[0])
return jsonify(
status = 200,
companies = companyList
)
else:
return {
'status' : 400,
'message' : 'Company names are not available'
}
except Exception as e:
return {'status' : 400}
else:
return {
'status' : 404,
'message' : 'Redirection error, route is not available'
}
# add to list / create new company #
@login_required
def post(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('company_id', type=int, help='Company_id for account', location='json')
parser.add_argument('company_name', type=str, help='Company name for account', location='json')
parser.add_argument('street', type=str, help='street for account', location='json')
parser.add_argument('city', type=str, help='City location for account', location='json')
parser.add_argument('state', type=str, help='State location for account', location='json')
parser.add_argument('zip', type=str, help='Zip code for account', location='json')
parser.add_argument('phone_number', type=str, help='Phone number for account', location='json')
parser.add_argument('poc', type=json_encode, help='Point Of Contact for account', location='json')
parser.add_argument('authinfo', type=json_encode, help='Authentication settings for account', location='json')
parser.add_argument('sites', type=json_encode, help='List of divisions for account', location='json')
args = parser.parse_args()
_company_id = args['company_id']
_company_name = args['company_name']
_street = args['street']
_city = args['city']
_state = args['state']
_zip = args['zip']
_phone_number = args['phone_number']
_poc = args['poc']
_authinfo = args['authinfo']
_sites = args['sites']
query = company_data(company_id=_company_id, company_name=_company_name, street=_street, city=_city,
state=_state, zip=_zip, phone_number=_phone_number, poc=json_decode(_poc),
authinfo=json_decode(_authinfo), sites=json_decode(_sites))
curr_session = db.session #open database session
try:
curr_session.add(query) #add prepared statement to opened session
curr_session.commit() #commit changes
return {
'status' : 200,
'message' : 'Company creation successful'
}
except Exception:
curr_session.rollback()
curr_session.flush() # for resetting non-commited .add()
return {
'status' : 400,
'message' : 'Company creation failure'
}
except Exception as e:
return {'status' : 400}
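# Route registration sketch (illustrative only -- the real wiring lives
# elsewhere in the api package). The URL.find() checks in
# manageCompanyList.get() imply routes along these lines:
#
#     api.add_resource(manageCompanyList,
#                      '/api/company',
#                      '/api/company/poc',
#                      '/api/company/sites',
#                      '/api/company/<string:_company_name_>/poc',
#                      '/api/company/<string:_company_name_>/sites')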
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
====================
Pygame Button Widget
====================
A button widget for pygame display surfaces. Sends a message when clicked.
Uses the Pygame Display service.
Example Usage
-------------
Three buttons that output messages to the console::
button1 = Button(caption="Press SPACE or click",key=K_SPACE).activate()
button2 = Button(caption="Reverse colours",fgcolour=(255,255,255),bgcolour=(0,0,0)).activate()
button3 = Button(caption="Mary...",msg="Mary had a little lamb", position=(200,100)).activate()
ce = ConsoleEchoer().activate()
button1.link( (button1,"outbox"), (ce,"inbox") )
button2.link( (button2,"outbox"), (ce,"inbox") )
button3.link( (button3,"outbox"), (ce,"inbox") )
How does it work?
-----------------
The component requests a display surface from the Pygame Display service
component. This is used as the surface of the button. It also binds event
listeners to the service, as appropriate.
Arguments to the constructor configure the appearance and behaviour of the
button component:
- If an output "msg" is not specified, the default is a tuple ("CLICK", id) where
id is the self.id attribute of the component.
- A pygame keycode can be specified that will also trigger the button as if it
had been clicked
- you can set the text label, colour, margin size and position of the button
- the button can have a transparent background
- you can specify a size as width,height. If specified, the margin size is
ignored and the text label will be centred within the button
If a producerFinished or shutdownMicroprocess message is received on its
"control" inbox, it is passed on out of its "signal" outbox and the component
terminates.
Upon termination, this component does *not* unbind itself from the Pygame Display
service. It does not deregister event handlers and does not relinquish the
display surface it requested.
"""
import pygame
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Kamaelia.UI.GraphicDisplay import PygameDisplay
class Button(Axon.Component.component):
"""\
Button(...) -> new Button component.
Create a button widget in pygame, using the Pygame Display service. Sends a
message out of its outbox when clicked.
Keyword arguments (all optional):
- caption -- text (default="Button <component id>")
- position -- (x,y) position of top left corner in pixels
- margin -- pixels margin between caption and button edge (default=8)
- bgcolour -- (r,g,b) fill colour (default=(224,224,224))
- fgcolour -- (r,g,b) text colour (default=(0,0,0))
- msg -- sent when clicked (default=("CLICK",self.id))
- key -- if not None, pygame keycode to trigger click (default=None)
- transparent -- draw background transparent if True (default=False)
- size -- None or (w,h) in pixels (default=None)
"""
Inboxes = { "inbox" : "Receive events from Pygame Display",
"control" : "For shutdown messages",
"callback" : "Receive callbacks from Pygame Display"
}
Outboxes = { "outbox" : "button click events emitted here",
"signal" : "For shutdown messages",
"display_signal" : "Outbox used for communicating to the display surface" }
def __init__(self, caption=None, position=None, margin=8, bgcolour = (224,224,224), fgcolour = (0,0,0), msg=None,
key = None,
transparent = False, size=None):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(Button,self).__init__()
self.backgroundColour = bgcolour
self.foregroundColour = fgcolour
self.margin = margin
self.key = key
### print "KEY",key
if caption is None:
caption = "Button "+str(self.id)
self.caption = caption
self.size = size
pygame.font.init()
self.buildCaption(caption)
if msg is None:
msg = ("CLICK", self.id)
self.eventMsg = msg
if transparent:
transparency = bgcolour
else:
transparency = None
self.disprequest = { "DISPLAYREQUEST" : True,
"callback" : (self,"callback"),
"events" : (self, "inbox"),
"size": self.size,
"transparency" : transparency }
if not position is None:
self.disprequest["position"] = position
def buildCaption(self, text):
"""Pre-render the text to go on the button label."""
# Text is rendered to self.image
font = pygame.font.Font(None, 14)
self.image = font.render(text, True, self.foregroundColour)
(w,h) = self.image.get_size()
if not self.size:
self.size = (w + 2*self.margin, h + 2*self.margin)
self.imagePosition = (self.margin, self.margin)
else:
self.imagePosition = ( (self.size[0]-w)/2, (self.size[1]-h)/2 )
def waitBox(self,boxname):
"""Generator. yields 1 until data ready on the named inbox."""
waiting = True
while waiting:
if self.dataReady(boxname): return
else: yield 1
def main(self):
"""Main loop."""
displayservice = PygameDisplay.getDisplayService()
self.link((self,"display_signal"), displayservice)
self.send( self.disprequest,
"display_signal")
for _ in self.waitBox("callback"): yield 1
self.display = self.recv("callback")
self.blitToSurface()
self.send({ "ADDLISTENEVENT" : pygame.MOUSEBUTTONDOWN,
"surface" : self.display},
"display_signal")
if self.key is not None:
### print "ADDING LISTEN", self.key, pygame.KEYDOWN, self.display
message = { "ADDLISTENEVENT" : pygame.KEYDOWN,
"surface" : self.display,
"TRACE" : "ME"}
### print "----------------", message
self.send(message, "display_signal")
# while self.outboxes["display_signal"] != []:
# print dir(self)
# print "Waiting clean out", self.outboxes["display_signal"], self
# yield 1
done = False
while not done:
while self.dataReady("control"):
cmsg = self.recv("control")
if isinstance(cmsg, (producerFinished, shutdownMicroprocess)):
self.send(cmsg, "signal")
done = True
# print "Shutdown recieved, exitting", self.caption
while self.dataReady("inbox"):
for event in self.recv("inbox"):
### print event
if event.type == pygame.MOUSEBUTTONDOWN:
# print "BUTTON", event.button
bounds = self.display.get_rect()
if bounds.collidepoint(*event.pos):
self.send( self.eventMsg, "outbox" )
if event.type == pygame.KEYDOWN:
### print "EVENT", event.type, event.key
if event.key == self.key:
self.send( self.eventMsg, "outbox" )
self.pause()
yield 1
self.display.set_alpha(0)
self.send(Axon.Ipc.producerFinished(message=self.display), "display_signal")
yield 1
def blitToSurface(self):
"""Clears the background and renders the text label onto the button surface."""
try:
self.display.fill( self.backgroundColour )
self.display.blit( self.image, self.imagePosition )
except Exception: # the display surface may already be gone during shutdown
pass
self.send({"REDRAW":True, "surface":self.display}, "display_signal")
__kamaelia_components__ = ( Button, )
if __name__ == "__main__":
from Kamaelia.Util.Console import ConsoleEchoer
from pygame.locals import *
button1 = Button(caption="Press SPACE or click",key=K_SPACE).activate()
button2 = Button(caption="Reverse colours",fgcolour=(255,255,255),bgcolour=(0,0,0)).activate()
button3 = Button(caption="Mary...",msg="Mary had a little lamb", position=(200,100)).activate()
ce = ConsoleEchoer().activate()
button1.link( (button1,"outbox"), (ce,"inbox") )
button2.link( (button2,"outbox"), (ce,"inbox") )
button3.link( (button3,"outbox"), (ce,"inbox") )
Axon.Scheduler.scheduler.run.runThreads()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import eventlet
eventlet.monkey_patch(os=False)
import copy
import gettext
import os
import shutil
import sys
import tempfile
import uuid
import fixtures
import mox
from oslo.config import cfg
import stubout
import testtools
from nova import context
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
from nova.objects import base as objects_base
from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import paths
from nova import service
from nova.tests import conf_fixture
from nova.tests import policy_fixture
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3')
CONF.set_override('use_stderr', False)
logging.setup('nova')
_DB_CACHE = None
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection,
sqlite_db, sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
if db_migrate.db_version() > db_migrate.INIT_VERSION:
return
else:
testdb = paths.state_path_rel(sqlite_db)
if os.path.exists(testdb):
return
db_migrate.db_sync()
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = paths.state_path_rel(sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
else:
shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
paths.state_path_rel(self.sqlite_db))
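# How this fixture works (summary of the code above): the freshly migrated
# database is snapshotted once -- dumped to the _DB string for the in-memory
# "sqlite://" case, or copied to a clean file on disk otherwise -- and setUp()
# restores that pristine snapshot before every test instead of re-running
# migrations each time.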
class SampleNetworks(fixtures.Fixture):
"""Create sample networks in the database."""
def __init__(self, host=None):
self.host = host
def setUp(self):
super(SampleNetworks, self).setUp()
ctxt = context.get_admin_context()
network = network_manager.VlanManager(host=self.host)
bridge_interface = CONF.flat_interface or CONF.vlan_interface
network.create_networks(ctxt,
label='test',
cidr='10.0.0.0/8',
multi_host=CONF.multi_host,
num_networks=CONF.num_networks,
network_size=CONF.network_size,
cidr_v6=CONF.fixed_range_v6,
gateway=CONF.gateway,
gateway_v6=CONF.gateway_v6,
bridge=CONF.flat_network_bridge,
bridge_interface=bridge_interface,
vpn_start=CONF.vpn_start,
vlan_start=CONF.vlan_start,
dns1=CONF.flat_network_dns)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net)
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
def __init__(self, name, new_value):
self.name = name
self.new_value = new_value
def _restore(self, old_value):
sys.modules[self.name] = old_value
def setUp(self):
super(ReplaceModule, self).setUp()
old_value = sys.modules.get(self.name)
sys.modules[self.name] = self.new_value
self.addCleanup(self._restore, old_value)
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
host = host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class MoxStubout(fixtures.Fixture):
"""Deal with code around mox and stubout as a fixture."""
def setUp(self):
super(MoxStubout, self).setUp()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.addCleanup(self.stubs.UnsetAll)
self.addCleanup(self.stubs.SmartUnsetAll)
self.addCleanup(self.mox.UnsetStubs)
self.addCleanup(self.mox.VerifyAll)
class TranslationFixture(fixtures.Fixture):
"""Use gettext NullTranslation objects in tests."""
def setUp(self):
super(TranslationFixture, self).setUp()
nulltrans = gettext.NullTranslations()
gettext_fixture = fixtures.MonkeyPatch('gettext.translation',
lambda *x, **y: nulltrans)
self.gettext_patcher = self.useFixture(gettext_fixture)
class TestingException(Exception):
pass
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests.
Due to the slowness of DB access, please consider deriving from
`NoDBTestCase` first.
"""
USES_DB = True
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(TranslationFixture())
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.log_fixture = self.useFixture(fixtures.FakeLogger())
self.useFixture(conf_fixture.ConfFixture(CONF))
if self.USES_DB:
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(session, migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db)
self.useFixture(_DB_CACHE)
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
# registry.
objects_base.NovaObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.NovaObject._obj_classes)
self.addCleanup(self._restore_obj_registry)
mox_fixture = self.useFixture(MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
self.addCleanup(self._clear_attrs)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
CONF.set_override('fatal_exception_format_errors', True)
CONF.set_override('enabled', True, 'osapi_v3')
CONF.set_override('force_dhcp_release', False)
# This will be cleaned up by the NestedTempfile fixture
CONF.set_override('lock_path', tempfile.mkdtemp())
def _restore_obj_registry(self):
objects_base.NovaObject._obj_classes = self._base_test_obj_backup
def _clear_attrs(self):
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
def start_service(self, name, host=None, **kwargs):
svc = self.useFixture(ServiceFixture(name, host, **kwargs))
return svc.service
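# Example usage (sketch, inside a TestCase subclass; the option and service
# names echo ones referenced above):
#
#     self.flags(network_size=8)           # override a CONF option for this test
#     svc = self.start_service('compute')  # run nova-compute as a fixture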
class APICoverage(object):
cover_api = None
def test_api_methods(self):
self.assertIsNotNone(self.cover_api)
api_methods = [x for x in dir(self.cover_api)
if not x.startswith('_')]
test_methods = [x[5:] for x in dir(self)
if x.startswith('test_')]
self.assertThat(
test_methods,
testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
"""Fixture to start and remove time override."""
def setUp(self):
super(TimeOverride, self).setUp()
timeutils.set_time_override()
self.addCleanup(timeutils.clear_time_override)
class NoDBTestCase(TestCase):
"""
`NoDBTestCase` differs from TestCase in that DB access is not supported.
This makes tests run significantly faster. If possible, all new tests
should derive from this class.
"""
USES_DB = False
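# Example (sketch): a fast test case that never touches the database:
#
#     class UtilsTestCase(NoDBTestCase):
#         def test_something(self):
#             self.flags(use_stderr=False)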
# -*- coding: utf-8 -*-
from cms.exceptions import NoHomeFound
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.models.metaclasses import PageMetaClass
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.helpers import reversion_register
from cms.utils.i18n import get_fallback_languages
from cms.utils.page import get_available_slug, check_title_slugs
from cms.utils.urlutils import urljoin
from datetime import datetime
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.db.models import Q
from django.db.models.fields.related import OneToOneRel
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _, get_language, ugettext
from menus.menu_pool import menu_pool
from os.path import join
from publisher.errors import MpttPublisherCantPublish
from mptt.models import MPTTModel
import copy
class Page(MPTTModel):
"""
A simple hierarchical page model
"""
__metaclass__ = PageMetaClass
MODERATOR_CHANGED = 0
MODERATOR_NEED_APPROVEMENT = 1
MODERATOR_NEED_DELETE_APPROVEMENT = 2
MODERATOR_APPROVED = 10
# special case - page was approved, but some of the page's parents are not approved yet
MODERATOR_APPROVED_WAITING_FOR_PARENTS = 11
moderator_state_choices = (
(MODERATOR_CHANGED, _('changed')),
(MODERATOR_NEED_APPROVEMENT, _('req. app.')),
(MODERATOR_NEED_DELETE_APPROVEMENT, _('delete')),
(MODERATOR_APPROVED, _('approved')),
(MODERATOR_APPROVED_WAITING_FOR_PARENTS, _('app. par.')),
)
LIMIT_VISIBILITY_IN_MENU_CHOICES = (
(1,_('for logged in users only')),
(2,_('for anonymous users only')),
)
PUBLISHER_STATE_DEFAULT = 0
PUBLISHER_STATE_DIRTY = 1
PUBLISHER_STATE_DELETE = 2
template_choices = [(x, _(y)) for x,y in settings.CMS_TEMPLATES]
created_by = models.CharField(_("created by"), max_length=70, editable=False)
changed_by = models.CharField(_("changed by"), max_length=70, editable=False)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
creation_date = models.DateTimeField(editable=False, default=datetime.now)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_('When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True, help_text=_('When to expire the page. Leave empty to never expire.'), db_index=True)
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False, help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_("A unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
published = models.BooleanField(_("is published"), blank=True)
template = models.CharField(_("template"), max_length=100, choices=template_choices, help_text=_('The template used to render the content.'))
site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"))
moderator_state = models.SmallIntegerField(_('moderator state'), choices=moderator_state_choices, default=MODERATOR_NEED_APPROVEMENT, blank=True)
level = models.PositiveIntegerField(db_index=True, editable=False)
lft = models.PositiveIntegerField(db_index=True, editable=False)
rght = models.PositiveIntegerField(db_index=True, editable=False)
tree_id = models.PositiveIntegerField(db_index=True, editable=False)
login_required = models.BooleanField(_("login required"),default=False)
limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=None, null=True, blank=True, choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True, help_text=_("limit when this page is visible in the menu"))
# Placeholders (plugins)
placeholders = models.ManyToManyField(Placeholder, editable=False)
# Publisher fields
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
publisher_state = models.SmallIntegerField(default=0, editable=False, db_index=True)
# Managers
objects = PageManager()
permissions = PagePermissionsPermissionManager()
class Meta:
verbose_name = _('page')
verbose_name_plural = _('pages')
ordering = ('site','tree_id', 'lft')
app_label = 'cms'
class PublisherMeta:
exclude_fields_append = ['id', 'publisher_is_draft', 'publisher_public',
'publisher_state', 'moderator_state',
'placeholders', 'lft', 'rght', 'tree_id',
'parent']
def __unicode__(self):
title = self.get_menu_title(fallback=True)
if title is None:
title = u""
return u'%s' % (title,)
def move_page(self, target, position='first-child'):
"""Called from admin interface when page is moved. Should be used on
all the places which are changing page position. Used like an interface
to mptt, but after move is done page_moved signal is fired.
"""
self.move_to(target, position)
# fire signal
from cms.models.moderatormodels import PageModeratorState
self.force_moderation_action = PageModeratorState.ACTION_MOVE
import cms.signals as cms_signals
cms_signals.page_moved.send(sender=Page, instance=self) #titles get saved before moderation
self.save(change_state=True) # always save the page after move, because of publisher
# check the slugs
check_title_slugs(self)
def copy_page(self, target, site, position='first-child', copy_permissions=True, copy_moderation=True, public_copy=False):
"""
Copy a page [ and all its descendants ] to a new location.
Doesn't check for add-page permissions anymore; this is done in PageAdmin.
Note: public_copy was added in order to enable the creation of the public page during
the publish operation, as it sets publisher_is_draft=False.
"""
from cms.utils.moderator import update_moderation_message
page_copy = None
if public_copy:
# create a copy of the draft page - existing code loops through pages so added it to a list
pages = [copy.copy(self)]
else:
pages = [self] + list(self.get_descendants().order_by('-rght'))
if not public_copy:
site_reverse_ids = Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id', flat=True)
if target:
target.old_pk = -1
if position == "first-child":
tree = [target]
elif target.parent_id:
tree = [target.parent]
else:
tree = []
else:
tree = []
if tree:
tree[0].old_pk = tree[0].pk
first = True
# loop over all affected pages (self is included in descendants)
for page in pages:
titles = list(page.title_set.all())
# get all current placeholders (->plugins)
placeholders = list(page.placeholders.all())
origin_id = page.id
# create a copy of this page by setting pk = None (=new instance)
page.old_pk = page.pk
page.pk = None
page.level = None
page.rght = None
page.lft = None
page.tree_id = None
page.published = False
page.moderator_state = Page.MODERATOR_CHANGED
page.publisher_public_id = None
# only set reverse_id on standard copy
if not public_copy:
if page.reverse_id in site_reverse_ids:
page.reverse_id = None
if first:
first = False
if tree:
page.parent = tree[0]
else:
page.parent = None
page.insert_at(target, position)
else:
count = 1
found = False
for prnt in tree:
if prnt.old_pk == page.parent_id:
page.parent = prnt
tree = tree[0:count]
found = True
break
count += 1
if not found:
page.parent = None
tree.append(page)
page.site = site
# override default page settings specific for public copy
if public_copy:
page.published = True
page.publisher_is_draft=False
page.moderator_state = Page.MODERATOR_APPROVED
# we need to relate this new public copy to its draft page (self)
page.publisher_public = self
# code taken from Publisher.publish(), overridden here because we need to save the page
# before we can use the page object for the titles, placeholders etc. below;
# the method has been modified to return the object after saving the instance variable
page = self._publisher_save_public(page)
page_copy = page # create a copy used in the return
else:
# only need to save the page if it isn't public since it is saved above otherwise
page.save()
# copy moderation, permissions if necessary
if settings.CMS_PERMISSION and copy_permissions:
from cms.models.permissionmodels import PagePermission
for permission in PagePermission.objects.filter(page__id=origin_id):
permission.pk = None
permission.page = page
permission.save()
if settings.CMS_MODERATOR and copy_moderation:
from cms.models.moderatormodels import PageModerator
for moderator in PageModerator.objects.filter(page__id=origin_id):
moderator.pk = None
moderator.page = page
moderator.save()
# update moderation message for standard copy
if not public_copy:
update_moderation_message(page, unicode(_('Page was copied.')))
# copy titles of this page
for title in titles:
title.pk = None # setting pk = None creates a new instance
title.publisher_public_id = None
title.published = False
title.page = page
# create slug-copy for standard copy
if not public_copy:
title.slug = get_available_slug(title)
title.save()
# copy the placeholders (and plugins on those placeholders!)
for ph in placeholders:
plugins = list(ph.cmsplugin_set.all().order_by('tree_id', '-rght'))
try:
ph = page.placeholders.get(slot=ph.slot)
except Placeholder.DoesNotExist:
ph.pk = None # make a new instance
ph.save()
page.placeholders.add(ph)
if plugins:
copy_plugins_to(plugins, ph)
# invalidate the menu for this site
menu_pool.clear(site_id=site.pk)
return page_copy # return the page_copy or None
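# Example usage (sketch; `page` is an existing draft Page and `target` the
# page to copy under -- both names are illustrative):
#
#     page.copy_page(target, page.site, position='first-child')
#
# Note that a return value (the new public page) is only produced on the
# public_copy=True path used by publish(); a standard copy returns None.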
def save(self, no_signals=False, change_state=True, commit=True,
force_with_moderation=False, force_state=None, **kwargs):
"""
Args:
commit: True if model should be really saved
force_with_moderation: can be true when new object gets added under
some existing page and this new page will require moderation;
this is because of how this adding works - first save, then move
"""
# Published pages should always have a publication date
publish_directly, under_moderation = False, False
if self.publisher_is_draft:
# publisher specific stuff, but only on draft model, this is here
# because page initializes publish process
if settings.CMS_MODERATOR:
under_moderation = force_with_moderation or (self.pk and bool(self.get_moderator_queryset().count()))
created = not bool(self.pk)
if settings.CMS_MODERATOR:
if change_state:
if created:
# new page....
self.moderator_state = Page.MODERATOR_CHANGED
elif not self.requires_approvement():
# always change state to need approvement when there is some change
self.moderator_state = Page.MODERATOR_NEED_APPROVEMENT
if not under_moderation and (self.published or self.publisher_public):
# existing page without moderator - publish it directly if
# published is True
publish_directly = True
elif change_state:
self.moderator_state = Page.MODERATOR_CHANGED
#publish_directly = True - no publisher, no publishing!! - we just
# use draft models in this case
if force_state is not None:
self.moderator_state = force_state
# if the page is published we set the publish date if not set yet.
if self.publication_date is None and self.published:
self.publication_date = datetime.now()
if self.reverse_id == "":
self.reverse_id = None
from cms.utils.permissions import _thread_locals
user = getattr(_thread_locals, "user", None)
if user:
self.changed_by = user.username
else:
self.changed_by = "script"
if not self.pk:
self.created_by = self.changed_by
if commit:
if no_signals: # ugly hack because of mptt
self.save_base(cls=self.__class__, **kwargs)
else:
super(Page, self).save(**kwargs)
#if commit and (publish_directly or created and not under_moderation):
if self.publisher_is_draft:
if self.published:
if commit and publish_directly:
self.publish()
elif self.publisher_public and self.publisher_public.published:
self.publisher_public.published = False
self.publisher_public.save()
def save_base(self, *args, **kwargs):
"""Overriden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
if self.publisher_is_draft and not keep_state:
self.publisher_state = self.PUBLISHER_STATE_DIRTY
if keep_state:
delattr(self, '_publisher_keep_state')
ret = super(Page, self).save_base(*args, **kwargs)
return ret
@transaction.commit_manually
def publish(self):
"""Overrides Publisher method, because there may be some descendants, which
are waiting for parent to publish, so publish them if possible.
IMPORTANT: @See utils.moderator.approve_page for publishing permissions
Also added @transaction.commit_manually decorator as delete()
was removing both draft and public versions
Returns: True if page was successfully published.
"""
# Publish can only be called on moderated and draft pages
if not self.publisher_is_draft:
transaction.rollback()
return
# publish, but only if all parents are published!!
published = None
if not self.pk:
self.save()
if self._publisher_can_publish():
########################################################################
# delete the existing public page using transaction block to ensure save() and delete() do not conflict
# the draft version was being deleted if I replaced the save() below with a delete()
try:
old_public = self.get_public_object()
old_public.publisher_state = self.PUBLISHER_STATE_DELETE
# store old public on self, pass around instead
self.old_public = old_public
old_public.publisher_public = None # remove the reference to the publisher_draft version of the page so it does not get deleted
old_public.save()
except:
transaction.rollback()
else:
transaction.commit()
# we hook into the modified copy_page routing to do the heavy lifting of copying the draft page to a new public page
new_public = self.copy_page(target=None, site=self.site, copy_moderation=False, position=None, copy_permissions=False, public_copy=True)
# taken from Publisher - copy_page needs to call self._publisher_save_public(copy) for mptt insertion
# insert_at() may have called the _create_tree_space() method, in which
# case tree_id may have changed, so we must refresh tree_id from the db
# before saving
if getattr(self, 'tree_id', None):
me = self._default_manager.get(pk=self.pk)
self.tree_id = me.tree_id
self.published = True
self.publisher_public = new_public
self.moderator_state = Page.MODERATOR_APPROVED
self.publisher_state = self.PUBLISHER_STATE_DEFAULT
self._publisher_keep_state = True
published = True
else:
self.moderator_state = Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS
self.save(change_state=False)
if not published:
# was not published, escape
transaction.commit()
return
# clean moderation log
self.pagemoderatorstate_set.all().delete()
# we delete the old public page - this only deletes the public page as we
# have removed the old_public.publisher_public=None relationship to the draft page above
if old_public:
# reparent public child pages before delete so they don't get purged as well
for child_page in old_public.children.order_by('lft'):
child_page.move_to(new_public, 'last-child')
child_page.save(change_state=False)
transaction.commit()
# reload old_public to get correct tree attrs
old_public = Page.objects.get(pk=old_public.pk)
old_public.move_to(None, 'last-child')
# moving the object out of the way before deleting works, but why?
# finally delete the old public page
old_public.delete()
# manually commit the last transaction batch
transaction.commit()
# page was published; check whether there are children waiting
# to be published (because of this parent)
publish_set = self.children.filter(moderator_state = Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)
for page in publish_set:
# recursive call for all children
page.moderator_state = Page.MODERATOR_APPROVED
page.save(change_state=False)
page.publish()
# fire signal after publishing is done
import cms.signals as cms_signals
cms_signals.post_publish.send(sender=Page, instance=self)
return published
def delete(self):
"""Mark public instance for deletion and delete draft.
"""
placeholders = self.placeholders.all()
for ph in placeholders:
plugin = CMSPlugin.objects.filter(placeholder=ph)
plugin.delete()
ph.delete()
if self.publisher_public_id:
# mark the public instance for deletion
self.publisher_public.publisher_state = self.PUBLISHER_STATE_DELETE
self.publisher_public.save()
super(Page, self).delete()
def delete_with_public(self):
placeholders = list(self.placeholders.all())
if self.publisher_public_id:
placeholders = placeholders + list(self.publisher_public.placeholders.all())
for ph in placeholders:
plugin = CMSPlugin.objects.filter(placeholder=ph)
plugin.delete()
ph.delete()
if self.publisher_public_id:
self.publisher_public.delete()
super(Page, self).delete()
def get_draft_object(self):
return self
def get_public_object(self):
return self.publisher_public
def get_calculated_status(self):
"""
get the calculated status of the page based on publication_date,
publication_end_date, and the published flag
"""
if settings.CMS_SHOW_START_DATE:
if self.publication_date > datetime.now():
return False
if settings.CMS_SHOW_END_DATE and self.publication_end_date:
if self.publication_end_date < datetime.now():
return False # the publication window has closed, so the page no longer counts as published
return self.published
calculated_status = property(get_calculated_status)
def get_languages(self):
"""
get the list of all existing languages for this page
"""
from cms.models.titlemodels import Title
if not hasattr(self, "all_languages"):
self.all_languages = Title.objects.filter(page=self).values_list("language", flat=True).distinct()
self.all_languages = list(self.all_languages)
self.all_languages.sort()
return self.all_languages
def get_absolute_url(self, language=None, fallback=True):
try:
if self.is_home():
return reverse('pages-root')
except NoHomeFound:
pass
if settings.CMS_FLAT_URLS:
path = self.get_slug(language, fallback)
else:
path = self.get_path(language, fallback)
if hasattr(self, "home_cut_cache") and self.home_cut_cache:
if not self.get_title_obj_attribute("has_url_overwrite", language, fallback) and path:
path = "/".join(path.split("/")[1:])
else:
home_pk = None
try:
home_pk = self.home_pk_cache
except NoHomeFound:
pass
ancestors = self.get_cached_ancestors(ascending=True)
if self.parent_id and ancestors[-1].pk == home_pk and not self.get_title_obj_attribute("has_url_overwrite", language, fallback) and path:
path = "/".join(path.split("/")[1:])
if settings.CMS_DBGETTEXT and settings.CMS_DBGETTEXT_SLUGS:
path = '/'.join([ugettext(p) for p in path.split('/')])
return urljoin(reverse('pages-root'), path)
def get_cached_ancestors(self, ascending=True):
if ascending:
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors(ascending))
return self.ancestors_ascending
else:
if not hasattr(self, "ancestors_descending"):
self.ancestors_descending = list(self.get_ancestors(ascending))
return self.ancestors_descending
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
If wanted title doesn't exists, EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle()
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(
language, fallback, version_id, force_reload), attrname)
if attribute and settings.CMS_DBGETTEXT:
if attrname in ('slug', 'path') and \
not settings.CMS_DBGETTEXT_SLUGS:
return attribute
return ugettext(attribute)
return attribute
except AttributeError:
return None
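# The accessors below (get_path, get_slug, get_title, get_menu_title,
# get_page_title, the meta accessors, get_application_urls and get_redirect)
# are thin wrappers that delegate to get_title_obj_attribute() with the
# relevant Title field name.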
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_meta_keywords(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the keywords meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_keywords", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.get_title_obj_attribute("application_urls", language, fallback, version_id, force_reload)
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
def _get_title_cache(self, language, fallback, version_id, force_reload):
if not language:
language = get_language()
load = False
if not hasattr(self, "title_cache") or force_reload:
load = True
self.title_cache = {}
        elif language not in self.title_cache:
if fallback:
fallback_langs = get_fallback_languages(language)
for lang in fallback_langs:
if lang in self.title_cache:
return lang
load = True
if load:
from cms.models.titlemodels import Title
if version_id:
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
for rev in revs:
obj = rev.object
if obj.__class__ == Title:
self.title_cache[obj.language] = obj
else:
title = Title.objects.get_title(self, language, language_fallback=fallback)
if title:
self.title_cache[title.language] = title
language = title.language
return language
def get_template(self):
"""
        get the template of this page if defined, otherwise the template of
        the closest ancestor that defines one, or the default template
"""
template = None
        if self.template and self.template != settings.CMS_TEMPLATE_INHERITANCE_MAGIC:
template = self.template
else:
for p in self.get_ancestors(ascending=True):
template = p.get_template()
break
if not template:
template = settings.CMS_TEMPLATES[0][0]
return template
def get_template_name(self):
"""
get the textual name (2nd parameter in settings.CMS_TEMPLATES)
        of the template of this page or of the nearest ancestor.
        Failing that, return the name of the default template.
"""
template = self.get_template()
for t in settings.CMS_TEMPLATES:
if t[0] == template:
return t[1]
return _("default")
def has_change_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission()) and \
self.has_generic_permission(request, "change")
def has_delete_permission(self, request):
opts = self._meta
if request.user.is_superuser:
return True
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission()) and \
self.has_generic_permission(request, "delete")
def has_publish_permission(self, request):
return self.has_generic_permission(request, "publish")
def has_advanced_settings_permission(self, request):
return self.has_generic_permission(request, "advanced_settings")
def has_change_permissions_permission(self, request):
"""Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions")
def has_add_permission(self, request):
"""Has user ability to add page under current page?
"""
return self.has_generic_permission(request, "add")
def has_move_page_permission(self, request):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page")
def has_moderate_permission(self, request):
"""Has user ability to moderate current page? If moderation isn't
installed, nobody can moderate.
"""
if not settings.CMS_MODERATOR:
return False
return self.has_generic_permission(request, "moderate")
def has_generic_permission(self, request, perm_type):
"""
Return true if the current user has permission on the page.
Return the string 'All' if the user has all rights.
"""
att_name = "permission_%s_cache" % perm_type
if not hasattr(self, "permission_user_cache") or not hasattr(self, att_name) \
or request.user.pk != self.permission_user_cache.pk:
from cms.utils.permissions import has_generic_permission
self.permission_user_cache = request.user
setattr(self, att_name, has_generic_permission(self.id, request.user, perm_type, self.site_id))
if getattr(self, att_name):
self.permission_edit_cache = True
return getattr(self, att_name)
def is_home(self):
if self.parent_id:
return False
else:
try:
return self.home_pk_cache == self.pk
except NoHomeFound:
pass
return False
def get_home_pk_cache(self):
attr = "%s_home_pk_cache_%s" % (self.publisher_is_draft and "draft" or "public", self.site.pk)
if not hasattr(self, attr):
setattr(self, attr, self.get_object_queryset().get_home(self.site).pk)
return getattr(self, attr)
def set_home_pk_cache(self, value):
attr = "%s_home_pk_cache_%s" % (self.publisher_is_draft and "draft" or "public", self.site.pk)
setattr(self, attr, value)
home_pk_cache = property(get_home_pk_cache, set_home_pk_cache)
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing page-scope files.
This allows multiple pages to contain files with identical names without namespace issues.
Plugins such as Picture can use this method to initialise the 'upload_to' parameter for
File-based fields. For example:
image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(settings.CMS_PAGE_MEDIA_PATH, "%d" % self.id, filename)
def last_page_states(self):
"""Returns last five page states, if they exist, optimized, calls sql
query only if some states available
"""
# TODO: optimize SQL... 1 query per page
if settings.CMS_MODERATOR:
has_moderator_state = getattr(self, '_has_moderator_state_chache', None)
            if has_moderator_state is False:
return None
return self.pagemoderatorstate_set.all().order_by('created',)[:5]
return None
def get_moderator_queryset(self):
"""Returns ordered set of all PageModerator instances, which should
moderate this page
"""
from cms.models.moderatormodels import PageModerator
if not settings.CMS_MODERATOR or not self.tree_id:
return PageModerator.objects.get_empty_query_set()
q = Q(page__tree_id=self.tree_id, page__level__lt=self.level, moderate_descendants=True) | \
Q(page__tree_id=self.tree_id, page__level=self.level - 1, moderate_children=True) | \
Q(page__pk=self.pk, moderate_page=True)
return PageModerator.objects.distinct().filter(q).order_by('page__level')
def is_under_moderation(self):
return bool(self.get_moderator_queryset().count())
def is_approved(self):
"""Returns true, if page is approved and published, or approved, but
parents are missing..
"""
return self.moderator_state in (Page.MODERATOR_APPROVED, Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)
def is_public_published(self):
"""Returns true if public model is published.
"""
if hasattr(self, 'public_published_cache'):
# if it was cached in change list, return cached value
return self.public_published_cache
        # otherwise make a db lookup
if self.publisher_public_id:
return self.publisher_public.published
#return is_public_published(self)
return False
def requires_approvement(self):
return self.moderator_state in (Page.MODERATOR_NEED_APPROVEMENT, Page.MODERATOR_NEED_DELETE_APPROVEMENT)
def get_moderation_value(self, user):
"""Returns page moderation value for given user, moderation value is
sum of moderations.
"""
        moderation_value = getattr(self, '_moderation_value_cache', None)
if moderation_value is not None and self._moderation_value_cache_for_user_id == user.pk:
return moderation_value
try:
page_moderator = self.pagemoderator_set.get(user=user)
except ObjectDoesNotExist:
return 0
moderation_value = page_moderator.get_decimal()
        self._moderation_value_cache = moderation_value
        self._moderation_value_cache_for_user_id = user.pk
return moderation_value
def _collect_delete_marked_sub_objects(self, seen_objs, parent=None, nullable=False, excluded_models=None):
if excluded_models is None:
excluded_models = [self.__class__]
elif not isinstance(self, Page) or self.__class__ in excluded_models:
return
pk_val = self._get_pk_val()
if seen_objs.add(self.__class__, pk_val, self, parent, nullable):
return
for related in self._meta.get_all_related_objects():
rel_opts_name = related.get_accessor_name()
if not issubclass(related.model, Page) or related.model in excluded_models:
continue
if isinstance(related.field.rel, OneToOneRel):
try:
sub_obj = getattr(self, rel_opts_name)
except ObjectDoesNotExist:
pass
else:
if sub_obj.publisher_is_draft:
continue
sub_obj._collect_delete_marked_sub_objects(seen_objs, self.__class__, related.field.null, excluded_models=excluded_models)
else:
# To make sure we can access all elements, we can't use the
# normal manager on the related object. So we work directly
# with the descriptor object.
for cls in self.__class__.mro():
if rel_opts_name in cls.__dict__:
rel_descriptor = cls.__dict__[rel_opts_name]
break
else:
raise AssertionError("Should never get here.")
delete_qs = rel_descriptor.delete_manager(self).all()
#filter(publisher_state=Publisher.PUBLISHER_STATE_DELETE)
for sub_obj in delete_qs:
if not isinstance(sub_obj, Page) or sub_obj.__class__ in excluded_models:
continue
if sub_obj.publisher_is_draft:
continue
sub_obj._collect_delete_marked_sub_objects(seen_objs, self.__class__, related.field.null, excluded_models=excluded_models)
# Handle any ancestors (for the model-inheritance case). We do this by
# traversing to the most remote parent classes -- those with no parents
# themselves -- and then adding those instances to the collection. That
# will include all the child instances down to "self".
parent_stack = [p for p in self._meta.parents.values() if p is not None]
while parent_stack:
link = parent_stack.pop()
parent_obj = getattr(self, link.name)
if parent_obj._meta.parents:
parent_stack.extend(parent_obj._meta.parents.values())
continue
# At this point, parent_obj is base class (no ancestor models). So
            # delete it and all its descendants.
if parent_obj.publisher_is_draft:
continue
parent_obj._collect_delete_marked_sub_objects(seen_objs, excluded_models=excluded_models)
def _publisher_delete_marked(self, collect=True):
"""If this instance, or some remote instances are marked for deletion
kill them.
"""
if self.publisher_is_draft:
# escape soon from draft models
return
if collect:
from django.db.models.query import CollectedObjects
seen = CollectedObjects()
self._collect_delete_marked_sub_objects(seen)
for cls in seen.unordered_keys():
items = seen[cls]
if issubclass(cls, Page):
for item in items.values():
item._publisher_delete_marked(collect=False)
if self.publisher_state == self.PUBLISHER_STATE_DELETE:
try:
self.delete()
except AttributeError:
# this exception may happen because of the plugin relations
                # to CMSPlugin and mptt's way of _meta assignment
pass
def get_object_queryset(self):
"""Returns smart queryset depending on object type - draft / public
"""
qs = self.__class__.objects
        return qs.drafts() if self.publisher_is_draft else qs.public()
def _publisher_can_publish(self):
"""Is parent of this object already published?
"""
if self.parent_id:
try:
return bool(self.parent.publisher_public_id)
except AttributeError:
raise MpttPublisherCantPublish
return True
def _publisher_get_public_copy(self):
"""This is here because of the relation between CMSPlugins - model
inheritance.
        e.g. Text.objects.get(pk=1).publisher_public returns an instance of
        CMSPlugin instead of an instance of Text; that's why this method must
        be overridden in CMSPlugin.
"""
return self.publisher_public
def get_next_filtered_sibling(self, **filters):
"""Very simillar to original mptt method, but adds support for filters.
Returns this model instance's next sibling in the tree, or
``None`` if it doesn't have a next sibling.
"""
opts = self._meta
if self.is_root_node():
filters.update({
'%s__isnull' % opts.parent_attr: True,
'%s__gt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
})
else:
filters.update({
opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
'%s__gt' % opts.left_attr: getattr(self, opts.right_attr),
})
sibling = None
try:
sibling = self._tree_manager.filter(**filters)[0]
except IndexError:
pass
return sibling
    def get_previous_filtered_sibling(self, **filters):
        """Very similar to the original mptt method, but adds support for filters.
Returns this model instance's previous sibling in the tree, or
``None`` if it doesn't have a previous sibling.
"""
opts = self._meta
if self.is_root_node():
filters.update({
'%s__isnull' % opts.parent_attr: True,
'%s__lt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
})
order_by = '-%s' % opts.tree_id_attr
else:
filters.update({
opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
'%s__lt' % opts.right_attr: getattr(self, opts.left_attr),
})
order_by = '-%s' % opts.right_attr
sibling = None
try:
sibling = self._tree_manager.filter(**filters).order_by(order_by)[0]
except IndexError:
pass
return sibling
def _publisher_save_public(self, obj):
"""Mptt specific stuff before the object can be saved, overrides original
publisher method.
Args:
obj - public variant of `self` to be saved.
"""
        prev_sibling = self.get_previous_filtered_sibling(publisher_is_draft=True, publisher_public__isnull=False)
if not self.publisher_public_id:
# is there anybody on left side?
if prev_sibling:
obj.insert_at(prev_sibling.publisher_public, position='right', save=False)
else:
# it is a first time published object, perform insert_at:
parent, public_parent = self.parent, None
if parent:
public_parent = parent.publisher_public
if public_parent:
obj.insert_at(public_parent, save=False)
else:
# check if object was moved / structural tree change
            prev_public_sibling = self.old_public.get_previous_filtered_sibling()
if not self.level == self.old_public.level or \
not (self.level > 0 and self.parent.publisher_public == self.old_public.parent) or \
not prev_sibling == prev_public_sibling == None or \
(prev_sibling and prev_sibling.publisher_public_id == prev_public_sibling.id):
if prev_sibling:
obj.insert_at(prev_sibling.publisher_public, position="right")
elif self.parent:
# move as a first child to parent
target = self.parent.publisher_public
obj.insert_at(target, position='first-child')
else:
# it is a move from the right side or just save
next_sibling = self.get_next_filtered_sibling()
if next_sibling and next_sibling.publisher_public_id:
obj.insert_at(next_sibling.publisher_public, position="left")
else:
# insert at last public position
                    prev_sibling = self.old_public.get_previous_filtered_sibling()
if prev_sibling:
obj.insert_at(prev_sibling, position="right")
elif self.old_public.parent:
# move as a first child to parent
target = self.old_public.parent
obj.insert_at(target, position='first-child')
else:
# it is a move from the right side or just save
next_sibling = self.old_public.get_next_filtered_sibling()
if next_sibling and next_sibling.publisher_public_id:
obj.insert_at(next_sibling, position="left")
# or none structural change, just save
obj.save()
return obj
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
# inline import to prevent circular imports
from cms.utils.plugins import get_placeholders
placeholders = get_placeholders(self.get_template())
found = {}
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
found[placeholder.slot] = placeholder
for placeholder_name in placeholders:
if not placeholder_name in found:
placeholder = Placeholder.objects.create(slot=placeholder_name)
self.placeholders.add(placeholder)
found[placeholder_name] = placeholder
def _reversion():
if 'publisher' in settings.INSTALLED_APPS:
exclude_fields = ['publisher_is_draft', 'publisher_public', 'publisher_state']
else:
exclude_fields = []
reversion_register(
Page,
follow=["title_set", "placeholders", "pagepermission_set"],
exclude_fields=exclude_fields
)
_reversion()
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for the gdal raster driver tests.
Provides tools to simplify testing a driver, to report which drivers are
available, and to locate test files.
Rewrite of GDALTest class:
http://trac.osgeo.org/gdal/browser/trunk/autotest/pymod/gdaltest.py#L284
"""
import contextlib
import json
from optparse import OptionParser
import os
import unittest
from osgeo import gdal
from osgeo import osr
import gflags as flags
import logging
from autotest2.gcore import gcore_util
FLAGS = flags.FLAGS
drivers = [gdal.GetDriver(i).ShortName.lower()
for i in range(gdal.GetDriverCount())]
AAIGRID_DRIVER = 'aaigrid'
ACE2_DRIVER = 'ace2'
ADRG_DRIVER = 'adrg'
AIG_DRIVER = 'aig'
AIRSAR_DRIVER = 'airsar'
ARG_DRIVER = 'arg'
BAG_DRIVER = 'bag'
BIGGIF_DRIVER = 'biggif'
BLX_DRIVER = 'blx'
BMP_DRIVER = 'bmp'
BSB_DRIVER = 'bsb'
BT_DRIVER = 'bt'
CEOS_DRIVER = 'ceos'
COASP_DRIVER = 'coasp'
COSAR_DRIVER = 'cosar'
CPG_DRIVER = 'cpg'
CTABLE2_DRIVER = 'ctable2'
CTG_DRIVER = 'ctg'
DIMAP_DRIVER = 'dimap'
DIPEX_DRIVER = 'dipex'
DOQ1_DRIVER = 'doq1'
DOQ2_DRIVER = 'doq2'
DTED_DRIVER = 'dted'
E00GRID_DRIVER = 'e00grid'
ECRGTOC_DRIVER = 'ecrgtoc'
ECW_DRIVER = 'ecw'
EHDR_DRIVER = 'ehdr'
EIR_DRIVER = 'eir'
ELAS_DRIVER = 'elas'
ENVI_DRIVER = 'envi'
ERS_DRIVER = 'ers'
ESAT_DRIVER = 'esat'
FAST_DRIVER = 'fast'
FIT_DRIVER = 'fit'
FITS_DRIVER = 'fits'
FUJIBAS_DRIVER = 'fujibas'
GENBIN_DRIVER = 'genbin'
GFF_DRIVER = 'gff'
GIF_DRIVER = 'gif'
GMT_DRIVER = 'gmt'
GRASS_DRIVER = 'grass'
GRASSASCIIGRID_DRIVER = 'grassasciigrid'
GRIB_DRIVER = 'grib'
GS7BG_DRIVER = 'gs7bg'
GSAG_DRIVER = 'gsag'
GSBG_DRIVER = 'gsbg'
GSC_DRIVER = 'gsc'
GTIFF_DRIVER = 'gtiff'
GTX_DRIVER = 'gtx'
GXF_DRIVER = 'gxf'
HDF4_DRIVER = 'hdf4'
HDF5_DRIVER = 'hdf5'
HDF4IMAGE_DRIVER = 'hdf4image'
HDF5IMAGE_DRIVER = 'hdf5image'
HF2_DRIVER = 'hf2'
HFA_DRIVER = 'hfa'
HTTP_DRIVER = 'http'
IDA_DRIVER = 'ida'
ILWIS_DRIVER = 'ilwis'
INGR_DRIVER = 'ingr'
IRIS_DRIVER = 'iris'
ISIS2_DRIVER = 'isis2'
ISIS3_DRIVER = 'isis3'
JAXAPALSAR_DRIVER = 'jaxapalsar'
JDEM_DRIVER = 'jdem'
JP2ECW_DRIVER = 'jp2ecw'
JP2KAK_DRIVER = 'jp2kak'
JPEG2000_DRIVER = 'jpeg2000'
JP2MRSID = 'jp2mrsid'
JP2OPENJPEG = 'jp2openjpeg'
JPEG_DRIVER = 'jpeg'
JPIPKAK_DRIVER = 'jpipkak'
KMLSUPEROVERLAY_DRIVER = 'kmlsuperoverlay'
KRO_DRIVER = 'kro'
L1B_DRIVER = 'l1b'
LAN_DRIVER = 'lan'
LCP_DRIVER = 'lcp'
LEVELLER_DRIVER = 'leveller'
LOSLAS_DRIVER = 'loslas'
MAP_DRIVER = 'map'
MBTILES_DRIVER = 'mbtiles'
MEM_DRIVER = 'mem'
MFF_DRIVER = 'mff'
MFF2_DRIVER = 'mff2'
MG4LIDAR_DRIVER = 'mg4lidar'
MRSID_DRIVER = 'mrsid'
MSGN_DRIVER = 'msgn'
NDF_DRIVER = 'ndf'
NETCDF_DRIVER = 'netcdf'
NGSGEOID_DRIVER = 'ngsgeoid'
NITF_DRIVER = 'nitf'
NTV2_DRIVER = 'ntv2'
NWT_GRC_DRIVER = 'nwt_grc'
NWT_GRD_DRIVER = 'nwt_grd'
OZI_DRIVER = 'ozi'
PAUX_DRIVER = 'paux'
PCIDSK_DRIVER = 'pcidsk'
PCRASTER_DRIVER = 'pcraster'
PDF_DRIVER = 'pdf'
PDS_DRIVER = 'pds'
PNG_DRIVER = 'png'
PNM_DRIVER = 'pnm'
POSTGISRASTER_DRIVER = 'postgisraster'
R_DRIVER = 'r'
RASTERLITE_DRIVER = 'rasterlite'
RIK_DRIVER = 'rik'
RMF_DRIVER = 'rmf'
RPFTOC_DRIVER = 'rpftoc'
RS2_DRIVER = 'rs2'
RST_DRIVER = 'rst'
SAGA_DRIVER = 'saga'
SAR_CEOS_DRIVER = 'sar_ceos'
SDTS_DRIVER = 'sdts'
SGI_DRIVER = 'sgi'
SNODAS_DRIVER = 'snodas'
SRP_DRIVER = 'srp'
SRTMHGT_DRIVER = 'srtmhgt'
TERRAGEN_DRIVER = 'terragen'
TIL_DRIVER = 'til'
TSX_DRIVER = 'tsx'
USGSDEM_DRIVER = 'usgsdem'
VRT_DRIVER = 'vrt'
WCS_DRIVER = 'wcs'
WEBP_DRIVER = 'webp'
WMS_DRIVER = 'wms'
XPM_DRIVER = 'xpm'
XYZ_DRIVER = 'xyz'
ZMAP_DRIVER = 'zmap'
# A string copy of byte.tif so that tests do not need to depend on all of the
# tiff data to get a simple tiff for basic tests.
TIFF_BYTE_FILE = (
'\x49\x49\x2a\x00\x98\x01\x00\x00\x6b\x7b\x84\x73\x84\x84\x8c\x84\x84'
'\x84\x6b\x84\x6b\x84\x84\x6b\x7b\x73\x9c\x94\x73\x84\x6b\x7b\x94\x73'
'\xa5\x73\x8c\x6b\x7b\x7b\x63\x84\x7b\x84\x84\x84\x63\x9c\x73\x84\x8c'
'\x84\x7b\x73\x8c\x6b\x8c\x73\x84\x7b\x6b\x84\x84\x73\x73\x6b\x73\x6b'
'\x94\x84\x7b\x7b\x73\x84\x84\x7b\x73\x7b\x73\x7b\x6b\x73\x94\x6b\x73'
'\x8c\x73\x84\x84\x9c\x84\x8c\x84\x84\x73\x73\x73\x7b\x94\x7b\xa5\x7b'
'\x84\x6b\x6b\x84\x9c\x7b\xbd\xad\xad\x94\x94\x73\x94\x7b\x6b\x84\x73'
'\x84\x9c\x63\x7b\x73\x84\x84\xce\x6b\xc5\xad\x94\x8c\x8c\x84\x63\x84'
'\x7b\x73\x8c\x84\x84\x63\x84\x7b\x84\xad\x7b\x73\x94\x7b\x94\x73\x94'
'\x7b\x8c\x7b\x6b\x73\x84\x73\x6b\x73\x63\x7b\x63\xb5\x63\x6b\x7b\x73'
'\x84\x73\x7b\x84\x73\x84\x84\x7b\x7b\x84\x63\x73\x63\x7b\x84\x73\x73'
'\x6b\x8c\x8c\x63\x8c\x63\x73\x7b\x6b\x84\x6b\x73\x6b\x73\x7b\x84\x7b'
'\x6b\x7b\x84\x84\x84\x84\x84\x7b\x63\x84\x7b\x6b\x94\x63\x73\x7b\x8c'
'\xad\x7b\x6b\x7b\x7b\x7b\x6b\x7b\x7b\x7b\x6b\x8c\x7b\x7b\x73\x73\x5a'
'\x6b\xad\x6b\x6b\x6b\x6b\x63\x84\x7b\x73\xad\x94\x63\x7b\x7b\x6b\x7b'
'\x63\x6b\xbd\xad\x6b\x73\x73\x6b\x63\x8c\x6b\xad\x8c\x94\x84\x84\x6b'
'\x7b\x63\x63\x73\x63\x84\x63\x8c\x73\x94\x7b\x63\x84\x7b\x94\x8c\x8c'
'\x6b\x8c\x5a\x6b\x73\x6b\x5a\x63\x7b\x73\x73\x73\x7b\x7b\x94\x73\x94'
'\x63\x84\xa5\x94\x9c\x7b\x6b\x6b\x6b\x73\x8c\x63\x73\x63\x63\x6b\x73'
'\x84\x73\x5a\x7b\x73\xbd\xad\x8c\x8c\xa5\x73\x84\x5a\x63\x73\x5a\x63'
'\x63\x6b\x63\x84\x63\x6b\x84\x84\x9c\xb5\x8c\xad\x7b\x84\x63\x73\x7b'
'\x4a\x73\x63\x7b\x8c\x9c\x84\xa5\x8c\x8c\x63\xad\xf7\xff\xce\x84\x6b'
'\x8c\x7b\x94\x84\xa5\xa5\x94\x8c\x84\x7b\x6b\x7b\x6b\x7b\xb5\xb5\x9c'
'\x94\x9c\x9c\x9c\xb5\x84\x94\x73\x84\x6b\x6b\x6b\x6b\x6b\x73\x63\x6b'
'\x0f\x00\x00\x01\x03\x00\x01\x00\x00\x00\x14\x00\x00\x00\x01\x01\x03'
'\x00\x01\x00\x00\x00\x14\x00\x00\x00\x02\x01\x03\x00\x01\x00\x00\x00'
'\x08\x00\x00\x00\x03\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06'
'\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x11\x01\x04\x00\x01\x00'
'\x00\x00\x08\x00\x00\x00\x15\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00'
'\x00\x16\x01\x03\x00\x01\x00\x00\x00\x14\x00\x00\x00\x17\x01\x04\x00'
'\x01\x00\x00\x00\x90\x01\x00\x00\x1c\x01\x03\x00\x01\x00\x00\x00\x01'
'\x00\x00\x00\x53\x01\x03\x00\x01\x00\x00\x00\x01\x00\x00\x00\x0e\x83'
'\x0c\x00\x03\x00\x00\x00\x52\x02\x00\x00\x82\x84\x0c\x00\x06\x00\x00'
'\x00\x6a\x02\x00\x00\xaf\x87\x03\x00\x18\x00\x00\x00\x9a\x02\x00\x00'
'\xb1\x87\x02\x00\x16\x00\x00\x00\xca\x02\x00\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x4e\x40\x00\x00\x00\x00\x00\x00\x4e\x40\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x40\xe6\x1a\x41\x00\x00\x00\x00\xcc\x9e\x4c\x41\x00\x00\x00\x00\x00'
'\x00\x00\x00\x01\x00\x01\x00\x00\x00\x05\x00\x00\x04\x00\x00\x01\x00'
'\x01\x00\x01\x04\x00\x00\x01\x00\x01\x00\x02\x04\xb1\x87\x15\x00\x00'
'\x00\x00\x0c\x00\x00\x01\x00\x57\x68\x04\x0c\x00\x00\x01\x00\x29\x23'
'\x4e\x41\x44\x32\x37\x20\x2f\x20\x55\x54\x4d\x20\x7a\x6f\x6e\x65\x20'
'\x31\x31\x4e\x7c\x00')
def SkipIfDriverMissing(driver_name):
"""Decorator that only runs a test if a required driver is found.
Args:
driver_name: Lower case short name of a driver. e.g. 'dted'.
Returns:
A pass through function if the test should be run or the unittest skip
function if the test or TestCase should not be run.
"""
def _IdReturn(obj):
return obj
debug = gdal.GetConfigOption('CPL_DEBUG')
if driver_name not in drivers:
if debug:
logging.info('Debug: Skipping test. Driver not found: %s', driver_name)
return unittest.case.skip('Skipping "%s" driver dependent test.' %
driver_name)
if debug:
logging.info('Debug: Running test. Found driver: %s', driver_name)
return _IdReturn
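# A minimal usage sketch for the decorator above; the driver constant and
# test names are illustrative:
#
#   class SomeDriverTest(unittest.TestCase):
#
#     @SkipIfDriverMissing(DTED_DRIVER)
#     def testSomethingDtedSpecific(self):
#       ...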
def GetTestFilePath(filename):
return os.path.join(
FLAGS.test_srcdir,
'autotest2/gdrivers/testdata',
os.path.split(os.path.abspath(__file__))[0],
'testdata',
filename
)
def CreateParser():
parser = OptionParser()
parser.add_option('-t', '--temp-dir', default=os.getcwd(),
help='Where to put temporary files.',
metavar='DIR')
parser.add_option('-p', '--pam-dir', default=None,
help='Where to store the .aux.xml files created '
'by the persistent auxiliary metadata system. '
'Defaults to temp-directory/pam.',
metavar='DIR')
parser.add_option('-v', '--verbose', default=False, action='store_true',
help='Put the unittest run into verbose mode.')
return parser
def Setup(options):
if options.verbose:
logging.basicConfig(level=logging.INFO)
options.temp_dir = os.path.abspath(options.temp_dir)
gdal.SetConfigOption('CPL_TMPDIR', options.temp_dir)
logging.info('CPL_TMPDIR: %s', options.temp_dir)
options.pam_dir = options.pam_dir or os.path.join(options.temp_dir, 'pam')
if not os.path.isdir(options.pam_dir):
os.mkdir(options.pam_dir)
gdal.SetConfigOption('GDAL_PAM_PROXY_DIR', options.pam_dir)
logging.info('GDAL_PAM_PROXY_DIR: %s', options.pam_dir)
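# Typical wiring in a test runner's main, sketched for illustration (not part
# of this module's public API):
#
#   parser = CreateParser()
#   options, _ = parser.parse_args()
#   Setup(options)
#   unittest.main()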
class TempFiles(object):
def __init__(self):
self.count = 0
self.tmp_dir = None
def TempFile(self, basename, ext=''):
if not self.tmp_dir:
self.tmp_dir = gdal.GetConfigOption('TMPDIR')
if not self.tmp_dir:
logging.fatal('Do not have a tmp_dir!!!')
filepath = os.path.join(self.tmp_dir,
basename + '%03d' % self.count + ext)
self.count += 1
return filepath
_temp_files = TempFiles()
@contextlib.contextmanager
def ConfigOption(key, value, default=None):
"""Set a gdal config option and when the context closes, try to revert it.
TODO(schwehr): This would be better as part of gcore_util.py.
Args:
key: String naming the config option.
value: String value to set the option to.
default: String value to reset the option to if no starting value.
Yields:
None
"""
original_value = gdal.GetConfigOption(key, default)
gdal.SetConfigOption(key, value)
try:
yield
finally:
gdal.SetConfigOption(key, original_value)
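# Usage sketch; GDAL_CACHEMAX is just one example of a real config option:
#
#   with ConfigOption('GDAL_CACHEMAX', '64'):
#     pass  # code here sees the option; the old value is restored on exit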
class DriverTestCase(unittest.TestCase):
"""Checks the basic functioning of a single raster driver.
Assumes that only one driver is registered for the file type.
  CheckOpen has a critical side effect: it stores the opened data
  source in the src attribute. Checks below CheckOpen in this class
  assume that self.src is the originally opened file.
"""
def setUp(self, driver_name, ext):
super(DriverTestCase, self).setUp()
gcore_util.SetupTestEnv()
assert driver_name
self.driver_name = driver_name.lower()
self.driver = gdal.GetDriverByName(driver_name)
assert self.driver
self.ext = ext
# Start with a clean slate.
gdal.ErrorReset()
# Allow details and custom message.
self.longMessage = True
def assertIterAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
msg = msg or ''
self.assertEqual(len(first), len(second), 'lists not same length ' + msg)
for a, b in zip(first, second):
self.assertAlmostEqual(a, b, places=places, msg=msg, delta=delta)
def CheckDriver(self):
self.assertEqual(self.driver_name, self.driver.ShortName.lower())
def CheckOpen(self, filepath, check_driver=True):
"""Open the test file and keep it open as self.src.
Args:
filepath: str, Path to a file to open with GDAL.
check_driver: If True, make sure that the file opened with the
default driver for this test. If it is a str, then check that
the driver used matches the string. If False, then do not
check the driver.
"""
if filepath.startswith(os.path.sep) and not filepath.startswith('/vsi'):
self.assertTrue(os.path.isfile(filepath), 'Does not exist: ' + filepath)
self.src = gdal.Open(filepath, gdal.GA_ReadOnly)
self.assertTrue(self.src, '%s driver unable to open %s' % (self.driver_name,
filepath))
if check_driver:
driver_name = self.src.GetDriver().ShortName.lower()
      if isinstance(check_driver, (str, unicode)):
self.assertEqual(check_driver, driver_name)
else:
self.assertEqual(self.driver_name, driver_name)
self.filepath = filepath
def CheckGeoTransform(self, gt_expected, gt_delta=None):
gt = self.src.GetGeoTransform()
if not gt and not gt_expected:
return
self.assertEqual(len(gt_expected), 6)
gt_delta = gt_delta or ((abs(gt_expected[1]) + abs(gt_expected[2])) / 100.0)
for idx in range(6):
self.assertAlmostEqual(gt[idx], gt_expected[idx], delta=gt_delta)
def CheckProjection(self, prj_expected):
prj = self.src.GetProjection()
if not prj and not prj_expected:
return
src_osr = osr.SpatialReference(wkt=prj)
prj2 = osr.SpatialReference()
prj2.SetFromUserInput(prj_expected)
msg = 'Projection mismatch:\nGot:\n%s\nExpected:\n%s' % (prj, prj_expected)
self.assertTrue(src_osr.IsSame(prj2), msg=msg)
def CheckShape(self, width, height, num_bands):
self.assertEqual(width, self.src.RasterXSize)
self.assertEqual(height, self.src.RasterYSize)
self.assertEqual(num_bands, self.src.RasterCount)
def CheckBand(self, band_num, checksum, gdal_type=None, nodata=None,
min_val=None, max_val=None):
band = self.src.GetRasterBand(band_num)
self.assertEqual(band.Checksum(), checksum)
if gdal_type is not None:
self.assertEqual(gdal_type, band.DataType)
if nodata is not None:
self.assertEqual(nodata, band.GetNoDataValue())
if min_val is not None or max_val is not None:
stats = band.GetStatistics(False, True)
if min_val is not None:
self.assertAlmostEqual(min_val, stats[0])
if max_val is not None:
self.assertAlmostEqual(max_val, stats[1])
def CheckBandSubRegion(self, band_num, checksum, xoff, yoff, xsize, ysize):
band = self.src.GetRasterBand(band_num)
self.assertEqual(checksum, band.Checksum(xoff, yoff, xsize, ysize))
# TODO(schwehr): Add assertCreateCopyInterrupt method.
def CheckCreateCopy(self,
check_checksums=True,
check_stats=True,
check_geotransform=True,
check_projection=True,
options=None,
strict=True,
vsimem=False,
remove_result=False,
checksums=None,
stats=None,
metadata=None):
"""Compare a copy to the currently open file.
Args:
check_checksums: Set to False to not check checksums. Or a list of one
checksum per band.
check_stats: Compare band statistics if true. Or a list of one
(min, max) tuple per band.
check_geotransform: Set to False to skip checking the geotransform.
check_projection: Set to False to skip checking the projection.
options: List of options to pass to CreateCopy.
strict: Set to False to have the CreateCopy operation in loose mode.
vsimem: If true, copy to memory.
remove_result: If true, remove the copy when done.
      checksums: Optional list of checksums. If left out, the checksums from
        the input file will be used.
stats: Optional list of min/max tuples to compare for each band. If
left out, uses the stats from the input file.
metadata: A dictionary of metadata fields to verify.
Returns:
Open gdal raster Dataset.
"""
# TODO(schwehr): Complain if options is a str or unicode.
# TODO(schwehr): Use gdal.GetConfigOption('TMPDIR') if available.
options = options or []
basename = os.path.basename(self.src.GetFileList()[0])
if vsimem:
dst_file = os.path.join('/vsimem/', basename + self.ext)
else:
dst_file = _temp_files.TempFile(basename, self.ext)
dst = self.driver.CreateCopy(dst_file, self.src, strict=strict,
options=options)
self.assertTrue(dst)
self.assertEqual(dst.GetDriver().ShortName.lower(), self.driver_name)
# TODO(schwehr): Pre-close tests.
del dst # Flush the file.
self.dst = gdal.Open(dst_file)
self.assertTrue(self.dst)
self.assertEqual(self.dst.RasterCount, self.src.RasterCount)
for band_num in range(1, self.dst.RasterCount + 1):
src_band = self.src.GetRasterBand(band_num)
dst_band = self.dst.GetRasterBand(band_num)
if check_checksums:
dst_checksum = dst_band.Checksum()
if checksums:
self.assertEqual(dst_checksum, checksums[band_num - 1])
else:
self.assertEqual(dst_checksum, src_band.Checksum())
if check_stats:
dst_stats = dst_band.ComputeRasterMinMax()
if stats:
self.assertEqual(dst_stats, stats[band_num - 1])
else:
self.assertEqual(dst_stats, src_band.ComputeRasterMinMax())
if check_geotransform:
self.CheckGeoTransform(self.dst.GetGeoTransform())
if check_projection:
self.CheckProjection(self.dst.GetProjection())
if metadata:
result_metadata = self.dst.GetMetadata()
for key in metadata:
self.assertEqual(metadata[key], result_metadata[key])
if remove_result:
self.dst = None
self.driver.Delete(dst_file)
return self.dst
def CheckCreateCopySimple(self, data):
"""Try to make a copy from vsimem to the format under test in vsimem.
Args:
data: Contents of the source tif file to write to vsimem.
"""
filepath = '/vsimem/create_copy_simple.tif'
with gcore_util.GdalUnlinkWhenDone(filepath):
dst = gdal.VSIFOpenL(filepath, 'wb')
gdal.VSIFWriteL(data, 1, len(data), dst)
gdal.VSIFCloseL(dst)
self.CheckOpen(filepath, check_driver=GTIFF_DRIVER)
self.CheckCreateCopy(vsimem=True, remove_result=True)
def CheckInfo(self):
"""Use a golden json dump to see if the current read matches.
    May need additional work in the future to keep the checks from being brittle.
Must call CheckOpen before using this.
"""
    with open(self.filepath + '.json') as golden:
      expect = json.load(golden)
options = gdal.InfoOptions(
format='json', computeMinMax=True, stats=True, computeChecksum=True)
result = gdal.Info(self.src, options=options)
# Save in case of failure.
result_json = json.dumps(result)
basename_json = os.path.basename(self.filepath) + '.json'
    # Some drivers include the version number in driverLongName; a difference
    # there is okay as long as driverShortName is the same.
expect.pop('driverLongName')
result.pop('driverLongName')
description_expect = expect.pop('description')
description_result = result.pop('description')
self.assertEqual(os.path.basename(description_result), description_expect)
files_expect = expect.pop('files')
files_result = result.pop('files')
self.assertEqual(len(files_result), len(files_expect),
'%s versus %s' % (files_result, files_expect))
for filepath_result, filepath_expect in zip(files_result, files_expect):
self.assertEqual(os.path.basename(filepath_result), filepath_expect)
extent_expect_field = _GetExtentField(expect)
extent_result_field = _GetExtentField(result)
if not extent_expect_field or extent_expect_field != extent_result_field:
MaybeWriteOutputFile(basename_json, result_json)
self.assertEqual(extent_expect_field, extent_result_field, self.filepath)
extent_expect = expect.pop(extent_expect_field)['coordinates'][0]
extent_result = result.pop(extent_result_field)['coordinates'][0]
self.assertEqual(len(extent_result), len(extent_expect))
for a, b in zip(extent_result, extent_expect):
self.assertAlmostEqual(a[0], b[0], places=2, msg=self.filepath)
bands_expect = expect.pop('bands')
bands_result = result.pop('bands')
if bands_result != bands_expect:
MaybeWriteOutputFile(basename_json, result_json)
self.assertEqual(bands_result, bands_expect, self.filepath)
srs_wkt_expect = expect.pop('coordinateSystem')['wkt']
srs_wkt_result = result.pop('coordinateSystem')['wkt']
if srs_wkt_expect:
srs_expect = osr.SpatialReference(wkt=str(srs_wkt_expect))
srs_result = osr.SpatialReference(wkt=str(srs_wkt_result))
if not srs_expect.IsSame(srs_result):
MaybeWriteOutputFile(basename_json, result_json)
self.assertTrue(srs_expect.IsSame(srs_result), self.filepath)
if result != expect:
MaybeWriteOutputFile(basename_json, result_json)
self.assertEqual(result, expect, self.filepath)
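# A hypothetical sketch of how DriverTestCase is meant to be subclassed; the
# driver, file name, and expected shape are illustrative:
#
#   class PngDriverTest(DriverTestCase):
#
#     def setUp(self):
#       super(PngDriverTest, self).setUp(PNG_DRIVER, '.png')
#
#     def testOpenAndCopy(self):
#       self.CheckOpen(GetTestFilePath('test.png'))
#       self.CheckShape(20, 20, 1)
#       self.CheckCreateCopy(vsimem=True, remove_result=True)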
def _GetExtentField(json_info):
"""The extent field must be only one of extent or wgs84Extent."""
has_extent = 'extent' in json_info
has_wgs84 = 'wgs84Extent' in json_info
if has_extent and not has_wgs84:
return 'extent'
if not has_extent and has_wgs84:
return 'wgs84Extent'
return None
def MaybeWriteOutputFile(filename, data):
"""Write a file from a test if allowed."""
if 'TEST_UNDECLARED_OUTPUTS_DIR' not in os.environ:
logging.error('Not allowed to write from the test.')
return
output_dir = os.environ['TEST_UNDECLARED_OUTPUTS_DIR']
  filepath = os.path.join(output_dir, os.path.basename(filename))
  with open(filepath, 'w') as output:
    output.write(data)
from robot.api import logger # noqa: F401 #pylint: disable=unused-import
from TestStack.White.UIItems.ListBoxItems import ComboBox, ListBox
from TestStack.White.UIItems import UIActionException
from WhiteLibrary.exceptions import ItemDisabledError
from WhiteLibrary.keywords.librarycomponent import LibraryComponent
from WhiteLibrary.keywords.robotlibcore import keyword
class ListKeywords(LibraryComponent):
@keyword
def select_listbox_value(self, locator, value):
"""Selects a value from a listbox.
``locator`` is the locator of the listbox or ListBox item object.
Locator syntax is explained in `Item locators`.
``value`` is the value to be selected.
"""
listbox = self.state._get_typed_item_by_locator(ListBox, locator)
listbox.Select(value)
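    # Example usage in Robot Framework test data (the locator and value
    # below are illustrative):
    #
    #   Select Listbox Value    fruit_listbox    Apples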
@keyword
def select_listbox_index(self, locator, item_index):
"""Selects an item by its index from a listbox.
``locator`` is the locator of the listbox or ListBox item object.
Locator syntax is explained in `Item locators`.
``item_index`` is the index of the item to select.
"""
listbox = self.state._get_typed_item_by_locator(ListBox, locator)
listbox.Select(int(item_index))
@keyword
def get_listbox_selected_text(self, locator):
"""Returns the text of the selected listbox item.
``locator`` is the locator of the listbox or ListBox item object.
Locator syntax is explained in `Item locators`.
"""
listbox = self.state._get_typed_item_by_locator(ListBox, locator)
return listbox.SelectedItemText
@keyword
def listbox_selection_should_be(self, locator, expected):
"""Checks the listbox selection.
Fails if the selection was not as expected.
``locator`` is the locator of the listbox or ListBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected selection value.
"""
listbox = self.state._get_typed_item_by_locator(ListBox, locator)
if listbox.SelectedItemText != expected:
raise AssertionError(
u"Expected listbox selection to be {}, was {}".format(expected, listbox.SelectedItemText)
)
@keyword
def listbox_should_contain(self, locator, expected):
"""Checks that listbox contains an item with text ``expected``.
Fails if the listbox does not contain an item with the given text.
``locator`` is the locator of the listbox or ListBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected item text.
"""
listbox = self.state._get_typed_item_by_locator(ListBox, locator)
try:
listbox.Item(str(expected))
except UIActionException as error:
# Check error in case we get UIActionException with another message
if "Item of text" in str(error):
raise AssertionError(u"ListBox with locator '{}' did not contain '{}'".format(locator, expected))
raise error
@keyword
def listbox_should_not_contain(self, locator, expected):
"""Verifies that a listbox does not contain an item with text ``expected``.
Fails if the listbox contains an item with the given text.
``locator`` is the locator of the listbox or ListBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected item text.
"""
listbox = self.state._get_typed_item_by_locator(ListBox, locator)
try:
listbox.Item(str(expected))
raise AssertionError(u"ListBox with locator '{}' should not have contained '{}'".format(locator, expected))
except UIActionException as error:
# Check error in case we get UIActionException with another message
if "Item of text" not in str(error):
raise error
@keyword
def select_combobox_value(self, locator, value):
"""Selects a value from a combobox.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
``value`` is the value to be selected.
"""
combobox = self.state._get_typed_item_by_locator(ComboBox, locator)
if not combobox.Enabled:
raise ItemDisabledError(u"Could not select item '{}' because the ComboBox was disabled".format(value))
combobox.Select(value)
@keyword
def select_combobox_index(self, locator, item_index):
"""Selects a value from combobox by using its index.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
``item_index`` is the index to be selected.
"""
combobox = self.state._get_typed_item_by_locator(ComboBox, locator)
if not combobox.Enabled:
raise ItemDisabledError(u"Could not select item at {} because the ComboBox was disabled".format(item_index))
combobox.Select(int(item_index))
@keyword
def get_combobox_selected_text(self, locator):
"""Returns the text of the selected combobox item.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
"""
combobox = self.state._get_typed_item_by_locator(ComboBox, locator)
return combobox.SelectedItemText
@keyword
def verify_combobox_item(self, locator, expected):
"""*DEPRECATED* Please use Verify Combobox Selection instead.
Verifies the selected value of a combobox.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected selection value.
"""
self.verify_combobox_selection(locator, expected)
@keyword
def verify_combobox_selection(self, locator, expected):
"""Verifies that the combobox value is selected.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected selection value.
"""
combobox = self.state._get_typed_item_by_locator(ComboBox, locator)
self.state._verify_value(expected, combobox.EditableText)
@keyword
def combobox_should_contain(self, locator, expected):
"""Verifies that a combobox contains an item with text ``expected``.
Fails if the combobox does not contain an item with the given text.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected item text.
"""
combobox = self.state._get_typed_item_by_locator(ComboBox, locator)
try:
combobox.Item(str(expected))
except UIActionException as error:
# Check error in case we get UIActionException with another message
if "Item of text" in str(error):
raise AssertionError(u"ComboBox with locator '{}' did not contain '{}'".format(locator, expected))
raise error
@keyword
def combobox_should_not_contain(self, locator, expected):
"""Verifies that a combobox does not contain an item with text ``expected``.
Fails if the combobox contains an item with the given text.
``locator`` is the locator of the combobox or ComboBox item object.
Locator syntax is explained in `Item locators`.
``expected`` is the expected item text.
"""
combobox = self.state._get_typed_item_by_locator(ComboBox, locator)
try:
combobox.Item(str(expected))
raise AssertionError(u"ComboBox with locator '{}' should not have contained '{}'".format(locator, expected))
except UIActionException as error:
# Check error in case we get UIActionException with another message
if "Item of text" not in str(error):
raise error
#!/usr/bin/env python
# coding=utf8
import hashlib
import hmac
import os
import tempfile
from .decorator import StoreDecorator
class _HMACFileReader(object):
def __init__(self, hm, source):
self.hm = hm
self.source = source
# "preload" buffer
self.buffer = source.read(self.hm.digest_size)
        if len(self.buffer) != self.hm.digest_size:
            raise VerificationException(
                'Source does not contain an HMAC hash (too small)')
def read(self, n=None):
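        # Invariant: the last digest_size bytes stay buffered at all times;
        # they are either the trailing HMAC (once the source is exhausted)
        # or payload bytes not yet known to precede the HMAC.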
        if self.buffer == '' or n == 0:
            return ''
        new_read = self.source.read(n) if n is not None else self.source.read()
        finished = (n is None or len(new_read) != n)
        self.buffer += new_read
        if n is not None:
            offset = min(n, len(self.buffer) - self.hm.digest_size)
        else:
            offset = len(self.buffer) - self.hm.digest_size
rv, self.buffer = self.buffer[:offset], self.buffer[offset:]
# update hmac
self.hm.update(rv)
if finished:
# check hash
            if self.buffer != self.hm.digest():
raise VerificationException('HMAC verification failed.')
return rv
def close(self):
self.source.close()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
class VerificationException(Exception):
"""This exception is thrown whenever there was an error with an
authenticity check performed by any of the decorators in this module."""
pass
class HMACDecorator(StoreDecorator):
"""HMAC authentication and integrity check decorator.
This decorator overrides the :meth:`get`, :meth:`get_file`, :meth:`open`,
    :meth:`put` and :meth:`put_file` methods and alters the stored data in
    the following way:
    First, the original data is stored while being fed to an HMAC instance;
    the resulting hash is appended to the data as a binary string. Every
    value stored therefore takes up an additional
    :attr:`HMACDecorator.hmac_digestsize` bytes.
Upon retrieval using any of :meth:`get`, :meth:`get_file` or :meth:`open`
methods, the data is checked as soon as the hash is readable. Since hashes
are stored at the end, almost no extra memory is used when using streaming
methods. However, :meth:`get_file` and :meth:`open` will only check the
hash value once it is read, that is, at the end of the retrieval.
The decorator will protect against any modification of the stored data and
ensures that only those with knowledge of the :attr:`__secret_key`
can alter any data. The key used to store data is also used to extend the
HMAC secret key, making it impossible to copy a valid message over to a
different key.
"""
def __init__(self, secret_key, decorated_store, hashfunc=hashlib.sha256):
super(HMACDecorator, self).__init__(decorated_store)
self.__hashfunc = hashfunc
self.__secret_key = bytes(secret_key)
@property
def hmac_digestsize(self):
# returns, in bytes, the size of the digest
        return self.__hashfunc().digest_size
def __new_hmac(self, key, msg=None):
if not msg:
msg = b''
# item key is used as salt for secret_key
hm = hmac.HMAC(
key=key.encode('ascii') + self.__secret_key,
msg=msg,
digestmod=self.__hashfunc)
return hm
def get(self, key):
buf = self._dstore.get(key)
hm = self.__new_hmac(key)
        mac = buf[-hm.digest_size:]
        # shorten buf
        buf = buf[:-hm.digest_size]
        hm.update(buf)
        if hm.digest() != mac:
raise VerificationException('Invalid hash on key %r' % key)
return buf
def get_file(self, key, file):
if isinstance(file, str):
try:
f = open(file, 'wb')
except OSError as e:
raise IOError('Error opening %s for writing: %r' % (
file, e
))
# file is open, now we call ourself again with a proper file
try:
self.get_file(key, f)
finally:
f.close()
else:
            # need to use open; there seems to be no way around it
# this will check the HMAC as well
source = self.open(key)
bufsize = 1024 * 1024
# copy
while True:
buf = source.read(bufsize)
file.write(buf)
if len(buf) != bufsize:
break
def open(self, key):
source = self._dstore.open(key)
return _HMACFileReader(self.__new_hmac(key), source)
def put(self, key, value):
# just append hmac and put
data = value + self.__new_hmac(key, value).digest()
return self._dstore.put(key, data)
def put_file(self, key, file):
hm = self.__new_hmac(key)
bufsize = 1024 * 1024
if isinstance(file, str):
# we read the file once, then write the hash at the end, before
# handing it over to the original backend
with open(file, 'rb+') as source:
while True:
buf = source.read(bufsize)
hm.update(buf)
if len(buf) < bufsize:
break
# file has been read, append hash
source.write(hm.digest())
# after the file has been closed, hand it over
return self._dstore.put_file(key, file)
else:
tmpfile = tempfile.NamedTemporaryFile(delete=False)
try:
while True:
buf = file.read(bufsize)
hm.update(buf)
tmpfile.write(buf)
if len(buf) < bufsize:
break
tmpfile.write(hm.digest())
tmpfile.close()
return self._dstore.put_file(key, tmpfile.name)
finally:
os.unlink(tmpfile.name)
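# A minimal round-trip sketch, assuming a simplekv-style backend store with
# get/put methods (DictStore here is hypothetical):
#
#   store = HMACDecorator(b'my-secret-key', DictStore())
#   store.put('greeting', b'hello')
#   assert store.get('greeting') == b'hello'
#   # any modification of the stored bytes makes the next get() raise
#   # VerificationException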
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
def _full_like(x, val):
xp = cuda.get_array_module(x)
return xp.full_like(x, val)
def _zeros_like(x):
xp = cuda.get_array_module(x)
return xp.zeros_like(x)
def _dot(x, y):
return sum(map(lambda a: a[0] * a[1], zip(x, y)))
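# _dot serves as the inner product between the rows of an analytic Jacobian
# (dfxs) and the upstream gradients (gys) in check_numerical_grad_one below.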
class NumericalGradientTest(unittest.TestCase):
eps = None
def f(self, xs):
return xs[0] ** 2,
def df(self, xs):
return (2 * xs[0],),
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (_uniform(2, 1),)
def check_numerical_grad_one(self, f, df, xs, gys, eps):
dfxs = df(xs)
gys = tuple(0 if gy is None else gy for gy in gys)
        # matrix-vector multiplication of dfxs and gys
dx_expect = tuple(map(lambda dfx: _dot(dfx, gys), dfxs))
def func():
return f(xs)
dx_actual = gradient_check.numerical_grad(func, xs, gys, eps)
self.assertEqual(len(dx_expect), len(dx_actual))
for e, a in zip(dx_expect, dx_actual):
testing.assert_allclose(e, a, atol=1e-3, rtol=1e-3)
def check_numerical_grad(self, f, df, xs, gys, eps=None):
if eps is None:
eps = tuple(10 ** (-i) for i in six.moves.range(2, 5))
elif not isinstance(eps, tuple):
eps = (eps, )
for e in eps:
self.check_numerical_grad_one(f, df, xs, gys, e)
@condition.retry(3)
def test_numerical_grad_cpu(self):
self.check_numerical_grad(self.f, self.df, self.xs, self.gys,
eps=self.eps)
@condition.retry(3)
@attr.gpu
def test_numerical_grad_gpu(self):
gys = tuple(None if gy is None else cuda.to_gpu(gy)
for gy in self.gys)
self.check_numerical_grad(self.f, self.df,
tuple(map(cuda.to_gpu, self.xs)), gys,
eps=self.eps)
class NumericalGradientTest2(NumericalGradientTest):
def f(self, xs):
return 1,
def df(self, xs):
return (0,),
class NumericalGradientTest3(NumericalGradientTest):
    # Too small an eps causes cancellation of significant digits
eps = (1e-2, 1e-3)
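    # For float32 inputs, exp(x + eps) - exp(x - eps) with a very small eps is
    # dominated by rounding error, so the central difference
    # (f(x + eps) - f(x - eps)) / (2 * eps) becomes unreliable; the larger eps
    # values above keep the finite-difference estimate stable.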
def f(self, xs):
xp = cuda.get_array_module(*xs)
return xp.exp(xs[0]),
def df(self, xs):
xp = cuda.get_array_module(*xs)
return (xp.exp(xs[0]),),
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (_uniform(2, 1),)
class NumericalGradientTest4(NumericalGradientTest):
def f(self, xs):
assert len(xs) == 2
return (2 * xs[0] + 3 * xs[1],
4 * xs[0] + 5 * xs[1],
6 * xs[0] + 7 * xs[1])
def df(self, xs):
assert len(xs) == 2
return (
(_full_like(xs[0], 2), _full_like(xs[0], 4), _full_like(xs[0], 6)),
(_full_like(xs[1], 3), _full_like(xs[1], 5), _full_like(xs[1], 7)))
def setUp(self):
self.xs = tuple(_uniform(2, 1) for _ in six.moves.range(2))
self.gys = tuple(_uniform(2, 1) for _ in six.moves.range(3))
class NumericalGradientTest5(NumericalGradientTest4):
def f(self, xs):
assert len(xs) == 2
return (2 * xs[0] + 3 * xs[1],
4 * xs[0] + 5 * xs[1],
6 * xs[0] + 7 * xs[1])
def df(self, xs):
assert len(xs) == 2
return (
(_full_like(xs[0], 2), _zeros_like(xs[0]), _full_like(xs[0], 6)),
(_full_like(xs[1], 3), _zeros_like(xs[1]), _full_like(xs[1], 7)))
def setUp(self):
super(NumericalGradientTest5, self).setUp()
self.gys = (_uniform(2, 1), None, _uniform(2, 1))
class NumericalGradientTest6(NumericalGradientTest):
def setUp(self):
self.xs = (_uniform(2, 1),)
self.gys = (None,)
class NumericalGradientReferenceTest(unittest.TestCase):
def setUp(self):
self.x = _uniform(2, 3)
def check_reference(self, x):
        # A returned value and an input refer to the same memory.
# See issue #488
def func():
return x,
gx, = gradient_check.numerical_grad(func, (x,), (1,))
testing.assert_allclose(cuda.to_cpu(gx), 1)
def test_reference_cpu(self):
self.check_reference(self.x)
@attr.gpu
def test_reference_gpu(self):
self.check_reference(cuda.to_gpu(self.x))
class NumericalGradientInvalidEps(NumericalGradientTest):
def check_invalid_eps(self, xs, gys, eps):
with self.assertRaises(AssertionError):
self.check_numerical_grad(self.f, self.df, xs, gys, eps)
@condition.retry(3)
def test_numerical_grad_cpu(self):
self.check_invalid_eps(self.xs, self.gys, 0)
self.check_invalid_eps(self.xs, self.gys, -1.0)
@condition.retry(3)
@attr.gpu
def test_numerical_grad_gpu(self):
xs = tuple(map(cuda.to_gpu, self.xs))
gys = tuple(None if gy is None else cuda.to_gpu(gy)
for gy in self.gys)
self.check_invalid_eps(xs, gys, 0)
self.check_invalid_eps(xs, gys, -1.0)
class NumericalGradientInvalidType(unittest.TestCase):
def setUp(self):
self.x = numpy.array(0)
self.y = numpy.array(0)
self.f = lambda: None
@attr.gpu
def test_invalid_inputs(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (self.x, y), ())
@attr.gpu
def test_invalid_outputs(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (), (self.x, y))
@attr.gpu
def test_invalid_mixed(self):
y = cuda.to_gpu(self.y)
with self.assertRaises(RuntimeError):
gradient_check.numerical_grad(self.f, (self.x,), (y,))
class NumericalGradientEpsTest(unittest.TestCase):
def setUp(self):
self.x = numpy.array(0.0, dtype=numpy.float32)
self.y = numpy.array(1.0, dtype=numpy.float32)
def check_different_eps(self, x, y):
def f():
if -1 < x < 1:
return x.copy(),
elif -2 < x < 2:
return 2 * x,
else:
return 0,
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=0.5)
self.assertEqual(gx, 1.)
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=1.5)
self.assertEqual(gx, 2.)
gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=2.5)
self.assertEqual(gx, 0.)
    def test_different_eps_cpu(self):
self.check_different_eps(self.x, self.y)
@attr.gpu
    def test_different_eps_gpu(self):
self.check_different_eps(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
class AssertAllCloseTest(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.y = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def check_identical(self, x):
testing.assert_allclose(x, x, atol=0, rtol=0)
@condition.repeat(5)
def test_identical_cpu(self):
self.check_identical(self.x)
@condition.repeat(5)
@attr.gpu
def test_identical_gpu(self):
self.check_identical(cuda.to_gpu(self.x))
def check_atol(self, x, y):
x_cpu = cuda.to_cpu(x)
y_cpu = cuda.to_cpu(y)
max_abs_diff = numpy.max(numpy.abs(x_cpu - y_cpu))
with self.assertRaises(AssertionError):
testing.assert_allclose(x, y, atol=max_abs_diff - 1, rtol=0)
testing.assert_allclose(x, y, atol=max_abs_diff + 1, rtol=0)
@condition.repeat(5)
def test_atol_cpu(self):
self.check_atol(self.x, self.y)
@condition.repeat(5)
@attr.gpu
def test_atol_gpu(self):
self.check_atol(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
class AssertAllCloseTest2(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.y = numpy.random.uniform(1, 2, (2, 3)).astype(numpy.float32)
def check_rtol(self, x, y):
x_cpu = cuda.to_cpu(x)
y_cpu = cuda.to_cpu(y)
max_ratio = numpy.max(numpy.abs(x_cpu - y_cpu) / y_cpu)
with self.assertRaises(AssertionError):
testing.assert_allclose(x, y, atol=0, rtol=max_ratio - 1)
testing.assert_allclose(x, y, atol=0, rtol=max_ratio + 1)
@condition.repeat(5)
def test_rtol_cpu(self):
self.check_rtol(self.x, self.y)
@condition.repeat(5)
@attr.gpu
def test_rtol_gpu(self):
self.check_rtol(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
class Ident(chainer.Function):
def forward(self, inputs):
return inputs
def backward(self, inputs, grads):
return grads
# numpy.float16 is not tested because of the low precision.
@testing.parameterize(*testing.product({
'dtype': [None, numpy.float32, numpy.float64],
}))
class TestCheckBackward(unittest.TestCase):
def test_multiple_output(self):
x1 = numpy.array([1], dtype='f')
x2 = numpy.array([1], dtype='f')
g1 = numpy.array([1], dtype='f')
g2 = numpy.array([1], dtype='f')
def f(x, y):
s, t = Ident()(x, y)
u = Ident()(t)
return s, u
gradient_check.check_backward(f, (x1, x2), (g1, g2), dtype=self.dtype)
def test_no_grads_for_not_float(self):
x1 = numpy.array([1], dtype='f')
x2 = numpy.array([0, 1], dtype='i') # grad check for this is skipped
g1 = numpy.array([1], dtype='f')
def f(x, y):
s = Ident()(x)
return s,
gradient_check.check_backward(f, (x1, x2), g1)
def test_no_grads_option(self):
x1 = numpy.array([1], dtype='f')
x2 = numpy.array([1], dtype='f') # grad check for this is skipped
g1 = numpy.array([1], dtype='f')
def f(x, y):
s = Ident()(x)
return s,
self.assertRaises(RuntimeError, gradient_check.check_backward,
f, (x1, x2), g1, no_grads=[False, False])
gradient_check.check_backward(f, (x1, x2), g1, no_grads=[False, True])
class NewIdent(chainer.FunctionNode):
def forward(self, inputs):
return inputs
def backward(self, indexes, grad_outputs):
return NewIdent().apply(grad_outputs)
class TestCheckDoubleBackward(unittest.TestCase):
def check_multiple_input_output(self, xp):
arrays = xp.ones((6, 1), dtype='f')
x1, x2, gy1, gy2, ggx1, ggx2 = arrays
def f(x, y):
w1 = x + y
w2 = w1 + y
return w1 * w1, w2 * w2
gradient_check.check_double_backward(f, (x1, x2), (gy1, gy2),
(ggx1, ggx2))
def test_multiple_input_output_cpu(self):
self.check_multiple_input_output(numpy)
@attr.gpu
def test_multiple_input_output_gpu(self):
self.check_multiple_input_output(cuda.cupy)
testing.run_module(__name__, __file__)
# Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import logging
import os
import random
import sys
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import Any, Dict, List
from unittest.mock import patch
import sockeye.constants as C
import sockeye.prepare_data
import sockeye.train
import sockeye.translate
import sockeye.lexicon
logger = logging.getLogger(__name__)
_DIGITS = "0123456789"
_MID = 5
def generate_digits_file(source_path: str,
target_path: str,
line_count: int = 100,
line_length: int = 9,
sort_target: bool = False,
line_count_empty: int = 0,
seed=13):
assert line_count_empty <= line_count
random_gen = random.Random(seed)
with open(source_path, "w") as source_out, open(target_path, "w") as target_out:
all_digits = []
for _ in range(line_count - line_count_empty):
digits = [random_gen.choice(_DIGITS) for _ in range(random_gen.randint(1, line_length))]
all_digits.append(digits)
for _ in range(line_count_empty):
all_digits.append([])
random_gen.shuffle(all_digits)
for digits in all_digits:
print(C.TOKEN_SEPARATOR.join(digits), file=source_out)
if sort_target:
digits.sort()
print(C.TOKEN_SEPARATOR.join(digits), file=target_out)
def generate_low_high_factors(input_path: str, output_path: str):
"""
Writes low/high factor file given a file of digit sequences.
"""
with open(input_path, 'r') as fin, open(output_path, 'w') as fout:
for line in fin:
digits = map(int, line.rstrip().split())
factors = ("l" if digit < _MID else "h" for digit in digits)
print(C.TOKEN_SEPARATOR.join(factors), file=fout)
def generate_odd_even_factors(input_path: str, output_path: str):
"""
Writes odd/even factor file given a file of digit sequences.
"""
with open(input_path, 'r') as fin, open(output_path, 'w') as fout:
for line in fin:
digits = map(int, line.rstrip().split())
factors = ("e" if digit % 2 == 0 else "o" for digit in digits)
print(C.TOKEN_SEPARATOR.join(factors), file=fout)
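# Illustrative example (input line assumed): for the digit sequence "3 7 0",
# generate_low_high_factors writes "l h l" (digits below _MID=5 map to "l")
# and generate_odd_even_factors writes "o o e".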
def generate_fast_align_lex(lex_path: str):
"""
Generate a fast_align format lex table for digits.
:param lex_path: Path to write lex table.
"""
with open(lex_path, "w") as lex_out:
for digit in _DIGITS:
print("{0}\t{0}\t0".format(digit), file=lex_out)
LEXICON_CREATE_PARAMS_COMMON = "create -i {input} -m {model} -k {topk} -o {lexicon}"
@contextmanager
def tmp_digits_dataset(prefix: str,
train_line_count: int, train_line_count_empty: int, train_max_length: int,
dev_line_count: int, dev_max_length: int,
test_line_count: int, test_line_count_empty: int, test_max_length: int,
sort_target: bool = False,
seed_train: int = 13, seed_dev: int = 13,
with_n_source_factors: int = 0,
with_n_target_factors: int = 0) -> Dict[str, Any]:
"""
Creates a temporary dataset with train, dev, and test files and yields a dictionary with paths to the respective
temporary files.
"""
with TemporaryDirectory(prefix=prefix) as work_dir:
# Simple digits files for train/dev data
train_source_path = os.path.join(work_dir, "train.src")
train_target_path = os.path.join(work_dir, "train.tgt")
dev_source_path = os.path.join(work_dir, "dev.src")
dev_target_path = os.path.join(work_dir, "dev.tgt")
test_source_path = os.path.join(work_dir, "test.src")
test_target_path = os.path.join(work_dir, "test.tgt")
generate_digits_file(train_source_path, train_target_path, train_line_count, train_max_length,
line_count_empty=train_line_count_empty, sort_target=sort_target, seed=seed_train)
generate_digits_file(dev_source_path, dev_target_path, dev_line_count, dev_max_length, sort_target=sort_target,
seed=seed_dev)
generate_digits_file(test_source_path, test_target_path, test_line_count, test_max_length,
line_count_empty=test_line_count_empty, sort_target=sort_target, seed=seed_dev)
data = {'work_dir': work_dir,
'train_source': train_source_path,
'train_target': train_target_path,
'dev_source': dev_source_path,
'dev_target': dev_target_path,
'test_source': test_source_path,
'test_target': test_target_path}
if with_n_source_factors > 0:
data['train_source_factors'] = []
data['dev_source_factors'] = []
data['test_source_factors'] = []
for i in range(with_n_source_factors):
train_factor_path = train_source_path + ".factors%d" % i
dev_factor_path = dev_source_path + ".factors%d" % i
test_factor_path = test_source_path + ".factors%d" % i
generate_low_high_factors(train_source_path, train_factor_path)
generate_low_high_factors(dev_source_path, dev_factor_path)
generate_low_high_factors(test_source_path, test_factor_path)
data['train_source_factors'].append(train_factor_path)
data['dev_source_factors'].append(dev_factor_path)
data['test_source_factors'].append(test_factor_path)
if with_n_target_factors > 0:
data['train_target_factors'] = []
data['dev_target_factors'] = []
data['test_target_factors'] = []
for i in range(with_n_target_factors):
train_factor_path = train_target_path + ".factors%d" % i
dev_factor_path = dev_target_path + ".factors%d" % i
test_factor_path = test_target_path + ".factors%d" % i
generate_odd_even_factors(train_target_path, train_factor_path)
generate_odd_even_factors(dev_target_path, dev_factor_path)
generate_odd_even_factors(test_target_path, test_factor_path)
data['train_target_factors'].append(train_factor_path)
data['dev_target_factors'].append(dev_factor_path)
data['test_target_factors'].append(test_factor_path)
yield data
TRAIN_PARAMS_COMMON = "--use-cpu --max-seq-len {max_len} --source {train_source} --target {train_target}" \
" --validation-source {dev_source} --validation-target {dev_target} --output {model}" \
" --seed {seed}"
PREPARE_DATA_COMMON = " --max-seq-len {max_len} --source {train_source} --target {train_target}" \
" --output {output} --pad-vocab-to-multiple-of 16"
TRAIN_WITH_SOURCE_FACTORS_COMMON = " --source-factors {source_factors}"
DEV_WITH_SOURCE_FACTORS_COMMON = " --validation-source-factors {dev_source_factors}"
TRAIN_WITH_TARGET_FACTORS_COMMON = " --target-factors {target_factors}"
DEV_WITH_TARGET_FACTORS_COMMON = " --validation-target-factors {dev_target_factors}"
TRAIN_PARAMS_PREPARED_DATA_COMMON = "--use-cpu --max-seq-len {max_len} --prepared-data {prepared_data}" \
" --validation-source {dev_source} --validation-target {dev_target} " \
"--output {model}"
TRANSLATE_PARAMS_COMMON = "--use-cpu --models {model} --input {input} --output {output} " \
"--output-type json"
TRANSLATE_WITH_FACTORS_COMMON = " --input-factors {input_factors}"
TRANSLATE_PARAMS_RESTRICT = "--restrict-lexicon {lexicon} --restrict-lexicon-topk {topk}"
SCORE_PARAMS_COMMON = "--use-cpu --model {model} --source {source} --target {target} --output {output} "
SCORE_WITH_SOURCE_FACTORS_COMMON = " --source-factors {source_factors}"
SCORE_WITH_TARGET_FACTORS_COMMON = " --target-factors {target_factors}"
def run_train_translate(train_params: str,
translate_params: str,
data: Dict[str, Any],
use_prepared_data: bool = False,
max_seq_len: int = 10,
seed: int = 13) -> Dict[str, Any]:
"""
Train a model and translate a test set. Returns the updated data dictionary containing paths to translation outputs
and scores.
:param train_params: Command line args for model training.
:param translate_params: Command line args for translation.
:param data: Dictionary containing test data.
:param use_prepared_data: Whether to use the prepared data functionality.
:param max_seq_len: The maximum sequence length.
:param seed: The seed used for training.
:return: Data dictionary, updated with translation outputs and scores
"""
work_dir = os.path.join(data['work_dir'], 'train_translate')
data['model'] = os.path.join(work_dir, "model")
# Optionally create prepared data directory
if use_prepared_data:
data['train_prepared'] = os.path.join(work_dir, "prepared_data")
prepare_params = "{} {}".format(
sockeye.prepare_data.__file__,
PREPARE_DATA_COMMON.format(train_source=data['train_source'],
train_target=data['train_target'],
output=data['train_prepared'],
max_len=max_seq_len))
if 'train_source_factors' in data:
prepare_params += TRAIN_WITH_SOURCE_FACTORS_COMMON.format(
source_factors=" ".join(data['train_source_factors']))
if 'train_target_factors' in data:
prepare_params += TRAIN_WITH_TARGET_FACTORS_COMMON.format(
target_factors=" ".join(data['train_target_factors']))
if '--weight-tying-type src_trg' in train_params:
prepare_params += ' --shared-vocab'
logger.info("Preparing data with parameters %s.", prepare_params)
with patch.object(sys, "argv", prepare_params.split()):
sockeye.prepare_data.main()
# Train model
params = "{} {} {}".format(sockeye.train.__file__,
TRAIN_PARAMS_PREPARED_DATA_COMMON.format(prepared_data=data['train_prepared'],
dev_source=data['dev_source'],
dev_target=data['dev_target'],
model=data['model'],
max_len=max_seq_len),
train_params)
if 'dev_source_factors' in data:
params += DEV_WITH_SOURCE_FACTORS_COMMON.format(dev_source_factors=" ".join(data['dev_source_factors']))
if 'dev_target_factors' in data:
params += DEV_WITH_TARGET_FACTORS_COMMON.format(dev_target_factors=" ".join(data['dev_target_factors']))
logger.info("Starting training with parameters %s.", train_params)
with patch.object(sys, "argv", params.split()):
sockeye.train.main()
else:
# Train model
params = "{} {} {}".format(sockeye.train.__file__,
TRAIN_PARAMS_COMMON.format(train_source=data['train_source'],
train_target=data['train_target'],
dev_source=data['dev_source'],
dev_target=data['dev_target'],
model=data['model'],
max_len=max_seq_len,
seed=seed),
train_params)
if 'train_source_factors' in data:
params += TRAIN_WITH_SOURCE_FACTORS_COMMON.format(source_factors=" ".join(data['train_source_factors']))
if 'train_target_factors' in data:
params += TRAIN_WITH_TARGET_FACTORS_COMMON.format(target_factors=" ".join(data['train_target_factors']))
if 'dev_source_factors' in data:
params += DEV_WITH_SOURCE_FACTORS_COMMON.format(dev_source_factors=" ".join(data['dev_source_factors']))
if 'dev_target_factors' in data:
params += DEV_WITH_TARGET_FACTORS_COMMON.format(dev_target_factors=" ".join(data['dev_target_factors']))
logger.info("Starting training with parameters %s.", train_params)
with patch.object(sys, "argv", params.split()):
sockeye.train.main()
# create Top-K lexicon from simple ttable mapping digit to digit
ttable_path = os.path.join(data['work_dir'], "ttable")
generate_fast_align_lex(ttable_path)
lexicon_path = os.path.join(data['work_dir'], "lexicon")
params = "{} {}".format(sockeye.lexicon.__file__,
LEXICON_CREATE_PARAMS_COMMON.format(input=ttable_path,
model=data['model'],
topk=20,
lexicon=lexicon_path))
with patch.object(sys, "argv", params.split()):
sockeye.lexicon.main()
data['lexicon'] = lexicon_path
# Translate the test corpus with translate_params, using the json output handler to obtain outputs and scores
data['test_output'] = os.path.join(work_dir, "test.out")
params = "{} {} {}".format(sockeye.translate.__file__,
TRANSLATE_PARAMS_COMMON.format(model=data['model'],
input=data['test_source'],
output=data['test_output']),
translate_params)
if 'test_source_factors' in data:
params += TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=" ".join(data['test_source_factors']))
logger.info("Translating with params %s", params)
with patch.object(sys, "argv", params.split()):
sockeye.translate.main()
# Collect test inputs
with open(data['test_source']) as inputs:
data['test_inputs'] = [line.strip() for line in inputs]
# Collect test references
with open(data['test_target'], "r") as ref:
data['test_targets'] = [line.strip() for line in ref]
# Collect test translate outputs and scores
data['test_outputs'] = collect_translate_output_and_scores(data['test_output'])
assert len(data['test_inputs']) == len(data['test_targets']) == len(data['test_outputs'])
return data
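# Illustrative usage (sketch; flag values assumed, not taken from this file):
#   with tmp_digits_dataset("sockeye.", 100, 0, 9, 10, 9, 10, 0, 9) as data:
#       data = run_train_translate("--max-updates 2", "--beam-size 1", data)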
def run_translate_restrict(data: Dict[str, Any], translate_params: str) -> Dict[str, Any]:
"""
Runs sockeye.translate with vocabulary selection and checks that the number of outputs is the same as without
vocabulary selection. Adds restricted outputs and scores to the data dictionary.
"""
translate_mod = sockeye.translate
out_path = os.path.join(data['work_dir'], "out-restrict.txt")
# Translate corpus with restrict-lexicon
params = "{} {} {} {}".format(translate_mod.__file__,
TRANSLATE_PARAMS_COMMON.format(model=data['model'],
input=data['test_source'],
output=out_path),
translate_params,
TRANSLATE_PARAMS_RESTRICT.format(lexicon=data['lexicon'], topk=1))
if 'test_source_factors' in data:
params += TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=" ".join(data['test_source_factors']))
with patch.object(sys, "argv", params.split()):
translate_mod.main()
# Collect test translate outputs and scores
data['test_outputs_restricted'] = collect_translate_output_and_scores(out_path)
assert len(data['test_outputs_restricted']) == len(data['test_outputs'])
return data
def collect_translate_output_and_scores(out_path: str) -> List[Dict]:
"""
Collects JSON outputs from an output file produced with the 'json' or 'nbest' output handler.
"""
logger.debug("collect_translate_output_and_scores(%s)", out_path)
outputs = []
with open(out_path) as out_fh:
for line in out_fh:
line = line.strip()
logger.debug(" line: %s", line)
outputs.append(json.loads(line))
return outputs
#!/usr/bin/python3
import os, os.path, re, json
from functools import wraps
from flask import Flask, request, render_template, abort, Response
import auth, utils
from mailconfig import get_mail_users, get_mail_users_ex, get_admins, add_mail_user, set_mail_password, remove_mail_user
from mailconfig import get_mail_user_privileges, add_remove_mail_user_privilege
from mailconfig import get_mail_aliases, get_mail_aliases_ex, get_mail_domains, add_mail_alias, remove_mail_alias
# Create a worker pool for the status checks. The pool should
# live across HTTP requests so we don't balloon the system with
# processes.
import multiprocessing.pool
pool = multiprocessing.pool.Pool(processes=10)
env = utils.load_environment()
auth_service = auth.KeyAuthService()
# We may deploy via a symbolic link, which confuses flask's template finding.
me = __file__
try:
me = os.readlink(__file__)
except OSError:
pass
app = Flask(__name__, template_folder=os.path.abspath(os.path.join(os.path.dirname(me), "templates")))
# Decorator to protect views that require a user with 'admin' privileges.
def authorized_personnel_only(viewfunc):
@wraps(viewfunc)
def newview(*args, **kwargs):
# Authenticate the passed credentials, which is either the API key or a username:password pair.
error = None
try:
email, privs = auth_service.authenticate(request, env)
except ValueError as e:
# Authentication failed.
privs = []
error = str(e)
# Authorized to access an API view?
if "admin" in privs:
# Call view func.
return viewfunc(*args, **kwargs)
elif not error:
error = "You are not an administrator."
# Not authorized. Return a 401 (send auth) and a prompt to authorize by default.
status = 401
headers = {
'WWW-Authenticate': 'Basic realm="{0}"'.format(auth_service.auth_realm),
'X-Reason': error,
}
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
# Don't issue a 401 to an AJAX request because the user will
# be prompted for credentials, which is not helpful.
status = 403
headers = None
if request.headers.get('Accept') in (None, "", "*/*"):
# Return plain text output.
return Response(error+"\n", status=status, mimetype='text/plain', headers=headers)
else:
# Return JSON output.
return Response(json.dumps({
"status": "error",
"reason": error,
})+"\n", status=status, mimetype='application/json', headers=headers)
return newview
@app.errorhandler(401)
def unauthorized(error):
return auth_service.make_unauthorized_response()
def json_response(data):
return Response(json.dumps(data), status=200, mimetype='application/json')
###################################
# Control Panel (unauthenticated views)
@app.route('/')
def index():
# Render the control panel. This route does not require user authentication
# so it must be safe!
no_admins_exist = (len(get_admins(env)) == 0)
return render_template('index.html',
hostname=env['PRIMARY_HOSTNAME'],
storage_root=env['STORAGE_ROOT'],
no_admins_exist=no_admins_exist,
)
@app.route('/me')
def me():
# Is the caller authorized?
try:
email, privs = auth_service.authenticate(request, env)
except ValueError as e:
return json_response({
"status": "invalid",
"reason": str(e),
})
resp = {
"status": "ok",
"email": email,
"privileges": privs,
}
# Is authorized as admin? Return an API key for future use.
if "admin" in privs:
resp["api_key"] = auth_service.create_user_key(email)
# Return.
return json_response(resp)
# MAIL
@app.route('/mail/users')
@authorized_personnel_only
def mail_users():
if request.args.get("format", "") == "json":
return json_response(get_mail_users_ex(env, with_archived=True, with_slow_info=True))
else:
return "".join(x+"\n" for x in get_mail_users(env))
@app.route('/mail/users/add', methods=['POST'])
@authorized_personnel_only
def mail_users_add():
try:
return add_mail_user(request.form.get('email', ''), request.form.get('password', ''), request.form.get('privileges', ''), env)
except ValueError as e:
return (str(e), 400)
@app.route('/mail/users/password', methods=['POST'])
@authorized_personnel_only
def mail_users_password():
try:
return set_mail_password(request.form.get('email', ''), request.form.get('password', ''), env)
except ValueError as e:
return (str(e), 400)
@app.route('/mail/users/remove', methods=['POST'])
@authorized_personnel_only
def mail_users_remove():
return remove_mail_user(request.form.get('email', ''), env)
@app.route('/mail/users/privileges')
@authorized_personnel_only
def mail_user_privs():
privs = get_mail_user_privileges(request.args.get('email', ''), env)
if isinstance(privs, tuple): return privs # error
return "\n".join(privs)
@app.route('/mail/users/privileges/add', methods=['POST'])
@authorized_personnel_only
def mail_user_privs_add():
return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "add", env)
@app.route('/mail/users/privileges/remove', methods=['POST'])
@authorized_personnel_only
def mail_user_privs_remove():
return add_remove_mail_user_privilege(request.form.get('email', ''), request.form.get('privilege', ''), "remove", env)
@app.route('/mail/aliases')
@authorized_personnel_only
def mail_aliases():
if request.args.get("format", "") == "json":
return json_response(get_mail_aliases_ex(env))
else:
return "".join(x+"\t"+y+"\n" for x, y in get_mail_aliases(env))
@app.route('/mail/aliases/add', methods=['POST'])
@authorized_personnel_only
def mail_aliases_add():
return add_mail_alias(
request.form.get('source', ''),
request.form.get('destination', ''),
env,
update_if_exists=(request.form.get('update_if_exists', '') == '1')
)
@app.route('/mail/aliases/remove', methods=['POST'])
@authorized_personnel_only
def mail_aliases_remove():
return remove_mail_alias(request.form.get('source', ''), env)
@app.route('/mail/domains')
@authorized_personnel_only
def mail_domains():
return "".join(x+"\n" for x in get_mail_domains(env))
# DNS
@app.route('/dns/zones')
@authorized_personnel_only
def dns_zones():
from dns_update import get_dns_zones
return json_response([z[0] for z in get_dns_zones(env)])
@app.route('/dns/update', methods=['POST'])
@authorized_personnel_only
def dns_update():
from dns_update import do_dns_update
try:
return do_dns_update(env, force=request.form.get('force', '') == '1')
except Exception as e:
return (str(e), 500)
@app.route('/dns/secondary-nameserver')
@authorized_personnel_only
def dns_get_secondary_nameserver():
from dns_update import get_custom_dns_config
return json_response({ "hostname": get_custom_dns_config(env).get("_secondary_nameserver") })
@app.route('/dns/secondary-nameserver', methods=['POST'])
@authorized_personnel_only
def dns_set_secondary_nameserver():
from dns_update import set_secondary_dns
try:
return set_secondary_dns(request.form.get('hostname'), env)
except ValueError as e:
return (str(e), 400)
@app.route('/dns/set')
@authorized_personnel_only
def dns_get_records():
from dns_update import get_custom_dns_config, get_custom_records
additional_records = get_custom_dns_config(env)
records = get_custom_records(None, additional_records, env)
return json_response([{
"qname": r[0],
"rtype": r[1],
"value": r[2],
} for r in records])
@app.route('/dns/set/<qname>', methods=['POST'])
@app.route('/dns/set/<qname>/<rtype>', methods=['POST'])
@app.route('/dns/set/<qname>/<rtype>/<value>', methods=['POST'])
@authorized_personnel_only
def dns_set_record(qname, rtype="A", value=None):
from dns_update import do_dns_update, set_custom_dns_record
try:
# Get the value from the URL, then the POST parameters, or if it is not set then
# use the remote IP address of the request --- makes dynamic DNS easy. To clear a
# value, '' must be explicitly passed.
if value is None:
value = request.form.get("value")
if value is None:
value = request.environ.get("HTTP_X_FORWARDED_FOR") # normally REMOTE_ADDR but we're behind nginx as a reverse proxy
if value == '' or value == '__delete__':
# request deletion
value = None
if set_custom_dns_record(qname, rtype, value, env):
return do_dns_update(env)
return "OK"
except ValueError as e:
return (str(e), 400)
@app.route('/dns/dump')
@authorized_personnel_only
def dns_get_dump():
from dns_update import build_recommended_dns
return json_response(build_recommended_dns(env))
# SSL
@app.route('/ssl/csr/<domain>', methods=['POST'])
@authorized_personnel_only
def ssl_get_csr(domain):
from web_update import get_domain_ssl_files, create_csr
ssl_key, ssl_certificate, ssl_via = get_domain_ssl_files(domain, env)
return create_csr(domain, ssl_key, env)
@app.route('/ssl/install', methods=['POST'])
@authorized_personnel_only
def ssl_install_cert():
from web_update import install_cert
domain = request.form.get('domain')
ssl_cert = request.form.get('cert')
ssl_chain = request.form.get('chain')
return install_cert(domain, ssl_cert, ssl_chain, env)
# WEB
@app.route('/web/domains')
@authorized_personnel_only
def web_get_domains():
from web_update import get_web_domains_info
return json_response(get_web_domains_info(env))
@app.route('/web/update', methods=['POST'])
@authorized_personnel_only
def web_update():
from web_update import do_web_update
return do_web_update(env)
# System
@app.route('/system/status', methods=["POST"])
@authorized_personnel_only
def system_status():
from status_checks import run_checks
class WebOutput:
def __init__(self):
self.items = []
def add_heading(self, heading):
self.items.append({ "type": "heading", "text": heading, "extra": [] })
def print_ok(self, message):
self.items.append({ "type": "ok", "text": message, "extra": [] })
def print_error(self, message):
self.items.append({ "type": "error", "text": message, "extra": [] })
def print_warning(self, message):
self.items.append({ "type": "warning", "text": message, "extra": [] })
def print_line(self, message, monospace=False):
self.items[-1]["extra"].append({ "text": message, "monospace": monospace })
output = WebOutput()
run_checks(False, env, output, pool)
return json_response(output.items)
@app.route('/system/updates')
@authorized_personnel_only
def show_updates():
from status_checks import list_apt_updates
return "".join(
"%s (%s)\n"
% (p["package"], p["version"])
for p in list_apt_updates())
@app.route('/system/update-packages', methods=["POST"])
@authorized_personnel_only
def do_updates():
utils.shell("check_call", ["/usr/bin/apt-get", "-qq", "update"])
return utils.shell("check_output", ["/usr/bin/apt-get", "-y", "upgrade"], env={
"DEBIAN_FRONTEND": "noninteractive"
})
@app.route('/system/backup/status')
@authorized_personnel_only
def backup_status():
from backup import backup_status
return json_response(backup_status(env))
# APP
if __name__ == '__main__':
if "DEBUG" in os.environ: app.debug = True
if "APIKEY" in os.environ: auth_service.key = os.environ["APIKEY"]
if not app.debug:
app.logger.addHandler(utils.create_syslog_handler())
# For testing on the command line, you can use `curl` like so:
# curl --user $(</var/lib/mailinabox/api.key): http://localhost:10222/mail/users
auth_service.write_key()
# For testing in the browser, you can copy the API key that's output to the
# debug console and enter that as the username
app.logger.info('API key: ' + auth_service.key)
# Start the application server. Listens on 127.0.0.1 (IPv4 only).
app.run(port=10222)
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from past.builtins import basestring
from builtins import object
import json
import logging
import threading
import sys
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
from beeswax.server.dbms import QueryServerException
from beeswax.server.hive_server2_lib import HiveServerClient
from ImpalaService import ImpalaHiveServer2Service
from impala.impala_flags import get_webserver_certificate_file, is_webserver_spnego_enabled, is_kerberos_enabled
from impala.conf import DAEMON_API_USERNAME, DAEMON_API_PASSWORD, DAEMON_API_PASSWORD_SCRIPT, DAEMON_API_AUTH_SCHEME, COORDINATOR_URL
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
def get_api(user, url):
api = ImpalaDaemonApi(url)
api.set_user(user)
return api
def _get_impala_server_url(session):
http_addr = ""
if COORDINATOR_URL.get():
http_addr = COORDINATOR_URL.get()
else:
if not session:
raise PopupException(_('No active Thrift session with Impala Coordinator, please run a query first.'))
properties = session.get_properties()
http_addr = properties.get('coordinator_host', properties.get('http_addr'))
http_addr = http_addr.replace('http://', '').replace('https://', '')
return ('https://' if get_webserver_certificate_file() else 'http://') + http_addr
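# Example (illustrative): if the session reports http_addr "host:25000" and no
# webserver certificate file is configured, this returns "http://host:25000";
# with a certificate file configured it returns "https://host:25000".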
class ImpalaServerClientException(Exception):
pass
class ImpalaDaemonApiException(Exception):
pass
class ImpalaServerClient(HiveServerClient):
def get_exec_summary(self, operation_handle, session_handle):
"""
Calls Impala HS2 API's GetExecSummary method on the given query handle
:return: TExecSummary object serialized as a dict
"""
req = ImpalaHiveServer2Service.TGetExecSummaryReq(operationHandle=operation_handle, sessionHandle=session_handle)
# GetExecSummary() only works for closed queries
try:
self.close_operation(operation_handle)
except QueryServerException as e:
LOG.warning('Failed to close operation for query handle, query may be invalid or already closed.')
resp = self.call(self._client.GetExecSummary, req)
return self._serialize_exec_summary(resp.summary)
def get_runtime_profile(self, operation_handle, session_handle):
"""
Calls Impala HS2 API's GetRuntimeProfile method on the given query handle
:return: the query runtime profile
"""
req = ImpalaHiveServer2Service.TGetRuntimeProfileReq(operationHandle=operation_handle, sessionHandle=session_handle)
# GetRuntimeProfile() only works for closed queries
try:
self.close_operation(operation_handle)
except QueryServerException as e:
LOG.warning('Failed to close operation for query handle, query may be invalid or already closed.')
resp = self.call(self._client.GetRuntimeProfile, req)
return resp.profile
def _serialize_exec_summary(self, summary):
try:
summary_dict = {
'state': summary.state,
'exch_to_sender_map': summary.exch_to_sender_map,
'error_logs': summary.error_logs,
'status': None,
'progress': None,
'nodes': [],
}
if summary.status is not None:
summary_dict['status'] = summary.status.__dict__
if summary.progress is not None:
summary_dict['progress'] = summary.progress.__dict__
if summary.nodes:
for node in summary.nodes:
node_dict = node.__dict__
if node.exec_stats is not None:
node_dict['exec_stats'] = [stat.__dict__ for stat in node.exec_stats]
if node.estimated_stats is not None:
node_dict['estimated_stats'] = node.estimated_stats.__dict__
summary_dict['nodes'].append(node_dict)
return summary_dict
except Exception as e:
raise ImpalaServerClientException('Failed to serialize the TExecSummary object: %s' % str(e))
class ImpalaDaemonApi(object):
def __init__(self, server_url):
self._url = server_url
self._client = HttpClient(self._url, logger=LOG)
self._root = Resource(self._client)
self._security_enabled = is_kerberos_enabled()
self._webserver_spnego_enabled = is_webserver_spnego_enabled()
self._thread_local = threading.local()
# You can set username/password for Impala Web UI which overrides kerberos
daemon_api_pwd = \
(DAEMON_API_PASSWORD.get() if DAEMON_API_PASSWORD.get()
is not None else (DAEMON_API_PASSWORD_SCRIPT.get() if DAEMON_API_PASSWORD_SCRIPT.get()
is not None else None))
if DAEMON_API_USERNAME.get() is not None and daemon_api_pwd is not None:
if DAEMON_API_AUTH_SCHEME.get().lower() == 'basic':
self._client.set_basic_auth(DAEMON_API_USERNAME.get(), daemon_api_pwd)
LOG.info("Using username and password for basic authentication")
else:
self._client.set_digest_auth(DAEMON_API_USERNAME.get(), daemon_api_pwd)
LOG.info('Using username and password for digest authentication')
elif self._webserver_spnego_enabled or self._security_enabled:
self._client.set_kerberos_auth()
LOG.info('Using kerberos principal for authentication')
def __str__(self):
return "ImpalaDaemonApi at %s" % self._url
@property
def url(self):
return self._url
@property
def security_enabled(self):
return self._security_enabled
@property
def user(self):
return self._thread_local.user
def set_user(self, user):
if hasattr(user, 'username'):
self._thread_local.user = user.username
else:
self._thread_local.user = user
def get_queries(self):
params = {
'json': 'true'
}
resp = self._root.get('queries', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi did not return valid JSON: %s' % e)
def get_query(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('query_plan', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi did not return valid JSON: %s' % e)
def get_query_profile(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('query_profile', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi query_profile did not return valid JSON: %s' % e)
def get_query_memory(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('query_memory', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi query_memory did not return valid JSON: %s' % e)
def kill(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('cancel_query', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi kill did not return valid JSON: %s' % e)
def get_query_backends(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('query_backends', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi query_backends did not return valid JSON: %s' % e)
def get_query_finstances(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('query_finstances', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi query_finstances did not return valid JSON: %s' % e)
def get_query_summary(self, query_id):
params = {
'query_id': query_id,
'json': 'true'
}
resp = self._root.get('query_summary', params=params)
try:
if isinstance(resp, basestring):
return json.loads(resp)
else:
return resp
except ValueError as e:
raise ImpalaDaemonApiException('ImpalaDaemonApi query_summary did not return valid JSON: %s' % e)
def get_query_profile_encoded(self, query_id):
params = {
'query_id': query_id
}
return self._root.get('query_profile_encoded', params=params)
from django.db.models.expressions import Func, Value
from django.db.models.fields import IntegerField
from django.db.models.functions import Coalesce
from django.db.models.lookups import Transform
from django.db.utils import NotSupportedError
class BytesToCharFieldConversionMixin:
"""
Convert CharField results from bytes to str.
MySQL returns long data types (bytes) instead of chars when it can't
determine the length of the result string. For example:
LPAD(column1, CHAR_LENGTH(column2), ' ')
returns the LONGTEXT (bytes) instead of VARCHAR.
"""
def convert_value(self, value, expression, connection):
if connection.features.db_functions_convert_bytes_to_str:
if self.output_field.get_internal_type() == 'CharField' and isinstance(value, bytes):
return value.decode()
return super().convert_value(value, expression, connection)
class MySQLSHA2Mixin:
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template='SHA2(%%(expressions)s, %s)' % self.function[3:],
**extra_context,
)
class OracleHashMixin:
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template=(
"LOWER(RAWTOHEX(STANDARD_HASH(UTL_I18N.STRING_TO_RAW("
"%(expressions)s, 'AL32UTF8'), '%(function)s')))"
),
**extra_context,
)
class PostgreSQLSHAMixin:
def as_postgresql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template="ENCODE(DIGEST(%(expressions)s, '%(function)s'), 'hex')",
function=self.function.lower(),
**extra_context,
)
class Chr(Transform):
function = 'CHR'
lookup_name = 'chr'
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection, function='CHAR',
template='%(function)s(%(expressions)s USING utf16)',
**extra_context
)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler, connection,
template='%(function)s(%(expressions)s USING NCHAR_CS)',
**extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CHAR', **extra_context)
class ConcatPair(Func):
"""
Concatenate two arguments together. This is used by `Concat` because not
all backend databases support more than two arguments.
"""
function = 'CONCAT'
def as_sqlite(self, compiler, connection, **extra_context):
coalesced = self.coalesce()
return super(ConcatPair, coalesced).as_sql(
compiler, connection, template='%(expressions)s', arg_joiner=' || ',
**extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
# Use CONCAT_WS with an empty separator so that NULLs are ignored.
return super().as_sql(
compiler, connection, function='CONCAT_WS',
template="%(function)s('', %(expressions)s)",
**extra_context
)
def coalesce(self):
# NULL on either side results in NULL for the whole expression, so wrap each argument with Coalesce
c = self.copy()
c.set_source_expressions([
Coalesce(expression, Value('')) for expression in c.get_source_expressions()
])
return c
class Concat(Func):
"""
Concatenate text fields together. Backends that result in an entire
null expression when any arguments are null will wrap each argument in
coalesce functions to ensure a non-null result.
"""
function = None
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError('Concat must take at least two expressions')
paired = self._paired(expressions)
super().__init__(paired, **extra)
def _paired(self, expressions):
# wrap pairs of expressions in successive concat functions
# exp = [a, b, c, d]
# -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d)))
if len(expressions) == 2:
return ConcatPair(*expressions)
return ConcatPair(expressions[0], self._paired(expressions[1:]))
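# Illustrative usage (model and field names assumed, not part of this module):
#   Person.objects.annotate(
#       screen_name=Concat('first_name', Value(' '), 'last_name'),
#   )
# which _paired() nests into
#   ConcatPair('first_name', ConcatPair(Value(' '), 'last_name')).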
class Left(Func):
function = 'LEFT'
arity = 2
def __init__(self, expression, length, **extra):
"""
expression: the name of a field, or an expression returning a string
length: the number of characters to return from the start of the string
"""
if not hasattr(length, 'resolve_expression'):
if length < 1:
raise ValueError("'length' must be greater than 0.")
super().__init__(expression, length, **extra)
def get_substr(self):
return Substr(self.source_expressions[0], Value(1), self.source_expressions[1])
def as_oracle(self, compiler, connection, **extra_context):
return self.get_substr().as_oracle(compiler, connection, **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return self.get_substr().as_sqlite(compiler, connection, **extra_context)
class Length(Transform):
"""Return the number of characters in the expression."""
function = 'LENGTH'
lookup_name = 'length'
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='CHAR_LENGTH', **extra_context)
class Lower(Transform):
function = 'LOWER'
lookup_name = 'lower'
class LPad(BytesToCharFieldConversionMixin, Func):
function = 'LPAD'
def __init__(self, expression, length, fill_text=Value(' '), **extra):
if not hasattr(length, 'resolve_expression') and length is not None and length < 0:
raise ValueError("'length' must be greater or equal to 0.")
super().__init__(expression, length, fill_text, **extra)
class LTrim(Transform):
function = 'LTRIM'
lookup_name = 'ltrim'
class MD5(OracleHashMixin, Transform):
function = 'MD5'
lookup_name = 'md5'
class Ord(Transform):
function = 'ASCII'
lookup_name = 'ord'
output_field = IntegerField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='ORD', **extra_context)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='UNICODE', **extra_context)
class Repeat(BytesToCharFieldConversionMixin, Func):
function = 'REPEAT'
def __init__(self, expression, number, **extra):
if not hasattr(number, 'resolve_expression') and number is not None and number < 0:
raise ValueError("'number' must be greater or equal to 0.")
super().__init__(expression, number, **extra)
def as_oracle(self, compiler, connection, **extra_context):
expression, number = self.source_expressions
length = None if number is None else Length(expression) * number
rpad = RPad(expression, length, expression)
return rpad.as_sql(compiler, connection, **extra_context)
class Replace(Func):
function = 'REPLACE'
def __init__(self, expression, text, replacement=Value(''), **extra):
super().__init__(expression, text, replacement, **extra)
class Reverse(Transform):
function = 'REVERSE'
lookup_name = 'reverse'
def as_oracle(self, compiler, connection, **extra_context):
# REVERSE in Oracle is undocumented and doesn't support multi-byte
# strings. Use a special subquery instead.
return super().as_sql(
compiler, connection,
template=(
'(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM '
'(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s '
'FROM DUAL CONNECT BY LEVEL <= LENGTH(%(expressions)s)) '
'GROUP BY %(expressions)s)'
),
**extra_context
)
class Right(Left):
function = 'RIGHT'
def get_substr(self):
return Substr(self.source_expressions[0], self.source_expressions[1] * Value(-1))
class RPad(LPad):
function = 'RPAD'
class RTrim(Transform):
function = 'RTRIM'
lookup_name = 'rtrim'
class SHA1(OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA1'
lookup_name = 'sha1'
class SHA224(MySQLSHA2Mixin, PostgreSQLSHAMixin, Transform):
function = 'SHA224'
lookup_name = 'sha224'
def as_oracle(self, compiler, connection, **extra_context):
raise NotSupportedError('SHA224 is not supported on Oracle.')
class SHA256(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA256'
lookup_name = 'sha256'
class SHA384(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA384'
lookup_name = 'sha384'
class SHA512(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = 'SHA512'
lookup_name = 'sha512'
class StrIndex(Func):
"""
Return a positive integer corresponding to the 1-indexed position of the
first occurrence of a substring inside another string, or 0 if the
substring is not found.
"""
function = 'INSTR'
arity = 2
output_field = IntegerField()
def as_postgresql(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='STRPOS', **extra_context)
class Substr(Func):
function = 'SUBSTRING'
def __init__(self, expression, pos, length=None, **extra):
"""
expression: the name of a field, or an expression returning a string
pos: an integer > 0, or an expression returning an integer
length: an optional number of characters to return
"""
if not hasattr(pos, 'resolve_expression'):
if pos < 1:
raise ValueError("'pos' must be greater than 0")
expressions = [expression, pos]
if length is not None:
expressions.append(length)
super().__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='SUBSTR', **extra_context)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function='SUBSTR', **extra_context)
class Trim(Transform):
function = 'TRIM'
lookup_name = 'trim'
class Upper(Transform):
function = 'UPPER'
lookup_name = 'upper'
"""Test the Volumio config flow."""
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.volumio.config_flow import CannotConnectError
from homeassistant.components.volumio.const import DOMAIN
from tests.common import MockConfigEntry
TEST_SYSTEM_INFO = {"id": "1111-1111-1111-1111", "name": "TestVolumio"}
TEST_CONNECTION = {
"host": "1.1.1.1",
"port": 3000,
}
TEST_DISCOVERY = zeroconf.ZeroconfServiceInfo(
host="1.1.1.1",
hostname="mock_hostname",
name="mock_name",
port=3000,
properties={"volumioName": "discovered", "UUID": "2222-2222-2222-2222"},
type="mock_type",
)
TEST_DISCOVERY_RESULT = {
"host": TEST_DISCOVERY.host,
"port": TEST_DISCOVERY.port,
"id": TEST_DISCOVERY.properties["UUID"],
"name": TEST_DISCOVERY.properties["volumioName"],
}
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value=TEST_SYSTEM_INFO,
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "TestVolumio"
assert result2["data"] == {**TEST_SYSTEM_INFO, **TEST_CONNECTION}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_updates_unique_id(hass):
"""Test a duplicate id aborts and updates existing entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_SYSTEM_INFO["id"],
data={
"host": "dummy",
"port": 11,
"name": "dummy",
"id": TEST_SYSTEM_INFO["id"],
},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value=TEST_SYSTEM_INFO,
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
assert entry.data == {**TEST_SYSTEM_INFO, **TEST_CONNECTION}
async def test_empty_system_info(hass):
"""Test old volumio versions with empty system info."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value={},
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_CONNECTION["host"]
assert result2["data"] == {
"host": TEST_CONNECTION["host"],
"port": TEST_CONNECTION["port"],
"name": TEST_CONNECTION["host"],
"id": None,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
side_effect=CannotConnectError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_exception(hass):
"""Test we handle generic error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_CONNECTION,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_discovery(hass):
"""Test discovery flow works."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
return_value=TEST_SYSTEM_INFO,
), patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_DISCOVERY_RESULT["name"]
assert result2["data"] == TEST_DISCOVERY_RESULT
assert result2["result"]
assert result2["result"].unique_id == TEST_DISCOVERY_RESULT["id"]
assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_cannot_connect(hass):
"""Test discovery aborts if cannot connect."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
with patch(
"homeassistant.components.volumio.config_flow.Volumio.get_system_info",
side_effect=CannotConnectError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={},
)
assert result2["type"] == "abort"
assert result2["reason"] == "cannot_connect"
async def test_discovery_duplicate_data(hass):
"""Test discovery aborts if same mDNS packet arrives."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
assert result["type"] == "form"
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_ZEROCONF}, data=TEST_DISCOVERY
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_discovery_updates_unique_id(hass):
"""Test a duplicate discovery id aborts and updates existing entry."""
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_DISCOVERY_RESULT["id"],
data={
"host": "dummy",
"port": 11,
"name": "dummy",
"id": TEST_DISCOVERY_RESULT["id"],
},
state=config_entries.ConfigEntryState.SETUP_RETRY,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.volumio.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data == TEST_DISCOVERY_RESULT
assert len(mock_setup_entry.mock_calls) == 1
import math
import warnings
from sympy import Rational as frac
from sympy import sqrt
from ..helpers import article
from ._helpers import C2Scheme, register
from ._rabinowitz_richter import rabinowitz_richter_1
from ._tyler import tyler_2
source = article(
authors=["Richard Franke"],
title="Obtaining cubatures for rectangles and other planar regions by using orthogonal polynomials",
journal="Math. Comp.",
volume="25",
year="1971",
pages="803-817",
url="https://doi.org/10.1090/S0025-5718-1971-0300440-5",
)
def franke_1(lmbda=2):
assert -frac(9, 5) <= lmbda <= frac(9, 4)
a = sqrt(frac(9 + 5 * lmbda, 15))
b = sqrt(frac(9 - 4 * lmbda, 15))
c = sqrt(frac(3, 5))
d = {
"zero2": [[frac(4 * (4 + 5 * lmbda), 9 * (9 + 5 * lmbda))]],
"sxy": [[frac(25, 36 * (9 - 4 * lmbda))], [b], [c]],
"c2_a0": [[frac(40, 36 * (9 + 5 * lmbda))], [a]],
"c2_0a": [[frac(10 * (1 - lmbda), 9 * (9 - 4 * lmbda))], [c]],
}
return C2Scheme(f"Franke(1, {lmbda})", d, 5, source)
def franke_2a():
a = math.sqrt((15 + 2 * sqrt(30)) / 35)
b = math.sqrt((15 - 2 * sqrt(30)) / 35)
# closed forms not appearing in the article:
c = 5 * (18 + math.sqrt(30)) / 324 / 4
w = 2 * (18 + math.sqrt(30)) / 81 / 4
d = {
"sxy": [
[0.01094603802180727, c, 0.07601767326255625],
[0.105784012371275e1, math.sqrt(3 / 5), 0.469253522127911],
[a, b, a],
],
"c2_0a": [[d], [b]],
}
return C2Scheme("Franke 2a", d, 7, source, 1.232e-14)
def franke_2b():
a = math.sqrt((15 + 2 * sqrt(30)) / 35)
b = math.sqrt((15 - 2 * sqrt(30)) / 35)
d = {
"sxy": [
[0.0483131729357575, 0.0422624803047505, 0.120773808410886],
[0.774596669241483, 0.915060523380880, 0.396191039748320],
[a, b, b],
],
"c2_0a": [[0.077301076697212], [a]],
}
return C2Scheme("Franke 2b", d, 7, source)
def franke_3a():
a = math.sqrt(5.0 / 9.0 + 2.0 / 63.0 * math.sqrt(70))
b = math.sqrt(5.0 / 9.0 - 2.0 / 63.0 * math.sqrt(70))
d = {
"sxy": [
[
0.705065140564012e-1,
0.721121511007611e-1,
0.971492736037507e-1,
0.368549048677049,
],
[
0.845927799771709,
0.628901636732253,
0.959681421214621,
0.436030596273468,
],
[a, a, b, b],
],
"c2_a0": [[0.316049382716049], [0.774596669241483]],
"c2_0a": [[0.188616439798053, 0.258606964371341e-1], [a, b]],
"zero2": [[0.505679012345679]],
}
for value in d.values():
value[0] = [val / 4 for val in value[0]]
return C2Scheme("Franke 3a", d, 9, source)
def franke_3b():
a = math.sqrt(5.0 / 9.0 + 2.0 / 63.0 * math.sqrt(70))
b = math.sqrt(5.0 / 9.0 - 2.0 / 63.0 * math.sqrt(70))
d = {
"sxy": [
[
0.499290623065150e-1,
0.158445182284802,
0.183383788151247,
0.881476523665422e-1,
],
[
0.945813739519925,
0.465346624836203,
0.804253925742002,
0.681385892163677,
],
[a, a, b, b],
],
"c2_a0": [
[0.114456375561331, 0.454432513327558],
[0.963018409085396, 0.428610143223121],
],
"c2_0a": [[0.571052809297435e-1, 0.414194459963155], [a, b]],
}
for value in d.values():
value[0] = [val / 4 for val in value[0]]
return C2Scheme("Franke 3b", d, 9, source)
def franke_3c():
a = math.sqrt(5.0 / 9.0 + 2.0 / 63.0 * math.sqrt(70))
b = math.sqrt(5.0 / 9.0 - 2.0 / 63.0 * math.sqrt(70))
d = {
"sxy": [
[0.494522019130682e-1, 0.163914731881061, 0.265904816944092],
[0.949307350001342, 0.458177548931134, 0.774596669241483],
[a, a, b],
],
"c2_a0": [
[0.113041839046410, 0.479922229600720],
[0.967776908976724, 0.417754671502987],
],
"c2_0a": [[0.471199025241204e-1, 0.425447707110548], [a, b]],
"zero2": [[-0.481503595164821e-1]],
}
for value in d.values():
value[0] = [val / 4 for val in value[0]]
return C2Scheme("Franke 3c", d, 9, source)
def franke_5():
# DUP Noted as a duplicate in the original article
return tyler_2()
def franke_6():
a = sqrt(frac(3, 2))
b = sqrt(frac(3, 7) * (1 + sqrt(frac(10, 31))))
c = sqrt(frac(3, 7) * (1 - sqrt(frac(10, 31))))
d = {
"zero2": [[frac(392, 405)]],
"d4_aa": [[frac(16, 2025)], [a]],
"d4_ab": [[frac(1519, 4050)], [b], [c]],
}
for value in d.values():
value[0] = [val / 4 for val in value[0]]
return C2Scheme("Franke 6", d, 7, source)
def franke_7():
# DUP Noted as a duplicate in the original article
return rabinowitz_richter_1()
def franke_8():
# TODO find error in franke_8
warnings.warn("Franke(8) only has degree 1.")
d = {
"d4_a0": [[0.454163960686749], [0.488926856974369]],
"d4_aa": [
[0.214200360926862, 0.427312318657758e-1],
[0.690880550486344, 0.939565258096838],
],
"d4_ab": [[0.144452223260307], [0.918620441056722], [0.344872025364404]],
}
for value in d.values():
value[0] = [val / 4 for val in value[0]]
return C2Scheme("Franke 8", d, 1, source)
register(
[
franke_1,
franke_2a,
franke_2b,
franke_3a,
franke_3b,
franke_3c,
franke_5,
franke_6,
franke_7,
franke_8,
]
)
"""The Mazda Connected Services integration."""
from datetime import timedelta
import logging
import async_timeout
from pymazda import (
Client as MazdaAPI,
MazdaAccountLockedException,
MazdaAPIEncryptionException,
MazdaAuthenticationException,
MazdaException,
MazdaTokenExpiredException,
)
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD, CONF_REGION
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import (
ConfigEntryAuthFailed,
ConfigEntryNotReady,
HomeAssistantError,
)
from homeassistant.helpers import aiohttp_client, device_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util.async_ import gather_with_concurrency
from .const import DATA_CLIENT, DATA_COORDINATOR, DATA_VEHICLES, DOMAIN, SERVICES
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["device_tracker", "lock", "sensor"]
async def with_timeout(task, timeout_seconds=10):
"""Run an async task with a timeout."""
async with async_timeout.timeout(timeout_seconds):
return await task
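# Illustrative usage (see async_update_data below):
#   vehicles = await with_timeout(mazda_client.get_vehicles())
# An asyncio.TimeoutError is raised if the task exceeds timeout_seconds.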
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Mazda Connected Services from a config entry."""
email = entry.data[CONF_EMAIL]
password = entry.data[CONF_PASSWORD]
region = entry.data[CONF_REGION]
websession = aiohttp_client.async_get_clientsession(hass)
mazda_client = MazdaAPI(email, password, region, websession)
try:
await mazda_client.validate_credentials()
except MazdaAuthenticationException as ex:
raise ConfigEntryAuthFailed from ex
except (
MazdaException,
MazdaAccountLockedException,
MazdaTokenExpiredException,
MazdaAPIEncryptionException,
) as ex:
_LOGGER.error("Error occurred during Mazda login request: %s", ex)
raise ConfigEntryNotReady from ex
async def async_handle_service_call(service_call=None):
"""Handle a service call."""
# Get device entry from device registry
dev_reg = device_registry.async_get(hass)
device_id = service_call.data["device_id"]
device_entry = dev_reg.async_get(device_id)
# Get vehicle VIN from device identifiers
mazda_identifiers = (
identifier
for identifier in device_entry.identifiers
if identifier[0] == DOMAIN
)
vin_identifier = next(mazda_identifiers)
vin = vin_identifier[1]
# Get vehicle ID and API client from hass.data
vehicle_id = 0
api_client = None
for entry_data in hass.data[DOMAIN].values():
for vehicle in entry_data[DATA_VEHICLES]:
if vehicle["vin"] == vin:
vehicle_id = vehicle["id"]
api_client = entry_data[DATA_CLIENT]
break
if vehicle_id == 0 or api_client is None:
raise HomeAssistantError("Vehicle ID not found")
api_method = getattr(api_client, service_call.service)
try:
if service_call.service == "send_poi":
latitude = service_call.data["latitude"]
longitude = service_call.data["longitude"]
poi_name = service_call.data["poi_name"]
await api_method(vehicle_id, latitude, longitude, poi_name)
else:
await api_method(vehicle_id)
except Exception as ex:
raise HomeAssistantError(ex) from ex
def validate_mazda_device_id(device_id):
"""Check that a device ID exists in the registry and has at least one 'mazda' identifier."""
dev_reg = device_registry.async_get(hass)
device_entry = dev_reg.async_get(device_id)
if device_entry is None:
raise vol.Invalid("Invalid device ID")
mazda_identifiers = [
identifier
for identifier in device_entry.identifiers
if identifier[0] == DOMAIN
]
if not mazda_identifiers:
raise vol.Invalid("Device ID is not a Mazda vehicle")
return device_id
service_schema = vol.Schema(
{vol.Required("device_id"): vol.All(cv.string, validate_mazda_device_id)}
)
service_schema_send_poi = service_schema.extend(
{
vol.Required("latitude"): cv.latitude,
vol.Required("longitude"): cv.longitude,
vol.Required("poi_name"): cv.string,
}
)
async def async_update_data():
"""Fetch data from Mazda API."""
try:
vehicles = await with_timeout(mazda_client.get_vehicles())
vehicle_status_tasks = [
with_timeout(mazda_client.get_vehicle_status(vehicle["id"]))
for vehicle in vehicles
]
statuses = await gather_with_concurrency(5, *vehicle_status_tasks)
for vehicle, status in zip(vehicles, statuses):
vehicle["status"] = status
hass.data[DOMAIN][entry.entry_id][DATA_VEHICLES] = vehicles
return vehicles
except MazdaAuthenticationException as ex:
raise ConfigEntryAuthFailed("Not authenticated with Mazda API") from ex
except Exception as ex:
_LOGGER.exception(
"Unknown error occurred during Mazda update request: %s", ex
)
raise UpdateFailed(ex) from ex
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=async_update_data,
update_interval=timedelta(seconds=60),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
DATA_CLIENT: mazda_client,
DATA_COORDINATOR: coordinator,
DATA_VEHICLES: [],
}
# Fetch initial data so we have data when entities subscribe
await coordinator.async_config_entry_first_refresh()
# Setup components
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
# Register services
for service in SERVICES:
if service == "send_poi":
hass.services.async_register(
DOMAIN,
service,
async_handle_service_call,
schema=service_schema_send_poi,
)
else:
hass.services.async_register(
DOMAIN, service, async_handle_service_call, schema=service_schema
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
# Only remove services if it is the last config entry
if len(hass.data[DOMAIN]) == 1:
for service in SERVICES:
hass.services.async_remove(DOMAIN, service)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class MazdaEntity(CoordinatorEntity):
"""Defines a base Mazda entity."""
def __init__(self, client, coordinator, index):
"""Initialize the Mazda entity."""
super().__init__(coordinator)
self.client = client
self.index = index
self.vin = self.coordinator.data[self.index]["vin"]
self.vehicle_id = self.coordinator.data[self.index]["id"]
@property
def data(self):
"""Shortcut to access coordinator data for the entity."""
return self.coordinator.data[self.index]
@property
def device_info(self):
"""Return device info for the Mazda entity."""
return {
"identifiers": {(DOMAIN, self.vin)},
"name": self.get_vehicle_name(),
"manufacturer": "Mazda",
"model": f"{self.data['modelYear']} {self.data['carlineName']}",
}
def get_vehicle_name(self):
"""Return the vehicle name, to be used as a prefix for names of other entities."""
if "nickname" in self.data and len(self.data["nickname"]) > 0:
return self.data["nickname"]
return f"{self.data['modelYear']} {self.data['carlineName']}"
| |
#!/usr/bin/python
from os.path import join,exists
import numpy as np
import pickle
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
from uda_common import zero_pivot_columns, zero_nonpivot_columns, read_pivots, evaluate_and_print_scores, align_test_X_train, get_f1, find_best_c
import os
import scipy.sparse
from scipy.sparse import lil_matrix
import sys
from sklearn import svm
from sklearn.feature_selection import chi2
def main(args):
if len(args) < 4:
sys.stderr.write("Required argument(s): <labeled source data> <labeled target data> <matrix directory> <pivot index file>\n\n")
sys.exit(-1)
goal_ind = 2
(source_file, target_file, matrix_dir, pivot_file) = args
print("Reading pivot index file into a dictionary and creating pivot-only and nopivot matrices")
pivots = read_pivots(pivot_file)
num_pivots = len(pivots)
print("Reading input data from %s" % (source_file))
X_train, y_train = load_svmlight_file(source_file, dtype='float32')
num_instances, num_feats = X_train.shape
print(" Data has %d instances and %d features" % (num_instances, num_feats))
nopivot_X_train = zero_pivot_columns(X_train, pivots)
pivot_X_train = zero_nonpivot_columns(X_train, pivots)
X_test, y_test = load_svmlight_file(target_file)
X_test = align_test_X_train(X_train, X_test)
num_test_instances = X_test.shape[0]
nopivot_X_test = zero_pivot_columns(X_test, pivots)
pivot_X_test = zero_nonpivot_columns(X_test, pivots)
print("Original feature space evaluation (AKA no adaptation, AKA pivot+non-pivot)")
## C < 1 => more regularization
## C > 1 => more fitting to training
C_list = [0.001, 0.01, 0.1, 1.0, 10.0]
(l1_c, l1_f1) = find_best_c(X_train, y_train, goal_ind, penalty='l1', dual=False)
print("Optimizing l1 with cross-validation gives C=%f and f1=%f" % (l1_c, l1_f1))
(l2_c, l2_f1) = find_best_c(X_train, y_train, goal_ind)
print("Optimizing l2 with cross-validation gives C=%f and f1=%f" % (l2_c, l2_f1))
print("Evaluating with l1=%f regularization optimized for source:" % l1_c)
evaluate_and_print_scores(X_train, y_train, X_test, y_test, goal_ind, C=l1_c, penalty='l1', dual=False)
print("Evaluating l1 at c=%f" % (l1_c/10))
evaluate_and_print_scores(X_train, y_train, X_test, y_test, goal_ind, C=l1_c/10, penalty='l1', dual=False)
print("Evaluating with l2=%f regularization optimized for source:" % l2_c)
evaluate_and_print_scores(X_train, y_train, X_test, y_test, goal_ind, C=l2_c)
print("Evaluating l2 at c=%f" % (l2_c/10))
evaluate_and_print_scores(X_train, y_train, X_test, y_test, goal_ind, C=l2_c/10)
print("Feature selection in source space")
## Find features useful for the primary task:
(chi2_task, pval_task) = chi2(X_train, y_train)
task_feats_inds = np.where(pval_task < 0.05)[0]
X_train_featsel = zero_nonpivot_columns(X_train, task_feats_inds)
(l2_c, l2_f1) = find_best_c(X_train_featsel, y_train, goal_ind)
X_test_featsel = zero_nonpivot_columns(X_test, task_feats_inds)
evaluate_and_print_scores(X_train_featsel, y_train, X_test_featsel, y_test, goal_ind, C=l2_c)
#del X_train_featsel, X_test_featsel
print("Remove features useful for telling domains apart")
## Create dataset for training a classifier to distinguish between domains:
X_all = np.zeros((num_instances+num_test_instances, num_feats))
X_all[:num_instances,:] += X_train
X_all[num_instances:,:] += X_test
y_dataset_discrim = np.zeros(num_instances+num_test_instances)
y_dataset_discrim[:num_instances] = 1
svc = svm.LinearSVC()
svc.fit(X_all, y_dataset_discrim)
train_error = 1 - svc.score(X_all, y_dataset_discrim)
print("Train error in trying to differentiate these datasets is %f" % train_error)
(chi2_dd, pval_dd) = chi2(X_all, y_dataset_discrim)
dd_feats_inds = np.where(pval_dd > 0.05)[0]
X_train_domains = zero_nonpivot_columns(X_train, dd_feats_inds)
X_test_domains = zero_nonpivot_columns(X_test, dd_feats_inds)
(l2_c, l2_f1) = find_best_c(X_train_domains, y_train, goal_ind)
evaluate_and_print_scores(X_train_domains, y_train, X_test_domains, y_test, goal_ind, C=l2_c)
del X_train_domains, X_test_domains
print("Ben-david logic with feature selection intersection")
intersect_inds = np.intersect1d(task_feats_inds, dd_feats_inds)
X_train_intersect = zero_nonpivot_columns(X_train, intersect_inds)
X_test_intersect = zero_nonpivot_columns(X_test, intersect_inds)
(l2_c, l2_f1) = find_best_c(X_train_intersect, y_train, goal_ind)
evaluate_and_print_scores(X_train_intersect, y_train, X_test_intersect, y_test, goal_ind, C=l2_c)
del X_train_intersect, X_test_intersect
## FIXME -- don't add all at once?
print("Balanced bootstrapping method (add equal amounts of true/false examples)")
for percentage in [0.01, 0.1, 0.25]:
svc = svm.LinearSVC()
svc.fit(X_train, y_train)
preds = svc.decision_function(X_test)
added_X = []
added_y = []
for i in range(int(percentage * num_test_instances)):
if i % 2 == 0:
highest_ind = preds.argmax()
if preds[highest_ind] <= 0:
break
else:
highest_ind = preds.argmin()
if preds[highest_ind] >= 0:
break
added_X.append(X_test[highest_ind,:].toarray()[0])
added_y.append(1 if preds[highest_ind] < 0 else 2)
preds[highest_ind] = 0
print("Added %d instances from target dataset" % (len(added_y)))
train_plus_bootstrap_X = np.zeros((num_instances + len(added_y), num_feats))
train_plus_bootstrap_X[:num_instances, :] += X_train
train_plus_bootstrap_X[num_instances:, :] += np.array(added_X)
train_plus_bootstrap_y = np.zeros(num_instances + len(added_y))
train_plus_bootstrap_y[:num_instances] += y_train
train_plus_bootstrap_y[num_instances:] += np.array(added_y)
(l2_c, l2_f1) = find_best_c(train_plus_bootstrap_X, train_plus_bootstrap_y, goal_ind)
evaluate_and_print_scores(train_plus_bootstrap_X, train_plus_bootstrap_y, X_test, y_test, goal_ind, l2_c)
del train_plus_bootstrap_y, train_plus_bootstrap_X
print("Enriching bootstrapping method (add minority class examples only)")
for percentage in [0.01, 0.1, 0.25]:
svc = svm.LinearSVC()
svc.fit(X_train, y_train)
preds = svc.decision_function(X_test)
added_X = []
added_y = []
for i in range(int(percentage * num_test_instances)):
highest_ind = preds.argmax()
if preds[highest_ind] <= 0:
break
added_X.append(X_test[highest_ind,:].toarray()[0])
added_y.append(goal_ind)
preds[highest_ind] = 0
print("Added %d positive instances from target dataset" % (len(added_y)))
train_plus_bootstrap_X = np.zeros((num_instances + len(added_y), num_feats))
train_plus_bootstrap_X[:num_instances, :] += X_train
train_plus_bootstrap_X[num_instances:, :] += np.array(added_X)
train_plus_bootstrap_y = np.zeros(num_instances + len(added_y))
train_plus_bootstrap_y[:num_instances] += y_train
train_plus_bootstrap_y[num_instances:] += np.array(added_y)
(l2_c, l2_f1) = find_best_c(train_plus_bootstrap_X, train_plus_bootstrap_y, goal_ind)
evaluate_and_print_scores(train_plus_bootstrap_X, train_plus_bootstrap_y, X_test, y_test, goal_ind, l2_c)
del train_plus_bootstrap_y, train_plus_bootstrap_X
with open(join(matrix_dir, 'theta_svd.pkl'), 'rb') as theta_file:
theta = pickle.load(theta_file)
num_new_feats = theta.shape[1]
with open(join(matrix_dir, 'theta_full.pkl'), 'rb') as theta_file:
theta_full = pickle.load(theta_file)
num_pivots = theta_full.shape[1]
#print("Pivot-only feature space evaluation")
#evaluate_and_print_scores(pivot_X_train, y_train, pivot_X_test, y_test, goal_ind)
#print("Non-pivot only feature space evaluation")
#evaluate_and_print_scores(nopivot_X_train, y_train, nopivot_X_test, y_test, goal_ind)
#print("New-only feature space evaluation (svd)")
new_X_train = nopivot_X_train * theta
new_X_test = nopivot_X_test * theta
#evaluate_and_print_scores(new_X_train, y_train, new_X_test, y_test, goal_ind)
print("All + new feature space evaluation")
all_plus_new_train = lil_matrix((X_train.shape[0], num_feats + num_new_feats), dtype='float32')
all_plus_new_train[:, :num_feats] += X_train
all_plus_new_train[:, num_feats:] += new_X_train
all_plus_new_test = lil_matrix((X_test.shape[0], num_feats + num_new_feats), dtype='float32')
all_plus_new_test[:, :num_feats] += X_test
all_plus_new_test[:, num_feats:] += new_X_test
(l2_c, l2_f1) = find_best_c(all_plus_new_train, y_train, goal_ind)
evaluate_and_print_scores(all_plus_new_train, y_train, all_plus_new_test, y_test, goal_ind, l2_c)
del all_plus_new_train, all_plus_new_test
print("Pivot + new feature space evaluation")
pivot_plus_new_train = lil_matrix((X_train.shape[0], num_feats + num_new_feats), dtype='float32')
pivot_plus_new_train[:, :num_feats] += pivot_X_train
pivot_plus_new_train[:, num_feats:] += new_X_train
pivot_plus_new_test = lil_matrix((X_test.shape[0], num_feats + num_new_feats), dtype='float32')
pivot_plus_new_test[:, :num_feats] += pivot_X_test
pivot_plus_new_test[:, num_feats:] += new_X_test
(l2_c, l2_f1)= find_best_c(pivot_plus_new_train, y_train, goal_ind)
evaluate_and_print_scores(pivot_plus_new_train, y_train, pivot_plus_new_test, y_test, goal_ind, l2_c)
del pivot_plus_new_train, pivot_plus_new_test
#print("New-only features space evaluation (no svd)")
#pivotpred_X_train = nopivot_X_train * theta_full
#pivotpred_X_test = nopivot_X_test * theta_full
#evaluate_and_print_scores(pivotpred_X_train, y_train, pivotpred_X_test, y_test, goal_ind)
#print("All + no-svd pivot feature space")
# all_plus_pivotpred_train = np.matrix(np.zeros((X_train.shape[0], num_feats + num_pivots)))
# all_plus_pivotpred_train[:, :num_feats] += X_train
# all_plus_pivotpred_train[:, num_feats:] += pivotpred_X_train
# all_plus_pivotpred_test = np.matrix(np.zeros((X_test.shape[0], num_feats + num_pivots)))
# all_plus_pivotpred_test[:, :num_feats] += X_test
# all_plus_pivotpred_test[:, num_feats:] += pivotpred_X_test
# evaluate_and_print_scores(all_plus_pivotpred_train, y_train, all_plus_pivotpred_test, y_test, goal_ind)
# del all_plus_pivotpred_train, all_plus_pivotpred_test
# print("Pivot + pivot prediction space")
# pivot_plus_pivot_pred_train = np.matrix(np.zeros((X_train.shape[0], num_feats + num_pivots)))
# pivot_plus_pivot_pred_train[:,:num_feats] += pivot_X_train
# pivot_plus_pivot_pred_train[:, num_feats:] += pivotpred_X_train
# pivot_plus_pivot_pred_test = np.matrix(np.zeros((X_test.shape[0], num_feats + num_pivots)))
# pivot_plus_pivot_pred_test[:, :num_feats] += pivot_X_test
# pivot_plus_pivot_pred_test[:, num_feats:] += pivotpred_X_test
# evaluate_and_print_scores(pivot_plus_pivot_pred_train, y_train, pivot_plus_pivot_pred_test, y_test, goal_ind)
# del pivot_plus_pivot_pred_train, pivot_plus_pivot_pred_test
print("Original space minus missing target features")
    ## Since X_test is a matrix, a slice of it is also a matrix; use .A to get
    ## the 2d array, then take row 0 to obtain a 1d array.
column_sums = abs(X_test).sum(0).A[0,:]
assert len(column_sums) == X_test.shape[1]
zero_columns = np.where(column_sums == 0)[0]
nosrconly_feats_train = lil_matrix(X_train)
nosrconly_feats_train[:, zero_columns] = 0
(l2_c, l2_f1) = find_best_c(nosrconly_feats_train, y_train, goal_ind)
evaluate_and_print_scores(nosrconly_feats_train, y_train, X_test, y_test, goal_ind, l2_c)
del nosrconly_feats_train
## TODO Fix this with feature selection?
#print("Sun et al. 15 UFEDA")
#sample_size = 100
#train_index = min(sample_size, X_train.shape[0])
#test_index = min(sample_size, X_test.shape[0])
#cov_train = np.cov(X_train[:train_index,:].toarray(), rowvar=False) + np.eye(X_train.shape[1])
#cov_test = np.cov(X_test[:test_index,:].toarray(), rowvar=False) + np.eye(X_test.shape[1])
#whitened_train = X_train * cov_train**-0.5
#recolored_train = whitened_train * cov_test.transpose()**0.5
#evaluate_and_print_scores(recolored_train, y_train, X_test, y_test, 2)
print("Yu and Jiang method (50 similarity features)")
num_exemplars = 50
indices = np.sort(np.random.choice(num_test_instances, num_exemplars, replace=False))
test_exemplars = X_test[indices]
## Normalize
test_exemplars /= test_exemplars.sum(1)
## Create a new feature for every train instance that is the similarity with each of these exemplars:
## Output matrix is num_train_instances x num_exemplars. add this to end of X_train:
similarity_features_train = X_train * test_exemplars.transpose()
similarity_features_test = X_test * test_exemplars.transpose()
all_plus_sim_X_train = lil_matrix((num_instances, num_feats + num_exemplars), dtype='float32')
all_plus_sim_X_train[:, :num_feats] += X_train
all_plus_sim_X_train[:, num_feats:] += similarity_features_train
all_plus_sim_X_test = lil_matrix((num_test_instances, num_feats + num_exemplars), dtype='float32')
all_plus_sim_X_test[:, :num_feats] += X_test
all_plus_sim_X_test[:,num_feats:] += similarity_features_test
(l2_c, l2_f1) = find_best_c(all_plus_sim_X_train, y_train, goal_ind)
evaluate_and_print_scores(all_plus_sim_X_train, y_train, all_plus_sim_X_test, y_test, goal_ind, l2_c)
del all_plus_sim_X_train, all_plus_sim_X_test
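def similarity_feature_demo():
    """Illustrative sketch of the Yu and Jiang similarity features above.

    Uses a tiny dense example in place of the sparse matrices in main(); the
    row normalization and dot product mirror the num_exemplars block.
    """
    X_demo = np.array([[1.0, 0.0, 2.0], [0.0, 3.0, 1.0]])
    exemplars = np.array([[2.0, 1.0, 0.0], [0.0, 1.0, 1.0]])
    exemplars = exemplars / exemplars.sum(1, keepdims=True)  # normalize rows
    sim = X_demo.dot(exemplars.T)  # (n_instances, n_exemplars) similarities
    return np.hstack([X_demo, sim])  # original features + similarity columns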
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| |
import json
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.urlresolvers import reverse
from olympia.addons.models import Addon
from olympia.compat.models import CompatReport, CompatTotals
# This is the structure sent to /compatibility/incoming from the ACR.
incoming_data = {
'appBuild': '20110429030623',
'appGUID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
'appVersion': '6.0a1',
'clientOS': 'Intel Mac OS X 10.6',
'comments': 'what the what',
'guid': 'jid0-VsMuA0YYTKCjBh5F0pxHAudnEps@jetpack',
'otherAddons': [['yslow@yahoo-inc.com', '2.1.0']],
'version': '2.2',
'worksProperly': False,
}
class TestCompatReportModel(TestCase):
def test_none(self):
eq_(CompatReport.get_counts('xxx'), {'success': 0, 'failure': 0})
def test_some(self):
guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
CompatReport.objects.create(guid=guid, works_properly=True)
CompatReport.objects.create(guid=guid, works_properly=True)
CompatReport.objects.create(guid=guid, works_properly=False)
CompatReport.objects.create(guid='ballin', works_properly=True)
CompatReport.objects.create(guid='ballin', works_properly=False)
eq_(CompatReport.get_counts(guid), {'success': 2, 'failure': 1})
class TestIndex(TestCase):
# TODO: Test valid version processing here.
def setUp(self):
super(TestIndex, self).setUp()
self.url = reverse('compat.index', args=[amo.COMPAT[0]['main']])
CompatTotals.objects.create(app=1, total=1)
def test_no_version_redirect(self):
res = self.client.get(reverse('compat.index'))
self.assert3xx(res, self.url)
def test_previous_version_link(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)
self.assertUrlEqual(doc('h2.c a').attr('href'),
'{url}?page=1&previous=1'.format(url=self.url))
def test_previous_version_link_with_active_pagination(self):
        # The current pagination is not kept when we switch to previous
        # versions. See bug 1056022.
r = self.client.get(self.url, {'page': 2, 'type': 'all'})
eq_(r.status_code, 200)
doc = pq(r.content)
self.assertUrlEqual(
doc('h2.c a').attr('href'),
'{url}?type=all&page=1&previous=1'.format(url=self.url))
class TestIncoming(TestCase):
def setUp(self):
super(TestIncoming, self).setUp()
self.url = reverse('compat.incoming')
self.data = dict(incoming_data)
self.json = json.dumps(self.data)
def test_success(self):
count = CompatReport.objects.count()
r = self.client.post(self.url, self.json,
content_type='application/json')
eq_(r.status_code, 204)
eq_(CompatReport.objects.count(), count + 1)
cr = CompatReport.objects.order_by('-id')[0]
eq_(cr.app_build, incoming_data['appBuild'])
eq_(cr.app_guid, incoming_data['appGUID'])
eq_(cr.works_properly, incoming_data['worksProperly'])
eq_(cr.comments, incoming_data['comments'])
eq_(cr.client_ip, '127.0.0.1')
# Check that the other_addons field is stored as json.
vals = CompatReport.objects.filter(id=cr.id).values('other_addons')
eq_(vals[0]['other_addons'],
json.dumps(incoming_data['otherAddons'], separators=(',', ':')))
def test_bad_json(self):
r = self.client.post(self.url, 'wuuu#$',
content_type='application/json')
eq_(r.status_code, 400)
def test_bad_field(self):
self.data['save'] = 1
js = json.dumps(self.data)
r = self.client.post(self.url, js, content_type='application/json')
eq_(r.status_code, 400)
class TestReporter(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestReporter, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.url = reverse('compat.reporter') + '?guid={0}'
def test_success(self):
r = self.client.get(reverse('compat.reporter'))
eq_(r.status_code, 200)
def test_redirect(self):
CompatReport.objects.create(guid=self.addon.guid,
app_guid=amo.FIREFOX.guid)
expected = reverse('compat.reporter_detail', args=[self.addon.guid])
self.assert3xx(
self.client.get(self.url.format(self.addon.id)), expected)
self.assert3xx(
self.client.get(self.url.format(self.addon.slug)), expected)
self.assert3xx(
self.client.get(self.url.format(self.addon.guid)), expected)
self.assert3xx(
self.client.get(self.url.format(self.addon.guid[:5])), expected)
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: True)
def test_unlisted_addon_redirect_for_authorized(self):
"""Can display the reports for an unlisted addon if authorized."""
self.addon.update(is_listed=False)
self.test_redirect()
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: False)
def test_unlisted_addon_no_redirect_for_unauthorized(self):
"""If the user isn't authorized, don't redirect to unlisted addon."""
self.addon.update(is_listed=False)
CompatReport.objects.create(guid=self.addon.guid,
app_guid=amo.FIREFOX.guid)
assert self.client.get(
self.url.format(self.addon.id)).status_code == 200
assert self.client.get(
self.url.format(self.addon.slug)).status_code == 200
assert self.client.get(
self.url.format(self.addon.guid)).status_code == 200
assert self.client.get(
self.url.format(self.addon.guid[:5])).status_code == 200
def test_unlisted_addons_listed_in_left_sidebar(self):
"""Display unlisted addons in the 'reports for your add-ons' list."""
self.addon.update(is_listed=False)
self.client.login(username='del@icio.us', password='password')
response = self.client.get(reverse('compat.reporter'))
assert self.addon in response.context['addons']
class TestReporterDetail(TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestReporterDetail, self).setUp()
self.addon = Addon.objects.get(id=3615)
self.url = reverse('compat.reporter_detail', args=[self.addon.guid])
self.reports = []
def _generate(self):
apps = [
(amo.FIREFOX.guid, '10.0.1', True), # 0
(amo.FIREFOX.guid, '10.0a1', True), # 1
(amo.FIREFOX.guid, '10.0', False), # 2
(amo.FIREFOX.guid, '6.0.1', False), # 3
(amo.THUNDERBIRD.guid, '10.0', True), # 4
(amo.THUNDERBIRD.guid, '6.6.3', False), # 5
(amo.THUNDERBIRD.guid, '6.0.1', False), # 6
(amo.SEAMONKEY.guid, '2.3.0', False), # 7
(amo.SEAMONKEY.guid, '2.3a1', False), # 8
(amo.SEAMONKEY.guid, '2.3', False), # 9
]
for app_guid, app_version, works_properly in apps:
report = CompatReport.objects.create(guid=self.addon.guid,
app_guid=app_guid,
app_version=app_version,
works_properly=works_properly)
self.reports.append(report.pk)
    def check_table(self, data=None, good=0, bad=0, appver=None,
                    report_pks=None):
        data = data or {}
        report_pks = report_pks or []
        r = self.client.get(self.url, data)
        eq_(r.status_code, 200)
        # Check that we got the correct reports.
        eq_(sorted(report.id for report in r.context['reports'].object_list),
            sorted(self.reports[pk] for pk in report_pks))
doc = pq(r.content)
eq_(doc('.compat-info tbody tr').length, good + bad)
reports = doc('#reports')
if good == 0 and bad == 0:
eq_(reports.find('.good, .bad').length, 0)
eq_(doc('.no-results').length, 1)
else:
# Check "X success reports" and "X failure reports" buttons.
eq_(reports.find('.good').text().split()[0], str(good))
eq_(reports.find('.bad').text().split()[0], str(bad))
# Check "Filter by Application" field.
eq_(doc('#compat-form select[name=appver] option[selected]').val(),
appver)
return r
def test_appver_all(self):
self._generate()
self.check_table(
good=3, bad=7, appver='',
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_firefox_single(self):
self._generate()
appver = '%s-%s' % (amo.FIREFOX.id, '6.0')
self.check_table(data={'appver': appver}, good=0, bad=1, appver=appver,
report_pks=[3])
def test_firefox_multiple(self):
self._generate()
appver = '%s-%s' % (amo.FIREFOX.id, '10.0')
self.check_table(data={'appver': appver}, good=2, bad=1, appver=appver,
report_pks=[0, 1, 2])
def test_firefox_empty(self):
self._generate()
appver = '%s-%s' % (amo.FIREFOX.id,
amo.COMPAT[0]['main']) # Firefox 11.
self.check_table(data={'appver': appver}, good=0, bad=0, appver=appver,
report_pks=[])
def test_firefox_unknown(self):
self._generate()
# If we have a bad app/version combination, we don't apply any filters.
appver = '%s-%s' % (amo.FIREFOX.id, '0.9999')
self.check_table(
data={'appver': appver}, good=3, bad=7,
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_thunderbird_multiple(self):
self._generate()
appver = '%s-%s' % (amo.THUNDERBIRD.id, '6.0')
self.check_table(data={'appver': appver}, good=0, bad=2, appver=appver,
report_pks=[5, 6])
def test_thunderbird_unknown(self):
self._generate()
appver = '%s-%s' % (amo.THUNDERBIRD.id, '0.9999')
self.check_table(
data={'appver': appver}, good=3, bad=7,
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_seamonkey_multiple(self):
self._generate()
appver = '%s-%s' % (amo.SEAMONKEY.id, '2.3')
self.check_table(data={'appver': appver}, good=0, bad=3, appver=appver,
report_pks=[7, 8, 9])
def test_seamonkey_unknown(self):
self._generate()
appver = '%s-%s' % (amo.SEAMONKEY.id, '0.9999')
self.check_table(
data={'appver': appver}, good=3, bad=7,
report_pks=[idx for idx, val in enumerate(self.reports)])
def test_app_unknown(self):
# Testing for some unknown application such as 'Conkeror'.
app_guid = '{a79fe89b-6662-4ff4-8e88-09950ad4dfde}'
report = CompatReport.objects.create(
guid=self.addon.guid, app_guid=app_guid, app_version='0.9.3',
works_properly=True)
self.reports.append(report.pk)
r = self.check_table(good=1, bad=0, appver='', report_pks=[0])
msg = 'Unknown (%s)' % app_guid
assert msg in r.content, 'Expected %s in body' % msg
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: True)
def test_unlisted_addon_details_for_authorized(self):
"""If the user is authorized, display the reports."""
self.addon.update(is_listed=False)
self._generate()
self.check_table(
good=3, bad=7, appver='',
report_pks=[idx for idx, val in enumerate(self.reports)])
@mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
lambda r, a: False)
def test_unlisted_addon_no_details_for_unauthorized(self):
"""If the user isn't authorized, don't display the reports."""
self.addon.update(is_listed=False)
self._generate()
self.check_table(
good=0, bad=0, appver='',
report_pks=[])
| |
# Copyright 2013 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance.
=======================================================================
.. automodule:: nexenta.nfs
.. moduleauthor:: Mikhail Khodos <hodosmb@gmail.com>
.. moduleauthor:: Victor Rodionov <victor.rodionov@nexenta.com>
"""
import hashlib
import os
import re
from oslo_utils import units
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
from cinder.volume.drivers import nfs
VERSION = '1.1.3'
LOG = logging.getLogger(__name__)
class NexentaNfsDriver(nfs.NfsDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance.
Version history:
1.0.0 - Initial driver version.
1.1.0 - Auto sharing for enclosing folder.
1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
        1.1.2 - Ignore "folder does not exist" errors in the delete_volume and
                delete_snapshot methods.
1.1.3 - Redefined volume_backend_name attribute inherited from
RemoteFsDriver.
"""
driver_prefix = 'nexenta'
volume_backend_name = 'NexentaNfsDriver'
VERSION = VERSION
def __init__(self, *args, **kwargs):
super(NexentaNfsDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(
options.NEXENTA_NFS_OPTIONS)
conf = self.configuration
self.nms_cache_volroot = conf.nexenta_nms_cache_volroot
self._nms2volroot = {}
self.share2nms = {}
def do_setup(self, context):
super(NexentaNfsDriver, self).do_setup(context)
self._load_shares_config(getattr(self.configuration,
self.driver_prefix +
'_shares_config'))
def check_for_setup_error(self):
"""Verify that the volume for our folder exists.
:raise: :py:exc:`LookupError`
"""
if self.share2nms:
for nfs_share in self.share2nms:
nms = self.share2nms[nfs_share]
volume_name, dataset = self._get_share_datasets(nfs_share)
if not nms.volume.object_exists(volume_name):
raise LookupError(_("Volume %s does not exist in Nexenta "
"Store appliance"), volume_name)
folder = '%s/%s' % (volume_name, dataset)
if not nms.folder.object_exists(folder):
raise LookupError(_("Folder %s does not exist in Nexenta "
"Store appliance"), folder)
self._share_folder(nms, volume_name, dataset)
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
export = '%s/%s' % (volume['provider_location'], volume['name'])
data = {'export': export, 'name': 'volume'}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
return {
'driver_volume_type': self.driver_volume_type,
'data': data
}
def _do_create_volume(self, volume):
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s' % (dataset, volume['name'])
LOG.debug('Creating folder on Nexenta Store %s', folder)
nms.folder.create_with_props(
vol, folder,
{'compression': self.configuration.nexenta_volume_compression}
)
volume_path = self.remote_path(volume)
volume_size = volume['size']
try:
self._share_folder(nms, vol, folder)
if getattr(self.configuration,
self.driver_prefix + '_sparsed_volumes'):
self._create_sparsed_file(nms, volume_path, volume_size)
else:
compression = nms.folder.get('compression')
if compression != 'off':
                    # Disable compression, because otherwise the file created
                    # below would not consume space on disk.
nms.folder.set('compression', 'off')
try:
self._create_regular_file(nms, volume_path, volume_size)
finally:
if compression != 'off':
                        # Restore the original compression value if it was
                        # changed.
nms.folder.set('compression', compression)
self._set_rw_permissions_for_all(nms, volume_path)
except nexenta.NexentaException as exc:
try:
nms.folder.destroy('%s/%s' % (vol, folder))
except nexenta.NexentaException:
LOG.warning(_LW("Cannot destroy created folder: "
"%(vol)s/%(folder)s"),
{'vol': vol, 'folder': folder})
raise exc
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self._ensure_shares_mounted()
snapshot_vol = self._get_snapshot_volume(snapshot)
nfs_share = snapshot_vol['provider_location']
volume['provider_location'] = nfs_share
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],
snapshot['name'])
folder = '%s/%s' % (dataset, volume['name'])
nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))
try:
self._share_folder(nms, vol, folder)
except nexenta.NexentaException:
try:
nms.folder.destroy('%s/%s' % (vol, folder), '')
except nexenta.NexentaException:
LOG.warning(_LW("Cannot destroy cloned folder: "
"%(vol)s/%(folder)s"),
{'vol': vol, 'folder': folder})
raise
return {'provider_location': volume['provider_location']}
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume.
:param volume: new volume reference
:param src_vref: source volume reference
"""
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
snapshot = {'volume_name': src_vref['name'],
'volume_id': src_vref['id'],
'name': self._get_clone_snapshot_name(volume)}
        # We don't delete this snapshot because it becomes the origin of the
        # new volume. NMS promotes it automatically when the user deletes its
        # origin.
self.create_snapshot(snapshot)
try:
return self.create_volume_from_snapshot(volume, snapshot)
except nexenta.NexentaException:
LOG.error(_LE('Volume creation failed, deleting created snapshot '
'%(volume_name)s@%(name)s'), snapshot)
try:
self.delete_snapshot(snapshot)
except (nexenta.NexentaException, exception.SnapshotIsBusy):
LOG.warning(_LW('Failed to delete zfs snapshot '
'%(volume_name)s@%(name)s'), snapshot)
raise
def delete_volume(self, volume):
"""Deletes a logical volume.
:param volume: volume reference
"""
super(NexentaNfsDriver, self).delete_volume(volume)
nfs_share = volume.get('provider_location')
if nfs_share:
nms = self.share2nms[nfs_share]
vol, parent_folder = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])
props = nms.folder.get_child_props(folder, 'origin') or {}
try:
nms.folder.destroy(folder, '-r')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Folder %s does not exist, it was '
'already deleted.'), folder)
return
raise
origin = props.get('origin')
if origin and self._is_clone_snapshot_name(origin):
try:
nms.snapshot.destroy(origin, '')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Snapshot %s does not exist, it was '
'already deleted.'), origin)
return
raise
def create_snapshot(self, snapshot):
"""Creates a snapshot.
:param snapshot: snapshot reference
"""
volume = self._get_snapshot_volume(snapshot)
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, dataset, volume['name'])
nms.folder.create_snapshot(folder, snapshot['name'], '-r')
def delete_snapshot(self, snapshot):
"""Deletes a snapshot.
:param snapshot: snapshot reference
"""
volume = self._get_snapshot_volume(snapshot)
nfs_share = volume['provider_location']
nms = self.share2nms[nfs_share]
vol, dataset = self._get_share_datasets(nfs_share)
folder = '%s/%s/%s' % (vol, dataset, volume['name'])
try:
nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
except nexenta.NexentaException as exc:
if 'does not exist' in exc.args[0]:
LOG.info(_LI('Snapshot %s does not exist, it was '
                             'already deleted.'),
                         '%s@%s' % (folder, snapshot['name']))
return
raise
def _create_sparsed_file(self, nms, path, size):
"""Creates file with 0 disk usage.
:param nms: nms object
:param path: path to new file
:param size: size of file
"""
nms.appliance.execute(
'truncate --size %(size)dG %(path)s' % {
'path': path,
'size': size
}
)
def _create_regular_file(self, nms, path, size):
"""Creates regular file of given size.
Takes a lot of time for large files.
:param nms: nms object
:param path: path to new file
:param size: size of file
"""
block_size_mb = 1
block_count = size * units.Gi / (block_size_mb * units.Mi)
        LOG.info(_LI('Creating regular file: %s. '
                     'This may take some time.') % path)
nms.appliance.execute(
'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
'path': path,
'bs': block_size_mb,
'count': block_count
}
)
LOG.info(_LI('Regular file: %s created.') % path)
def _set_rw_permissions_for_all(self, nms, path):
"""Sets 666 permissions for the path.
:param nms: nms object
:param path: path to file
"""
nms.appliance.execute('chmod ugo+rw %s' % path)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(nfs_share),
volume['name'], 'volume')
def _get_mount_point_for_share(self, nfs_share):
"""Returns path to mount point NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
return os.path.join(self.configuration.nexenta_mount_point_base,
hashlib.md5(nfs_share).hexdigest())
def remote_path(self, volume):
"""Get volume path (mounted remotely fs path) for given volume.
:param volume: volume reference
"""
nfs_share = volume['provider_location']
share = nfs_share.split(':')[1].rstrip('/')
return '%s/%s/volume' % (share, volume['name'])
def _share_folder(self, nms, volume, folder):
"""Share NFS folder on NexentaStor Appliance.
:param nms: nms object
:param volume: volume name
:param folder: folder name
"""
path = '%s/%s' % (volume, folder.lstrip('/'))
share_opts = {
'read_write': '*',
'read_only': '',
'root': 'nobody',
'extra_options': 'anon=0',
'recursive': 'true',
'anonymous_rw': 'true',
}
LOG.debug('Sharing folder %s on Nexenta Store', folder)
nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
share_opts)
def _load_shares_config(self, share_file):
self.shares = {}
self.share2nms = {}
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/share_name http://user:pass@host:[port]/
# or
# host:/share_name http://user:pass@host:[port]/
# -o options=123,rw --other
if not share.strip():
continue
if share.startswith('#'):
continue
share_info = re.split(r'\s+', share, 2)
share_address = share_info[0].strip().decode('unicode_escape')
nms_url = share_info[1].strip()
share_opts = share_info[2].strip() if len(share_info) > 2 else None
if not re.match(r'.+:/.+', share_address):
LOG.warn("Share %s ignored due to invalid format. Must be of "
"form address:/export." % share_address)
continue
self.shares[share_address] = share_opts
self.share2nms[share_address] = self._get_nms_for_url(nms_url)
LOG.debug('Shares loaded: %s' % self.shares)
def _get_capacity_info(self, nfs_share):
"""Calculate available space on the NFS share.
:param nfs_share: example 172.18.194.100:/var/nfs
"""
nms = self.share2nms[nfs_share]
ns_volume, ns_folder = self._get_share_datasets(nfs_share)
folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,
ns_folder), '')
free = utils.str2size(folder_props['available'])
allocated = utils.str2size(folder_props['used'])
return free + allocated, free, allocated
def _get_nms_for_url(self, url):
"""Returns initialized nms object for url."""
auto, scheme, user, password, host, port, path =\
utils.parse_nms_url(url)
return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
password, auto=auto)
def _get_snapshot_volume(self, snapshot):
ctxt = context.get_admin_context()
return db.volume_get(ctxt, snapshot['volume_id'])
def _get_volroot(self, nms):
"""Returns volroot property value from NexentaStor appliance."""
if not self.nms_cache_volroot:
return nms.server.get_prop('volroot')
if nms not in self._nms2volroot:
self._nms2volroot[nms] = nms.server.get_prop('volroot')
return self._nms2volroot[nms]
def _get_share_datasets(self, nfs_share):
nms = self.share2nms[nfs_share]
volroot = self._get_volroot(nms)
path = nfs_share.split(':')[1][len(volroot):].strip('/')
volume_name = path.split('/')[0]
folder_name = '/'.join(path.split('/')[1:])
return volume_name, folder_name
def _get_clone_snapshot_name(self, volume):
"""Return name for snapshot that will be used to clone the volume."""
return 'cinder-clone-snapshot-%(id)s' % volume
def _is_clone_snapshot_name(self, snapshot):
"""Check if snapshot is created for cloning."""
name = snapshot.split('@')[-1]
return name.startswith('cinder-clone-snapshot-')
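# Illustrative sketch of the share-parsing logic in _get_share_datasets,
# duplicated standalone so it can be sanity-checked without an NMS
# connection. The share address and volroot values are assumptions chosen
# only for this example.
def _example_share_datasets(nfs_share='10.0.0.1:/volumes/vol1/cinder',
                            volroot='/volumes'):
    path = nfs_share.split(':')[1][len(volroot):].strip('/')
    volume_name = path.split('/')[0]
    folder_name = '/'.join(path.split('/')[1:])
    return volume_name, folder_name  # -> ('vol1', 'cinder')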
| |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""mock_protobuf.py: provides methods to create dummy protobuf message for testing
This module is used mainly for testing Python Heron Instance as well as common modules
written in Python.
"""
from heron.proto import stmgr_pb2, physical_plan_pb2, topology_pb2, common_pb2
from heronpy.api.serializer import PythonSerializer
# pylint: disable=dangerous-default-value
# pylint: disable=redefined-builtin
def get_mock_config(config_dict=None):
"""Returns a protobuf Config object from topology_pb2"""
if config_dict is None:
return topology_pb2.Config()
proto_config = topology_pb2.Config()
config_serializer = PythonSerializer()
assert isinstance(config_dict, dict)
for key, value in list(config_dict.items()):
if isinstance(value, bool):
kvs = proto_config.kvs.add()
kvs.key = key
kvs.value = "true" if value else "false"
kvs.type = topology_pb2.ConfigValueType.Value("STRING_VALUE")
elif isinstance(value, (str, int, float)):
kvs = proto_config.kvs.add()
kvs.key = key
kvs.value = str(value)
kvs.type = topology_pb2.ConfigValueType.Value("STRING_VALUE")
else:
kvs = proto_config.kvs.add()
kvs.key = key
kvs.serialized_value = config_serializer.serialize(value)
kvs.type = topology_pb2.ConfigValueType.Value("PYTHON_SERIALIZED_VALUE")
return proto_config
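# Illustrative usage: a mixed-type dict exercises all three branches above,
# e.g. get_mock_config({"acking": True, "workers": 2, "custom": {"a": 1}})
# stores "true" and "2" as STRING_VALUE entries and serializes the dict as a
# PYTHON_SERIALIZED_VALUE entry.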
def get_mock_component(name="component_name",
config=get_mock_config(),
python_cls="heron.instance.src.python.example.word_spout.WordSpout"):
"""Returns a mock protobuf Component object from topology_pb2"""
component = topology_pb2.Component()
component.name = name
component.spec = topology_pb2.ComponentObjectSpec.Value("PYTHON_CLASS_NAME")
component.class_name = python_cls
component.config.CopyFrom(config)
return component
def get_mock_stream_id(id="stream_id", component_name="component_name"):
"""Returns a mock protobuf StreamId from topology_pb2"""
stream_id = topology_pb2.StreamId()
stream_id.id = id
stream_id.component_name = component_name
return stream_id
def get_mock_bolt(component=get_mock_component(), inputs=[], outputs=[]):
"""Returns a mock protobuf Bolt object from topology_pb2"""
bolt = topology_pb2.Bolt()
bolt.comp.CopyFrom(component)
for i in inputs:
added = bolt.inputs.add()
added.CopyFrom(i)
for o in outputs:
added = bolt.outputs.add()
added.CopyFrom(o)
return bolt
def get_mock_spout(component=get_mock_component(), outputs=[]):
"""Returns a mock protobuf Spout object from topology_pb2"""
spout = topology_pb2.Spout()
spout.comp.CopyFrom(component)
for out in outputs:
added = spout.outputs.add()
added.CopyFrom(out)
return spout
def get_mock_topology(id="topology_id", name="topology_name", state=1, spouts=[], bolts=[]):
"""Returns a mock protobuf Topology object from topology_pb2"""
# topology
topology = topology_pb2.Topology()
topology.id = id
topology.name = name
topology.state = state
for sp in spouts:
added = topology.spouts.add()
added.CopyFrom(sp)
for bl in bolts:
added = topology.bolts.add()
added.CopyFrom(bl)
return topology
def get_mock_stmgr(id="Stmgr_id", host="127.0.0.1", port=9999, endpoint="hello"):
"""Returns a mock protobuf StMgr object from physical_plan_pb2"""
stmgr = physical_plan_pb2.StMgr()
stmgr.id = id
stmgr.host_name = host
stmgr.data_port = port
stmgr.local_endpoint = endpoint
return stmgr
def get_mock_instance_info(task_id=123, component_index=23, component_name="hello"):
"""Returns a mock protobuf InstanceInfo object from physical_plan_pb2"""
# instance info
instance_info = physical_plan_pb2.InstanceInfo()
instance_info.task_id = task_id
instance_info.component_index = component_index
instance_info.component_name = component_name
return instance_info
def get_mock_instance(instance_id="instance_id",
stmgr_id="Stmgr_id",
info=get_mock_instance_info()):
"""Returns a mock protobuf Instance object from physical_plan_pb2"""
instance = physical_plan_pb2.Instance()
instance.instance_id = instance_id
instance.stmgr_id = stmgr_id
instance.info.CopyFrom(info)
return instance
def get_mock_pplan(topology=get_mock_topology(),
stmgrs=[],
instances=[]):
"""Returns a mock protobuf PhysicalPlan object from physical_plan_pb2"""
pplan = physical_plan_pb2.PhysicalPlan()
pplan.topology.MergeFrom(topology)
if len(stmgrs) == 0:
stmgrs.append(get_mock_stmgr())
if len(instances) == 0:
instances.append(get_mock_instance())
for stmgr in stmgrs:
added = pplan.stmgrs.add()
added.CopyFrom(stmgr)
for instance in instances:
added = pplan.instances.add()
added.CopyFrom(instance)
return pplan
def get_mock_status(status="OK", message="OK Message"):
"""Returns a mock protobuf Status object from common_pb2"""
mock_status = common_pb2.Status()
mock_status.status = common_pb2.StatusCode.Value(status)
mock_status.message = message
return mock_status
def get_mock_assignment_message(pplan=get_mock_pplan()):
"""Returns a mock protobuf NewInstanceAssignmentMessage object from stmgr_pb2"""
# message
mock_message = stmgr_pb2.NewInstanceAssignmentMessage()
mock_message.pplan.MergeFrom(pplan)
return mock_message
def get_mock_register_response(status=get_mock_status(), pplan=get_mock_pplan()):
"""Returns a mock protobuf RegisterInstanceResponse object from stmgr_pb2"""
mock_response = stmgr_pb2.RegisterInstanceResponse()
mock_response.status.MergeFrom(status)
mock_response.pplan.MergeFrom(pplan)
return mock_response
#####
def get_pplan_builder_and_typename():
"""Returns a PhysicalPlan builder callable and typename 'PhysicalPlan'"""
# pylint: disable=unnecessary-lambda
builder = lambda: physical_plan_pb2.PhysicalPlan()
typename = builder().DESCRIPTOR.full_name
return builder, typename
def get_many_mock_pplans():
"""Returns a list of 10 PhysicalPlan objects, differing just by stream manager id"""
pplans_lst = []
for i in range(10):
_id = "Stmgr-" + str(i)
pplan = get_mock_pplan(stmgrs=[get_mock_stmgr(id=_id)])
pplans_lst.append(pplan)
return pplans_lst
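# Illustrative usage sketch: compose the helpers above into a register
# response that carries a one-spout physical plan.
def get_example_register_response():
  """Builds a RegisterInstanceResponse for a minimal one-spout topology."""
  spout = get_mock_spout(outputs=[get_mock_stream_id()])
  topology = get_mock_topology(spouts=[spout])
  pplan = get_mock_pplan(topology=topology)
  return get_mock_register_response(pplan=pplan)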
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""mel conversion ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# mel spectrum constants.
_MEL_BREAK_FREQUENCY_HERTZ = 700.0
_MEL_HIGH_FREQUENCY_Q = 1127.0
def _mel_to_hertz(mel_values, name=None):
"""Converts frequencies in `mel_values` from the mel scale to linear scale.
Args:
mel_values: A `Tensor` of frequencies in the mel scale.
name: An optional name for the operation.
Returns:
A `Tensor` of the same shape and type as `mel_values` containing linear
scale frequencies in Hertz.
"""
with ops.name_scope(name, 'mel_to_hertz', [mel_values]):
mel_values = ops.convert_to_tensor(mel_values)
return _MEL_BREAK_FREQUENCY_HERTZ * (
math_ops.exp(mel_values / _MEL_HIGH_FREQUENCY_Q) - 1.0
)
def _hertz_to_mel(frequencies_hertz, name=None):
"""Converts frequencies in `frequencies_hertz` in Hertz to the mel scale.
Args:
frequencies_hertz: A `Tensor` of frequencies in Hertz.
name: An optional name for the operation.
Returns:
    A `Tensor` of the same shape and type as `frequencies_hertz` containing
frequencies in the mel scale.
"""
with ops.name_scope(name, 'hertz_to_mel', [frequencies_hertz]):
frequencies_hertz = ops.convert_to_tensor(frequencies_hertz)
return _MEL_HIGH_FREQUENCY_Q * math_ops.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))
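# Sanity check for the pair above: _hertz_to_mel(700.0) evaluates to
# _MEL_HIGH_FREQUENCY_Q * ln(2) ~= 781.2 mel, and _mel_to_hertz maps that
# value back to 700 Hz, since the two transforms are exact inverses.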
def _validate_arguments(num_mel_bins, num_spectrogram_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype):
"""Checks the inputs to linear_to_mel_weight_matrix."""
if num_mel_bins <= 0:
raise ValueError('num_mel_bins must be positive. Got: %s' % num_mel_bins)
if num_spectrogram_bins <= 0:
raise ValueError('num_spectrogram_bins must be positive. Got: %s' %
num_spectrogram_bins)
if sample_rate <= 0.0:
raise ValueError('sample_rate must be positive. Got: %s' % sample_rate)
if lower_edge_hertz < 0.0:
raise ValueError('lower_edge_hertz must be non-negative. Got: %s' %
lower_edge_hertz)
if lower_edge_hertz >= upper_edge_hertz:
raise ValueError('lower_edge_hertz %.1f >= upper_edge_hertz %.1f' %
(lower_edge_hertz, upper_edge_hertz))
if upper_edge_hertz > sample_rate / 2:
raise ValueError('upper_edge_hertz must not be larger than the Nyquist '
'frequency (sample_rate / 2). Got: %s for sample_rate: %s'
% (upper_edge_hertz, sample_rate))
if not dtype.is_floating:
raise ValueError('dtype must be a floating point type. Got: %s' % dtype)
def linear_to_mel_weight_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
sample_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0,
dtype=dtypes.float32,
name=None):
"""Returns a matrix to warp linear scale spectrograms to the [mel scale][mel].
Returns a weight matrix that can be used to re-weight a `Tensor` containing
`num_spectrogram_bins` linearly sampled frequency information from
`[0, sample_rate / 2]` into `num_mel_bins` frequency information from
`[lower_edge_hertz, upper_edge_hertz]` on the [mel scale][mel].
For example, the returned matrix `A` can be used to right-multiply a
spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
scale spectrum values (e.g. STFT magnitudes) to generate a "mel spectrogram"
`M` of shape `[frames, num_mel_bins]`.
# `S` has shape [frames, num_spectrogram_bins]
# `M` has shape [frames, num_mel_bins]
M = tf.matmul(S, A)
The matrix can be used with @{tf.tensordot} to convert an arbitrary rank
`Tensor` of linear-scale spectral bins into the mel scale.
# S has shape [..., num_spectrogram_bins].
# M has shape [..., num_mel_bins].
M = tf.tensordot(S, A, 1)
# tf.tensordot does not support shape inference for this case yet.
M.set_shape(S.shape[:-1].concatenate(A.shape[-1:]))
Args:
num_mel_bins: Python int. How many bands in the resulting mel spectrum.
num_spectrogram_bins: Python int. How many bins there are in the source
spectrogram data, which is understood to be `fft_size // 2 + 1`, i.e. the
spectrogram only contains the nonredundant FFT bins.
sample_rate: Python float. Samples per second of the input signal used to
create the spectrogram. We need this to figure out the actual frequencies
for each spectrogram bin, which dictates how they are mapped into the mel
scale.
lower_edge_hertz: Python float. Lower bound on the frequencies to be
included in the mel spectrum. This corresponds to the lower edge of the
lowest triangular band.
upper_edge_hertz: Python float. The desired top edge of the highest
frequency band.
dtype: The `DType` of the result matrix. Must be a floating point type.
name: An optional name for the operation.
Returns:
A `Tensor` of shape `[num_spectrogram_bins, num_mel_bins]`.
Raises:
ValueError: If num_mel_bins/num_spectrogram_bins/sample_rate are not
positive, lower_edge_hertz is negative, frequency edges are incorrectly
ordered, or upper_edge_hertz is larger than the Nyquist frequency.
[mel]: https://en.wikipedia.org/wiki/Mel_scale
"""
with ops.name_scope(name, 'linear_to_mel_weight_matrix') as name:
_validate_arguments(num_mel_bins, num_spectrogram_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype)
# To preserve accuracy, we compute the matrix at float64 precision and then
# cast to `dtype` at the end. This function can be constant folded by graph
# optimization since there are no Tensor inputs.
sample_rate = ops.convert_to_tensor(
sample_rate, dtypes.float64, name='sample_rate')
lower_edge_hertz = ops.convert_to_tensor(
lower_edge_hertz, dtypes.float64, name='lower_edge_hertz')
upper_edge_hertz = ops.convert_to_tensor(
upper_edge_hertz, dtypes.float64, name='upper_edge_hertz')
zero_float64 = ops.convert_to_tensor(0.0, dtypes.float64)
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = math_ops.linspace(
zero_float64, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
spectrogram_bins_mel = array_ops.expand_dims(
_hertz_to_mel(linear_frequencies), 1)
    # Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
    # center of each band serves as the upper edge of the band below it and
    # the lower edge of the band above it. Accordingly, we divide
    # [lower_edge_hertz, upper_edge_hertz] into num_mel_bins + 2 pieces.
band_edges_mel = shape_ops.frame(
math_ops.linspace(_hertz_to_mel(lower_edge_hertz),
_hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2), frame_length=3, frame_step=1)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel, center_mel, upper_edge_mel = tuple(array_ops.reshape(
t, [1, num_mel_bins]) for t in array_ops.split(
band_edges_mel, 3, axis=1))
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel)
# Intersect the line segments with each other and zero.
mel_weights_matrix = math_ops.maximum(
zero_float64, math_ops.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
mel_weights_matrix = array_ops.pad(
mel_weights_matrix, [[bands_to_zero, 0], [0, 0]])
# Cast to the desired type.
return math_ops.cast(mel_weights_matrix, dtype, name=name)
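# Illustrative usage sketch (hypothetical values; `S` is assumed to be a
# [frames, 129] magnitude spectrogram computed elsewhere):
#
#   A = linear_to_mel_weight_matrix(num_mel_bins=64,
#                                   num_spectrogram_bins=129,
#                                   sample_rate=16000,
#                                   lower_edge_hertz=80.0,
#                                   upper_edge_hertz=7600.0)
#   M = math_ops.matmul(S, A)  # [frames, 64] mel spectrogram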
| |
from collections import OrderedDict
def bl_components_glossary(name=None, return_list=False):
if return_list:
list1=[
"IC_PhotonBeamPencil",
"IC_DriftSpace",
"IC_Lens",
"BC_ElectronBeamGaussian",
"BC_BendingMagnet",
"BC_InsertionDevice",
"BC_Slit",
"BC_OpticalSurface",
"BC_Attenuator",
"BC_Mirror",
"BC_PerfectCrystal"]
return list1
    if name is None:
return None
a = OrderedDict()
if name == "IC_PhotonBeamPencil":
a["__name"] = "IC_PhotonBeam"
a["__title"] = "Photon pencil beam"
a["__labels"] = ["Minimum photon energy [eV]", "Minimum photon energy [eV]"]
a["__flags"] = ["True","True"]
a["__help"] = ["Photon energy in eV","Photon energy in eV"]
a["energyMin"] = 1000.0
a["energyMax"] = 100000.0
return a
elif name == "IC_DriftSpace":
a["__name"] = "IC_DriftSpace"
a["__title"] = "Drift space in vacuum"
a["__labels"] = ["Length"]
a["__flags"] = ["True"]
a["__help"] = ["Drift space length"]
a["d"] = 0.0
elif name == "IC_Lens":
a["__name"] = "IC_Lens"
a["__title"] = "Ideal lens"
a["__labels"] = ["Focal length in horizontal", "Focal length in vertical"]
a["__flags"] = ["True","True"]
a["__help"] = ["Focal length in the horizontal plane","Focal length in the vertical plane"]
a["FH"] = 1.0
a["FV"] = 1.0
        # TODO: add gamma
elif name == "BC_ElectronBeamGaussian":
a["__name"] = "BC_ElectronBeamGaussian"
a["__title"] = "Gaussian electron beam"
a["__labels"] = [
"Electron Energy in the storage ring",
"Electron current intensity [A]",
"Orbit offset (x,x',y,y',s,delta) from where initial conditions are defined",
"Type of description",
"Spread RMS of the energy of the electrons",
"Horizontal emittance",
"Vertical emittance",
"Beta function (Horizontal)",
"Beta function (Vertical)",
"Alpha function (Horizontal)",
"Alpha function (Vertical)",
"Bunch length",
"Dispersion (Horizontal)",
"Dispersion (Vertical)",
"Dispersion Derivative (Horizontal)",
"Dispersion Derivative (Vertical)",
"Sigma matrix",
"M matrix"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = [" "] * len(a["__labels"])
a["ElectronEnergy"] = 1.0
a["ElectronCurrent"] = 0.1
a["OrbitOffset"] = "[0.0,0.0,0.0,0.0,0.0,0.0]"
a["InputType"] = ["0","Twiss description","Full description"]
a["ElectronEnergySpread"] = 0.0
a["EmittanceH"] = 0.0
a["EmittanceV"] = 0.0
a["BetaH"] = 0.0
a["BetaV"] = 0.0
a["AlphaH"] = 0.0
a["AlphaV"] = 0.0
a["BunchLength"] = 0.0
a["DispersionH"] = 0.0
a["DispersionV"] = 0.0
a["DispersionDerivH"] = 0.0
a["DispersionDerivV"] = 0.0
a["SigmaMatrix"] = """[ [0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0] ]"""
a["Mmatrix"] = """[ [0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0],\
[0.0,0.0,0.0,0.0,0.0,0.0] ]"""
elif name == "BC_BendingMagnet":
a["__name"] = "BC_BendingMagnet"
a["__title"] = "_Bending Magnet source"
a["__labels"] = ["Bending magnet magnetic field [T}",
"File with magnetic field errors",
"Length (angular) of the BM [mrad]"]
a["__flags"] = ["True","True","True"]
a["__help"] = ["Photon energy in eV","Photon energy in eV"]
a["MagneticField"] = 1.0
a["MagneticFieldErrors"] = "myfile.dat"
a["HorizontalArc"] = 1.0
elif name == "BC_InsertionDevice":
a["__name"] = "InsertionDevice"
a["__title"] = "Insertion Device source"
a["__labels"] = ["Type of ID",
"B from",
"ID period [m]",
"Number of periods",
"K value (Horizontal)",
"K value (vertical)",
"Phase between H and V magnets",
"Gap taper (Horizontal)",
"Gap taper (vertical)",
"File with harmonics",
"File with magnetic field"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = [" "] * len(a["__labels"])
a["IDType"] = ["0","Wiggler","Undulator"]
a["InputType"] = ["0","K values","B from harmonics","B from table"]
a["PeriodID"] = 1.0
a["N"] = 100
a["phase"] = 0.0
a["taperH"] = 0.0
a["taperV"] = 0.0
a["Bharmonics"] = "myfile.dat"
a["Btable"] = "myfile.dat"
elif name == "BC_Slit":
a["__name"] = "BC_Slit"
a["__title"] = "Slit or aperture"
a["__labels"] = ["Center (Horizontal)",
"Center (Vertical)",
"Shape",
"aperture of beam stop",
"gap/obstruction (Horizontal) [m]",
"gap/obstruction (vertical) [m]",
"polygone coordinates (Horizontal)",
"polygone coordinates (Vertical)"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = [" "] * len(a["__labels"])
a["centerH"] = 0.0
a["centerV"] = 0.0
a["shape"] = ["0","None (fully opened)","rectangular","elliptical","free form (polygon"]
a["Stop"] = ["0","Aperture/slit","beam stop"]
a["gapH"] = 1.0
a["gapV"] = 1.0
a["coorH"] = 0.0
a["coorV"] = 0.0
elif name == "BC_OpticalSurface":
a["__name"] = "BC_OpticalSurface"
a["__title"] = "Optical Surface (form)"
a["__labels"] = ["Limits",
"length [m]",
"Width [m]",
"shape",
"coeff",
"Geometry"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = [" "] * len(a["__labels"])
a["limits"] = ["0","Infinite surface","rectangle","ellipse","free form"]
a["length"] = 1.0
a["width"] = 1.0
a["oeshape"] = ["0","Plane","Conic coefficients","Sphere (conic)","Ellipsoid (conic)","paraboloid (conic)","hyperboloid (conic)","Toroid","Free (mesh)","Free (polynomial"]
a["conicCoeff"] = "[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]"
a["geometry"] = ["0","reflecting (e.g. mirrors)","transmitting (e.g., lenses, Laue crystals)","both (e.g., diamond crystals, beamsplitters)"]
elif name == "BC_Attenuator":
a["__name"] = "BC_Attenuator"
a["__title"] = "BC Attenuator"
a["__labels"] = ["material",
"thickness [m]",
"material density [g/cm^3]"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = ["String describing the material (e.g., Cu, H2O, etc)."
"Attenuator thickness",
"material thickness"]
a["material"] = "Si"
a["thickness"] = 1.0e-3
a["density"] = 1.0
elif name == "BC_Mirror":
a["__name"] = "BC_Mirror"
a["__title"] = "Mirror"
a["__labels"] = ["coating material",
"coating thickness",
"coating density"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = ["String describing the coating material (e.g., Cu, B4C, etc).",
"Coating thickness [m]",
"Coating density [g/cm^3]"]
a["coating"] = "Rh"
a["thickness"] = 1e-6
a["density"] = 10.0
elif name == "BC_PerfectCrystal":
a["__name"] = "BC_PerfectCrystal"
a["__title"] = "Perfect Crystal"
a["__labels"] = ["material",
"thickness",
"crystallographic cell parameters",
"number of atoms in unit cell",
"atomic number of atoms in unic cell",
"coordinates of atoms in crystallographic cell",
"occupancy",
"temperature at which unit cell is given [K]",
"Crystal temperature [K]",
"Miller indices",
"Asymmetry angle [deg]"]
a["__flags"] = ["True"]* len(a["__labels"])
a["__help"] = ["String describing the coating material (e.g., Si, Quartz, etc).",
"Crystal thickness [m]",
"Crystallographic cell parameters a,b,c,alpha,beta,gamma [A,deg]",
"Number of atoms in unit cell",
"Atomic number of atoms in unit cell",
"Coordinates of atoms in crystallographic cell",
"Occupancy coeff of atoms in unit cell",
"Temperature at which unit cell is given [K]",
"Crystal temperature [K]",
"Miller indices of selected reflection",
"Asymmetry angle [deg]"]
a["material"] = "Si"
a["thickness"] = 100e-6
a["cell"] = "[5.430700,5.430700,5.430700,90,90,90]"
a["Natoms"] = 8
a["Zatoms"] = "[14,14,14,14,14,14,14,14]"
#a["XYZ"] = "[0.000000,0.000000,0.000000] "
a["XYZ"] = """[ [0.000000,0.000000,0.000000],\
[0.000000,0.500000,0.500000],\
[0.500000,0.000000,0.500000],\
[0.500000,0.500000,0.000000],\
[0.250000,0.250000,0.250000],\
[0.250000,0.750000,0.750000],\
[0.750000,0.250000,0.750000],\
[0.750000,0.750000,0.250000] ]"""
a["occupancy"] = "[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]"
a["Temperature0"] = 300.0
a["Temperature"] = 330.0
a["Miller"] = "[1,1,1]"
a["AsymmetryAngle"] = 0.0
# TODO: add BC_Multilayer, BC_LensSingle, Compound elements...
#
# list all non-empty keywords
#
return a
if __name__ == "__main__":
list1 = bl_components_glossary(return_list=True)
for k in list1:
print ("-----------------------%s ------------------------------"%(k))
tmp = bl_components_glossary(k)
for i,j in tmp.items():
#print ("**%s** " % (i[:2]) )
#if (i[:2] != "__"):
print ("%s = %s" % (i,j))
print (" ")
| |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import functools
import imath
import IECore
import Gaffer
import GafferUI
## Appends items to the IECore.MenuDefinition object passed to build a File menu containing
# standard open/save/revert/etc
def appendDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/New", { "command" : new, "shortCut" : "Ctrl+N" } )
menuDefinition.append( prefix + "/Open...", { "command" : open, "shortCut" : "Ctrl+O" } )
menuDefinition.append( prefix + "/Open Recent", { "subMenu" : openRecent } )
menuDefinition.append( prefix + "/OpenDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Save", { "command" : save, "shortCut" : "Ctrl+S" } )
menuDefinition.append( prefix + "/Save As...", { "command" : saveAs, "shortCut" : "Shift+Ctrl+S" } )
menuDefinition.append( prefix + "/Revert To Saved", { "command" : revertToSaved, "active" : __revertToSavedAvailable } )
menuDefinition.append( prefix + "/SaveDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Export Selection...", { "command" : exportSelection, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Import...", { "command" : importFile } )
menuDefinition.append( prefix + "/ImportExportDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Settings...", { "command" : showSettings } )
## A function suitable as the command for a File/New menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def new( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
newScript = Gaffer.ScriptNode()
Gaffer.NodeAlgo.applyUserDefaults( newScript )
application["scripts"].addChild( newScript )
## A function suitable as the command for a File/Open menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def open( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
path, bookmarks = __pathAndBookmarks( scriptWindow )
dialogue = GafferUI.PathChooserDialogue( path, title="Open script", confirmLabel="Open", valid=True, leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if not path :
return
__open( scriptWindow.scriptNode(), str( path ) )
## Opens a script and adds it to the application, as if the user
# had done so via the File Open dialogue.
#
# :param asNew When true, the script's file name/dirty state will be reset
# upon load, effectively creating an untitled copy.
def addScript( application, fileName, asNew = False ) :
return __addScript( application, fileName, asNew = asNew )
def __addScript( application, fileName, dialogueParentWindow = None, asNew = False ) :
recoveryFileName = None
backups = GafferUI.Backups.acquire( application, createIfNecessary = False )
if backups is not None :
recoveryFileName = backups.recoveryFile( fileName )
if recoveryFileName :
dialogue = GafferUI.ConfirmationDialogue(
title = "Backup Available",
message = "A more recent backup is available. Open backup instead?",
confirmLabel = "Open Backup",
cancelLabel = "Open",
)
useBackup = dialogue.waitForConfirmation( parentWindow = dialogueParentWindow )
if useBackup is None :
return
elif not useBackup :
recoveryFileName = None
dialogue.setVisible( False )
script = Gaffer.ScriptNode()
script["fileName"].setValue( recoveryFileName or fileName )
with GafferUI.ErrorDialogue.ErrorHandler(
title = "Errors Occurred During Loading",
closeLabel = "Oy vey",
parentWindow = dialogueParentWindow
) :
script.load( continueOnError = True )
if asNew or recoveryFileName :
# If we loaded a backup (or as new), rename the script to the old
# filename (or nothing) so the user can resave and continue as before.
script["fileName"].setValue( "" if asNew else fileName )
script["unsavedChanges"].setValue( True )
application["scripts"].addChild( script )
if not asNew:
addRecentFile( application, fileName )
return script
def __open( currentScript, fileName ) :
application = currentScript.ancestor( Gaffer.ApplicationRoot )
currentWindow = GafferUI.ScriptWindow.acquire( currentScript )
script = __addScript( application, fileName, dialogueParentWindow = currentWindow )
if not script :
return
removeCurrentScript = False
if not currentScript["fileName"].getValue() and not currentScript["unsavedChanges"].getValue() :
# the current script is empty - the user will think of the operation as loading
# the new script into the current window, rather than adding a new window. so make it
# look like that.
newWindow = GafferUI.ScriptWindow.acquire( script )
## \todo We probably want a way of querying and setting geometry in the public API
newWindow._qtWidget().restoreGeometry( currentWindow._qtWidget().saveGeometry() )
currentWindow.setVisible( False )
removeCurrentScript = True
# We must defer the removal of the old script because otherwise we trigger a crash bug
# in PySide - I think this is because the menu item that invokes us is a child of
# currentWindow, and that will get deleted immediately when the script is removed.
if removeCurrentScript :
GafferUI.EventLoop.addIdleCallback( functools.partial( __removeScript, application, currentScript ) )
def __removeScript( application, script ) :
application["scripts"].removeChild( script )
return False # remove idle callback
## A function suitable as the submenu callable for a File/OpenRecent menu item. It must be invoked
# from a menu which has a ScriptWindow in its ancestry.
def openRecent( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
currentScript = scriptWindow.scriptNode()
applicationRoot = currentScript.ancestor( Gaffer.ApplicationRoot )
recentFiles = []
with IECore.IgnoredExceptions( AttributeError ) :
recentFiles = applicationRoot.__recentFiles
result = IECore.MenuDefinition()
if recentFiles :
for index, fileName in enumerate( recentFiles ) :
result.append(
"/" + str( index ),
{
"label": os.path.basename( fileName ),
"command" : functools.partial( __open, currentScript, fileName ),
"description" : fileName,
"active" : os.path.isfile( fileName )
}
)
else :
result.append( "/None Available", { "active" : False } )
return result
## This function adds a file to the list shown in the File/OpenRecent menu, and saves a recentFiles.py
# in the application's user startup folder so the settings will persist.
def addRecentFile( application, fileName ) :
if isinstance( application, Gaffer.Application ) :
applicationRoot = application.root()
else :
applicationRoot = application
try :
applicationRoot.__recentFiles
except AttributeError :
applicationRoot.__recentFiles = []
if fileName in applicationRoot.__recentFiles :
applicationRoot.__recentFiles.remove( fileName )
applicationRoot.__recentFiles.insert( 0, fileName )
del applicationRoot.__recentFiles[6:]
# Accessing via builtins to avoid shadowing by our own `open()` method above.
with __builtins__["open"]( os.path.join( applicationRoot.preferencesLocation(), "recentFiles.py" ), "w" ) as f :
f.write( "# This file was automatically generated by Gaffer.\n" )
f.write( "# Do not edit this file - it will be overwritten.\n\n" )
f.write( "import GafferUI\n" )
for fileName in reversed( applicationRoot.__recentFiles ) :
f.write( "GafferUI.FileMenu.addRecentFile( application, \"%s\" )\n" % fileName )
## A function suitable as the command for a File/Save menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def save( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
if script["fileName"].getValue() :
with GafferUI.ErrorDialogue.ErrorHandler( title = "Error Saving File", parentWindow = scriptWindow ) :
script.save()
else :
saveAs( menu )
## A function suitable as the command for a File/Save As menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def saveAs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
path, bookmarks = __pathAndBookmarks( scriptWindow )
dialogue = GafferUI.PathChooserDialogue( path, title="Save script", confirmLabel="Save", leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if not path :
return
path = str( path )
if not path.endswith( ".gfr" ) :
path += ".gfr"
script["fileName"].setValue( path )
with GafferUI.ErrorDialogue.ErrorHandler( title = "Error Saving File", parentWindow = scriptWindow ) :
script.save()
application = script.ancestor( Gaffer.ApplicationRoot )
addRecentFile( application, path )
## A function suitable as the command for a File/Revert To Saved menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def revertToSaved( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
dialogue = GafferUI.ConfirmationDialogue(
title = "Discard Unsaved Changes?",
message = "There are unsaved changes which will be lost."
"Discard them and revert?",
confirmLabel = "Revert",
cancelLabel = "Cancel",
)
if not dialogue.waitForConfirmation( parentWindow = scriptWindow ) :
return
with GafferUI.ErrorDialogue.ErrorHandler(
title = "Errors Occurred During Loading",
closeLabel = "Oy vey",
parentWindow = scriptWindow
) :
scriptWindow.scriptNode().load( continueOnError = True )
def __revertToSavedAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
if script["fileName"].getValue() and script["unsavedChanges"].getValue() :
return True
return False
## A function suitable as the command for a File/Export Selection... menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def exportSelection( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
path, bookmarks = __pathAndBookmarks( scriptWindow )
selection = script.selection()
parent = selection[0].parent()
for node in selection :
if not parent.isAncestorOf( node ) :
assert( node.parent().isAncestorOf( parent ) )
parent = node.parent()
dialogue = GafferUI.PathChooserDialogue( path, title="Export selection", confirmLabel="Export", leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scriptWindow )
if not path :
return
path = str( path )
if not path.endswith( ".gfr" ) :
path += ".gfr"
script.serialiseToFile( path, parent, script.selection() )
## A function suitable as the command for a File/Import File... menu item. It must be invoked from a menu which
# has a ScriptWindow in its ancestry.
def importFile( menu ) :
scope = GafferUI.EditMenu.scope( menu )
path, bookmarks = __pathAndBookmarks( scope.scriptWindow )
dialogue = GafferUI.PathChooserDialogue( path, title="Import script", confirmLabel="Import", valid=True, leaf=True, bookmarks=bookmarks )
path = dialogue.waitForPath( parentWindow = scope.scriptWindow )
if path is None :
return
errorHandler = GafferUI.ErrorDialogue.ErrorHandler(
title = "Errors Occurred During Loading",
closeLabel = "Oy vey",
parentWindow = scope.scriptWindow
)
with Gaffer.UndoScope( scope.script ), errorHandler :
newChildren = []
c = scope.parent.childAddedSignal().connect( lambda parent, child : newChildren.append( child ) )
scope.script.importFile( str( path ), parent = scope.parent, continueOnError = True )
newNodes = [ c for c in newChildren if isinstance( c, Gaffer.Node ) ]
scope.script.selection().clear()
scope.script.selection().add( newNodes )
if scope.graphEditor :
fallbackPosition = scope.graphEditor.bound().size() / 2
fallbackPosition = scope.graphEditor.graphGadgetWidget().getViewportGadget().rasterToGadgetSpace(
imath.V2f( fallbackPosition.x, fallbackPosition.y ),
gadget = scope.graphEditor.graphGadget()
).p0
fallbackPosition = imath.V2f( fallbackPosition.x, fallbackPosition.y )
scope.graphEditor.graphGadget().getLayout().positionNodes(
scope.graphEditor.graphGadget(), scope.script.selection(), fallbackPosition
)
scope.graphEditor.frame( scope.script.selection(), extend = True )
## A function suitable as the command for a File/Settings... menu item.
def showSettings( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
settingsWindow = None
for window in scriptWindow.childWindows() :
if hasattr( window, "_settingsEditor" ) :
settingsWindow = window
break
if settingsWindow is None :
settingsWindow = GafferUI.Window( "Settings", borderWidth=8 )
settingsWindow._settingsEditor = True
settingsWindow.setChild( GafferUI.NodeUI.create( scriptWindow.scriptNode() ) )
scriptWindow.addChildWindow( settingsWindow )
settingsWindow.setVisible( True )
def __selectionAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
return bool( scriptWindow.scriptNode().selection().size() )
def __pathAndBookmarks( scriptWindow ) :
bookmarks = GafferUI.Bookmarks.acquire(
scriptWindow,
pathType = Gaffer.FileSystemPath,
category = "script",
)
currentFileName = scriptWindow.scriptNode()["fileName"].getValue()
if currentFileName :
path = Gaffer.FileSystemPath( os.path.dirname( os.path.abspath( currentFileName ) ) )
else :
path = Gaffer.FileSystemPath( bookmarks.getDefault( scriptWindow ) )
path.setFilter( Gaffer.FileSystemPath.createStandardFilter( [ "gfr" ] ) )
return path, bookmarks
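# Typical wiring (sketch, not part of this module): application startup code
# builds a menu definition and delegates the File menu entries to this module.
#
#   scriptWindowMenu = GafferUI.ScriptWindow.menuDefinition( application )
#   GafferUI.FileMenu.appendDefinitions( scriptWindowMenu, prefix="/File" )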
| |
#!/usr/bin/env python
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from itertools import permutations
except ImportError:
from matchmaker.utils import permutations
from datetime import date
import MySQLdb as mysqldb
import os
from math import log
from collections import defaultdict
from pprint import pprint
from matchmaker import msg
from matchmaker.kmeans import *
class Database:
def __init__(self, datadir):
"""Constructor
"""
self.datadir = datadir
self.test_u = []
self.top_repos = []
self.r_matrix = {} # special pickling
self.u_matrix = {} # special pickling
self.r_idf_avg = {}
self.fields = ['test_u', 'top_repos', 'r_idf_avg']
self.save_db = False
if self.pickle_jar():
return
fields = (
# dict key = values
("watching_r repos = user", list),
("u_watching user = repos", list),
("r_info repos = author, name, creation", list),
("r_name repos_name = repos", list),
("r_langs repos = lang, kloc", list),
("r_lang_tuple repos = tuple_of_langs", list),
("r_prefixes prefix = repos", list),
("r_idf repos = user, tf_idf", list),
("forks_of_r parent = child", list),
("parent_of_r child = parent", int),
("gparent_of_r child = grandparent", int),
("lang_by_r lang = kloc, repos", list),
("u_authoring author = repos", list),
)
for defn, datatype in fields:
name, key, _ = defn.split(None, 2)
setattr(self, name, defaultdict(datatype))
self.fields.append(name)
self.fields.sort()
# collect data
self.parse_test()
self.parse_watching()
self.parse_repos()
self.parse_lang()
self.fill_pickle_jar()
def pickle_jar(self):
jar = '/'.join((self.datadir, "pickle.jar"))
if os.path.exists(jar):
try:
jarf = open(jar, 'rb')
d = pickle.load(jarf)
jarf.close()
except:
return False
self.fields = d['fields']
for field in self.fields:
setattr(self, field, d[field])
return True
else:
return False
def fill_pickle_jar(self):
jar = '/'.join((self.datadir, "pickle.jar"))
d = {}
msg("Filling pickle jar '%s'" % jar)
for field in self.fields:
d[field] = getattr(self, field)
d['fields'] = self.fields
jarf = open(jar, 'wb')
pickle.dump(d, jarf)
jarf.close()
def summary(self, unabridged=False):
props = ("watching_r "
"u_watching "
"r_info "
"r_name "
"r_langs "
"forks_of_r "
"parent_of_r "
"gparent_of_r "
"lang_by_r "
"u_authoring ").split()
for prop in props:
print(">> %s" % prop)
if unabridged:
pprint(dict(getattr(self, prop).items()))
else:
pprint(dict(getattr(self, prop).items()[:5]))
print("")
msg("test_u")
if unabridged:
pprint(self.test_u)
else:
pprint(self.test_u[:5])
def parse_watching(self):
"""Parse data.txt which has main user-repository relationships
"""
msg("parsing data.txt")
lines = file('/'.join((self.datadir, "data.txt"))).read().split("\n")
test_r = set()
pairs = [[int(x) for x in line.split(":")] for line in lines if line]
for user, repos in pairs:
self.watching_r[repos].append(user)
self.u_watching[user].append(repos)
if user in self.test_u:
test_r.add(repos)
msg("calculating tf-idf")
iter = 0
total_users = float(len(self.u_watching))
for repos, users in self.watching_r.items():
idf_repos = log(total_users / (1.0 + len(self.watching_r[repos])))
tf_idf_avg = 0.0
for user in users:
tf_user = 1.0 / len(self.u_watching[user])
tf_idf = tf_user * idf_repos
tf_idf_avg += tf_idf
self.r_idf[repos].append((user, tf_idf))
# counter
iter += 1
if iter % 10000 == 0:
msg("tf-idf iter %d" % iter)
self.r_idf_avg[repos] = tf_idf_avg / len(users)
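# Worked example (illustrative): with 1000 users in total, a repository
# watched by 99 users has idf = log(1000 / (1 + 99)) = log(10) ~= 2.30; a
# watcher following 10 repositories contributes tf = 1/10, so that
# (user, repos) pair scores tf-idf ~= 0.23.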
msg("making top_repos")
top_repos = sorted(self.watching_r.items(),
key=lambda x:len(x[1]),
reverse=True)
self.top_repos = [repos[0] for repos in top_repos[:50]]
if not self.save_db:
return
conn = mysqldb.connect(host='127.0.0.1',
user='root',
passwd='',
db='matrix')
c = conn.cursor()
iter = 0
values = []
msg("making u_matrix_fwd")
users = sorted(self.u_watching.keys())
for i in xrange(len(users)):
for j in xrange(i + 1, len(users)):
s_i = set(self.u_watching[users[i]])
s_j = set(self.u_watching[users[j]])
diff = len(set.symmetric_difference(s_i, s_j))
values.append("(%d,%d,SQRT(%d))" % (users[i], users[j], diff))
iter += 1
if iter % 10000 == 0:
sql = "".join(("INSERT INTO u_matrix_fwd(u1,u2,val) VALUES",
",".join(values)))
c.execute(sql)
conn.commit()
values = []
msg("umf iter %d" % iter)
if values:
sql = "".join(("INSERT INTO u_matrix_fwd(u1,u2,val) VALUES",
",".join(values)))
c.execute(sql)
conn.commit()
msg("umf iter %d [END]" % iter)
iter = 0
values = []
msg("making u_matrix_bkwd")
users = sorted(self.u_watching.keys(), reverse=True)
for i in xrange(len(users)):
for j in xrange(i + 1, len(users)):
s_i = set(self.u_watching[users[i]])
s_j = set(self.u_watching[users[j]])
diff = len(set.symmetric_difference(s_i, s_j))
values.append("(%d,%d,SQRT(%d))" % (users[i], users[j], diff))
iter += 1
if iter % 10000 == 0:
sql = "".join(("INSERT INTO u_matrix_bkwd(u1,u2,val) VALUES",
",".join(values)))
c.execute(sql)
conn.commit()
values = []
msg("umb iter %d" % iter)
if values:
sql = "".join(("INSERT INTO u_matrix_bkwd(u1,u2,val) VALUES",
",".join(values)))
c.execute(sql)
conn.commit()
msg("umb iter %d [END]" % iter)
return
# ------------------------------------------------------------
# NOTE: the r_matrix construction below is unreachable because of the
# return above; it is retained for reference only.
iter = 0
msg("making r_matrix_fwd")
for repos in self.u_watching.values():
repos.sort()
for i in xrange(len(repos)):
for j in xrange(i + 1, len(repos)):
r_i, r_j = repos[i], repos[j]
if r_i not in self.r_matrix:
self.r_matrix[r_i] = {r_j: 1}
elif r_j not in self.r_matrix[r_i]:
self.r_matrix[r_i][r_j] = 1
else:
self.r_matrix[r_i][r_j] += 1
iter += 1
if iter % 100000 == 0:
msg("[] iter %d" % iter)
iter = 0
msg("saving r_matrix_fwd")
values = []
for r_i in self.r_matrix:
for r_j in self.r_matrix[r_i]:
values.append("(%d,%d,%d)"
% (r_i, r_j, self.r_matrix[r_i][r_j]))
iter += 1
if iter % 5000 == 0:
sql = "".join(("INSERT INTO r_matrix_fwd(r1,r2,val) VALUES",
",".join(values)))
c.execute(sql)
values = []
if iter % 10000 == 0:
msg("DB iter %d" % iter)
conn.commit()
if values:
sql = "".join(("INSERT INTO r_matrix_fwd(r1,r2,val) VALUES",
",".join(values)))
c.execute(sql)
iter = 0
msg("making r_matrix_bkwd")
for repos in self.u_watching.values():
repos.sort(reverse=True)
for i in xrange(len(repos)):
for j in xrange(i + 1, len(repos)):
r_i, r_j = repos[i], repos[j]
if r_i not in self.r_matrix:
self.r_matrix[r_i] = {r_j: 1}
elif r_j not in self.r_matrix[r_i]:
self.r_matrix[r_i][r_j] = 1
else:
self.r_matrix[r_i][r_j] += 1
iter += 1
if iter % 100000 == 0:
msg("[] iter %d" % iter)
iter = 0
msg("saving r_matrix_bkwd")
values = []
for r_i in self.r_matrix:
for r_j in self.r_matrix[r_i]:
values.append("(%d,%d,%d)"
% (r_i, r_j, self.r_matrix[r_i][r_j]))
iter += 1
if iter % 5000 == 0:
sql = "".join(("INSERT INTO r_matrix_bkwd(r1,r2,val) VALUES",
",".join(values)))
c.execute(sql)
values = []
if iter % 10000 == 0:
msg("DB iter %d" % iter)
conn.commit()
if values:
sql = "".join(("INSERT INTO r_matrix_bkwd(r1,r2,val) VALUES",
",".join(values)))
c.execute(sql)
def parse_repos(self):
"""Parse repos.txt which has repository lineage information
"""
msg("parsing repos.txt")
lines = file('/'.join((self.datadir, "repos.txt"))).read().split("\n")
pairs = [line.replace(":", ",").split(",") for line in lines if line]
pairs = [tuple([int(pair[0]),
int(pair[3]) if pair[3:4] else 0,
pair[1],
pair[2]])
for pair in pairs]
for repos, parent, name, creation in pairs:
if parent > 0:
self.forks_of_r[parent].append(repos)
self.parent_of_r[repos] = parent
author, name = name.split("/")
words = [int(x) for x in creation.split("-")]
creation = date(words[0], words[1], words[2]).toordinal()
self.r_info[repos] = (author, name, creation)
self.u_authoring[author].append(repos)
self.r_name[name].append(repos)
words = name.lower().replace("-", "_").replace(".", "_")
words = words.split("_")
prefixes = [w for w in words if len(w) > 2][:-1]
if not prefixes:
continue
for i in xrange(1, len(prefixes)):
prefix = "-".join(prefixes[0:i])
if prefix in ('the', 'test', 'php', 'acts'):
continue
self.r_prefixes[prefix].append(repos)
for repos_gen1, repos_gen2 in self.parent_of_r.items():
if repos_gen2 in self.parent_of_r:
repos_gen3 = self.parent_of_r[repos_gen2]
self.gparent_of_r[repos_gen1] = repos_gen3
def parse_lang(self):
"""Get lang.txt which has language composition information
"""
msg("parsing lang.txt")
lines = file('/'.join((self.datadir, "lang.txt"))).read().split("\n")
pairs = [line.split(":") for line in lines if line]
pairs = [(int(pair[0]),
[tuple(x.split(";")) for x in pair[1].split(",")])
for pair in pairs]
pairs = [(x, tuple([(int(z[1]), z[0].lower()) for z in y]))
for (x, y) in pairs]
all_langs = defaultdict(bool)
for repos, langs in pairs:
for kloc, lang in langs:
all_langs[lang] = True
all_langs = sorted(all_langs.keys())
msg("build lang_by_r and r_langs")
for repos, langs in pairs:
for kloc, lang in langs:
lnloc = int(log(kloc + 1, 10))
self.lang_by_r[lang].append((lnloc, repos))
self.r_langs[repos].append((lang, lnloc))
for lang in self.lang_by_r.keys():
self.lang_by_r[lang].sort(key=lambda x:x[1])
def parse_test(self):
"""Parse test.txt which has test subjects
"""
msg("parsing test.txt")
lines = file('/'.join((self.datadir, "test.txt"))).read().split("\n")
self.test_u = sorted([int(line) for line in lines if line])
| |
import os
import subprocess
import utility
import ConfigParser
import grapeConfig
import StringIO
class GrapeGitError(Exception):
# arguments must be kept as keywords to allow pickling
def __init__(self, errmsg='', returnCode=-1, gitOutput='', gitCommand='', cwd=os.getcwd()):
self.msg = errmsg
self.code = returnCode
self.gitOutput = gitOutput
self.gitCommand = gitCommand
self.commError = True if \
(self.code == 128 and "fatal: Could not read from remote" in self.gitOutput ) or \
("fatal: unable to access" in self.gitOutput) or \
("fatal: The remote end hung up unexpectedly" in self.gitOutput) \
else False
self.cwd = cwd
def __getinitargs__(self):
return (self.msg, self.code, self.gitOutput, self.gitCommand, self.cwd)
def __str__(self):
return "\nWORKING DIR: " + self.cwd + "\nCODE: " + str(self.code) + '\nCMD: ' + self.gitCommand + '\nOUTPUT: ' + self.gitOutput
def __repr__(self):
return self.__str__()
def gitcmd(cmd, errmsg):
_cmd = None
try:
cnfg = grapeConfig.grapeConfig()
_cmd = cnfg.get("git", "executable")
except ConfigParser.NoOptionError:
pass
except ConfigParser.NoSectionError:
pass
if _cmd:
_cmd += " %s" % cmd
elif os.name == "nt":
_cmd = "\"C:\\Program Files\\Git\\bin\\git.exe\" %s" % cmd
else:
_cmd = "git %s" % cmd
cwd = os.getcwd()
process = utility.executeSubProcess(_cmd, cwd, verbose=-1)
if process.returncode != 0:
raise GrapeGitError("Error: %s " % errmsg, process.returncode, process.output, _cmd, cwd=cwd)
return process.output.strip()
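# Example (sketch): every wrapper below funnels through gitcmd(), so e.g.
# status("-u --porcelain") runs "git status -u --porcelain" in the current
# directory and raises GrapeGitError with the captured output on a nonzero
# exit code.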
def add(filedescription):
return gitcmd("add %s" % filedescription, "Could not add %s" % filedescription)
def baseDir():
unixStylePath = gitcmd("rev-parse --show-toplevel", "Could not locate base directory")
path = utility.makePathPortable(unixStylePath)
return path
def allBranches():
return branch("-a").replace("*",' ').replace(" ",'').split()
def remoteBranches():
return branch("-r").replace(" ", '').split()
def branch(argstr=""):
return gitcmd("branch %s" % argstr, "Could not execute git branch command")
def branchPrefix(branchName):
return branchName.split('/')[0]
def branchUpToDateWith(branchName, targetBranch):
try:
allUpToDateBranches = gitcmd("branch -a --contains %s" % targetBranch, "branch contains failed")
except GrapeGitError as e:
# Don't fail if the only issue is a dangling reference for origin/HEAD.
allUpToDateBranches = e.gitOutput
allUpToDateBranches = allUpToDateBranches.replace("error: branch 'origin/HEAD' does not point at a commit\n","")
allUpToDateBranches = allUpToDateBranches.replace("error: some refs could not be read\n","")
if "error: " in allUpToDateBranches:
raise e
allUpToDateBranches = allUpToDateBranches.split("\n")
upToDate = False
for b in allUpToDateBranches:
# remove the * prefix from the active branch
cleanB = b.strip()
if b and b[0] == '*':
cleanB = b[1:].strip()
upToDate = cleanB == branchName.strip()
if upToDate:
break
return upToDate
def bundle(argstr):
return gitcmd("bundle %s" % argstr, "Bundle failed")
def checkout(argstr):
return gitcmd("checkout %s" % argstr, "Checkout failed")
def clone(argstr):
try:
return gitcmd("clone %s" % argstr, "Clone failed")
except GrapeGitError as e:
if "already exists and is not an empty directory" in e.gitOutput:
raise e
if e.commError:
print ("GRAPE: WARNING: clone failed due to connectivity issues.")
return e.gitOutput
else:
print ("GRAPE: Clone failed. Maybe you ran out of disk space?")
print(e.gitOutput)
raise e
def commit(argstr):
return gitcmd("commit %s" % argstr, "Commit failed")
def commitDescription(committish):
try:
descr = gitcmd("log --oneline %s^1..%s" % (committish, committish),
"commitDescription failed")
# handle the case when this is called on a 1-commit-long history (occurs mostly in unit testing)
except GrapeGitError as e:
if "unknown revision" in e.gitOutput:
try:
descr = gitcmd("log --oneline %s" % committish, "commitDescription failed")
except GrapeGitError as e:
raise e
return descr
def config(argstr, arg2=None):
if arg2 is not None:
return gitcmd('config %s "%s"' % (argstr, arg2), "Config failed")
else:
return gitcmd('config %s ' % argstr, "Config failed")
def conflictedFiles():
fileStr = diff("--name-only --diff-filter=U").strip()
lines = fileStr.split('\n') if fileStr else []
return lines
def currentBranch():
return gitcmd("rev-parse --abbrev-ref HEAD", "could not determine current branch")
def describe(argstr=""):
return gitcmd("describe %s" % argstr, "could not describe commit")
def diff(argstr):
return gitcmd("diff %s" % argstr, "could not perform diff")
def fetch(repo="", branchArg="", raiseOnCommError=False, warnOnCommError=False):
try:
return gitcmd("fetch %s %s" % (repo, branchArg), "Fetch failed")
except GrapeGitError as e:
if e.commError:
if warnOnCommError:
utility.printMsg("WARNING: could not fetch due to communication error.")
if raiseOnCommError:
raise e
else:
return e.gitOutput
else:
raise e
def getActiveSubmodules():
cwd = os.getcwd()
wsDir = utility.workspaceDir()
os.chdir(wsDir)
if os.name == "nt":
submoduleList = submodule("foreach --quiet \"echo $path\"")
else:
submoduleList = submodule("foreach --quiet \"echo \$path\"")
submoduleList = [] if not submoduleList else submoduleList.split('\n')
submoduleList = [x.strip() for x in submoduleList]
os.chdir(cwd)
return submoduleList
def getAllSubmodules():
subconfig = ConfigParser.ConfigParser()
try:
subconfig.read(os.path.join(baseDir(), ".gitmodules"))
except ConfigParser.ParsingError:
# this is guaranteed to happen due to .gitmodules format incompatibility, but it does
# read section names in successfully, which is all we need
pass
sections = subconfig.sections()
submodules = []
for s in sections:
submodules.append(s.split()[1].split('"')[1])
return submodules
def getAllSubmoduleURLMap():
subconfig = ConfigParser.ConfigParser()
fp = StringIO.StringIO('\n'.join(line.strip() for line in open(os.path.join(baseDir(), ".gitmodules"))))
subconfig.readfp(fp)
sections = subconfig.sections()
submodules = {}
for s in sections:
submodules[subconfig.get(s,"path")] = subconfig.get(s, "url")
return submodules
def getModifiedSubmodules(branch1="", branch2=""):
cwd = os.getcwd()
wsDir = utility.workspaceDir()
os.chdir(wsDir)
submodules = getAllSubmodules()
# if there are no submodules, then return the empty list
if len(submodules) == 0 or (len(submodules) ==1 and not submodules[0]):
return []
submodulesString = ' '.join(submodules)
try:
modifiedSubmodules = diff("--name-only %s %s -- %s" %
(branch1, branch2, submodulesString)).split('\n')
except GrapeGitError as e:
if "bad revision" in e.gitOutput:
utility.printMsg("getModifiedSubmodules: requested difference between one or more branches that do not exist. Assuming no modifications.")
os.chdir(cwd)
return []
# re-raise anything else; otherwise modifiedSubmodules would be unbound below
raise e
if len(modifiedSubmodules) == 1 and not modifiedSubmodules[0]:
return []
# make sure everything in modifiedSubmodules is in the original list of submodules
# (this can not be the case if the module existed as a regular directory / subtree in the other branch,
# in which case the diff command will list the contents of the directory as opposed to just the submodule)
verifiedSubmodules = []
for s in modifiedSubmodules:
if s in submodules:
verifiedSubmodules.append(s)
os.chdir(cwd)
return verifiedSubmodules
def gitDir():
base = baseDir()
gitPath = os.path.join(base, ".git")
toReturn = None
if os.path.isdir(gitPath):
toReturn = gitPath
elif os.path.isfile(gitPath):
with open(gitPath) as f:
line = f.read()
words = line.split()
if words[0] == 'gitdir:':
relUnixPath = words[1]
toReturn = utility.makePathPortable(relUnixPath)
else:
raise GrapeGitError("print .git file does not have gitdir: prefix as expected", 1, "", "grape gitDir()")
return toReturn
def hasBranch(b):
branches = branch().split()
return b in branches
def isWorkingDirectoryClean(printOutput=False):
statusOutput = status("-u --porcelain")
toRet = len(statusOutput.strip()) == 0
if (printOutput and not toRet):
print os.getcwd()+":"
print statusOutput
return toRet
def log(args=""):
return gitcmd("log %s" % args, "git log failed")
def merge(args):
return gitcmd("merge %s" % args, "merge failed")
def mergeAbort():
return gitcmd("merge --abort", "Merge abort failed")
def numberCommitsSince(commitStr):
strCount = gitcmd("rev-list --count %s..HEAD" % commitStr, "Rev-list failed")
return int(strCount)
def numberCommitsSinceRoot():
root = gitcmd("rev-list --max-parents=0 HEAD", "rev-list failed")
return numberCommitsSince(root)
def pull(args, throwOnFail=False):
try:
return gitcmd("pull %s" % args, "Pull failed")
except GrapeGitError as e:
if e.commError:
utility.printMsg("WARNING: Pull failed due to connectivity issues.")
if throwOnFail:
raise e
else:
return e.gitOutput
else:
raise e
def push(args, throwOnFail = False):
try:
return gitcmd("push --porcelain %s" % args, "Push failed")
except GrapeGitError as e:
if e.commError:
utility.printMsg("WARNING: Push failed due to connectivity issues.")
if throwOnFail:
raise e
else:
return e.gitOutput
else:
raise e
def rebase(args):
return gitcmd("rebase %s" % args, "Rebase failed")
def reset(args):
return gitcmd("reset %s" % args, "Reset failed")
def revert(args):
return gitcmd("revert %s" % args, "Revert failed")
def rm(args):
return gitcmd("rm %s" % args, "Remove failed")
def safeForceBranchToOriginRef(branchToSync):
# first, check to see that branch exists
branchExists = False
remoteRefExists = False
branches = branch("-a").split("\n")
remoteRef = "remotes/origin/%s" % branchToSync
for b in branches:
b = b.replace('*', '')
branchExists = branchExists or b.strip() == branchToSync.strip()
remoteRefExists = remoteRefExists or b.strip() == remoteRef.strip()
if branchExists and remoteRefExists:
# both refs found; no need to scan the remaining branches
break
if branchExists and not remoteRefExists:
utility.printMsg("origin does not have branch %s" % branchToSync)
return False
if branchExists and remoteRefExists:
remoteUpToDateWithLocal = branchUpToDateWith(remoteRef, branchToSync)
localUpToDateWithRemote = branchUpToDateWith(branchToSync, remoteRef)
if remoteUpToDateWithLocal and not localUpToDateWithRemote:
if branchToSync == currentBranch():
utility.printMsg("Current branch %s is out of date with origin. Pulling new changes." % branchToSync)
try:
pull("origin %s" % branchToSync, throwOnFail=True)
except GrapeGitError:
utility.printMsg("Can't pull %s. Aborting..." % branchToSync)
return False
else:
branch("-f %s %s" % (branchToSync, remoteRef))
return True
elif remoteUpToDateWithLocal and localUpToDateWithRemote:
return True
else:
return False
if not branchExists and remoteRefExists:
utility.printMsg("local branch did not exist. Creating %s off of %s now. " % (branchToSync, remoteRef))
branch("%s %s" % (branchToSync, remoteRef))
return True
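# Decision summary for safeForceBranchToOriginRef, derived from the code
# above: fast-forward (or pull) the local branch when origin is strictly
# ahead, succeed immediately when both refs contain each other, create the
# local branch when only the remote ref exists, and return False when the
# branches have diverged or origin lacks the branch.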
def shortSHA(branchName="HEAD"):
return gitcmd("rev-parse --short %s" % branchName, "rev-parse of %s failed!" % branchName)
def SHA(branchName="HEAD"):
return gitcmd("rev-parse %s" % branchName, "rev-parse of %s failed!" % branchName)
def showRemote():
try:
return gitcmd("remote show origin", "unable to show remote")
except GrapeGitError as e:
if e.code == 128:
utility.printMsg("WARNING: %s failed. Ignoring..." % e.gitCommand)
return e.gitOutput
else:
raise e
def stash(argstr=""):
return gitcmd("stash %s" % argstr, "git stash failed for some reason")
def status(argstr=""):
return gitcmd("status %s" % argstr, "git status failed for some reason")
def submodule(argstr):
return gitcmd("submodule %s" % argstr, "git submodule %s failed" % argstr)
def subtree(argstr):
return gitcmd("subtree %s" % argstr, "git subtree %s failed - maybe subtree isn't installed on your system?" % argstr)
def tag(argstr):
return gitcmd("tag %s" % argstr, "git tag %s failed" % argstr)
def version():
return gitcmd("version", "")
| |
from ciw.auxiliary import *
from itertools import cycle
import copy
from operator import add, mul, sub, truediv
from random import (expovariate, uniform, triangular, gammavariate,
lognormvariate, weibullvariate)
class Distribution(object):
"""
A general distribution from which all other distributions will inherit.
"""
def __repr__(self):
return 'Distribution'
def sample(self, t=None, ind=None):
pass
def _sample(self, t=None, ind=None):
"""
Performs validity checks before sampling.
"""
s = self.sample(t=t, ind=ind)
if (isinstance(s, float) or isinstance(s, int)) and s >= 0:
return s
else:
raise ValueError('Invalid time sampled.')
def __add__(self, dist):
"""
Add two distributions such that sampling is the sum of the samples.
"""
return CombinedDistribution(self, dist, add)
def __sub__(self, dist):
"""
Subtract two distributions such that sampling is the difference of the samples.
"""
return CombinedDistribution(self, dist, sub)
def __mul__(self, dist):
"""
Multiply two distributions such that sampling is the product of the samples.
"""
return CombinedDistribution(self, dist, mul)
def __truediv__(self, dist):
"""
Divide two distributions such that sampling is the ratio of the samples.
"""
return CombinedDistribution(self, dist, truediv)
class CombinedDistribution(Distribution):
"""
A distribution that combines the samples of two other distributions, `dist1`
and `dist2`, using `operator`.
"""
def __init__(self, dist1, dist2, operator):
self.d1 = copy.deepcopy(dist1)
self.d2 = copy.deepcopy(dist2)
self.operator = operator
def __repr__(self):
return 'CombinedDistribution'
def sample(self, t=None, ind=None):
s1 = self.d1.sample()
s2 = self.d2.sample()
return self.operator(s1, s2)
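# Example (sketch, using classes defined below in this module):
#
#   service = Exponential(rate=2.0) + Deterministic(value=0.5)
#   s = service._sample()  # an Exponential(2.0) draw plus a fixed 0.5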
class Uniform(Distribution):
"""
The Uniform distribution.
Takes:
- `lower` the lower bound
- `upper` the upper bound
"""
def __init__(self, lower, upper):
if lower < 0.0 or upper < 0.0:
raise ValueError('Uniform distribution must sample positive numbers only.')
if upper < lower:
raise ValueError('Uniform distribution upper bound should be >= lower bound.')
self.lower = lower
self.upper = upper
def __repr__(self):
return 'Uniform: {0}, {1}'.format(self.lower, self.upper)
def sample(self, t=None, ind=None):
return uniform(self.lower, self.upper)
class Deterministic(Distribution):
"""
The Deterministic distribution.
Takes:
- `value` the value to return
"""
def __init__(self, value):
if value < 0.0:
raise ValueError('Deterministic distribution must sample positive numbers only.')
self.value = value
def __repr__(self):
return 'Deterministic: {0}'.format(self.value)
def sample(self, t=None, ind=None):
return self.value
class Triangular(Distribution):
"""
The Triangular distribution.
Takes:
- `lower` the lower bound
- `upper` the upper bound
- `mode` the modal value
"""
def __init__(self, lower, mode, upper):
if lower < 0.0 or upper < 0.0 or mode < 0.0:
raise ValueError('Triangular distribution must sample positive numbers only.')
if not lower <= mode <= upper:
raise ValueError('Triangular distribution lower bound must be <= mode must be <= upper bound.')
self.lower = lower
self.mode = mode
self.upper = upper
def __repr__(self):
return 'Triangular: {0}, {1}, {2}'.format(self.lower, self.mode, self.upper)
def sample(self, t=None, ind=None):
return triangular(self.lower, self.upper, self.mode)
class Exponential(Distribution):
"""
The Exponential distribution.
Takes:
- `rate` the rate parameter, lambda
"""
def __init__(self, rate):
if rate <= 0.0:
raise ValueError('Exponential distribution must sample positive numbers only.')
self.rate = rate
def __repr__(self):
return 'Exponential: {0}'.format(self.rate)
def sample(self, t=None, ind=None):
return expovariate(self.rate)
class Gamma(Distribution):
"""
The Gamma distribution.
Takes:
- `shape` the shape parameter, alpha
- `scale` the scale parameter, beta
"""
def __init__(self, shape, scale):
self.shape = shape
self.scale = scale
def __repr__(self):
return 'Gamma: {0}, {1}'.format(self.shape, self.scale)
def sample(self, t=None, ind=None):
return gammavariate(self.shape, self.scale)
class Normal(Distribution):
"""
The Truncated Normal distribution.
Takes:
- `mean` the mean of the Normal, mu
- `sd` the standard deviation of the Normal, sigma
"""
def __init__(self, mean, sd):
self.mean = mean
self.sd = sd
def __repr__(self):
return 'Normal: {0}, {1}'.format(self.mean, self.sd)
def sample(self, t=None, ind=None):
return truncated_normal(self.mean, self.sd)
class Lognormal(Distribution):
"""
The Lognormal distribution.
Takes:
- `mean` the mean of the Normal, mu
- `sd` the standard deviation of the Normal, sigma
"""
def __init__(self, mean, sd):
self.mean = mean
self.sd = sd
def __repr__(self):
return 'Lognormal: {0}, {1}'.format(self.mean, self.sd)
def sample(self, t=None, ind=None):
return lognormvariate(self.mean, self.sd)
class Weibull(Distribution):
"""
The Weibull distribution.
Takes:
- `scale` the scale parameter, alpha
- `shape` the shape parameter, beta
"""
def __init__(self, scale, shape):
self.scale = scale
self.shape = shape
def __repr__(self):
return 'Weibull: {0}, {1}'.format(self.scale, self.shape)
def sample(self, t=None, ind=None):
return weibullvariate(self.scale, self.shape)
class Empirical(Distribution):
"""
The Empirical distribution.
Takes:
- `observations` the observations from which to sample
"""
def __init__(self, observations):
if any(o < 0 for o in observations):
raise ValueError('Empirical distribution must sample positive numbers only.')
self.observations = observations
def __repr__(self):
return 'Empirical'
def sample(self, t=None, ind=None):
return random_choice(self.observations)
class Sequential(Distribution):
"""
The Sequential distribution.
Takes:
- `sequence` the sequence to cycle through
"""
def __init__(self, sequence):
if any(o < 0 for o in sequence):
raise ValueError('Sequential distribution must sample positive numbers only.')
self.sequence = sequence
self.generator = cycle(self.sequence)
def __repr__(self):
return 'Sequential'
def sample(self, t=None, ind=None):
return next(self.generator)
class Pmf(Distribution):
"""
A distribution defined by a probability mass function (pmf).
Takes:
- `values` the values to sample
- `probs` the associated probabilities
"""
def __init__(self, values, probs):
if any(o < 0 for o in values):
raise ValueError('Pmf must sample positive numbers only.')
if any(p < 0 or p > 1.0 for p in probs):
raise ValueError('Pmf must have valid probabilities.')
if sum(probs) != 1.0:
raise ValueError('Pmf probabilities must sum to 1.0.')
self.values = values
self.probs = probs
def __repr__(self):
return 'Pmf'
def sample(self, t=None, ind=None):
return random_choice(self.values, self.probs)
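# Example (illustrative): Pmf(values=[1.0, 2.0], probs=[0.3, 0.7]) returns
# 1.0 with probability 0.3 and 2.0 with probability 0.7 on each sample()
# call, via random_choice.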
class NoArrivals(Distribution):
"""
A placeholder distribution if there are no arrivals.
"""
def __repr__(self):
return 'NoArrivals'
def sample(self, t=None, ind=None):
return float('Inf')
| |
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import re
import shlex
import threading
import time
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _LE, _LW
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
GETSTORAGEARRAY_ONCE = 100
MAX_SNAPSHOT_COUNT = 1021
SNAP_LAST_PATH_SSB = '0xB958,0x020A'
HOST_IO_SSB = '0xB958,0x0233'
INVALID_LUN_SSB = '0x2E20,0x0000'
INTERCEPT_LDEV_SSB = '0x2E22,0x0001'
HOSTGROUP_INSTALLED = '0xB956,0x3173'
LDEV_STATUS_WAITTIME = 120
LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME
LUN_DELETE_INTERVAL = 3
EXEC_MAX_WAITTIME = 30
EXEC_RETRY_INTERVAL = 5
HORCM_WAITTIME = 1
PAIR_TYPE = ('HORC', 'MRCF', 'QS')
PERMITTED_TYPE = ('CVS', 'HDP', 'HDT')
RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_'
HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_'
RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_'
STATUS_TABLE = {
'SMPL': basic_lib.SMPL,
'COPY': basic_lib.COPY,
'RCPY': basic_lib.COPY,
'PAIR': basic_lib.PAIR,
'PFUL': basic_lib.PAIR,
'PSUS': basic_lib.PSUS,
'PFUS': basic_lib.PSUS,
'SSUS': basic_lib.PSUS,
'PSUE': basic_lib.PSUE,
}
NOT_SET = '-'
HORCM_RUNNING = 1
COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d'
SNAP_NAME = basic_lib.NAME_PREFIX + 'snap'
LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d'
MAX_MUNS = 3
EX_ENAUTH = 202
EX_ENOOBJ = 205
EX_CMDRJE = 221
EX_CMDIOE = 237
EX_INVCMD = 240
EX_INVMOD = 241
EX_ENODEV = 246
EX_ENOENT = 247
EX_OPTINV = 248
EX_ATTDBG = 250
EX_ATTHOR = 251
EX_COMERR = 255
EX_UNKOWN = -1
NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT)
COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV)
HORCM_ERROR = (EX_ATTDBG, EX_ATTHOR, EX_COMERR)
MAX_HOSTGROUPS = 254
MAX_HLUN = 2047
DEFAULT_PORT_BASE = 31000
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('hitachi_horcm_numbers',
default='200,201',
help='Instance numbers for HORCM'),
cfg.StrOpt('hitachi_horcm_user',
default=None,
help='Username of storage system for HORCM'),
cfg.StrOpt('hitachi_horcm_password',
default=None,
help='Password of storage system for HORCM',
secret=True),
cfg.BoolOpt('hitachi_horcm_add_conf',
default=True,
help='Add to HORCM configuration'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
def horcm_synchronized(function):
@functools.wraps(function)
def wrapper(*args, **kargs):
if len(args) == 1:
inst = args[0].conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = args[0].raidcom_lock
else:
inst = args[1]
raidcom_obj_lock = args[0].raidcom_pair_lock
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with raidcom_obj_lock, lock:
return function(*args, **kargs)
return wrapper
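# Note on the synchronization decorators above and below: each stacks a
# per-object threading.Lock (serializing threads within one cinder process)
# with a file-based lock from basic_lib.get_process_lock (serializing
# separate processes that share the same HORCM instance or storage serial).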
def storage_synchronized(function):
@functools.wraps(function)
def wrapper(*args, **kargs):
serial = args[0].conf.hitachi_serial_number
resource_lock = args[0].resource_lock
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
lock = basic_lib.get_process_lock(resource_lock_file)
with resource_lock, lock:
return function(*args, **kargs)
return wrapper
class HBSDHORCM(basic_lib.HBSDBasicLib):
def __init__(self, conf):
super(HBSDHORCM, self).__init__(conf=conf)
self.copy_groups = [None] * MAX_MUNS
self.raidcom_lock = threading.Lock()
self.raidcom_pair_lock = threading.Lock()
self.horcmgr_lock = threading.Lock()
self.horcmgr_flock = None
self.resource_lock = threading.Lock()
def check_param(self):
numbers = self.conf.hitachi_horcm_numbers.split(',')
if len(numbers) != 2:
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
for i in numbers:
if not i.isdigit():
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
self.conf.hitachi_horcm_numbers = map(int, numbers)
inst = self.conf.hitachi_horcm_numbers[0]
pair_inst = self.conf.hitachi_horcm_numbers[1]
if inst == pair_inst:
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
for param in ('hitachi_horcm_user', 'hitachi_horcm_password'):
if not getattr(self.conf, param):
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id:
msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
raise exception.HBSDError(message=msg)
for opt in volume_opts:
getattr(self.conf, opt.name)
def set_copy_groups(self, host_ip):
serial = self.conf.hitachi_serial_number
inst = self.conf.hitachi_horcm_numbers[1]
for mun in range(MAX_MUNS):
copy_group = COPY_GROUP % (host_ip, serial, inst, mun)
self.copy_groups[mun] = copy_group
def set_pair_flock(self):
inst = self.conf.hitachi_horcm_numbers[1]
name = '%s%d' % (HORCMGR_LOCK_FILE, inst)
self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock)
return self.horcmgr_flock
def check_horcm(self, inst):
args = 'HORCMINST=%d horcmgr -check' % inst
ret, _stdout, _stderr = self.exec_command('env', args=args,
printflag=False)
return ret
def shutdown_horcm(self, inst):
ret, stdout, stderr = self.exec_command(
'horcmshutdown.sh', args=six.text_type(inst), printflag=False)
return ret
def start_horcm(self, inst):
return self.exec_command('horcmstart.sh', args=six.text_type(inst),
printflag=False)
def _wait_for_horcm_shutdown(self, inst):
if self.check_horcm(inst) != HORCM_RUNNING:
raise loopingcall.LoopingCallDone()
if self.shutdown_horcm(inst):
LOG.error(_LE("Failed to shutdown horcm."))
raise loopingcall.LoopingCallDone()
@horcm_synchronized
def restart_horcm(self, inst=None):
if inst is None:
inst = self.conf.hitachi_horcm_numbers[0]
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_horcm_shutdown, inst)
loop.start(interval=HORCM_WAITTIME).wait()
ret, stdout, stderr = self.start_horcm(inst)
if ret:
msg = basic_lib.output_err(
600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def restart_pair_horcm(self):
inst = self.conf.hitachi_horcm_numbers[1]
self.restart_horcm(inst=inst)
def setup_horcmgr(self, host_ip):
pair_inst = self.conf.hitachi_horcm_numbers[1]
self.set_copy_groups(host_ip)
if self.conf.hitachi_horcm_add_conf:
self.create_horcmconf()
self.create_horcmconf(inst=pair_inst)
self.restart_horcm()
with self.horcmgr_flock:
self.restart_pair_horcm()
ret, stdout, stderr = self.comm_login()
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def _wait_for_exec_horcm(self, cmd, args, printflag, start):
if cmd == 'raidcom':
serial = self.conf.hitachi_serial_number
inst = self.conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = self.raidcom_lock
args = '%s -s %s -I%d' % (args, serial, inst)
else:
inst = self.conf.hitachi_horcm_numbers[1]
raidcom_obj_lock = self.raidcom_pair_lock
args = '%s -ISI%d' % (args, inst)
user = self.conf.hitachi_horcm_user
passwd = self.conf.hitachi_horcm_password
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with raidcom_obj_lock, lock:
ret, stdout, stderr = self.exec_command(cmd, args=args,
printflag=printflag)
if not ret or ret <= 127:
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if time.time() - start >= EXEC_MAX_WAITTIME:
LOG.error(_LE("horcm command timeout."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if (ret == EX_ENAUTH and
not re.search("-login %s %s" % (user, passwd), args)):
_ret, _stdout, _stderr = self.comm_login()
if _ret:
LOG.error(_LE("Failed to authenticate user."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
elif ret in HORCM_ERROR:
_ret = 0
with raidcom_obj_lock, lock:
if self.check_horcm(inst) != HORCM_RUNNING:
_ret, _stdout, _stderr = self.start_horcm(inst)
if _ret and _ret != HORCM_RUNNING:
LOG.error(_LE("Failed to start horcm."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
elif ret not in COMMAND_IO_TO_RAID:
LOG.error(_LE("Unexpected error occurs in horcm."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
def exec_raidcom(self, cmd, args, printflag=True):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_exec_horcm, cmd, args, printflag, time.time())
return loop.start(interval=EXEC_RETRY_INTERVAL).wait()
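# Illustrative use of the retry wrapper above (hypothetical LDEV id): every
# raidcom/pair command in this class funnels through exec_raidcom so that
# transient HORCM failures are retried every EXEC_RETRY_INTERVAL seconds,
# up to EXEC_MAX_WAITTIME:
#   ret, stdout, stderr = self.exec_raidcom('raidcom', 'get ldev -ldev_id 100')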
def comm_login(self):
rmi_user = self.conf.hitachi_horcm_user
rmi_pass = self.conf.hitachi_horcm_password
args = '-login %s %s' % (rmi_user, rmi_pass)
return self.exec_raidcom('raidcom', args, printflag=False)
def comm_lock(self):
ret, _stdout, stderr = self.exec_raidcom('raidcom', 'lock resource')
if ret:
msg = basic_lib.output_err(
603, serial=self.conf.hitachi_serial_number,
inst=self.conf.hitachi_horcm_numbers[0], ret=ret, err=stderr)
raise exception.HBSDError(message=msg)
def comm_unlock(self):
self.exec_raidcom('raidcom', 'unlock resource')
def comm_reset_status(self):
self.exec_raidcom('raidcom', 'reset command_status')
def comm_get_status(self):
return self.exec_raidcom('raidcom', 'get command_status')
def get_command_error(self, stdout):
lines = stdout.splitlines()
line = shlex.split(lines[1])
return int(line[3])
def comm_get_ldev(self, ldev):
opt = 'get ldev -ldev_id %s' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def add_used_hlun(self, port, gid, used_list):
opt = 'get lun -port %s-%d' % (port, gid)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
lun = int(shlex.split(line)[3])
if lun not in used_list:
used_list.append(lun)
def get_unused_ldev(self, ldev_range):
start = ldev_range[0]
end = ldev_range[1]
while start < end:
cnt = min(GETSTORAGEARRAY_ONCE, end - start + 1)
opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
ldev_num = None
for line in lines:
if re.match("LDEV :", line):
ldev_num = int(shlex.split(line)[2])
continue
if re.match("VOL_TYPE : NOT DEFINED", line):
return ldev_num
start += GETSTORAGEARRAY_ONCE
else:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(message=msg)
def get_hgname_gid(self, port, host_grp_name):
opt = 'get host_grp -port %s -key host_grp' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[2] == host_grp_name:
return int(line[1])
return None
def get_unused_gid(self, range, port):
_min = range[0]
_max = range[1]
opt = 'get host_grp -port %s -key host_grp' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
free_gid = None
for line in lines[_min + 1:]:
line = shlex.split(line)
if int(line[1]) > _max:
break
if line[2] == '-':
free_gid = int(line[1])
break
if free_gid is None:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return free_gid
def comm_set_target_wwns(self, target_ports):
opt = 'get port'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
target_wwns = {}
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
port = line[0][:5]
if target_ports and port not in target_ports:
continue
target_wwns[port] = line[10]
LOG.debug('target wwns: %s' % target_wwns)
return target_wwns
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
opt = 'get host_grp -port %s' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
found_wwns = 0
for line in lines[1:]:
line = shlex.split(line)
if not re.match(basic_lib.NAME_PREFIX, line[2]):
continue
gid = line[1]
opt = 'get hba_wwn -port %s-%s' % (port, gid)
ret, stdout, stderr = self.exec_raidcom(
'raidcom', opt, printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
hba_info = shlex.split(line)
if hba_info[3] in wwns:
hostgroups.append({'port': six.text_type(port),
'gid': int(hba_info[1]),
'initiator_wwn': hba_info[3],
'detected': is_detected})
found_wwns += 1
if len(wwns) == found_wwns:
break
if len(wwns) == found_wwns:
break
def comm_chk_login_wwn(self, wwns, port):
opt = 'get port -port %s' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
login_info = shlex.split(line)
if login_info[1] in wwns:
return True
else:
return False
def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
security_ports = []
hostgroups = []
opt = 'get port'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
port = line[0][:5]
if target_ports and port not in target_ports:
continue
security = line[7] == 'Y'
is_detected = None
if login:
is_detected = self.comm_chk_login_wwn(wwns, port)
if security:
self.comm_get_hbawwn(hostgroups, wwns, port, is_detected)
security_ports.append(port)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
def _get_lun(self, port, gid, ldev):
lun = None
opt = 'get lun -port %s-%d' % (port, gid)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[5] == six.text_type(ldev):
lun = int(line[3])
break
return lun
def _wait_for_delete_lun(self, hostgroup, ldev, start):
opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'],
hostgroup['gid'], ldev)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if not ret:
raise loopingcall.LoopingCallDone()
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
not self.comm_get_snapshot(ldev) or
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
msg = basic_lib.set_msg(310, ldev=ldev, reason=stderr)
LOG.warning(msg)
if time.time() - start >= LUN_DELETE_WAITTIME:
msg = basic_lib.output_err(
637, method='_wait_for_delete_lun',
timeout=LUN_DELETE_WAITTIME)
raise exception.HBSDError(message=msg)
else:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_lun_core(self, hostgroup, ldev):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_delete_lun, hostgroup, ldev, time.time())
loop.start(interval=LUN_DELETE_INTERVAL).wait()
@storage_synchronized
def comm_delete_lun(self, hostgroups, ldev):
try:
deleted_hostgroups = []
self.comm_lock()
no_ldev_cnt = 0
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
is_deleted = False
for deleted in deleted_hostgroups:
if port == deleted['port'] and gid == deleted['gid']:
is_deleted = True
if is_deleted:
continue
try:
self.comm_delete_lun_core(hostgroup, ldev)
except exception.HBSDCmdError as ex:
no_ldev_cnt += 1
if ex.ret == EX_ENOOBJ:
if no_ldev_cnt != len(hostgroups):
continue
raise exception.HBSDNotFound
else:
raise
deleted_hostgroups.append({'port': port, 'gid': gid})
finally:
self.comm_unlock()
def _check_ldev_status(self, ldev, status):
opt = ('get ldev -ldev_id %s -check_status %s -time %s' %
(ldev, status, LDEV_STATUS_WAITTIME))
ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt)
return ret
@storage_synchronized
def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
emulation = 'OPEN-V'
if is_vvol:
opt = ('add ldev -pool snap -ldev_id %d '
'-capacity %dG -emulation %s'
% (ldev, capacity, emulation))
else:
opt = ('add ldev -pool %d -ldev_id %d '
'-capacity %dG -emulation %s'
% (pool_id, ldev, capacity, emulation))
try:
self.comm_lock()
self.comm_reset_status()
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr):
raise exception.HBSDNotFound
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
if self._check_ldev_status(ldev, "NML"):
msg = basic_lib.output_err(653, ldev=ldev)
raise exception.HBSDError(message=msg)
finally:
self.comm_unlock()
@storage_synchronized
def comm_add_hostgrp(self, port, gid, host_grp_name):
opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid,
host_grp_name)
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr):
raise exception.HBSDNotFound
else:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(
message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
@storage_synchronized
def comm_del_hostgrp(self, port, gid, host_grp_name):
opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name)
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
@storage_synchronized
def comm_add_hbawwn(self, port, gid, wwn):
opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn)
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
@storage_synchronized
def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False):
tmp_hostgroups = hostgroups[:]
is_ok = False
used_list = []
lun = None
old_lun = None
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
self.add_used_hlun(port, gid, used_list)
lun = self._get_lun(port, gid, ldev)
# A LUN of 0 is valid, so 'lun' and 'old_lun' must be compared against
# None explicitly; plain truthiness would misread LUN 0 as "no LUN".
if lun is not None:
if old_lun is not None and old_lun != lun:
msg = basic_lib.output_err(648, resource='LUN (HLUN)')
raise exception.HBSDError(message=msg)
is_ok = True
hostgroup['lun'] = lun
tmp_hostgroups.remove(hostgroup)
old_lun = lun
if is_once:
# A LUN of 0 is valid, so the explicit None check is required here too.
if lun is not None:
return
elif len(used_list) < MAX_HLUN + 1:
break
else:
tmp_hostgroups.remove(hostgroup)
if tmp_hostgroups:
used_list = []
if not used_list:
lun = 0
elif lun is None:
for i in range(MAX_HLUN + 1):
if i not in used_list:
lun = i
break
else:
raise exception.HBSDNotFound
opt = None
ret = 0
stdout = None
stderr = None
invalid_hgs_str = None
try:
self.comm_lock()
for hostgroup in tmp_hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
if not hostgroup['detected']:
if invalid_hgs_str:
invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
port, gid)
else:
invalid_hgs_str = '%s:%d' % (port, gid)
continue
opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % (
port, gid, ldev, lun)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if not ret:
is_ok = True
hostgroup['lun'] = lun
if is_once:
break
else:
msg = basic_lib.set_msg(
314, ldev=ldev, lun=lun, port=port, id=gid)
LOG.warning(msg)
finally:
self.comm_unlock()
if not is_ok:
if stderr:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
else:
msg = basic_lib.output_err(659, gid=invalid_hgs_str)
raise exception.HBSDError(message=msg)
@storage_synchronized
def comm_delete_ldev(self, ldev, is_vvol):
ret = -1
stdout = ""
stderr = ""
try:
self.comm_lock()
self.comm_reset_status()
opt = 'delete ldev -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % INVALID_LUN_SSB, stderr):
raise exception.HBSDNotFound
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
ret, stdout, stderr = self.comm_get_status()
if ret or self.get_command_error(stdout):
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
@storage_synchronized
def comm_extend_ldev(self, ldev, old_size, new_size):
ret = -1
stdout = ""
stderr = ""
extend_size = new_size - old_size
try:
self.comm_lock()
self.comm_reset_status()
opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
ret, stdout, stderr = self.comm_get_status()
if ret or self.get_command_error(stdout):
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
def comm_get_dp_pool(self, pool_id):
opt = 'get dp_pool'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
if int(shlex.split(line)[0]) == pool_id:
free_gb = int(shlex.split(line)[3]) / 1024
total_gb = int(shlex.split(line)[4]) / 1024
return total_gb, free_gb
msg = basic_lib.output_err(640, pool_id=pool_id)
raise exception.HBSDError(message=msg)
@storage_synchronized
def comm_modify_ldev(self, ldev):
args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.set_msg(315, ldev=ldev, reason=stderr)
LOG.warning(msg)
finally:
self.comm_unlock()
def is_detected(self, port, wwn):
return self.comm_chk_login_wwn([wwn], port)
def discard_zero_page(self, ldev):
try:
self.comm_modify_ldev(ldev)
except Exception as e:
LOG.warning(_LW('Failed to discard zero page: %s') %
six.text_type(e))
@storage_synchronized
def comm_add_snapshot(self, pvol, svol):
pool = self.conf.hitachi_thin_pool_id
copy_size = self.conf.hitachi_copy_speed
args = ('add snapshot -ldev_id %d %d -pool %d '
'-snapshot_name %s -copy_size %d'
% (pvol, svol, pool, SNAP_NAME, copy_size))
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
@storage_synchronized
def comm_delete_snapshot(self, ldev):
args = 'delete snapshot -ldev_id %d' % ldev
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
@storage_synchronized
def comm_modify_snapshot(self, ldev, op):
args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op))
try:
self.comm_lock()
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
finally:
self.comm_unlock()
def _wait_for_snap_status(self, pvol, svol, status, timeout, start):
if (self.get_snap_pvol_status(pvol, svol) in status and
self.get_snap_svol_status(svol) in status):
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
637, method='_wait_for_snap_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def wait_snap(self, pvol, svol, status, timeout, interval):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_snap_status, pvol,
svol, status, timeout, time.time())
loop.start(interval=interval).wait()
def comm_get_snapshot(self, ldev):
args = 'get snapshot -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def check_snap_count(self, ldev):
stdout = self.comm_get_snapshot(ldev)
if not stdout:
return
lines = stdout.splitlines()
if len(lines) >= MAX_SNAPSHOT_COUNT + 1:
msg = basic_lib.output_err(
615, copy_method=basic_lib.THIN, pvol=ldev)
raise exception.HBSDBusy(message=msg)
def get_snap_pvol_status(self, pvol, svol):
stdout = self.comm_get_snapshot(pvol)
if not stdout:
return basic_lib.SMPL
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if int(line[6]) == svol:
return STATUS_TABLE[line[2]]
else:
return basic_lib.SMPL
def get_snap_svol_status(self, ldev):
stdout = self.comm_get_snapshot(ldev)
if not stdout:
return basic_lib.SMPL
lines = stdout.splitlines()
line = shlex.split(lines[1])
return STATUS_TABLE[line[2]]
@horcm_synchronized
def create_horcmconf(self, inst=None):
if inst is None:
inst = self.conf.hitachi_horcm_numbers[0]
serial = self.conf.hitachi_serial_number
filename = '/etc/horcm%d.conf' % inst
port = DEFAULT_PORT_BASE + inst
found = False
if not os.path.exists(filename):
file_str = """
HORCM_MON
#ip_address service poll(10ms) timeout(10ms)
127.0.0.1 %16d 6000 3000
HORCM_CMD
""" % port
else:
file_str = utils.read_file_as_root(filename)
lines = file_str.splitlines()
for line in lines:
if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line):
found = True
break
if not found:
insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial
file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)',
r'\1\n%s\n' % insert_str, file_str)
try:
utils.execute('tee', filename, process_input=file_str,
run_as_root=True)
except putils.ProcessExecutionError as ex:
msg = basic_lib.output_err(
632, file=filename, ret=ex.exit_code, err=ex.stderr)
raise exception.HBSDError(message=msg)
def comm_get_copy_grp(self):
ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp',
printflag=False)
if ret:
opt = 'raidcom get copy_grp'
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun):
args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d'
% (copy_group, pvol_group, svol_group, mun))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_copy_grp(self, copy_group):
args = 'delete copy_grp -copy_grp_name %s' % copy_group
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_device_grp(self, group_name):
args = 'get device_grp -device_grp_name %s' % group_name
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def comm_add_device_grp(self, group_name, ldev_name, ldev):
args = ('add device_grp -device_grp_name %s %s -ldev_id %d'
% (group_name, ldev_name, ldev))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_device_grp(self, group_name, ldev):
args = ('delete device_grp -device_grp_name %s -ldev_id %d'
% (group_name, ldev))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_paircreate(self, copy_group, ldev_name):
args = ('-g %s -d %s -split -fq quick -c %d -vl'
% (copy_group, ldev_name, self.conf.hitachi_copy_speed))
ret, stdout, stderr = self.exec_raidcom('paircreate', args)
if ret:
opt = 'paircreate %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_pairsplit(self, copy_group, ldev_name):
args = '-g %s -d %s -S' % (copy_group, ldev_name)
ret, stdout, stderr = self.exec_raidcom('pairsplit', args)
if ret:
opt = 'pairsplit %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_pairevtwait(self, copy_group, ldev_name, check_svol):
if not check_svol:
option = '-nowait'
else:
option = '-nowaits'
args = '-g %s -d %s %s' % (copy_group, ldev_name, option)
ret, stdout, stderr = self.exec_raidcom('pairevtwait', args,
printflag=False)
if ret > 127:
opt = 'pairevtwait %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret
def comm_pairdisplay(self, copy_group, ldev_name=None):
if not ldev_name:
args = '-g %s -CLI' % copy_group
else:
args = '-g %s -d %s -CLI' % (copy_group, ldev_name)
ret, stdout, stderr = self.exec_raidcom('pairdisplay', args,
printflag=False)
if ret and ret not in NO_SUCH_DEVICE:
opt = 'pairdisplay %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret, stdout, stderr
def check_copy_grp(self, copy_group):
stdout = self.comm_get_copy_grp()
lines = stdout.splitlines()
count = 0
for line in lines[1:]:
line = shlex.split(line)
if line[0] == copy_group:
count += 1
if count == 2:
break
return count
def check_device_grp(self, group_name, ldev, ldev_name=None):
stdout = self.comm_get_device_grp(group_name)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if int(line[2]) == ldev:
if not ldev_name:
return True
else:
return line[1] == ldev_name
else:
return False
def is_smpl(self, copy_group, ldev_name):
ret, stdout, stderr = self.comm_pairdisplay(copy_group,
ldev_name=ldev_name)
if not stdout:
return True
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[9] in [NOT_SET, 'SMPL']:
return True
else:
return False
def get_copy_groups(self):
copy_groups = []
stdout = self.comm_get_copy_grp()
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[0] in self.copy_groups and line[0] not in copy_groups:
copy_groups.append(line[0])
return copy_groups
def get_matched_copy_group(self, pvol, svol, ldev_name):
for copy_group in self.get_copy_groups():
pvol_group = '%sP' % copy_group
if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
return copy_group
else:
return None
def get_paired_info(self, ldev, only_flag=False):
paired_info = {'pvol': None, 'svol': []}
pvol = None
is_svol = False
stdout = self.comm_get_snapshot(ldev)
if stdout:
lines = stdout.splitlines()
line = shlex.split(lines[1])
status = STATUS_TABLE.get(line[2], basic_lib.UNKN)
if line[1] == 'P-VOL':
pvol = ldev
svol = int(line[6])
else:
is_svol = True
pvol = int(line[6])
svol = ldev
if status == basic_lib.PSUS:
status = self.get_snap_pvol_status(pvol, svol)
svol_info = {'lun': svol, 'status': status, 'is_vvol': True}
paired_info['svol'].append(svol_info)
paired_info['pvol'] = pvol
if only_flag or is_svol:
return paired_info
for copy_group in self.get_copy_groups():
ldev_name = None
pvol_status = basic_lib.UNKN
svol_status = basic_lib.UNKN
ret, stdout, stderr = self.comm_pairdisplay(copy_group)
if not stdout:
continue
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[9] not in ['P-VOL', 'S-VOL']:
continue
ldev0 = int(line[8])
ldev1 = int(line[12])
if ldev not in [ldev0, ldev1]:
continue
ldev_name = line[1]
if line[9] == 'P-VOL':
pvol = ldev0
svol = ldev1
pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
else:
svol = ldev0
pvol = ldev1
svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
if svol == ldev:
is_svol = True
if not ldev_name:
continue
pvol_group = '%sP' % copy_group
pvol_ok = self.check_device_grp(pvol_group, pvol,
ldev_name=ldev_name)
svol_group = '%sS' % copy_group
svol_ok = self.check_device_grp(svol_group, svol,
ldev_name=ldev_name)
if pvol_ok and svol_ok:
if pvol_status == basic_lib.PSUS:
status = svol_status
else:
status = pvol_status
svol_info = {'lun': svol, 'status': status, 'is_vvol': False}
paired_info['svol'].append(svol_info)
if is_svol:
break
# An LDEV id of 0 is valid for 'pvol', so compare against None
# explicitly rather than relying on truthiness.
if pvol is not None and paired_info['pvol'] is None:
paired_info['pvol'] = pvol
return paired_info
@storage_synchronized
def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun):
pvol_group = '%sP' % copy_group
svol_group = '%sS' % copy_group
try:
self.comm_lock()
self.comm_add_device_grp(pvol_group, ldev_name, pvol)
self.comm_add_device_grp(svol_group, ldev_name, svol)
nr_copy_groups = self.check_copy_grp(copy_group)
if nr_copy_groups == 1:
self.comm_delete_copy_grp(copy_group)
if nr_copy_groups != 2:
self.comm_add_copy_grp(copy_group, pvol_group,
svol_group, mun)
finally:
self.comm_unlock()
@storage_synchronized
def delete_pair_config(self, pvol, svol, copy_group, ldev_name):
pvol_group = '%sP' % copy_group
svol_group = '%sS' % copy_group
try:
self.comm_lock()
if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
self.comm_delete_device_grp(pvol_group, pvol)
if self.check_device_grp(svol_group, svol, ldev_name=ldev_name):
self.comm_delete_device_grp(svol_group, svol)
finally:
self.comm_unlock()
def _wait_for_pair_status(self, copy_group, ldev_name,
status, timeout, check_svol, start):
if self.comm_pairevtwait(copy_group, ldev_name,
check_svol) in status:
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
637, method='_wait_for_pair_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def wait_pair(self, copy_group, ldev_name, status, timeout,
interval, check_svol=False):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_pair_status, copy_group, ldev_name,
status, timeout, check_svol, time.time())
loop.start(interval=interval).wait()
def comm_create_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
interval = self.conf.hitachi_copy_check_interval
if not is_vvol:
restart = False
create = False
ldev_name = LDEV_NAME % (pvol, svol)
mun = 0
for mun in range(MAX_MUNS):
copy_group = self.copy_groups[mun]
pvol_group = '%sP' % copy_group
if not self.check_device_grp(pvol_group, pvol):
break
else:
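# 'for ... else': reached only when every MU number already has a device
# group for this P-VOL, i.e. no mirror unit is free for a new pair.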
msg = basic_lib.output_err(
615, copy_method=basic_lib.FULL, pvol=pvol)
raise exception.HBSDBusy(message=msg)
try:
self.add_pair_config(pvol, svol, copy_group, ldev_name, mun)
self.restart_pair_horcm()
restart = True
self.comm_paircreate(copy_group, ldev_name)
create = True
self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS],
timeout, interval)
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS, basic_lib.COPY],
timeout, interval, check_svol=True)
except Exception:
with excutils.save_and_reraise_exception():
if create:
try:
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS], timeout,
interval)
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS], timeout,
interval, check_svol=True)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
try:
self.comm_pairsplit(copy_group, ldev_name)
self.wait_pair(
copy_group, ldev_name,
[basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
if self.is_smpl(copy_group, ldev_name):
try:
self.delete_pair_config(pvol, svol, copy_group,
ldev_name)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
if restart:
try:
self.restart_pair_horcm()
except Exception as ex:
LOG.warning(_LW('Failed to restart horcm: %s') %
six.text_type(ex))
else:
self.check_snap_count(pvol)
self.comm_add_snapshot(pvol, svol)
try:
self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval)
self.comm_modify_snapshot(svol, 'create')
self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.comm_delete_snapshot(svol)
self.wait_snap(
pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s') %
six.text_type(ex))
def delete_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
interval = self.conf.hitachi_async_copy_check_interval
if not is_vvol:
ldev_name = LDEV_NAME % (pvol, svol)
copy_group = self.get_matched_copy_group(pvol, svol, ldev_name)
if not copy_group:
return
try:
self.comm_pairsplit(copy_group, ldev_name)
self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL],
timeout, interval)
finally:
if self.is_smpl(copy_group, ldev_name):
self.delete_pair_config(pvol, svol, copy_group, ldev_name)
else:
self.comm_delete_snapshot(svol)
self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval)
def comm_raidqry(self):
ret, stdout, stderr = self.exec_command('raidqry', '-h')
if ret:
opt = 'raidqry -h'
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def get_comm_version(self):
stdout = self.comm_raidqry()
lines = stdout.splitlines()
return shlex.split(lines[1])[1]
def output_param_to_log(self, conf):
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
LOG.info('\t%-35s%s' % (opt.name + ': ',
six.text_type(value)))
def create_lock_file(self):
inst = self.conf.hitachi_horcm_numbers[0]
pair_inst = self.conf.hitachi_horcm_numbers[1]
serial = self.conf.hitachi_serial_number
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst)
horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst)
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
basic_lib.create_empty_file(raidcom_lock_file)
basic_lib.create_empty_file(raidcom_pair_lock_file)
basic_lib.create_empty_file(horcmgr_lock_file)
basic_lib.create_empty_file(resource_lock_file)
def connect_storage(self):
properties = utils.brick_get_connector_properties()
self.setup_horcmgr(properties['ip'])
def get_max_hostgroups(self):
"""return the maximum value of hostgroup id."""
return MAX_HOSTGROUPS
def get_hostgroup_luns(self, port, gid):
hlun_list = []
self.add_used_hlun(port, gid, hlun_list)
return hlun_list
def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
param = 'serial_number'
if param not in existing_ref:
msg = basic_lib.output_err(700, param=param)
raise exception.HBSDError(data=msg)
storage = existing_ref.get(param)
if storage != self.conf.hitachi_serial_number:
msg = basic_lib.output_err(648, resource=param)
raise exception.HBSDError(data=msg)
stdout = self.comm_get_ldev(ldev)
if not stdout:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(data=msg)
sts_line = vol_type = ""
vol_attrs = []
size = num_port = 1
lines = stdout.splitlines()
for line in lines:
if line.startswith("STS :"):
sts_line = line
elif line.startswith("VOL_TYPE :"):
vol_type = shlex.split(line)[2]
elif line.startswith("VOL_ATTR :"):
vol_attrs = shlex.split(line)[2:]
elif line.startswith("VOL_Capacity(BLK) :"):
size = int(shlex.split(line)[2])
elif line.startswith("NUM_PORT :"):
num_port = int(shlex.split(line)[2])
if 'NML' not in sts_line:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(data=msg)
if 'OPEN-V' not in vol_type:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
if 'HDP' not in vol_attrs:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
for vol_attr in vol_attrs:
if vol_attr == ':':
continue
if vol_attr in PAIR_TYPE:
msg = basic_lib.output_err(705, ldev=ldev)
raise exception.HBSDError(data=msg)
if vol_attr not in PERMITTED_TYPE:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
# Hitachi storage calculates volume sizes in a block unit, 512 bytes.
# So, units.Gi is divided by 512.
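# Worked example: units.Gi / 512 = 1073741824 / 512 = 2097152 blocks per
# GiB, so an LDEV of 4194304 blocks is exactly 2 GB, while any nonzero
# remainder means the volume is not an integral number of gigabytes and
# is rejected below.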
if size % (units.Gi / 512):
msg = basic_lib.output_err(703, ldev=ldev)
raise exception.HBSDError(data=msg)
if num_port:
msg = basic_lib.output_err(704, ldev=ldev)
raise exception.HBSDError(data=msg)
return size / (units.Gi / 512)
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Nov 24, 2015
@author: Jong Suk Kim
comments: Interface for Dymola Simulation
Modelica is an object-oriented, equation-based language to conveniently model complex physical systems containing,
e.g., mechanical, electrical, electronic, hydraulic, thermal, control, electric power or process-oriented subcomponents.
Modelica models (with a file extension of .mo) are built, translated (compiled), and simulated in Dymola (http://www.modelon.com/products/dymola/),
which is a commercial modeling and simulation environment based on the Modelica modeling language.
This module provides an interface that allows RAVEN to utilize Modelica models built using Dymola.
General flow:
A Modelica model is built and implemented in Dymola. For example (BouncingBall.mo):
--- BEGIN MODEL FILE ---
model BouncingBall
parameter Real e=0.7 "coefficient of restitution";
parameter Real g=9.81 "gravity acceleration";
parameter Real hstart = 10 "height of ball at time zero";
parameter Real vstart = 0 "velocity of ball at time zero";
Real h(start=hstart,fixed=true) "height of ball";
Real v(start=vstart,fixed=true) "velocity of ball";
Boolean flying(start=true) "true, if ball is flying";
Boolean impact;
Real v_new;
Integer foo;
equation
impact = h <= 0.0;
foo = if impact then 1 else 2;
der(v) = if flying then -g else 0;
der(h) = v;
when {h <= 0.0 and v <= 0.0,impact} then
v_new = if edge(impact) then -e*pre(v) else 0;
flying = v_new > 0;
reinit(v, v_new);
end when;
annotation (uses(Modelica(version="3.2.1")),
experiment(StopTime=10, Interval=0.1),
__Dymola_experimentSetupOutput);
end BouncingBall;
--- END MODEL FILE ---
When a Modelica model, e.g., the BouncingBall model, is implemented in Dymola, platform-dependent C code
and the corresponding executable (by default dymosim.exe on the Windows operating system) are generated for simulation.
A separate text file (by default dsin.txt) containing the model parameters and initial conditions is also generated as part of the build process.
After the executable is generated, it may be run multiple times (with a Dymola license). There are several ways to vary input parameters:
1) Modify the model file and re-build the simulation executable.
2) Change the value(s) in the 'text' input file generated as part of the model build process.
3) Use a completely different text input file for each run.
4) Use the Matlab script file (m-file) or Python Dymola Shell interface to manipulate (perturb) input parameters.
For RAVEN purposes, this interface code will use option (2). Variation of parameters may be done by editing the input
file (dsin.txt) and then re-running the model executable (by default dymosim.exe).
An executable (dymosim.exe) and a simulation initialization file (dsin.txt) can be generated after either translating or simulating the Modelica
model (BouncingBall.mo) using the Dymola Graphical User Interface (GUI) or the Dymola Application Programming Interface (API) routines.
The output of the model is a binary file 'BouncingBall.mat' if the simulation is run in the Dymola GUI or through the Dymola API routines.
If the generated executable is triggered directly from a command prompt, the output file is always named 'dsres.mat'.
To change the initial height of the bouncing ball to 5.0 in the above model, we need to read and modify its value
in 'dsin.txt' and write the result to a different input file, e.g., DMdsin.txt. This .txt file can then be used
to re-run the simulation.
The default .mat output type needs to be converted to human-readable forms, i.e., .csv output. Note that the Python
interface that comes with the Dymola distribution cannot be used, especially when running the model with RAVEN on a
cluster, as that Python interface is currently only supported on Windows.
To use RAVEN, we need to be able to perturb the input and output files from the defaults. The command line form
of this is:
<executable> -s <dsin file text> <outputfile>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import math
import scipy.io
import csv
import re
import copy
import numpy
import pandas as pd
from CodeInterfaceBaseClass import CodeInterfaceBase
from utils import mathUtils
class Dymola(CodeInterfaceBase):
"""
Provides code to interface RAVEN to Dymola
"""
def __init__(self):
"""
Initializes the Dymola code interface.
@ In, None
@ Out, None
"""
CodeInterfaceBase.__init__(self)
self.variablesToLoad = [] # the variables that should be loaded from the mat file (by default, all of them)
def _readMoreXML(self,xmlNode):
"""
Function to read the portion of the xml input that belongs to this specialized class and initialize
some members based on inputs. This can be overloaded in specialize code interface in order to
read specific flags.
Only one option is available here: the <outputVariablesToLoad> node, which lists the output variables to load from the result file.
@ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
@ Out, None.
"""
child = xmlNode.find("outputVariablesToLoad")
if child is not None:
self.variablesToLoad = [var.strip() for var in child.text.split()]
# Generate the command to run Dymola. The form of the command is:
#
# <executable> -s <dsin file txt> <outputfile>
#
# where:
# <executable> The executable generated from the Modelica model file (.mo extension)
# <dsin file txt> Text file containing the initial model parameters (as well as the start
# values of variables. We will perturb this from the one originally
# generated as part of the model build process, which is called dsin.txt.
# <outputfile>      The simulation output, which is a .mat file.
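# For example (hypothetical file names), with a perturbed dsin file
# 'DMdsin.txt' and a model executable 'dymosim', the generated call is:
#   dymosim -s DMdsin.txt rawout~DMdsin.mat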
def generateCommand(self, inputFiles, executable, clargs=None, fargs=None, preExec=None):
"""
See base class. Collects all the clargs and the executable to produce the command-line call.
Returns tuple of commands and base file name for run.
Commands are a list of tuples, indicating parallel/serial and the execution command to use.
@ In, inputFiles, list, List of input files (length of the list depends on the number of inputs added in the Step that is running this code)
@ In, executable, string, executable name with absolute path (e.g. /home/path_to_executable/code.exe)
@ In, clargs, dict, optional, dictionary containing the command-line flags the user can specify in the input (e.g. under the node < Code >< clargstype =0 input0arg =0 i0extension =0 .inp0/ >< /Code >)
@ In, fargs, dict, optional, a dictionary containing the auxiliary input file variables the user can specify in the input (e.g. under the node < Code >< clargstype =0 input0arg =0 aux0extension =0 .aux0/ >< /Code >)
@ In, preExec, string, optional, a string the command that needs to be pre-executed before the actual command here defined
@ Out, returnCommand, tuple, tuple containing the generated command. returnCommand[0] is the command to run the code (string), returnCommand[1] is the name of the output root
"""
# Find the file in the inputFiles that has the type "DymolaInitialisation", which is what we need to work with.
foundInit = False
for index, inputFile in enumerate(inputFiles):
if inputFile.getType() == "DymolaInitialisation":
foundInit = True
indexInit = index
if not foundInit:
raise Exception('Dymola INTERFACE ERROR -> None of the input files has the type "DymolaInitialisation"!')
# Build an output file name of the form: rawout~<Base Name>, where base name is generated from the
# input file passed in: /path/to/file/<Base Name>.ext. 'rawout' indicates that this is the direct
# output from running the Dymola executable.
outputfile = 'rawout~' + inputFiles[indexInit].getBase()
executeCommand = [('parallel', executable +' -s '+ inputFiles[indexInit].getFilename() +' '+ outputfile+ '.mat')]
returnCommand = executeCommand, outputfile
return returnCommand
def getInputExtension(self):
"""
Return a tuple of possible file extensions for a simulation initialization file (i.e., dsin.txt).
@ In, None
@ Out, validExtensions, tuple, tuple of valid extensions
"""
validExtensions = ('txt', 'TXT')
return validExtensions
def createNewInput(self, currentInputFiles, oriInputFiles, samplerType, **Kwargs):
"""
Generate a new Dymola input file (txt format) from the original, changing parameters
as specified in Kwargs['SampledVars']. In addition, it creates an additional input file containing the vector data to be
passed to Dymola.
@ In, currentInputFiles, list, list of current input files (input files from last this method call)
@ In, oriInputFiles, list, list of the original input files
@ In, samplerType, string, Sampler type (e.g. MonteCarlo, Adaptive, etc. see manual Samplers section)
@ In, Kwargs, dictionary, kwarded dictionary of parameters. In this dictionary there is another dictionary called "SampledVars"
where RAVEN stores the variables that got sampled (e.g. Kwargs['SampledVars'] => {'var1':10,'var2':40})
@ Out, newInputFiles, list, list of the new input files (modified and not)
"""
# Start with the original input file, which we have to find first.
# The types have to be "DymolaInitialisation" and "DymolaVectors"
foundInit = False
foundVect = False
for index, inputFile in enumerate(oriInputFiles):
if inputFile.getType() == "DymolaInitialisation":
foundInit = True
indexInit = index
if inputFile.getType() == "DymolaVectors":
foundVect = True
indexVect = index
if not foundInit:
raise Exception('Dymola INTERFACE ERROR -> None of the input files has the type "DymolaInitialisation"!')
if not foundVect:
print('Dymola INTERFACE WARNING -> None of the input files has the type "DymolaVectors"! ')
# Figure out the new file name and put it into the proper place in the return list
#newInputFiles = copy.deepcopy(currentInputFiles)
originalPath = oriInputFiles[indexInit].getAbsFile()
#newPath = os.path.join(os.path.split(originalPath)[0], "DM" + Kwargs['prefix'] + os.path.split(originalPath)[1])
#currentInputFiles[index].setAbsFile(newPath)
# Define dictionary of parameters and pre-process the values.
# Each key is a parameter name (including the full model path in Modelica_ dot notation) and
# each entry is a parameter value. The parameter name includes array indices (if any) in
# Modelica_ representation (1-based indexing). The values must be representable as scalar
# numbers (integer or floating point). *True* and *False* (not 'true' and 'false') are
# automatically mapped to 1 and 0. Enumerations must be given explicitly as the unsigned integer
# equivalent. Strings, functions, redeclarations, etc. are not supported.
varDict = Kwargs['SampledVars']
vectorsToPass= {}
for key, value in list(varDict.items()):
if isinstance(value, bool):
varDict[key] = 1 if value else 0
if isinstance(value, numpy.ndarray):
# print warning here (no access to RAVEN Message Handler)
print("Dymola INTERFACE WARNING -> Dymola interface found vector data to be passed. If %s" %key)
print(" is supposed to go into the simulation initialisation file of type")
print(" 'DymolaInitialisation' the array must be split into scalars.")
print(" => It is assumed that the array goes into the input file with type 'DymolaVectors'")
if not foundVect:
raise Exception('Dymola INTERFACE ERROR -> None of the input files has the type "DymolaVectors"! ')
# extract dict entry
vectorsToPass[key] = varDict.pop(key)
assert not type(value).__name__ in ['str','bytes','unicode'], ("Strings cannot be "
"used as values in the simulation initialization file.")
# create an additional input file for vectors if needed
if bool(vectorsToPass):
with open(currentInputFiles[indexVect].getAbsFile(), 'w') as Fvect:
Fvect.write("#1\n")
for key, value in sorted(vectorsToPass.items()) :
inc = 0
Fvect.write("double %s(%s,2) #Comments here\n" %(key, len(value)))
for val in value:
Fvect.write("%s\t%s\n" %(inc,val))
inc += 1
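# Illustrative content produced by the loop above for
# vectorsToPass = {'u': [0.0, 1.5]} (hypothetical variable name; the two
# columns are tab-separated):
#   #1
#   double u(2,2) #Comments here
#   0    0.0
#   1    1.5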
# Do the search and replace in input file "DymolaInitialisation"
# Aliases for some regular sub-expressions.
u = '\\d+' # Unsigned integer
i = '[+-]?' + u # Integer
f = i + '(?:\\.' + u + ')?(?:[Ee][+-]' + u + ')?' # Floating point number
# Possible regular expressions for a parameter specification (with '%s' for
# the parameter name)
patterns = [# Dymola 1- or 2-line parameter specification
(r'(^\s*%s\s+)%s(\s+%s\s+%s\s+%s\s+%s\s*#\s*%s\s*$)'
% (i, f, f, f, u, u, '%s')),
(r'(^\s*)' + i + r'(\s*#\s*%s)'),
(r'(^\s*)' + f + r'(\s*#\s*%s)'),
# From Dymola:
# column 1: Type of initial value
# = -2: special case: for continuing simulation
# (column 2 = value)
# = -1: fixed value (column 2 = fixed value)
# = 0: free value, i.e., no restriction
# (column 2 = initial value)
# > 0: desired value (column 1 = weight for
# optimization
# column 2 = desired value)
# use weight=1, since automatic scaling usually
# leads to equally weighted terms
# column 2: fixed, free or desired value according to column 1.
# column 3: Minimum value (ignored, if Minimum >= Maximum).
# column 4: Maximum value (ignored, if Minimum >= Maximum).
# Minimum and maximum restrict the search range in
# initial value calculation. They might also be used
# for scaling.
# column 5: Category of variable.
# = 1: parameter.
# = 2: state.
# = 3: state derivative.
# = 4: output.
# = 5: input.
# = 6: auxiliary variable.
# column 6: Data type of variable.
# = 0: real.
# = 1: boolean.
# = 2: integer.
]
# These are tried in order until there is a match. The first group or pair
# of parentheses contains the text before the parameter value and the second
# contains the text after it (minus one space on both sides for clarity).
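# Worked example (hypothetical dsin.txt line): a parameter row such as
#   -1 10 0 0 1 0 # hstart
# is matched by the first pattern for the name 'hstart' (type -1 = fixed
# value, value 10, min 0, max 0, category 1 = parameter, data type 0 =
# real); substituting a sampled value of 5.0 rewrites it in place as
#   -1 5.0 0 0 1 0 # hstart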
# Read the file.
with open(originalPath, 'r') as src:
text = src.read()
# Set the parameters.
for name, value in varDict.items():
# skip the special key for the index mapper
if name == '_indexMap':
continue
namere = re.escape(name) # Escape the dots, square brackets, etc.
for pattern in patterns:
text, n = re.subn(pattern % namere, r'\g<1>%s\2' % value, text, 1,
re.MULTILINE)
if n == 1:
break
else:
raise AssertionError(
"Parameter %s does not exist or is not formatted as expected "
"in %s." % (name, originalPath))
# Re-write the file.
with open(currentInputFiles[indexInit].getAbsFile(), 'w') as src:
src.write(text)
return currentInputFiles
def checkForOutputFailure(self, output, workingDir):
"""
Sometimes (e.g. when the license file is missing) the command returns 0 despite failing.
Check for creation of the "success" file as a determination of success for Dymola runs.
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, failure, bool, True if the job is failed, False otherwise
"""
try:
# Use a context manager so the probe file handle is closed promptly.
with open(os.path.join(workingDir, 'success'), 'r'):
pass
except FileNotFoundError:
return True
return False
def finalizeCodeOutput(self, command, output, workingDir):
"""
Called by RAVEN to modify output files (if needed) so that they are in a proper form.
In this case, the default .mat output needs to be converted to .csv output, which is the
format that RAVEN can communicate with.
@ In, command, string, the command used to run the just ended job
@ In, output, string, the Output name root
@ In, workingDir, string, current working dir
@ Out, output, string, optional, present in case the root of the output file gets changed in this method.
"""
_vars = {}
_blocks = []
_namesData1 = []
_namesData2 = []
_timeSeriesData1 = []
_timeSeriesData2 = []
# Load the output file (.mat file) that has been generated by running the Dymola executable and
# store the data from this file in the variable 'mat'.
matSourceFileName = os.path.join(workingDir, output)
matSourceFileName += '.mat'
###################################################################
#FIXME: LOADMAT HAS A DIFFERENT BEHAVIOR IN SCIPY VERSION >= 0.18 #
#if int(scipy.__version__.split(".")[1])>17:
# warnings.warn("SCIPY version >0.17.xx has a different behavior in reading .mat files!")
mat = scipy.io.loadmat(matSourceFileName, chars_as_strings=False)
###################################################################
# Define the functions that extract strings from a character matrix:
# - strMatNormal: joins each row into one string (row-wise data)
# - strMatTrans: joins each column into one string (column-wise data)
# Both remove trailing whitespace from the resulting strings.
strMatNormal = lambda a: [''.join(s).rstrip() for s in a]
strMatTrans = lambda a: [''.join(s).rstrip() for s in zip(*a)]
# Define the function that returns '1.0' with the sign of 'x'
sign = lambda x: math.copysign(1.0, x)
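# For example, strMatNormal([['a','b',' '], ['c','d',' ']]) gives
# ['ab', 'cd'], while strMatTrans reads the same character matrix
# column-wise and gives ['ac', 'bd', ''].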
# Check the structure of the output file.
try:
fileInfo = strMatNormal(mat['Aclass'])
except KeyError:
raise Exception('File structure not supported!')
# Check the version of the output file (version 1.1).
if fileInfo[1] == '1.1' and fileInfo[3] == 'binTrans':
names = strMatTrans(mat['name']) # names
descr = strMatTrans(mat['description']) # descriptions
for i in range(len(names)):
d = mat['dataInfo'][0][i] # data block
x = mat['dataInfo'][1][i] # column (original)
c = abs(x)-1 # column (reduced)
s = sign(x) # sign
if c:
_vars[names[i]] = (descr[i], d, c, float(s))
if not d in _blocks:
_blocks.append(d)
else:
_absc = (names[i], descr[i])
# Extract the trajectory for the variable 'Time' and store the data in the variable 'timeSteps'.
timeSteps = mat['data_2'][0]
# Compute the number of output points of trajectory (time series data).
numOutputPts = timeSteps.shape[0]
# Convert the variable type of 'timeSteps' from '1-d array' to '2-d array'.
timeStepsArray = numpy.array([timeSteps])
# Extract the names and output points of all variables and store them in the variables:
# - _namesData1: Names of parameters
# - _namesData2: Names of the variables that are not parameters
# - _timeSeriesData1: Trajectories (time series data) of '_namesData1'
# - _timeSeriesData2: Trajectories (time series data) of '_namesData2'
for (k,v) in _vars.items():
readIt = True
if len(self.variablesToLoad) > 0 and k not in self.variablesToLoad:
readIt = False
if readIt:
dataValue = mat['data_%d' % (v[1])][v[2]]
if v[3] < 0:
dataValue = dataValue * -1.0
if v[1] == 1:
_namesData1.append(k)
_timeSeriesData1.append(dataValue)
elif v[1] == 2:
_namesData2.append(k)
_timeSeriesData2.append(dataValue)
else:
raise Exception('File structure not supported!')
timeSeriesData1 = numpy.array(_timeSeriesData1)
timeSeriesData2 = numpy.array(_timeSeriesData2)
# The csv writer places quotes around variables that contain a ',' in the name, i.e.
# a, "b,c", d would represent 3 variables: 1) a 2) b,c 3) d. The csv reader in RAVEN does not
# support this convention.
# => replace ',' in variable names with '@', i.e.
# a, "b,c", d will become a, b@c, d
for mylist in [_namesData1, _namesData2]:
for i in range(len(mylist)):
if ',' in mylist[i]:
mylist[i] = mylist[i].replace(',', '@')
# Recombine the names of the variables and insert the variable 'Time'.
# Order of the variable names should be 'Time', _namesData1, _namesData2.
# Also, convert the type of the resulting variable from 'list' to '2-d array'.
varNames = numpy.array([[_absc[0]] + _namesData1 + _namesData2])
# Compute the number of parameters.
sizeParams = timeSeriesData1.shape[0]
# Create a 2-d array whose size is 'the number of parameters' by 'the number of output points of the trajectories'.
# Fill each row in a 2-d array with the parameter value.
Data1Array = numpy.full((sizeParams,numOutputPts),1.)
for n in range(sizeParams):
Data1Array[n,:] = timeSeriesData1[n,0]
# Create an array of trajectories, which are to be written to CSV file.
varTrajectories = numpy.matrix.transpose(numpy.concatenate((timeStepsArray,Data1Array,timeSeriesData2), axis=0))
# create output response dictionary
t = pd.Series(varTrajectories[:,0])
m = t.duplicated()
if len(t[m]):
# duplicated values
tIndex = None
iIndex = 1
for i in range(len(t[m])):
index = t[m].index[i]
if tIndex is None:
tIndex = t[index]
else:
if mathUtils.compareFloats(tIndex, t[index], tol=1.0E-15):
iIndex += 1
else:
iIndex = 1
tIndex = t[index]
t[index] = t[index] + numpy.finfo(float).eps*t[index]*iIndex
varTrajectories[:,0] = t.to_numpy()
response = {var:varTrajectories[:,i] for (i, var) in enumerate(varNames[0])}
else:
raise Exception('File structure not supported!')
#release memory
del _vars
del _blocks
del _namesData1
del _namesData2
del _timeSeriesData1
del _timeSeriesData2
del _absc
del Data1Array
del timeSeriesData1
del timeSeriesData2
return response
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import numpy as np
import tvm
import tvm.runtime.ndarray as _nd
import tvm.runtime.vm as vm_rt
from tvm import autotvm
from tvm.relay import expr as _expr
from tvm.relay.backend.interpreter import Executor
from . import _vm
def compile(mod, target=None, target_host=None, params=None):
"""Compile the module to VM executable. A helper function for VMCompiler.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : str, :any:`tvm.target.Target`, or dict of str (i.e.
device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context
to target mapping. For homogeneous compilation, it is a build target.
target_host : str or :any:`tvm.target.Target`, optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled;
otherwise a stackvm interpreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
compiler = VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target, target_host)
compiler.codegen()
return compiler.get_exec()
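# A minimal usage sketch (assuming `mod` is an existing Relay IRModule and the
# LLVM backend is enabled in this TVM build):
# exe = compile(mod, target="llvm")
# vm = vm_rt.VirtualMachine(exe, tvm.cpu())
# result = vm.run(input_data)  # input_data: an NDArray or numpy array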
class VMCompiler(object):
"""Compiler that compiles Relay module to VM executable."""
def __init__(self):
self.mod = _vm._VMCompiler()
self._lower = self.mod["lower"]
self._codegen = self.mod["codegen"]
self._get_exec = self.mod["get_executable"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
self._optimize = self.mod["optimize"]
def set_params(self, params):
"""Set constant parameters for the model.
Parameters
----------
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
"""
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
self._set_params_func(inputs)
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
def lower(self, mod, target=None, target_host=None):
"""Lower the module to VM bytecode.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : str, :any:`tvm.target.Target`, or dict of str (i.e.
device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context
to target mapping. For homogeneous compilation, it is a build target.
target_host : str or :any:`tvm.target.Target`, optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled;
otherwise a stackvm interpreter is used.
"""
target = self._update_target(target)
target_host = self._update_target_host(target, target_host)
tophub_context = self._tophub_context(target)
with tophub_context:
self._lower(mod, target, target_host)
def codegen(self):
"""Generate the kernel library."""
self._codegen()
def optimize(self, mod, target=None, target_host=None, params=None):
"""Helper method that optimizes a Relay module via VM.
Parameters
----------
mod : tvm.IRModule
target : str, :any:`tvm.target.Target`, or dict of str (i.e.
device/context name) to str/tvm.target.Target, optional
target_host : str or :any:`tvm.target.Target`, optional
The compilation target for host.
By default, llvm is used if it is enabled;
otherwise a stackvm interpreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : tvm.IRModule
The optimized relay module.
params : dict
The parameters of the final module.
"""
target = self._update_target(target)
target_host = self._update_target_host(target, target_host)
if params:
self.set_params(params)
return self._optimize(mod, target, target_host), self.get_params()
def get_exec(self):
"""Get the VM executable.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
return vm_rt.Executable(self._get_exec())
def _update_target(self, target):
"""Update target."""
target = target if target else tvm.target.Target.current()
if target is None:
raise ValueError("Target is not set in env or passed as argument.")
tgts = {}
if isinstance(target, (str, tvm.target.Target)):
dev_type = tvm.tir.IntImm("int32", tvm.nd.context(str(target)).device_type)
tgts[dev_type] = tvm.target.Target(target)
elif isinstance(target, dict):
for dev, tgt in target.items():
dev_type = tvm.tir.IntImm("int32", tvm.nd.context(dev).device_type)
tgts[dev_type] = tvm.target.Target(tgt)
else:
raise TypeError(
"target is expected to be str, tvm.target.Target, "
+ "or dict of str to str/tvm.target.Target, but received "
+ "{}".format(type(target))
)
return tgts
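# For reference, a sketch of the normalization above: a homogeneous string
# target such as "llvm" becomes {IntImm("int32", 1): Target("llvm")}, since
# device type 1 corresponds to the CPU.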
def _update_target_host(self, target, target_host):
"""Update target host."""
target_host = None if target_host == "" else target_host
if not target_host:
for device_type, tgt in target.items():
if device_type.value == tvm.nd.cpu(0).device_type:
target_host = tgt
break
if not target_host:
target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
return target_host
def _tophub_context(self, target):
"""Get the autotvm context."""
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(target.values()))
else:
tophub_context = autotvm.util.EmptyContext()
return tophub_context
class VMExecutor(Executor):
"""
An implementation of the executor interface for
the Relay VM.
Useful for experimentation and debugging;
the VM can also be used directly through the API
supported by `tvm.runtime.vm`.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
ctx : :py:class:`~tvm.runtime.TVMContext`
The runtime context to run the code on.
target : :py:class:`Target`
The target option to build the function.
"""
def __init__(self, mod, ctx, target):
if mod is None:
raise RuntimeError("Must provide module to get VM executor.")
self.mod = mod
self.ctx = ctx
self.target = target
self.executable = compile(mod, target)
self.vm = vm_rt.VirtualMachine(self.executable, ctx)
def _make_executor(self, expr=None):
main = self.mod["main"]
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(main, args, kwargs)
return self.vm.run(*args)
return _vm_wrapper
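# A minimal sketch of using the executor (assuming `mod` is a Relay IRModule
# whose "main" function takes one tensor argument):
# executor = VMExecutor(mod, tvm.cpu(), "llvm")
# run = executor._make_executor()
# out = run(np.zeros((1, 10), dtype="float32"))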
| |
from app import authentication
from app import google_storage
from app import models
from app import schemas
from app.models import db
from app.es.googledailyresource import GoogleDailyResource
from app.es.googlemetric import GoogleMetric
from app.gcloud.utils import get_credentials_from_identity
from collections import OrderedDict
import apiclient
import app
import config
import datetime
import flask
import httplib2
import oauth2client
import url
google_bp = flask.Blueprint('google_bp', __name__)
# Retrieve Google account userinfo.
def get_google_userinfo(http, google_oauth2_service=None):
if not google_oauth2_service:
google_oauth2_service = apiclient.discovery.build('oauth2', 'v2', http=http)
profile = google_oauth2_service.userinfo().get().execute()
return profile, google_oauth2_service
google_key_registration_flow = oauth2client.client.OAuth2WebServerFlow(scope='https://www.googleapis.com/auth/monitoring '
'https://www.googleapis.com/auth/monitoring.read '
'https://www.googleapis.com/auth/monitoring.write '
'https://www.googleapis.com/auth/cloud-platform.read-only '
'https://www.googleapis.com/auth/compute.readonly '
'https://www.googleapis.com/auth/userinfo.email '
'https://www.googleapis.com/auth/plus.login',
redirect_uri='https://' + config.HOST + config.OAUTH_URIS['key_registration_google_callback'],
**(config.GOOGLE_OAUTH))
google_key_registration_flow.params['access_type'] = 'offline'
google_key_registration_flow.params['prompt'] = 'consent'
@app.app.route('/gcloud/identity/initiate', methods=['GET'])
@authentication.with_login(True)
def initiate_auth_google_cloud_user(user):
"""---
get:
tags:
- gcloud
produces:
- application/json
description: |
Initiate flow to register Google Cloud account: remember the
attempt and return the corresponding URL which the client shall
follow.
This was built to circumvent cross-site request policies, as the
API is served on a port different from that of the origin.
summary: Initiate flow to register Google Cloud account.
parameters: []
responses:
200:
description: The flow was initiated.
"""
token = models.GoogleCloudIdentityRegistrationToken.for_user(user)
db.session.add(token)
auth_uri = google_key_registration_flow.step1_get_authorize_url(state=token.id)
db.session.commit()
return flask.jsonify(uri=auth_uri), 200
@app.app.route('/gcloud/identity/callback', methods=['GET'])
def callback_auth_google_cloud_user():
"""---
get:
tags:
- gcloud
description: |
Callback for the Google Cloud account registration flow. Recall an
attempt and register the Google identity for the initiating user.
summary: Callback for the Google Cloud account registration flow.
responses:
400:
description: The `state` field is missing.
403:
description: |
The `state` field is either expired or does not exist.
302:
description: |
The flow successfully concluded. The Google Cloud identity
has been added to the user's cloud service accounts.
"""
try:
state = flask.request.args.get('state')
if not state:
return flask.jsonify(error='No state.'), 400
token = models.GoogleCloudIdentityRegistrationToken.query.get(state)
if not token:
return flask.jsonify(error='Bad state.'), 403
elif token.has_expired():
db.session.delete(token)
db.session.commit()
return flask.jsonify(error='Expired state.'), 403
assert token.id_user
credentials = google_key_registration_flow.step2_exchange(flask.request.args.get('code'))
http = httplib2.Http()
http = credentials.authorize(http)
profile, _ = get_google_userinfo(http)
email = profile['email']
identity = models.GoogleCloudIdentity(id_user=token.id_user,
email=email,
credentials=credentials.to_json())
db.session.add(identity)
db.session.delete(token)
db.session.commit()
return flask.redirect('https://%s/#/app/keyselect' % (config.WEB_UI_HOST,), code=302)
except oauth2client.client.FlowExchangeError:
return flask.jsonify(error='Failed to negotiate API tokens.')
@app.app.route('/gcloud/identity', methods=['GET'])
@authentication.with_login(True)
def get_gcloud_identity_list(user):
"""---
get:
tags:
- gcloud
description: |
Retrieve all configured Google Cloud identities for the currently
authenticated user.
summary: Retrieve this user's Google Cloud identities.
produces:
- application/json
responses:
403:
description: |
The user is not authenticated.
200:
description: |
List of Google Cloud identities.
"""
return flask.jsonify(identities=map(lambda i: schemas.google_cloud_identity_schema.dump(i).data, user.gcloud_identities))
@app.app.route('/gcloud/identity/<int:identity_id>', methods=['GET'])
@authentication.with_login(True)
def get_gcloud_identity(user, identity_id):
"""---
get:
tags:
- gcloud
description: |
Retrieve a configured Google Cloud identity which belongs to
the currently authenticated user by its ID.
summary: |
Retrieve a Google Cloud identity by ID.
produces:
- application/json
responses:
200:
description: Google Cloud identity.
403:
description: The user is not authenticated.
404:
description: The user has no identity with this ID.
"""
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
return flask.jsonify(schemas.google_cloud_identity_schema.dump(identity).data)
else:
return flask.jsonify(error='No such identity.'), 404
@app.app.route('/gcloud/identity/<int:identity_id>', methods=['DELETE'])
@authentication.with_login(True)
def delete_gcloud_identity(user, identity_id):
"""
delete:
tags:
- gcloud
description: |
Deletes one of the user's Google Cloud identities by its ID. Note
that this will recursively delete all associated projects, buckets,
records and measurements.
summary: Delete a Google Cloud identity.
produces:
- application/json
responses:
200:
description: Success.
403:
description: The user is not authenticated.
404:
description: The user has no identity with this ID.
"""
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
try:
credentials = get_credentials_from_identity(identity)
credentials.revoke(httplib2.Http())
except Exception:
# Best effort: revocation may fail if the credentials are already invalid.
pass
db.session.delete(identity)
db.session.commit()
return flask.jsonify({})
@app.app.route('/gcloud/estimate', methods=['GET'])
def gcloud_estimate():
"""---
get:
tags:
- gcloud
description: |
Compute an estimate of the user's spendings on Google Cloud Storage
depending on their needs in terms of services and volumes.
summary: |
Compute an estimate of the user's spendings on Google Cloud Storage.
produces:
- application/json
responses:
200:
description: The estimate was computed.
"""
args = {}
for key in filter(lambda k: k in google_storage.fields, flask.request.args):
try:
args[key] = int(flask.request.args[key])
except ValueError:
args[key] = flask.request.args[key]
return flask.jsonify(google_storage.current_model(**args)), 200
@app.app.route('/gcloud/identity/<int:identity_id>/stats/dailycostbyproduct', methods=['GET'])
@authentication.with_login(True)
def gcloud_identity_daily_cost_by_product(user, identity_id):
"""---
get:
tags:
- gcloud
description: |
Get daily costs summed by product
summary: Get daily costs summed by product
produces:
- application/json
responses:
200:
description: Success.
404:
description: The user has no identity with this ID.
"""
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
res = GoogleDailyResource.daily_cost_by_product(identity.email)
res['days'] = res['days'][-3:]
return flask.jsonify(res)
else:
return flask.jsonify(error='No such identity.'), 404
@app.app.route('/gcloud/identity/<int:identity_id>/stats/monthcostbyproduct', methods=['GET'])
@authentication.with_login(True)
def gcloud_identity_month_cost_by_product(user, identity_id):
"""---
get:
tags:
- gcloud
description: |
Get last month's cost by product
summary: Get last month's cost by product
produces:
- application/json
responses:
200:
description: Success.
404:
description: The user has no identity with this ID.
"""
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
return flask.jsonify(GoogleDailyResource.month_cost_by_product(identity.email))
else:
return flask.jsonify(error='No such identity.'), 404
@app.app.route('/gcloud/identity/<int:identity_id>/stats/costbyresource', methods=['GET'])
@authentication.with_login(True)
def gcloud_identity_cost_by_resource(user, identity_id):
"""---
get:
tags:
- gcloud
description: |
Retrieve the monthly cost by resource aggregated by price category
summary: Get monthly cost by resource
produces:
- application/json
responses:
200:
description: Success.
404:
description: The user has no identity with this ID.
"""
cost_categories = [1, 10, 100, 1000, 10000, 100000, 1000000]
def get_max_cost(month):
max_cost = 0
for resource in month['resources']:
if resource['cost'] > max_cost:
max_cost = resource['cost']
return max_cost
def get_max_cost_category(max_cost):
for category in reversed(cost_categories):
if max_cost >= category:
return category
return 1
def get_categories_dict(max_cost):
categories_dict = OrderedDict()
if max_cost < 1:
categories_dict['<1'] = dict(resources=[], total=0)
return categories_dict
for category in cost_categories:
categories_dict['<'+str(category)] = dict(resources=[], total=0)
if category == max_cost_category:
categories_dict['>'+str(category)] = dict(resources=[], total=0)
break
return categories_dict
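# Illustrative sketch of the bucketing above (hypothetical numbers): for a
# month whose most expensive resource costs 42, get_max_cost_category yields
# 10 and the buckets become '<1', '<10' and '>10'; each resource is then filed
# into the first bucket whose bound exceeds its cost, with costs >= 10 in '>10'.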
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
res = []
monthly_cost_by_resource = GoogleDailyResource.monthly_aggregates_resource(identity.email)
for month in monthly_cost_by_resource['months']:
max_cost = get_max_cost(month)
max_cost_category = get_max_cost_category(max_cost)
month_aggregate = {
'month': month['month'],
'categories': get_categories_dict(max_cost),
'total': 0
}
month_aggregate['category_list'] = month_aggregate['categories'].keys()
for resource in month['resources']:
if resource['cost'] < 0:
continue
elif resource['cost'] >= max_cost_category:
month_aggregate['categories']['>'+str(max_cost_category)]['resources'].append(resource)
month_aggregate['categories']['>'+str(max_cost_category)]['total'] += resource['cost']
else:
for cost_category in cost_categories:
if resource['cost'] < cost_category:
month_aggregate['categories']['<'+str(cost_category)]['resources'].append(resource)
month_aggregate['categories']['<'+str(cost_category)]['total'] += resource['cost']
break
month_aggregate['total'] += resource['cost']
res.append(month_aggregate)
return flask.jsonify(months=res)
else:
return flask.jsonify(error='No such identity.'), 404
@app.app.route('/gcloud/identity/<int:identity_id>/stats/costbyproject', methods=['GET'])
@authentication.with_login(True)
def gcloud_identity_cost_by_project(user, identity_id):
"""---
get:
tags:
- gcloud
description: |
Retrieve the monthly cost by project
summary: Get monthly cost by project
produces:
- application/json
responses:
200:
description: Success.
404:
description: The user has no identity with this ID.
"""
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
monthly_cost_by_project = GoogleDailyResource.monthly_aggregates_project(identity.email)
for month in monthly_cost_by_project['months']:
total = 0
for project in month['projects']:
total = total + project['cost']
month.update(dict(total=total))
return flask.jsonify(months=monthly_cost_by_project['months'])
else:
return flask.jsonify(error='No such identity.'), 404
@app.app.route('/gcloud/identity/<int:identity_id>/stats/usagecost')
@authentication.with_login(True)
def gcloud_usage_cost(user, identity_id):
"""---
get:
tags:
- cloud
description: |
Retrieve the Google CPU usage VS cost
summary: Get the Google CPU usage VS cost
produces:
- application/json
responses:
200:
description: List of stats
schema:
properties:
days:
type: array
items:
properties:
day:
type: string
cpu:
type: number
cost:
type: number
404:
description: The user has no identity with this ID
"""
identity = models.GoogleCloudIdentity.query.filter_by(id_user=user.id, id=identity_id).first()
if identity:
cpu = dict(GoogleMetric.daily_cpu_utilization(identity.email))
cost = dict(GoogleDailyResource.daily_compute_cost(identity.email))
days = sorted(set(cpu.keys()) & set(cost.keys()))
daily = [dict(day=d, cpu=cpu[d] * 100, cost=cost[d]) for d in days]
return flask.jsonify(dict(days=daily))
else:
return flask.jsonify(error='No such identity.'), 404
| |
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from dipy.reconst.dti import fractional_anisotropy, color_fa
from scipy.ndimage.filters import median_filter
try:
from skimage.filters import threshold_otsu as otsu
except ImportError:
from .threshold import otsu
from scipy.ndimage import binary_dilation, generate_binary_structure
def multi_median(input, median_radius, numpass):
""" Applies median filter multiple times on input data.
Parameters
----------
input : ndarray
The input volume to apply filter on.
median_radius : int
Radius (in voxels) of the applied median filter
numpass: int
Number of pass of the median filter
Returns
-------
input : ndarray
Filtered input volume.
"""
# Array representing the size of the median window in each dimension.
medarr = np.ones_like(input.shape) * ((median_radius * 2) + 1)
# Apply the median filter `numpass` times, filtering in place.
for i in range(0, numpass):
median_filter(input, medarr, output=input)
return input
def applymask(vol, mask):
""" Mask vol with mask.
Parameters
----------
vol : ndarray
Array with $V$ dimensions
mask : ndarray
Binary mask. Has $M$ dimensions where $M <= V$. When $M < V$, we
append $V - M$ dimensions with axis length 1 to `mask` so that `mask`
will broadcast against `vol`. In the typical case `vol` can be 4D,
`mask` can be 3D, and we append a 1 to the mask shape which (via numpy
broadcasting) has the effect of applying the 3D mask to each 3D slice in
`vol` (``vol[..., 0]`` to ``vol[..., -1]``).
Returns
-------
masked_vol : ndarray
`vol` multiplied by `mask` where `mask` may have been extended to match
extra dimensions in `vol`
"""
mask = mask.reshape(mask.shape + (vol.ndim - mask.ndim) * (1,))
return vol * mask
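# A small sketch of the broadcasting described above (hypothetical shapes):
# vol = np.random.rand(64, 64, 30, 10)  # 4D DWI data
# mask = vol[..., 0] > 0.2              # 3D binary mask
# masked = applymask(vol, mask)         # mask reshaped to (64, 64, 30, 1) and broadcast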
def bounding_box(vol):
"""Compute the bounding box of nonzero intensity voxels in the volume.
Parameters
----------
vol : ndarray
Volume to compute bounding box on.
Returns
-------
npmins : list
Array containing minimum index of each dimension
npmaxs : list
Array containing maximum index of each dimension
"""
# Find bounds on first dimension
temp = vol
for i in range(vol.ndim - 1):
temp = temp.any(-1)
mins = [temp.argmax()]
maxs = [len(temp) - temp[::-1].argmax()]
# Check that vol is not all 0
if mins[0] == 0 and temp[0] == 0:
warn('No data found in volume to bound. Returning empty bounding box.')
return [0] * vol.ndim, [0] * vol.ndim
# Find bounds on remaining dimensions
if vol.ndim > 1:
a, b = bounding_box(vol.any(0))
mins.extend(a)
maxs.extend(b)
return mins, maxs
def crop(vol, mins, maxs):
"""Crops the input volume.
Parameters
----------
vol : ndarray
Volume to crop.
mins : array
Array containing minimum index of each dimension.
maxs : array
Array containing maximum index of each dimension.
Returns
-------
vol : ndarray
The cropped volume.
"""
return vol[tuple(slice(i, j) for i, j in zip(mins, maxs))]
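# A minimal sketch combining the two helpers above (hypothetical data):
# vol = np.zeros((10, 10, 10)); vol[2:5, 3:6, 4:7] = 1
# mins, maxs = bounding_box(vol)   # [2, 3, 4], [5, 6, 7]
# cropped = crop(vol, mins, maxs)  # shape (3, 3, 3)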
def median_otsu(input_volume, median_radius=4, numpass=4,
autocrop=False, vol_idx=None, dilate=None):
"""Simple brain extraction tool method for images from DWI data.
It uses a median filter smoothing of the `vol_idx` volumes of
`input_volume` and an automatic histogram Otsu thresholding technique,
hence the name *median_otsu*.
This function is inspired by Mrtrix's bet, which has default values
``median_radius=3``, ``numpass=2``. However, from tests on multiple 1.5T
and 3T data from GE, Philips, Siemens, the most robust choice is
``median_radius=4``, ``numpass=4``.
Parameters
----------
input_volume : ndarray
ndarray of the brain volume
median_radius : int
Radius (in voxels) of the applied median filter (default: 4).
numpass: int
Number of pass of the median filter (default: 4).
autocrop: bool, optional
if True, the masked input_volume will also be cropped using the
bounding box defined by the masked data. Should be on if DWI is
upsampled to 1x1x1 resolution. (default: False).
vol_idx : None or array, optional
1D array representing indices of ``axis=3`` of a 4D `input_volume`.
None (the default) corresponds to ``(0,)`` (assumes the first volume in
the 4D array).
dilate : None or int, optional
number of iterations for binary dilation
Returns
-------
maskedvolume : ndarray
Masked input_volume
mask : 3D ndarray
The binary brain mask
Notes
-----
Copyright (C) 2011, the scikit-image team
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of skimage nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
if len(input_volume.shape) == 4:
if vol_idx is not None:
b0vol = np.mean(input_volume[..., tuple(vol_idx)], axis=3)
else:
b0vol = input_volume[..., 0].copy()
else:
b0vol = input_volume.copy()
# Make a mask using a multiple pass median filter and histogram
# thresholding.
mask = multi_median(b0vol, median_radius, numpass)
thresh = otsu(mask)
mask = mask > thresh
if dilate is not None:
cross = generate_binary_structure(3, 1)
mask = binary_dilation(mask, cross, iterations=dilate)
# Auto crop the volumes using the mask as input_volume for bounding box
# computing.
if autocrop:
mins, maxs = bounding_box(mask)
mask = crop(mask, mins, maxs)
croppedvolume = crop(input_volume, mins, maxs)
maskedvolume = applymask(croppedvolume, mask)
else:
maskedvolume = applymask(input_volume, mask)
return maskedvolume, mask
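# Typical usage sketch (assuming `data` is a 4D DWI array):
# b0_masked, mask = median_otsu(data, median_radius=4, numpass=4,
#                               vol_idx=[0], dilate=1)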
def segment_from_cfa(tensor_fit, roi, threshold, return_cfa=False):
"""
Segment the cfa inside roi using the values from threshold as bounds.
Parameters
-------------
tensor_fit : TensorFit object
TensorFit object
roi : ndarray
A binary mask, which contains the bounding box for the segmentation.
threshold : array-like
An iterable that defines the min and max values to use for the thresholding.
The values are specified as (R_min, R_max, G_min, G_max, B_min, B_max)
return_cfa : bool, optional
If True, the cfa is also returned.
Returns
----------
mask : ndarray
Binary mask of the segmentation.
cfa : ndarray, optional
Array with shape = (..., 3), where ... is the shape of tensor_fit.
The color fractional anisotropy, ordered as a nd array with the last
dimension of size 3 for the R, G and B channels.
"""
FA = fractional_anisotropy(tensor_fit.evals)
FA[np.isnan(FA)] = 0
FA = np.clip(FA, 0, 1) # Clamp the FA to remove degenerate tensors
cfa = color_fa(FA, tensor_fit.evecs)
roi = np.asarray(roi, dtype=bool)
include = (cfa >= threshold[0::2]) & (cfa <= threshold[1::2]) & roi[..., None]
mask = np.all(include, axis=-1)
if return_cfa:
return mask, cfa
return mask
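# Hedged example: selecting red-dominant (left-right oriented) voxels inside
# an ROI, with bounds ordered as (R_min, R_max, G_min, G_max, B_min, B_max):
# threshold = (0.6, 1.0, 0.0, 0.1, 0.0, 0.1)
# cc_mask = segment_from_cfa(tensor_fit, roi, threshold)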
def clean_cc_mask(mask):
"""
Cleans a segmentation of the corpus callosum so no random pixels are included.
Parameters
----------
mask : ndarray
Binary mask of the coarse segmentation.
Returns
-------
new_cc_mask : ndarray
Binary mask of the cleaned segmentation.
"""
from scipy.ndimage.measurements import label
new_cc_mask = np.zeros(mask.shape)
# Flood fill algorithm to find contiguous regions.
labels, numL = label(mask)
volumes = [len(labels[np.where(labels == l_idx+1)]) for l_idx in np.arange(numL)]
biggest_vol = np.arange(numL)[np.where(volumes == np.max(volumes))] + 1
new_cc_mask[np.where(labels == biggest_vol)] = 1
return new_cc_mask
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from StringIO import StringIO
import sys
from appengine_wrappers import webapp
from appengine_wrappers import memcache
from appengine_wrappers import urlfetch
from api_data_source import APIDataSource
from api_list_data_source import APIListDataSource
from appengine_blobstore import AppEngineBlobstore
from in_memory_object_store import InMemoryObjectStore
from appengine_url_fetcher import AppEngineUrlFetcher
from branch_utility import BranchUtility
from example_zipper import ExampleZipper
from compiled_file_system import CompiledFileSystem
import compiled_file_system as compiled_fs
from github_file_system import GithubFileSystem
from intro_data_source import IntroDataSource
from known_issues_data_source import KnownIssuesDataSource
from local_file_system import LocalFileSystem
from memcache_file_system import MemcacheFileSystem
from reference_resolver import ReferenceResolver
from samples_data_source import SamplesDataSource
from server_instance import ServerInstance
from sidenav_data_source import SidenavDataSource
from subversion_file_system import SubversionFileSystem
from template_data_source import TemplateDataSource
from third_party.json_schema_compiler.model import UnixName
import url_constants
# Increment this version to force the server to reload all pages in the first
# cron job that is run.
_VERSION = 1
# The default channel to serve docs for if no channel is specified.
_DEFAULT_CHANNEL = 'stable'
BRANCH_UTILITY_MEMCACHE = InMemoryObjectStore('branch_utility')
BRANCH_UTILITY = BranchUtility(url_constants.OMAHA_PROXY_URL,
AppEngineUrlFetcher(None),
BRANCH_UTILITY_MEMCACHE)
GITHUB_MEMCACHE = InMemoryObjectStore('github')
GITHUB_FILE_SYSTEM = GithubFileSystem(
AppEngineUrlFetcher(url_constants.GITHUB_URL),
GITHUB_MEMCACHE,
AppEngineBlobstore())
GITHUB_COMPILED_FILE_SYSTEM = CompiledFileSystem.Factory(GITHUB_FILE_SYSTEM,
GITHUB_MEMCACHE)
EXTENSIONS_PATH = 'chrome/common/extensions'
DOCS_PATH = 'docs'
API_PATH = 'api'
TEMPLATE_PATH = DOCS_PATH + '/templates'
INTRO_PATH = TEMPLATE_PATH + '/intros'
ARTICLE_PATH = TEMPLATE_PATH + '/articles'
PUBLIC_TEMPLATE_PATH = TEMPLATE_PATH + '/public'
PRIVATE_TEMPLATE_PATH = TEMPLATE_PATH + '/private'
EXAMPLES_PATH = DOCS_PATH + '/examples'
JSON_PATH = TEMPLATE_PATH + '/json'
# Global cache of instances because Handler is recreated for every request.
SERVER_INSTANCES = {}
def _GetURLFromBranch(branch):
if branch == 'trunk':
return url_constants.SVN_TRUNK_URL + '/src'
return url_constants.SVN_BRANCH_URL + '/' + branch + '/src'
def _SplitFilenameUnix(base_dir, files):
return [UnixName(os.path.splitext(f.split('/')[-1])[0]) for f in files]
def _CreateMemcacheFileSystem(branch, branch_memcache):
svn_url = _GetURLFromBranch(branch) + '/' + EXTENSIONS_PATH
stat_fetcher = AppEngineUrlFetcher(
svn_url.replace(url_constants.SVN_URL, url_constants.VIEWVC_URL))
fetcher = AppEngineUrlFetcher(svn_url)
return MemcacheFileSystem(SubversionFileSystem(fetcher, stat_fetcher),
branch_memcache)
_default_branch = BRANCH_UTILITY.GetBranchNumberForChannelName(_DEFAULT_CHANNEL)
APPS_MEMCACHE = InMemoryObjectStore(_default_branch)
APPS_FILE_SYSTEM = _CreateMemcacheFileSystem(_default_branch, APPS_MEMCACHE)
APPS_COMPILED_FILE_SYSTEM = CompiledFileSystem.Factory(
APPS_FILE_SYSTEM,
APPS_MEMCACHE).Create(_SplitFilenameUnix, compiled_fs.APPS_FS)
EXTENSIONS_MEMCACHE = InMemoryObjectStore(_default_branch)
EXTENSIONS_FILE_SYSTEM = _CreateMemcacheFileSystem(_default_branch,
EXTENSIONS_MEMCACHE)
EXTENSIONS_COMPILED_FILE_SYSTEM = CompiledFileSystem.Factory(
EXTENSIONS_FILE_SYSTEM,
EXTENSIONS_MEMCACHE).Create(_SplitFilenameUnix, compiled_fs.EXTENSIONS_FS)
KNOWN_ISSUES_DATA_SOURCE = KnownIssuesDataSource(
InMemoryObjectStore('KnownIssues'),
AppEngineUrlFetcher(None))
def _MakeInstanceKey(branch, number):
return '%s/%s' % (branch, number)
def _GetInstanceForBranch(channel_name, local_path):
branch = BRANCH_UTILITY.GetBranchNumberForChannelName(channel_name)
# The key for the server is a tuple of |channel_name| with |branch|, since
# sometimes stable and beta point to the same branch.
instance_key = _MakeInstanceKey(channel_name, branch)
instance = SERVER_INSTANCES.get(instance_key, None)
if instance is not None:
return instance
branch_memcache = InMemoryObjectStore(branch)
file_system = _CreateMemcacheFileSystem(branch, branch_memcache)
cache_factory = CompiledFileSystem.Factory(file_system, branch_memcache)
api_list_data_source_factory = APIListDataSource.Factory(cache_factory,
file_system,
API_PATH,
PUBLIC_TEMPLATE_PATH)
api_data_source_factory = APIDataSource.Factory(
cache_factory,
API_PATH)
# Give the ReferenceResolver a memcache, to speed up the lookup of
# duplicate $refs.
ref_resolver_factory = ReferenceResolver.Factory(
api_data_source_factory,
api_list_data_source_factory,
branch_memcache)
api_data_source_factory.SetReferenceResolverFactory(ref_resolver_factory)
samples_data_source_factory = SamplesDataSource.Factory(
channel_name,
file_system,
GITHUB_FILE_SYSTEM,
cache_factory,
GITHUB_COMPILED_FILE_SYSTEM,
ref_resolver_factory,
EXAMPLES_PATH)
api_data_source_factory.SetSamplesDataSourceFactory(
samples_data_source_factory)
intro_data_source_factory = IntroDataSource.Factory(
cache_factory,
ref_resolver_factory,
[INTRO_PATH, ARTICLE_PATH])
sidenav_data_source_factory = SidenavDataSource.Factory(cache_factory,
JSON_PATH)
template_data_source_factory = TemplateDataSource.Factory(
channel_name,
api_data_source_factory,
api_list_data_source_factory,
intro_data_source_factory,
samples_data_source_factory,
KNOWN_ISSUES_DATA_SOURCE,
sidenav_data_source_factory,
cache_factory,
ref_resolver_factory,
PUBLIC_TEMPLATE_PATH,
PRIVATE_TEMPLATE_PATH)
example_zipper = ExampleZipper(file_system,
cache_factory,
DOCS_PATH)
instance = ServerInstance(template_data_source_factory,
example_zipper,
cache_factory)
SERVER_INSTANCES[instance_key] = instance
return instance
def _CleanBranches():
keys = [_MakeInstanceKey(branch, number)
for branch, number in BRANCH_UTILITY.GetAllBranchNumbers()]
for key in SERVER_INSTANCES.keys():
if key not in keys:
SERVER_INSTANCES.pop(key)
class _MockResponse(object):
def __init__(self):
self.status = 200
self.out = StringIO()
self.headers = {}
def set_status(self, status):
self.status = status
class _MockRequest(object):
def __init__(self, path):
self.headers = {}
self.path = path
self.url = 'http://localhost' + path
class Handler(webapp.RequestHandler):
def __init__(self, request, response, local_path=EXTENSIONS_PATH):
self._local_path = local_path
super(Handler, self).__init__(request, response)
def _HandleGet(self, path):
channel_name, real_path = BRANCH_UTILITY.SplitChannelNameFromPath(path)
# TODO: Detect that these are directories and serve index.html out of them.
if real_path.strip('/') == 'apps':
real_path = 'apps/index.html'
if real_path.strip('/') == 'extensions':
real_path = 'extensions/index.html'
if (not real_path.startswith('extensions/') and
not real_path.startswith('apps/') and
not real_path.startswith('static/')):
if self._RedirectBadPaths(real_path, channel_name):
return
_CleanBranches()
# Yes, do this after it's passed to RedirectBadPaths. That needs to know
# whether or not a branch was specified.
if channel_name is None:
channel_name = _DEFAULT_CHANNEL
_GetInstanceForBranch(channel_name, self._local_path).Get(real_path,
self.request,
self.response)
def _Render(self, files, channel):
original_response = self.response
for f in files:
if f.endswith('404.html'):
continue
path = channel + f.split(PUBLIC_TEMPLATE_PATH)[-1]
self.request = _MockRequest(path)
self.response = _MockResponse()
try:
self._HandleGet(path)
except Exception as e:
logging.error('Error rendering %s: %s' % (path, str(e)))
self.response = original_response
class _ValueHolder(object):
"""Class to allow a value to be changed within a lambda.
"""
def __init__(self, starting_value):
self._value = starting_value
def Set(self, value):
self._value = value
def Get(self):
return self._value
def _HandleCron(self, path):
# Cache population strategy:
#
# We could list all files in PUBLIC_TEMPLATE_PATH then render them. However,
# this would be inefficient in the common case where files haven't changed
# since the last cron.
#
# Instead, let the CompiledFileSystem give us clues when to re-render: we
# use the CFS to check whether the templates, examples, or API folders have
# been changed. If there has been a change, the compilation function will
# be called. The same is then done separately with the apps samples page,
# since it pulls its data from Github.
channel = path.split('/')[-1]
branch = BRANCH_UTILITY.GetBranchNumberForChannelName(channel)
logging.info('Running cron job for %s.' % branch)
branch_memcache = InMemoryObjectStore(branch)
file_system = _CreateMemcacheFileSystem(branch, branch_memcache)
factory = CompiledFileSystem.Factory(file_system, branch_memcache)
needs_render = self._ValueHolder(False)
invalidation_cache = factory.Create(lambda _, __: needs_render.Set(True),
compiled_fs.CRON_INVALIDATION,
version=_VERSION)
for path in [TEMPLATE_PATH, EXAMPLES_PATH, API_PATH]:
invalidation_cache.GetFromFile(path + '/')
if needs_render.Get():
file_listing_cache = factory.Create(lambda _, x: x,
compiled_fs.CRON_FILE_LISTING)
self._Render(file_listing_cache.GetFromFileListing(PUBLIC_TEMPLATE_PATH),
channel)
else:
# If |needs_render| was True, this page was already rendered, and we don't
# need to render again.
github_invalidation_cache = GITHUB_COMPILED_FILE_SYSTEM.Create(
lambda _, __: needs_render.Set(True),
compiled_fs.CRON_GITHUB_INVALIDATION)
if needs_render.Get():
self._Render([PUBLIC_TEMPLATE_PATH + '/apps/samples.html'], channel)
# It's good to keep the extensions samples page fresh, because if it
# gets dropped from the cache ALL the extensions pages time out.
self._Render([PUBLIC_TEMPLATE_PATH + '/extensions/samples.html'], channel)
self.response.out.write('Success')
def _RedirectSpecialCases(self, path):
google_dev_url = 'http://developer.google.com/chrome'
if path == '/' or path == '/index.html':
self.redirect(google_dev_url)
return True
if path == '/apps.html':
self.redirect('/apps/about_apps.html')
return True
return False
def _RedirectBadPaths(self, path, channel_name):
if '/' in path or path == '404.html':
return False
apps_templates = APPS_COMPILED_FILE_SYSTEM.GetFromFileListing(
PUBLIC_TEMPLATE_PATH + '/apps')
extensions_templates = EXTENSIONS_COMPILED_FILE_SYSTEM.GetFromFileListing(
PUBLIC_TEMPLATE_PATH + '/extensions')
unix_path = UnixName(os.path.splitext(path)[0])
if channel_name is None:
apps_path = '/apps/%s' % path
extensions_path = '/extensions/%s' % path
else:
apps_path = '/%s/apps/%s' % (channel_name, path)
extensions_path = '/%s/extensions/%s' % (channel_name, path)
if unix_path in extensions_templates:
self.redirect(extensions_path)
elif unix_path in apps_templates:
self.redirect(apps_path)
else:
self.redirect(extensions_path)
return True
def _RedirectFromCodeDotGoogleDotCom(self, path):
if (not self.request.url.startswith(('http://code.google.com',
'https://code.google.com'))):
return False
newUrl = 'http://developer.chrome.com/'
# switch to https if necessary
if (self.request.url.startswith('https')):
newUrl = newUrl.replace('http', 'https', 1)
path = path.split('/')
if len(path) > 0 and path[0] == 'chrome':
path.pop(0)
for channel in BRANCH_UTILITY.GetAllBranchNames():
if channel in path:
position = path.index(channel)
path.pop(position)
path.insert(0, channel)
newUrl += '/'.join(path)
self.redirect(newUrl)
return True
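# Illustrative sketch of the rewrite above (hypothetical URL): a request for
# http://code.google.com/chrome/extensions/dev/tabs.html drops the leading
# 'chrome' segment, moves the channel to the front, and redirects to
# http://developer.chrome.com/dev/extensions/tabs.html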
def get(self):
path = self.request.path
if self._RedirectSpecialCases(path):
return
if path.startswith('/cron'):
self._HandleCron(path)
return
# Redirect paths like "directory" to "directory/". This is so relative
# file paths will know to treat this as a directory.
if os.path.splitext(path)[1] == '' and path[-1] != '/':
self.redirect(path + '/')
return
path = path.strip('/')
if not self._RedirectFromCodeDotGoogleDotCom(path):
self._HandleGet(path)
| |
import hashlib
import os
import re
import subprocess
import time
import uuid
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.staticfiles.finders import find as find_static_path
from django.utils.encoding import force_bytes
import six
from olympia.lib.jingo_minify_helpers import ensure_path_exists
def run_command(command):
"""Run a command and correctly poll the output and write that to stdout"""
process = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, universal_newlines=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
return process.poll()
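# Usage sketch: run_command streams the shell output line by line and returns
# the process exit code, e.g.
# status = run_command('cat a.css b.css > bundle.css.tmp')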
class Command(BaseCommand):
help = ('Compresses css and js assets defined in settings.MINIFY_BUNDLES')
# This command must not do any system checks because Django runs db-field
# related checks since 1.10 which require a working MySQL connection.
# We don't have that during our docker builds and since `compress_assets`
# is being used while building our docker images we have to disable them.
requires_system_checks = False
checked_hash = {}
bundle_hashes = {}
missing_files = 0
minify_skipped = 0
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument(
'--force', action='store_true',
help='Ignores modified/created dates and forces compression.')
def generate_build_id(self):
return uuid.uuid4().hex[:8]
def update_hashes(self):
# Adds a time-based hash onto the build id.
self.build_id = '%s-%s' % (
self.generate_build_id(), hex(int(time.time()))[2:])
build_id_file = os.path.realpath(
os.path.join(settings.ROOT, 'build.py'))
with open(build_id_file, 'w') as f:
f.write('BUILD_ID_CSS = "%s"\n' % self.build_id)
f.write('BUILD_ID_JS = "%s"\n' % self.build_id)
f.write('BUILD_ID_IMG = "%s"\n' % self.build_id)
f.write('BUNDLE_HASHES = %s\n' % self.bundle_hashes)
def handle(self, **options):
self.force_compress = options.get('force', False)
# This will loop through every bundle, and do the following:
# - Concat all files into one
# - Cache bust all images in CSS files
# - Minify the concatted files
for ftype, bundle in six.iteritems(settings.MINIFY_BUNDLES):
for name, files in six.iteritems(bundle):
# Set the paths to the files.
concatted_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-all.%s' % (name, ftype,))
compressed_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-min.%s' % (name, ftype,))
ensure_path_exists(concatted_file)
ensure_path_exists(compressed_file)
files_all = []
for fn in files:
processed = self._preprocess_file(fn)
# If the file can't be processed, we skip it.
if processed is not None:
files_all.append(processed)
# Concat all the files.
tmp_concatted = '%s.tmp' % concatted_file
if len(files_all) == 0:
raise CommandError(
'No input files specified in '
'MINIFY_BUNDLES["%s"]["%s"] in settings.py!' %
(ftype, name)
)
run_command('cat {files} > {tmp}'.format(
files=' '.join(files_all),
tmp=tmp_concatted
))
# Cache bust individual images in the CSS.
if ftype == 'css':
bundle_hash = self._cachebust(tmp_concatted, name)
self.bundle_hashes['%s:%s' % (ftype, name)] = bundle_hash
# Compresses the concatenations.
is_changed = self._is_changed(concatted_file)
self._clean_tmp(concatted_file)
if is_changed or not os.path.isfile(compressed_file):
self._minify(ftype, concatted_file, compressed_file)
else:
print(
'File unchanged, skipping minification of %s' % (
concatted_file))
self.minify_skipped += 1
# Write out the hashes
self.update_hashes()
if self.minify_skipped:
print(
'Unchanged files skipped for minification: %s' % (
self.minify_skipped))
def _preprocess_file(self, filename):
"""Preprocess files and return new filenames."""
css_bin = filename.endswith('.less') and settings.LESS_BIN
source = find_static_path(filename)
target = source
if css_bin:
target = '%s.css' % source
run_command('{lessc} {source} {target}'.format(
lessc=css_bin,
source=str(source),
target=str(target)))
return target
def _is_changed(self, concatted_file):
"""Check if the file has been changed."""
if self.force_compress:
return True
tmp_concatted = '%s.tmp' % concatted_file
file_exists = (
os.path.exists(concatted_file) and
os.path.getsize(concatted_file) == os.path.getsize(tmp_concatted))
if file_exists:
orig_hash = self._file_hash(concatted_file)
temp_hash = self._file_hash(tmp_concatted)
return orig_hash != temp_hash
return True # Different filesize, so it was definitely changed
def _clean_tmp(self, concatted_file):
"""Replace the old file with the temp file."""
tmp_concatted = '%s.tmp' % concatted_file
if os.path.exists(concatted_file):
os.remove(concatted_file)
os.rename(tmp_concatted, concatted_file)
def _cachebust(self, css_file, bundle_name):
"""Cache bust images. Return a new bundle hash."""
self.stdout.write(
'Cache busting images in %s\n' % re.sub('.tmp$', '', css_file))
if not os.path.exists(css_file):
return
css_content = ''
with open(css_file, 'r') as css_in:
css_content = css_in.read()
def _parse(url):
return self._cachebust_regex(url, css_file)
css_parsed = re.sub(r'url\(([^)]*?)\)', _parse, css_content)
with open(css_file, 'w') as css_out:
css_out.write(css_parsed)
# Return bundle hash for cachebusting JS/CSS files.
file_hash = hashlib.md5(force_bytes(css_parsed)).hexdigest()[0:7]
self.checked_hash[css_file] = file_hash
if self.missing_files:
self.stdout.write(
' - Error finding %s images\n' % (self.missing_files,))
self.missing_files = 0
return file_hash
def _minify(self, ftype, file_in, file_out):
"""Run the proper minifier on the file."""
if ftype == 'js' and hasattr(settings, 'UGLIFY_BIN'):
opts = {'method': 'UglifyJS', 'bin': settings.UGLIFY_BIN}
run_command('{uglify} -v -o {target} {source} -m'.format(
uglify=opts['bin'],
target=file_out,
source=file_in))
elif ftype == 'css' and hasattr(settings, 'CLEANCSS_BIN'):
opts = {'method': 'clean-css', 'bin': settings.CLEANCSS_BIN}
run_command('{cleancss} -o {target} {source}'.format(
cleancss=opts['bin'],
target=file_out,
source=file_in))
else:
# No minifier binary configured for this file type; skip minification
# instead of failing on the undefined `opts` below.
return
self.stdout.write(
'Minifying %s (using %s)\n' % (file_in, opts['method']))
def _file_hash(self, url):
"""Open the file and get a hash of it."""
if url in self.checked_hash:
return self.checked_hash[url]
file_hash = ''
try:
with open(url, 'rb') as f:
file_hash = hashlib.md5(f.read()).hexdigest()[0:7]
except IOError:
self.missing_files += 1
self.stdout.write(' - Could not find file %s\n' % url)
self.checked_hash[url] = file_hash
return file_hash
def _cachebust_regex(self, img, parent):
"""Run over the regex; img is the structural regex object."""
url = img.group(1).strip('"\'')
if url.startswith('data:') or url.startswith('http'):
return 'url(%s)' % url
url = url.split('?')[0]
full_url = os.path.join(
settings.ROOT, os.path.dirname(parent), url)
return 'url(%s?%s)' % (url, self._file_hash(full_url))
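# Illustrative sketch of the cache-busting rewrite (hypothetical paths):
# url(../img/logo.png)            -> url(../img/logo.png?1a2b3c4)
# url(data:image/png;base64,...)  -> unchanged (data: and http(s) URLs are skipped)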
| |
"""Virtual Dwelling Generator - Generates a virtual dwelling stock
"""
import numpy as np
from energy_demand.basic import lookup_tables
from energy_demand.technologies import diffusion_technologies
class Dwelling(object):
"""Dwelling or aggregated group of dwellings
Arguments
----------
curr_yr : int
Current year of simulation
coordinates : float
coordinates
dwtype : int
Dwelling type id. Description can be found in `daytype_lu`
house_id : int
Unique ID of dwelling or dwelling group
age : int
Age of dwelling in years (year the building was built)
pop : float
Dwelling population
floorarea : float
Floor area of dwelling
hlc : float
Heat loss coefficient
hdd : float
Heating degree days
Note
-----
- Depending on the service or residential model, not all attributes
are filled (they are then initialised as None or zero)
- For every dwelling, the scenario drivers are calculated for each enduse
"""
def __init__(
self,
curr_yr,
coordinates,
floorarea,
enduses,
driver_assumptions,
population=None,
age=None,
dwtype=None,
sector=None,
gva=None
):
"""Constructor of Dwelling Class
"""
self.curr_yr = curr_yr
self.enduses = enduses
self.longitude = coordinates['longitude']
self.latitude = coordinates['latitude']
self.dwtype = dwtype
self.age = age
self.population = population
self.floorarea = floorarea
self.sector = sector
self.gva = gva
self.hlc = get_hlc(dwtype, age) # Calculate heat loss coefficient with age and dwelling type if possible
self.calc_scenario_driver(driver_assumptions) # Generate attribute for each enduse containing calculated scenario driver value
assert floorarea != 0
def calc_scenario_driver(self, driver_assumptions):
"""Sum scenario drivers per enduse and add as attribute
Arguments
---------
driver_assumptions : dict
Scenario drivers for every enduse
"""
for enduse in self.enduses:
scenario_driver_value = 1 # multiplicative identity (must not start at zero)
# If there are no scenario drivers for the enduse, keep the default value 1
if enduse not in driver_assumptions:
Dwelling.__setattr__(self, enduse, scenario_driver_value)
else:
scenario_drivers = driver_assumptions[enduse]
# Iterate scenario drivers and multiply the corresponding attribute values
try:
for scenario_driver in scenario_drivers:
# If scenario driver is set to zero, do not use this driver
driver_value = getattr(self, scenario_driver)
# Ignore zero driver values
if driver_value == 0:
pass
else:
scenario_driver_value *= driver_value
except TypeError:
#logging.info("Scenario driver `%s` calculation not possible", scenario_driver)
pass
Dwelling.add_new_attribute(
self,
enduse,
scenario_driver_value)
assert scenario_driver_value != 0
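# Illustrative sketch (hypothetical assumptions): with
# driver_assumptions = {'heating': ['floorarea', 'hlc']}, a dwelling with
# floorarea=100 and hlc=0.5 ends up with dwelling.heating == 100 * 0.5 == 50.0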
def add_new_attribute(self, name, value):
"""Add a new self asttribute to DwellingStock
"""
setattr(self, name, value)
class DwellingStock(object):
"""Class of the building stock in a region
"""
def __init__(self, dwellings, enduses):
"""Returns a new building stock object for every `region`.
Arguments
----------
dwellings : list
List containing all dwelling objects
enduses : list
Enduses
"""
self.dwellings = dwellings
self.population = get_tot_pop(dwellings) # Calculate pop of dwelling stock
# Calculate enduse specific scenario driver
for enduse in enduses:
enduse_scenario_driver = self.get_scenario_driver(enduse)
DwellingStock.add_new_attribute(
self, enduse, enduse_scenario_driver)
def get_scenario_driver(self, enduse):
"""Sum all scenario driver for an enduse
Arguments
----------
enduse: string
Enduse to calculate scenario drivers
"""
sum_driver = 0
for dwelling in self.dwellings:
sum_driver += getattr(dwelling, enduse)
return sum_driver
def add_new_attribute(self, name, value):
"""Add a new self asttribute to DwellingStock
"""
setattr(self, name, value)
def get_tot_pop(dwellings):
"""Get total population of all dwellings
Return
------
tot_pop : float or None
If population is not provided, return `None`,
otherwise summed population of all dwellings
"""
tot_pop = 0
for dwelling in dwellings:
if dwelling.population is None:
return None
else:
tot_pop += dwelling.population
return tot_pop
def get_floorare_pp(
floorarea,
reg_pop_by,
base_yr,
sim_period,
assump_diff_floorarea_pp
):
"""Calculate future floor area per person depending
on assumptions on final change and base year data
Arguments
----------
floorarea : dict
Floor area base year for all regions
reg_pop_by : dict
Population of base year for all regions
base_yr : int
Base year
sim_period: list
Simulation period
assump_diff_floorarea_pp : float
Assumption of change in floor area up to end of simulation
Returns
-------
floor_area_pp : dict
Contains all values for floor area per person for every year
Note
----
- Linear change of floor area per person is assumed over time
"""
floor_area_pp = {}
if reg_pop_by == 0:
floor_area_pp[base_yr] = 0
else:
# Floor area per person of base year
floor_area_pp[base_yr] = floorarea / reg_pop_by
for curr_yr in sim_period:
if curr_yr == base_yr:
pass
else:
# Floor area of current year = floor area of base year * change
floor_area_pp[curr_yr] = floor_area_pp[base_yr] * (1 + assump_diff_floorarea_pp[curr_yr])
return floor_area_pp
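# Worked sketch (hypothetical numbers): floorarea=1000 m2 and reg_pop_by=10
# give floor_area_pp[base_yr] == 100.0; with
# assump_diff_floorarea_pp[curr_yr] == 0.1, the current-year value becomes
# 100.0 * (1 + 0.1) == 110.0 m2 per person.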
def get_dwtype_floor_area(
dwtype_floorarea_by,
dwtype_floorarea_future,
base_yr,
sim_period
):
"""Calculates the floor area per dwelling type for every year
Arguments
----------
dwtype_floorarea_by : dict
Floor area per dwelling type in the base year
dwtype_floorarea_future : dict
Floor area per dwelling type in the end year
base_yr : int
Base year
sim_period : list
Simulation period
Returns
-------
dwtype_floor_area : dict
Contains the floor area change per dwelling type
Note
-----
- A linear change over time is assumed
Example
-------
out = {year: {'dwtype': 0.3}}
"""
dwtype_floor_area = {}
dwtype_floor_area[base_yr] = dwtype_floorarea_by # Base year
# Simulation years
for curr_yr in sim_period:
if curr_yr == base_yr:
pass
else:
y_distr = {}
for dwtype in dwtype_floorarea_by:
val_by = dwtype_floorarea_by[dwtype]
val_future = dwtype_floorarea_future[dwtype]
yr_until_changed = dwtype_floorarea_future['yr_until_changed']
val_cy = diffusion_technologies.linear_diff(
base_yr,
curr_yr,
val_by,
val_future,
yr_until_changed)
y_distr[dwtype] = val_cy
dwtype_floor_area[curr_yr] = y_distr
return dwtype_floor_area
def get_dwtype_distr(
dwtype_distr_by,
dwtype_distr_fy,
base_yr,
sim_period
):
"""Calculates the annual distribution of dwelling types
based on assumption of base and end year distribution
Arguments
----------
dwtype_distr_by : dict
Distribution of dwelling types in the base year
dwtype_distr_fy : dict
Distribution of dwelling types in the end year
base_yr : int
Base year
sim_period : list
Simulation period
Returns
-------
dwtype_distr : dict
Contains all dwelling type distribution for every year
Note
-----
- A linear change over time is assumed
Example
-------
out = {year: {'dwtype': 0.3}}
"""
dwtype_distr = {}
dwtype_distr[base_yr] = dwtype_distr_by # Base year
for curr_yr in sim_period:
if curr_yr == base_yr:
pass
else:
y_distr = {}
for dwtype in dwtype_distr_by:
val_by = dwtype_distr_by[dwtype]
val_future = dwtype_distr_fy[dwtype]
yr_until_changed = dwtype_distr_fy['yr_until_changed']
val_cy = diffusion_technologies.linear_diff(
base_yr,
curr_yr,
val_by,
val_future,
yr_until_changed)
y_distr[dwtype] = val_cy
dwtype_distr[curr_yr] = y_distr
# Test if distribution is 100%
for year in dwtype_distr:
np.testing.assert_almost_equal(
sum(dwtype_distr[year].values()),
1.0,
decimal=5,
err_msg='The distribution of dwelling types went wrong', verbose=True)
return dwtype_distr
def ss_dw_stock(
region,
enduses,
sectors,
scenario_data,
reg_coord,
assumptions,
curr_yr,
base_yr,
virtual_building_stock_criteria
):
"""Create dwelling stock for service sector
Arguments
----------
region : str
Region name
enduses : list
Enduses
sectors : list
Service sectors
scenario_data : dict
Scenario data
Returns
-------
dwelling_stock : list
List with objects
Note
----
- Iterate years and change floor area depending on the assumption of
linear change up to the end year (ey)
"""
dw_stock = []
for sector in sectors:
pop_by = scenario_data['population'][base_yr][region]
pop_cy = scenario_data['population'][curr_yr][region]
# Floor area
if virtual_building_stock_criteria:
# If virtual building stock, change floor area proportionally to population
lin_diff_factor = pop_cy / pop_by
floorarea_sector_by = scenario_data['floor_area']['ss_floorarea'][base_yr][region][sector]
floorarea_sector_cy = floorarea_sector_by * lin_diff_factor
else:
try:
floorarea_sector_by = scenario_data['floor_area']['ss_floorarea'][base_yr][region][sector]
except IndexError:
floorarea_sector_by = scenario_data['floor_area']['ss_floorarea'][base_yr][region]
try:
floorarea_sector_cy = scenario_data['floor_area']['ss_floorarea'][curr_yr][region][sector]
except IndexError:
floorarea_sector_cy = scenario_data['floor_area']['ss_floorarea'][curr_yr][region]
# GVA data
try:
gva_sector_lu = lookup_tables.economic_sectors_regional_MISTRAL()
gva_nr = gva_sector_lu[sector]['match_int']
gva_dw_data = scenario_data['gva_industry'][curr_yr][region][gva_nr]
except KeyError:
# If no sector-specific GVA is available, use overall GVA per head
gva_dw_data = scenario_data['gva_per_head'][curr_yr][region]
# Create dwelling objects
dw_stock.append(
Dwelling(
curr_yr=curr_yr,
coordinates=reg_coord[region],
population=pop_cy,
floorarea=floorarea_sector_cy,
enduses=enduses,
driver_assumptions=assumptions.scenario_drivers,
sector=sector,
gva=gva_dw_data))
dwelling_stock = DwellingStock(dw_stock, enduses)
return dwelling_stock
def rs_dw_stock(
region,
assumptions,
scenario_data,
sim_yrs,
dwelling_types,
enduses,
reg_coord,
driver_assumptions,
curr_yr,
base_yr,
virtual_building_stock_criteria
):
"""Creates a virtual building stock for every year and region
Arguments
----------
region : dict
Region name
curr_yr : int
Current year
Returns
-------
dwelling_stock : dict
Building stock with
reg_dw_stock_by : Base year building stock
reg_building_stock_yr : Building stock for every simulation year
Notes
-----
- The assumption about internal temperature change is
used as for each dwelling the hdd are calculated
based on wheater data and assumption on t_base
- Doesn't take floor area as an input but calculates floor area
based on floor area pp parameter. However, floor area
could be read in by:
1.) Inserting `tot_floorarea_cy = data['rs_floorarea'][curr_yr]`
2.) Replacing 'dwtype_floor_area', 'dwtype_distr' and 'data_floorarea_pp'
with more specific information from real building stock model
"""
# Get changes in absolute floor area per dwelling type over time
dwtype_floor_area = get_dwtype_floor_area(
assumptions.dwtype_floorarea_by,
assumptions.dwtype_floorarea_fy,
base_yr,
sim_yrs)
# Get distribution of dwelling types of all simulation years
dwtype_distr = get_dwtype_distr(
assumptions.dwtype_distr_by,
assumptions.dwtype_distr_fy,
base_yr,
sim_yrs)
# Get fraction of total floorarea for every dwelling type
floorarea_p = get_floorarea_dwtype_p(
dwelling_types,
dwtype_floor_area,
dwtype_distr)
population_by = scenario_data['population'][base_yr][region]
population_cy = scenario_data['population'][curr_yr][region]
floorarea_by = scenario_data['floor_area']['rs_floorarea'][base_yr][region]
if virtual_building_stock_criteria:
# Get floor area per person for every simulation year
data_floorarea_pp = get_floorare_pp(
floorarea_by,
scenario_data['population'][base_yr][region],
base_yr,
sim_yrs,
assumptions.non_regional_vars['assump_diff_floorarea_pp'])
# Calculate new necessary floor area per person of current year
floorarea_pp_cy = data_floorarea_pp[curr_yr]
# Calculate new floor area
tot_floorarea_cy = floorarea_pp_cy * population_cy
else:
tot_floorarea_cy = scenario_data['floor_area']['rs_floorarea'][curr_yr][region]
# Get floor area per person for every simulation year
data_floorarea_pp = get_floorare_pp(
tot_floorarea_cy,
scenario_data['population'][base_yr][region],
base_yr,
sim_yrs,
assumptions.non_regional_vars['assump_diff_floorarea_pp'])
# Calculate new necessary floor area per person of current year
floorarea_pp_cy = data_floorarea_pp[curr_yr]
if population_by != 0:
floorarea_pp_by = floorarea_by / population_by # [m2 / person]
else:
floorarea_pp_by = 0
new_floorarea_cy = tot_floorarea_cy - floorarea_by
    # Only model changes for years after the base year
if curr_yr == base_yr:
dw_stock_base = generate_dw_existing(
driver_assumptions=driver_assumptions,
enduses=enduses,
reg_coord=reg_coord,
region=region,
curr_yr=curr_yr,
dw_lu=dwelling_types,
floorarea_p=floorarea_p[base_yr],
floorarea_by=floorarea_by,
dwtype_age_distr_by=assumptions.dwtype_age_distr[base_yr],
floorarea_pp=floorarea_pp_by,
gva_dw_data=scenario_data['gva_per_head'][curr_yr][region])
# Create regional base year building stock
dwelling_stock = DwellingStock(
dw_stock_base,
enduses)
else:
"""The number of people in the base year dwelling stock may change.
If the floor area pp decreased with constant pop, the same number of
people will be living in too large houses. It is not assumed
that area is demolished.
"""
if virtual_building_stock_criteria:
floor_area_cy = floorarea_pp_cy * population_by
else:
floor_area_cy = scenario_data['floor_area']['rs_floorarea'][curr_yr][region]
if floor_area_cy > floorarea_by:
demolished_area = 0
else:
demolished_area = floorarea_by - floor_area_cy
remaining_area = floorarea_by - demolished_area
# Generate stock for existing area
dw_stock_cy = generate_dw_existing(
driver_assumptions=driver_assumptions,
enduses=enduses,
reg_coord=reg_coord,
region=region,
curr_yr=curr_yr,
dw_lu=dwelling_types,
floorarea_p=floorarea_p[curr_yr],
floorarea_by=remaining_area,
dwtype_age_distr_by=assumptions.dwtype_age_distr[base_yr],
floorarea_pp=floorarea_pp_cy,
gva_dw_data=scenario_data['gva_per_head'][curr_yr][region])
        # Append buildings of new floor area to the dwelling stock
if new_floorarea_cy > 0:
dw_stock_cy = generate_dw_new(
driver_assumptions=driver_assumptions,
reg_coord=reg_coord,
enduses=enduses,
dwtypes=dwelling_types,
region=region,
curr_yr=curr_yr,
floorarea_p_by=floorarea_p[curr_yr],
floorarea_pp_cy=floorarea_pp_cy,
dw_stock_new_dw=dw_stock_cy,
new_floorarea_cy=new_floorarea_cy,
gva_dw_data=scenario_data['gva_per_head'][curr_yr][region])
else:
pass # no new floor area is added
# Generate region and save it in dictionary (Add old and new buildings to stock)
dwelling_stock = DwellingStock(
dw_stock_cy,
enduses)
return dwelling_stock
def get_floorarea_dwtype_p(dw_lookup, dw_floorarea, dwtype_distr):
"""Calculates the percentage of the total floor area
belonging to each dwelling type. Depending on average
floor area per dwelling type and the dwelling type
distribution, the percentages are calculated
for ever simulation year
Arguments
----------
dw_lookup : dw_lookup
Dwelling types
dw_floorarea : dict
Floor area per type and year
dwtype_distr : dict
Distribution of dwelling type over the simulation period
Returns
-------
dw_floorarea_p : dict
Contains the percentage of the total floor
area for each dwtype for every simulation year (must be 1.0 in tot)
Notes
-----
This calculation is necessary as the share of dwelling types may differ depending the year
"""
dw_floorarea_p = {}
for curr_yr, type_distr_p in dwtype_distr.items():
area_dw_type = {}
# Calculate share of dwelling area based on absolute size and distribution
for dw_type in dw_lookup.values():
            # Get absolute size of dw_type
area_dw_type[dw_type] = type_distr_p[dw_type] * dw_floorarea[curr_yr][dw_type]
# Convert absolute values into percentages
tot_area = sum(area_dw_type.values())
for dw_type, dw_type_area in area_dw_type.items():
area_dw_type[dw_type] = dw_type_area / tot_area
dw_floorarea_p[curr_yr] = area_dw_type
return dw_floorarea_p
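# Example for get_floorarea_dwtype_p (hypothetical numbers): with a 50/50
# dwelling type distribution and average floor areas of 100 m2 ('detached')
# and 50 m2 ('flat'), the absolute areas are 50 and 25, so the resulting
# floor area shares for that year are 50/75 = 0.667 and 25/75 = 0.333.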
def generate_dw_existing(
driver_assumptions,
enduses,
reg_coord,
region,
curr_yr,
dw_lu,
floorarea_p,
floorarea_by,
dwtype_age_distr_by,
floorarea_pp,
gva_dw_data
):
"""Generates dwellings according to age, floor area
and distribution assumption
Arguments
----------
assumptions : dict
Assumptions
enduses : list
Enduses
region : dict
Region name
curr_yr : int
Base year
dw_lu : dict
Dwelling type look-up
floorarea_p : dict
Fraction of floor area per dwelling type
floorarea_by : dict
Floor area of base year
dwtype_age_distr_by : dict
Age distribution of dwelling
floorarea_pp : dict
Floor area per person
tot_floorarea_cy : float
Floor are in current year
pop_by : dict
Population in base year
Return
------
dw_stock_by : list
Dwelling stocks in a list
"""
dw_stock_by, control_pop, control_floorarea = [], 0, 0
for dwtype_name in dw_lu.values():
# Calculate floor area per dwelling type
dwtype_floorarea = floorarea_p[dwtype_name] * floorarea_by
# Distribute according to age
for dwtype_age, distribution in dwtype_age_distr_by.items():
# Floor area of dwelling_class_age (distribute proportionally floor area)
dwtype_age_class_floorarea = dwtype_floorarea * distribution
            # Floor area is divided by the floor area per person to calculate the population
if floorarea_pp != 0:
pop_dwtype_age_class = dwtype_age_class_floorarea / floorarea_pp
else:
pop_dwtype_age_class = 0
# create building object
dw_stock_by.append(
Dwelling(
curr_yr=curr_yr,
coordinates=reg_coord[region],
floorarea=dwtype_age_class_floorarea,
enduses=enduses,
driver_assumptions=driver_assumptions,
population=pop_dwtype_age_class,
age=float(dwtype_age),
dwtype=dwtype_name,
gva=gva_dw_data))
control_floorarea += dwtype_age_class_floorarea
control_pop += pop_dwtype_age_class
return dw_stock_by
def generate_dw_new(
driver_assumptions,
reg_coord,
enduses,
dwtypes,
region,
curr_yr,
floorarea_p_by,
floorarea_pp_cy,
dw_stock_new_dw,
new_floorarea_cy,
gva_dw_data
):
"""Generate dwelling objects for all new dwellings
All new dwellings are appended to the existing
building stock of the region
Arguments
----------
data : dict
Data container
region : str
Region
curr_yr : int
Current year
floorarea_p_by : dict
Fraction of floorarea in base year
floorarea_pp_cy : dict
Floor area per person in current year
dw_stock_new_dw : dict
New dwellings
new_floorarea_cy : dict
New floorarea in current year
Returns
-------
dw_stock_new_dw : list
List with appended dwellings
Notes
-----
The floor area id divided proprtionally depending on dwelling type
Then the population is distributed
builindg is creatd
"""
control_pop, control_floorarea = 0, 0
for dwtype_name in dwtypes.values():
        # Calculate new floor area per dwelling type
dw_type_new_floorarea = floorarea_p_by[dwtype_name] * new_floorarea_cy
# Calculate pop (Floor area is divided by floorarea_per_person)
pop_dwtype_new_build_cy = dw_type_new_floorarea / floorarea_pp_cy
# create building object
dw_stock_new_dw.append(
Dwelling(
curr_yr=curr_yr,
coordinates=reg_coord[region],
floorarea=dw_type_new_floorarea,
enduses=enduses,
driver_assumptions=driver_assumptions,
population=pop_dwtype_new_build_cy,
age=curr_yr,
dwtype=dwtype_name,
gva=gva_dw_data))
control_floorarea += dw_type_new_floorarea
control_pop += pop_dwtype_new_build_cy
# Test if floor area and pop are the same
#assert round(new_floorarea_cy, 3) == round(control_floorarea, 3)
#assert round(new_floorarea_cy/floorarea_pp_cy, 2) == round(control_pop, 2)
return dw_stock_new_dw
def get_hlc(dw_type, age):
"""Calculates the linearly derived heat loss coeeficients
depending on age and dwelling type
Arguments
----------
dw_type : int
Dwelling type
age : int
Age of dwelling
Returns
-------
hls : Heat loss coefficient [W/m2 * K]
Notes
-----
Source: Linear trends derived from Table 3.17 ECUK Tables
https://www.gov.uk/government/collections/energy-consumption-in-the-uk
"""
if dw_type is None or age is None:
#logging.debug("The HLC could not be calculated of a dwelling age: {} dw_type: {}".format(dw_type, age))
return None
else:
# Dict with linear fits for all different dwelling types {dw_type: [slope, constant]}
linear_fits_hlc = {
'detached': [-0.0223, 48.292],
'semi_detached': [-0.0223, 48.251],
'terraced': [-0.0223, 48.063],
'flat': [-0.0223, 47.02],
'bungalow': [-0.0223, 48.261]}
# Get linearly fitted value
hlc = linear_fits_hlc[dw_type][0] * age + linear_fits_hlc[dw_type][1]
return hlc
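# Example usage of get_hlc (numbers follow the linear fits defined above):
# a 30 year old detached dwelling has an HLC of
# -0.0223 * 30 + 48.292 = 47.623 [W / (m2 * K)].
#
# >>> round(get_hlc('detached', 30), 3)
# 47.623
# >>> get_hlc(None, 30) is None
# True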
"""This module provides utility classes and functions to load spike sorting
data sets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import os.path
import re
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
from loader import (Loader, default_group_info, reorder, renumber_clusters,
default_cluster_info)
from tools import (load_text, normalize,
load_binary, load_pickle, save_text, get_array,
first_row, load_binary_memmap)
from selection import (select, select_pairs, get_spikes_in_clusters,
get_some_spikes_in_clusters, get_some_spikes, get_indices)
from kwiklib.utils.logger import (register, unregister, FileLogger,
debug, info, warn)
from kwiklib.utils.colors import COLORS_COUNT, generate_colors
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def find_index(filename):
"""Search the file index of the filename, if any, or return None."""
r = re.search(r"([^\n]+)\.([^\.]+)\.([0-9]+)$", filename)
if r:
return int(r.group(3))
# If the filename has no index in it, and if the file does not actually
# exist, return the index of an existing filename.
# if not os.path.exists(filename):
return find_index(find_filename(filename, 'fet'))
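# Example: for Klusters-style triplet filenames, the trailing number is the
# file index, e.g. find_index('session.fet.4') returns 4. For a filename
# without an index, the index of the corresponding existing .fet file is
# returned instead.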
def find_indices(filename, dir='', files=[]):
"""Return the list of all indices for the given filename, present
in the filename's directory."""
# get the extension-free filename, extension, and file index
# template: FILENAME.xxx.0 => FILENAME (can contain points), 0 (index)
# try different patterns
patterns = [r"([^\n]+)\.([^\.]+)\.([0-9]+)$",
r"([^\n]+)\.([^\.]+)$"]
for pattern in patterns:
r = re.search(pattern, filename)
if r:
filename = r.group(1)
# extension = r.group(2)
break
# get the full path
dir = dir.strip()
if not dir:
dir = os.path.dirname(os.path.realpath(filename))
filename = os.path.basename(filename)
# try obtaining the list of all files in the directory
if not files:
try:
files = os.listdir(dir)
except (OSError, IOError):
raise IOError("Error when accessing '{0:s}'.".format(dir))
# If the requested filename does not have a file index, then get the
# smallest available fileindex in the files list.
fileindex_set = set()
for file in files:
r = re.search(r"([^\n]+)\.([^\.]+)\.([0-9]+)$", file)
if r:
if r.group(1) == filename:
fileindex_set.add(int(r.group(3)))
return sorted(fileindex_set)
def find_filename(filename, extension_requested, dir='', files=[]):
"""Search the most plausible existing filename corresponding to the
requested approximate filename, which has the required file index and
extension.
Arguments:
* filename: the full filename of an existing file in a given dataset
* extension_requested: the extension of the file that is requested
"""
# get the extension-free filename, extension, and file index
# template: FILENAME.xxx.0 => FILENAME (can contain points), 0 (index)
# try different patterns
patterns = [r"([^\n]+)\.([^\.]+)\.([0-9]+)$",
r"([^\n]+)\.([^\.]+)$"]
fileindex = None
for pattern in patterns:
r = re.search(pattern, filename)
if r:
filename = r.group(1)
extension = r.group(2)
if len(r.groups()) >= 3:
fileindex = int(r.group(3))
# else:
# fileindex = None
break
# get the full path
dir = dir.strip()
if not dir:
dir = os.path.dirname(os.path.realpath(filename))
filename = os.path.basename(filename)
# try obtaining the list of all files in the directory
if not files:
try:
files = os.listdir(dir)
except (OSError, IOError):
raise IOError("Error when accessing '{0:s}'.".format(dir))
# If the requested filename does not have a file index, then get the
# smallest available fileindex in the files list.
if fileindex is None:
fileindex_set = set()
for file in files:
r = re.search(r"([^\n]+)\.([^\.]+)\.([0-9]+)$", file)
if r:
fileindex_set.add(int(r.group(3)))
if fileindex_set:
fileindex = sorted(fileindex_set)[0]
# try different suffixes
if fileindex is not None:
suffixes = [
'.{0:s}.{1:d}'.format(extension_requested, fileindex),
'.{0:s}'.format(extension_requested),
]
else:
suffixes = [
# '.{0:s}.{1:d}'.format(extension_requested, fileindex),
'.{0:s}'.format(extension_requested),
]
    # Find the existing filenames that share the longest possible prefix
    # with the requested filename.
    for suffix in suffixes:
        filtered = []
        prefix = filename
        while prefix and not filtered:
            filtered = filter(lambda file: (file.startswith(prefix) and
                                            file.endswith(suffix)), files)
            prefix = prefix[:-1]
        # Order the matches by increasing length and return the shortest.
        filtered = sorted(filtered, key=len)
if filtered:
return os.path.join(dir, filtered[0])
return None
def find_any_filename(filename, extension_requested, dir='', files=[]):
    """Return the path of any file in the directory that ends with the
    requested extension, or None if no such file exists."""
# get the full path
dir = dir.strip()
if not dir:
dir = os.path.dirname(os.path.realpath(filename))
# try obtaining the list of all files in the directory
if not files:
try:
files = os.listdir(dir)
except (OSError, IOError):
raise IOError("Error when accessing '{0:s}'.".format(dir))
filtered = filter(lambda f: f.endswith('.' + extension_requested), files)
if filtered:
return os.path.join(dir, filtered[0])
def find_filename_or_new(filename, extension_requested,
have_file_index=True, dir='', files=[]):
"""Find an existing filename with a requested extension, or create
a new filename based on an existing file."""
# Find the filename with the requested extension.
filename_found = find_filename(filename, extension_requested, dir=dir, files=files)
# If it does not exist, find a file that exists, and replace the extension
# with the requested one.
if not filename_found:
if have_file_index:
file, fileindex = os.path.splitext(filename)
try:
fileindex = int(fileindex[1:])
file = '.'.join(file.split('.')[:-1])
            except ValueError:
# We request a filename with a file index but none exists.
fileindex = 1
filename_new = "{0:s}.{1:s}.{2:d}".format(file,
extension_requested, int(fileindex))
else:
dots = filename.split('.')
# Trailing file index?
try:
if int(dots[-1]) >= 0:
file = '.'.join(dots[:-2])
            except ValueError:
file = '.'.join(dots[:-1])
filename_new = "{0:s}.{1:s}".format(file, extension_requested)
return filename_new
else:
return filename_found
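# Example (a sketch, assuming 'session.fet.1' exists but no ACLU file does):
# find_filename_or_new('session.fet.1', 'aclu') finds no existing ACLU file,
# so it builds the new name 'session.aclu.1', reusing the file index of the
# existing file.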
def find_filenames(filename):
"""Find the filenames of the different files for the current
dataset."""
filenames = {}
for ext in ['xml', 'fet', 'spk', 'uspk', 'res', 'dat',]:
filenames[ext] = find_filename(filename, ext) or ''
for ext in ['clu', 'aclu', 'cluinfo', 'acluinfo', 'groupinfo', 'kvwlg']:
filenames[ext] = find_filename_or_new(filename, ext)
filenames['probe'] = (find_filename(filename, 'probe') or
find_any_filename(filename, 'probe'))
filenames['mask'] = (find_filename(filename, 'fmask') or
find_filename(filename, 'mask'))
# HDF5 file format
filenames.update(find_hdf5_filenames(filename))
return filenames
def filename_to_triplet(filename):
patterns = [r"([^\n]+)\.([^\.]+)\.([0-9]+)$",
r"([^\n]+)\.([^\.]+)$"]
fileindex = None
for pattern in patterns:
r = re.search(pattern, filename)
if r:
filename = r.group(1)
extension = r.group(2)
if len(r.groups()) >= 3:
fileindex = int(r.group(3))
return (filename, extension, fileindex)
return (filename, )
def triplet_to_filename(triplet):
return '.'.join(map(str, triplet))
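# Example round trip: filename_to_triplet('session.fet.4') returns
# ('session', 'fet', 4), and triplet_to_filename(('session', 'fet', 4))
# rebuilds 'session.fet.4'.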
def find_hdf5_filenames(filename):
filenames = {}
# Find KWIK and KWA files.
for key in ['kwik', 'kwa']:
filenames['hdf5_' + key] = os.path.abspath(
find_filename_or_new(filename, key, have_file_index=False))
# Find KWD files.
for key in ['raw', 'low', 'high']:
filenames['hdf5_' + key] = os.path.abspath(
find_filename_or_new(filename, key + '.kwd', have_file_index=False))
return filenames
# -----------------------------------------------------------------------------
# File reading functions
# -----------------------------------------------------------------------------
def read_xml(filename_xml, fileindex=1):
"""Read the XML file associated to the current dataset,
and return a metadata dictionary."""
tree = ET.parse(filename_xml)
root = tree.getroot()
d = {}
ac = root.find('acquisitionSystem')
if ac is not None:
nc = ac.find('nChannels')
if nc is not None:
d['total_channels'] = int(nc.text)
sr = ac.find('samplingRate')
if sr is not None:
d['rate'] = float(sr.text)
sd = root.find('spikeDetection')
if sd is not None:
cg = sd.find('channelGroups')
if cg is not None:
# find the group corresponding to the fileindex
g = cg.findall('group')[fileindex - 1]
if g is not None:
ns = g.find('nSamples')
if ns is not None:
d['nsamples'] = int(ns.text)
nf = g.find('nFeatures')
if nf is not None:
d['fetdim'] = int(nf.text)
c = g.find('channels')
if c is not None:
d['nchannels'] = len(c.findall('channel'))
if 'nchannels' not in d:
d['nchannels'] = d['total_channels']
if 'nsamples' not in d:
ne = root.find('neuroscope')
if ne is not None:
sp = ne.find('spikes')
if sp is not None:
ns = sp.find('nSamples')
if ns is not None:
d['nsamples'] = int(ns.text)
# If no nFeatures, default to 3 (really old XML from Neuroscope).
if 'fetdim' not in d:
d['fetdim'] = 3
# klusters tests
metadata = dict(
nchannels=d['nchannels'],
nsamples=d['nsamples'],
fetdim=d['fetdim'],
freq=d['rate'])
return metadata
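# Example of a minimal XML file accepted by read_xml (a sketch derived from
# the tags parsed above, not a complete Neuroscope parameter file):
#
# <parameters>
#   <acquisitionSystem>
#     <nChannels>32</nChannels>
#     <samplingRate>20000</samplingRate>
#   </acquisitionSystem>
#   <spikeDetection>
#     <channelGroups>
#       <group>
#         <nSamples>32</nSamples>
#         <nFeatures>3</nFeatures>
#         <channels><channel>0</channel><channel>1</channel></channels>
#       </group>
#     </channelGroups>
#   </spikeDetection>
# </parameters>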
# Features.
def process_features(features, fetdim, nchannels, freq, nfet=None):
features = np.array(features, dtype=np.float32)
nspikes, ncol = features.shape
if nfet is not None:
nextrafet = nfet - fetdim * nchannels
else:
nextrafet = ncol - fetdim * nchannels
# get the spiketimes
spiketimes = features[:,-1].copy()
spiketimes *= (1. / freq)
# normalize normal features while keeping symmetry
features_normal = normalize(features[:,:fetdim * nchannels],
symmetric=True)
features_time = spiketimes.reshape((-1, 1)) * 1. / spiketimes[-1] * 2 - 1
# features_time = spiketimes.reshape((-1, 1)) * 1. / spiketimes[-1]# * 2 - 1
# normalize extra features without keeping symmetry
if nextrafet > 1:
features_extra = normalize(features[:,-nextrafet:-1],
symmetric=False)
features = np.hstack((features_normal, features_extra, features_time))
else:
features = np.hstack((features_normal, features_time))
return features, spiketimes
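# Example of the process_features layout (hypothetical shapes): with fetdim=3,
# nchannels=32 and a single extra column holding the spike time, the input is
# (nspikes, 97). The output keeps the 96 normalized PCA features followed by
# the spike time rescaled to the [-1, 1] range, and the spike times in
# seconds are returned separately.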
def read_features(filename_fet, nchannels, fetdim, freq, do_process=True):
"""Read a .fet file and return the normalize features array,
as well as the spiketimes."""
try:
features = load_text(filename_fet, np.int64, skiprows=1, delimiter=' ')
except ValueError:
features = load_text(filename_fet, np.float32, skiprows=1, delimiter='\t')
if do_process:
return process_features(features, fetdim, nchannels, freq,
nfet=first_row(filename_fet))
else:
return features
# Clusters.
def process_clusters(clusters):
return clusters[1:]
def read_clusters(filename_clu):
clusters = load_text(filename_clu, np.int32)
return process_clusters(clusters)
# RES file.
def process_res(spiketimes, freq=None):
if freq is None:
return spiketimes
else:
return spiketimes * 1. / freq
def read_res(filename_res, freq=None):
res = load_text(filename_res, np.int32)
return process_res(res, freq)
# Cluster info.
def process_cluster_info(cluster_info):
cluster_info = pd.DataFrame({'color': cluster_info[:, 1],
'group': cluster_info[:, 2]}, dtype=np.int32, index=cluster_info[:, 0])
return cluster_info
def read_cluster_info(filename_acluinfo):
# For each cluster (absolute indexing): cluster index, color index,
# and group index
cluster_info = load_text(filename_acluinfo, np.int32)
return process_cluster_info(cluster_info)
# Group info.
def process_group_info(group_info):
group_info = pd.DataFrame(
{'color': group_info[:, 1].astype(np.int32),
'name': group_info[:, 2]}, index=group_info[:, 0].astype(np.int32))
return group_info
def read_group_info(filename_groupinfo):
# For each group (absolute indexing): color index, and name
group_info = load_text(filename_groupinfo, str, delimiter='\t')
return process_group_info(group_info)
# Masks.
def process_masks(masks_full, fetdim):
masks = masks_full[:,:-1:fetdim]
return masks, masks_full
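# Example: with fetdim=3, masks_full holds one value per feature column;
# masks_full[:, :-1:fetdim] keeps every third column (skipping the last,
# time, column), i.e. a single mask value per channel.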
def read_masks(filename_mask, fetdim):
masks_full = load_text(filename_mask, np.float32, skiprows=1)
return process_masks(masks_full, fetdim)
# Waveforms.
def process_waveforms(waveforms, nsamples, nchannels):
waveforms = np.array(waveforms, dtype=np.float32)
waveforms = normalize(waveforms, symmetric=True)
waveforms = waveforms.reshape((-1, nsamples, nchannels))
return waveforms
def read_waveforms(filename_spk, nsamples, nchannels):
waveforms = np.array(load_binary(filename_spk), dtype=np.float32)
n = waveforms.size
if n % nsamples != 0 or n % nchannels != 0:
waveforms = load_text(filename_spk, np.float32)
return process_waveforms(waveforms, nsamples, nchannels)
# DAT.
def read_dat(filename_dat, nchannels, dtype=np.int16):
nsamples = (os.path.getsize(filename_dat) //
(nchannels * np.dtype(dtype).itemsize))
return load_binary_memmap(filename_dat, dtype=dtype,
shape=(nsamples, nchannels))
# Probe.
def process_probe(probe):
return normalize(probe)
def read_probe(filename_probe, fileindex):
"""fileindex is the shank index."""
if not filename_probe:
return
if os.path.exists(filename_probe):
# Try the text-flavored probe file.
try:
probe = load_text(filename_probe, np.float32)
except:
# Or try the Python-flavored probe file (SpikeDetekt, with an
# extra field 'geometry').
try:
ns = {}
execfile(filename_probe, ns)
probe = ns['geometry'][fileindex]
probe = np.array([probe[i] for i in sorted(probe.keys())],
dtype=np.float32)
except:
return None
return process_probe(probe)
# -----------------------------------------------------------------------------
# File saving functions
# -----------------------------------------------------------------------------
def save_cluster_info(filename_cluinfo, cluster_info):
cluster_info_array = np.hstack((cluster_info.index.reshape((-1, 1)),
cluster_info.values))
save_text(filename_cluinfo, cluster_info_array)
def save_group_info(filename_groupinfo, group_info):
group_info_array = np.hstack((group_info.index.reshape((-1, 1)),
group_info.values))
save_text(filename_groupinfo, group_info_array, fmt='%s', delimiter='\t')
def save_clusters(filename_clu, clusters):
save_text(filename_clu, clusters, header=len(np.unique(clusters)))
def convert_to_clu(clusters, cluster_groups):
# cluster_groups = cluster_info['group']
clusters_new = np.array(clusters, dtype=np.int32)
for i in (0, 1):
clusters_new[cluster_groups.ix[clusters] == i] = i
# clusters_unique = np.unique(set(clusters_new).union(set([0, 1])))
# clusters_renumbered = reorder(clusters_new, clusters_unique)
# return clusters_renumbered
return clusters_new
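# Example (assuming the usual convention that group 0 is noise and group 1 is
# MUA): convert_to_clu relabels every spike whose cluster belongs to group 0
# or 1 with the group index itself, so noise and MUA spikes end up in
# clusters 0 and 1 of the saved CLU file.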
# -----------------------------------------------------------------------------
# Klusters Loader
# -----------------------------------------------------------------------------
class KlustersLoader(Loader):
def open(self, filename):
"""Open a file."""
self.filename = filename
# Find the file index associated to the filename, or 1 by default.
self.fileindex = find_index(filename) or 1
self.find_filenames()
self.save_original_clufile()
self.read()
def find_filenames(self):
# """Find the filenames of the different files for the current
# dataset."""
for ext, filename in find_filenames(self.filename).iteritems():
setattr(self, 'filename_' + ext, filename)
def save_original_clufile(self):
filename_clu_original = find_filename(self.filename, 'clu_original')
if filename_clu_original is None:
if os.path.exists(self.filename_clu):
# Save the original clu file if it does not exist yet.
with open(self.filename_clu, 'r') as f:
clu = f.read()
with open(self.filename_clu.replace('.clu.',
'.clu_original.'), 'w') as f:
f.write(clu)
if os.path.exists(self.filename_aclu):
# Save the original clu file if it does not exist yet.
with open(self.filename_aclu, 'r') as f:
clu = f.read()
with open(self.filename_aclu.replace('.aclu.',
'.aclu_original.'), 'w') as f:
f.write(clu)
# Internal read methods.
# ----------------------
def read_metadata(self):
try:
self.metadata = read_xml(self.filename_xml, self.fileindex)
except:
# Die if no XML file is available for this dataset, as it contains
# critical metadata.
raise IOError("The XML file is missing.")
self.nsamples = self.metadata.get('nsamples')
self.nchannels = self.metadata.get('nchannels')
self.fetdim = self.metadata.get('fetdim')
self.freq = self.metadata.get('freq')
def read_probe(self):
if self.filename_probe is None:
info("No probe file has been found.")
self.probe = None
else:
try:
self.probe = read_probe(self.filename_probe, self.fileindex)
info("Successfully loaded {0:s}".format(self.filename_probe))
except Exception as e:
info(("There was an error while loading the probe file "
"'{0:s}' : {1:s}").format(self.filename_probe,
e.message))
self.probe = None
def read_features(self):
try:
self.features, self.spiketimes = read_features(self.filename_fet,
self.nchannels, self.fetdim, self.freq)
info("Successfully loaded {0:s}".format(self.filename_fet))
except IOError:
raise IOError("The FET file is missing.")
# Convert to Pandas.
self.features = pd.DataFrame(self.features, dtype=np.float32)
self.duration = self.spiketimes[-1]
self.spiketimes = pd.Series(self.spiketimes, dtype=np.float32)
# Count the number of spikes and save it in the metadata.
self.nspikes = self.features.shape[0]
self.metadata['nspikes'] = self.nspikes
self.nextrafet = self.features.shape[1] - self.nchannels * self.fetdim
def read_res(self):
try:
self.spiketimes_res = read_res(self.filename_res, self.freq)
self.spiketimes_res = pd.Series(self.spiketimes_res, dtype=np.float32)
except IOError:
warn("The RES file is missing.")
def read_clusters(self):
try:
# Try reading the ACLU file, or fallback on the CLU file.
if os.path.exists(self.filename_aclu):
self.clusters = read_clusters(self.filename_aclu)
info("Successfully loaded {0:s}".format(self.filename_aclu))
else:
self.clusters = read_clusters(self.filename_clu)
info("Successfully loaded {0:s}".format(self.filename_clu))
except IOError:
warn("The CLU file is missing.")
# Default clusters if the CLU file is not available.
self.clusters = np.zeros(self.nspikes, dtype=np.int32)
# Convert to Pandas.
self.clusters = pd.Series(self.clusters, dtype=np.int32)
# Count clusters.
self._update_data()
def read_cluster_info(self):
try:
self.cluster_info = read_cluster_info(self.filename_acluinfo)
info("Successfully loaded {0:s}".format(self.filename_acluinfo))
except IOError:
info("The CLUINFO file is missing, generating a default one.")
self.cluster_info = default_cluster_info(self.clusters_unique)
if not np.array_equal(self.cluster_info.index, self.clusters_unique):
info("The CLUINFO file does not correspond to the loaded CLU file.")
self.cluster_info = default_cluster_info(self.clusters_unique)
self.cluster_colors = self.cluster_info['color'].astype(np.int32)
self.cluster_groups = self.cluster_info['group'].astype(np.int32)
def read_group_info(self):
try:
self.group_info = read_group_info(self.filename_groupinfo)
info("Successfully loaded {0:s}".format(self.filename_groupinfo))
except IOError:
info("The GROUPINFO file is missing, generating a default one.")
self.group_info = default_group_info()
# Convert to Pandas.
self.group_colors = self.group_info['color'].astype(np.int32)
self.group_names = self.group_info['name']
def read_masks(self):
try:
self.masks, self.masks_full = read_masks(self.filename_mask,
self.fetdim)
info("Successfully loaded {0:s}".format(self.filename_mask))
except IOError:
warn("The MASKS/FMASKS file is missing.")
# Default masks if the MASK/FMASK file is not available.
self.masks = np.ones((self.nspikes, self.nchannels))
self.masks_full = np.ones(self.features.shape)
self.masks = pd.DataFrame(self.masks)
self.masks_full = pd.DataFrame(self.masks_full)
def read_waveforms(self):
try:
self.waveforms = read_waveforms(self.filename_spk, self.nsamples,
self.nchannels)
info("Successfully loaded {0:s}".format(self.filename_spk))
except IOError:
warn("The SPK file is missing.")
self.waveforms = np.zeros((self.nspikes, self.nsamples,
self.nchannels))
# Convert to Pandas.
self.waveforms = pd.Panel(self.waveforms, dtype=np.float32)
def read_dat(self):
try:
self.dat = read_dat(self.filename_dat, self.nchannels)
except IOError:
warn("The DAT file is missing.")
def read_fil(self):
try:
self.fil = read_dat(self.filename_fil, self.nchannels)
except IOError:
warn("The FIL file is missing.")
# def read_stats(self):
# self.ncorrbins = 100 #SETTINGS.get('correlograms.ncorrbins', 100)
# self.corrbin = .001 #SETTINGS.get('correlograms.corrbin', .001)
# Log file.
# ---------
def initialize_logfile(self):
# filename = self.filename_fet.replace('.fet.', '.kvwlg.')
self.logfile = FileLogger(self.filename_kvwlg, name='datafile',
level=self.userpref['loglevel_file'])
# Register log file.
register(self.logfile)
# Public methods.
# ---------------
def read(self):
self.initialize_logfile()
# Load the similarity measure chosen by the user in the preferences
# file: 'gaussian' or 'kl'.
# Refresh the preferences file when a new file is opened.
# USERPREF.refresh()
self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'
debug("Similarity measure: {0:s}.".format(self.similarity_measure))
info("Opening {0:s}.".format(self.filename))
self.report_progress(0, 5)
self.read_metadata()
self.read_probe()
self.report_progress(1, 5)
self.read_features()
self.report_progress(2, 5)
self.read_res()
self.read_clusters()
self.report_progress(3, 5)
self.read_cluster_info()
self.read_group_info()
self.read_masks()
self.report_progress(4, 5)
self.read_waveforms()
self.report_progress(5, 5)
# self.read_stats()
def save(self, renumber=False):
self.update_cluster_info()
self.update_group_info()
if renumber:
self.renumber()
clusters = get_array(self.clusters_renumbered)
cluster_info = self.cluster_info_renumbered
else:
clusters = get_array(self.clusters)
cluster_info = self.cluster_info
# Save both ACLU and CLU files.
save_clusters(self.filename_aclu, clusters)
save_clusters(self.filename_clu,
convert_to_clu(clusters, cluster_info['group']))
# Save CLUINFO and GROUPINFO files.
save_cluster_info(self.filename_acluinfo, cluster_info)
save_group_info(self.filename_groupinfo, self.group_info)
def close(self):
if hasattr(self, 'logfile'):
unregister(self.logfile)
def __del__(self):
self.close()
# -----------------------------------------------------------------------------
# Memory Loader
# -----------------------------------------------------------------------------
class MemoryLoader(Loader):
def __init__(self, parent=None, **kwargs):
super(MemoryLoader, self).__init__(parent)
self.read(**kwargs)
# Internal read methods.
# ----------------------
def read_metadata(self, nsamples=None, nchannels=None, fetdim=None,
freq=None):
self.nsamples = nsamples
self.nchannels = nchannels
self.fetdim = fetdim
self.freq = freq
def read_probe(self, probe):
try:
self.probe = process_probe(probe)
except Exception as e:
info(("There was an error while loading the probe: "
"'{0:s}'").format(e.message))
self.probe = None
def read_features(self, features):
self.features, self.spiketimes = process_features(features,
self.nchannels, self.fetdim, self.freq)
# Convert to Pandas.
self.features = pd.DataFrame(self.features, dtype=np.float32)
self.duration = self.spiketimes[-1]
self.spiketimes = pd.Series(self.spiketimes, dtype=np.float32)
# Count the number of spikes and save it in the metadata.
self.nspikes = self.features.shape[0]
self.nextrafet = self.features.shape[1] - self.nchannels * self.fetdim
def read_clusters(self, clusters):
self.clusters = process_clusters(clusters)
# Convert to Pandas.
self.clusters = pd.Series(self.clusters, dtype=np.int32)
# Count clusters.
self._update_data()
def read_cluster_info(self, cluster_info):
self.cluster_info = process_cluster_info(cluster_info)
assert np.array_equal(self.cluster_info.index, self.clusters_unique), \
"The CLUINFO file does not correspond to the loaded CLU file."
self.cluster_colors = self.cluster_info['color'].astype(np.int32)
self.cluster_groups = self.cluster_info['group'].astype(np.int32)
def read_group_info(self, group_info):
self.group_info = process_group_info(group_info)
# Convert to Pandas.
self.group_colors = self.group_info['color'].astype(np.int32)
self.group_names = self.group_info['name']
def read_masks(self, masks):
self.masks, self.masks_full = process_masks(masks, self.fetdim)
self.masks = pd.DataFrame(self.masks)
self.masks_full = pd.DataFrame(self.masks_full)
def read_waveforms(self, waveforms):
self.waveforms = process_waveforms(waveforms, self.nsamples,
self.nchannels)
# Convert to Pandas.
self.waveforms = pd.Panel(self.waveforms, dtype=np.float32)
# def read_stats(self):
# self.ncorrbins = 100 #SETTINGS.get('correlograms.ncorrbins', 100)
# self.corrbin = .001 #SETTINGS.get('correlograms.corrbin', .001)
# Public methods.
# ---------------
def read(self, nsamples=None, nchannels=None, fetdim=None,
freq=None, probe=None, features=None, clusters=None,
cluster_info=None, group_info=None, channel_info=None,
channel_group_info=None, masks=None, waveforms=None):
self.read_metadata(nsamples=nsamples, nchannels=nchannels,
fetdim=fetdim, freq=freq)
self.read_probe(probe)
self.read_features(features)
self.read_clusters(clusters)
self.read_cluster_info(cluster_info)
self.read_group_info(group_info)
self.read_masks(masks)
self.read_waveforms(waveforms)
# self.read_stats()
# coding: utf-8
# PYTHON IMPORTS
import operator
import json
from functools import reduce
# DJANGO IMPORTS
from django.http import HttpResponse
from django.db import models, connection
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query import QuerySet
from django.views.decorators.cache import never_cache
from django.views.generic import View
from django.utils.translation import ungettext, ugettext as _
from django.utils.encoding import smart_text
from django.core.exceptions import PermissionDenied
from django.contrib.admin.utils import prepare_lookup_value
from django.core.serializers.json import DjangoJSONEncoder
from django.apps import apps
# GRAPPELLI IMPORTS
from grappelli.settings import AUTOCOMPLETE_LIMIT, AUTOCOMPLETE_SEARCH_FIELDS
# from grappelli.views.related import RelatedLookup as BaseRelatedLookup
def get_label(f):
if getattr(f, "related_label", None):
return f.related_label()
return smart_text(f)
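# Example (a sketch with a hypothetical model): a model can customize the
# label used in lookups by defining a related_label method, which get_label
# prefers over the plain text representation:
#
# class Supplier(models.Model):
#     name = models.CharField(max_length=50)
#
#     def related_label(self):
#         return "%s (#%s)" % (self.name, self.pk)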
def get_dropdown_label(f):
if getattr(f, "custom_related_dropdown_label", None):
return f.custom_related_dropdown_label()
return smart_text(f)
def get_selected_fk_display(f):
if getattr(f, "custom_related_fk_selected_display", None):
return f.custom_related_fk_selected_display()
return get_label(f)
def get_selected_m2m_display(f):
if getattr(f, "custom_related_m2m_selected_display", None):
return f.custom_related_m2m_selected_display()
return get_label(f)
def import_from(module, name):
module = __import__(module, fromlist=[name])
return getattr(module, name)
def ajax_response(data):
return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder), content_type='application/javascript')
def get_autocomplete_search_fields(model):
"""
Returns the fields to be used for autocomplete of the given model,
first using the autocomplete_search_fields() static method when defined on
the model.
If the staticmethod is not declared, looks for the fields value in the
GRAPPELLI_AUTOCOMPLETE_SEARCH_FIELDS setting for the given app/model.
"""
if hasattr(model, 'autocomplete_search_fields'):
return model.autocomplete_search_fields()
try:
return AUTOCOMPLETE_SEARCH_FIELDS[model._meta.app_label][model._meta.model_name]
except KeyError:
return
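# Example (hypothetical app and model names): the search fields can either be
# declared as a staticmethod on the model or configured via the Grappelli
# setting read into AUTOCOMPLETE_SEARCH_FIELDS:
#
# class Supplier(models.Model):
#     name = models.CharField(max_length=50)
#
#     @staticmethod
#     def autocomplete_search_fields():
#         return ("id__iexact", "name__icontains")
#
# # or, in settings.py:
# # GRAPPELLI_AUTOCOMPLETE_SEARCH_FIELDS = {
# #     "shop": {"supplier": ("id__iexact", "name__icontains")},
# # }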
class CustomRelatedLookup(View):
"Related Lookup"
def check_user_permission(self):
if not (self.request.user.is_active and self.request.user.is_staff):
raise PermissionDenied
def request_is_valid(self):
return 'object_id' in self.GET and 'app_label' in self.GET and 'model_name' in self.GET
def get_model(self):
try:
self.model = apps.get_model(self.GET['app_label'], self.GET['model_name'])
except LookupError:
self.model = None
return self.model
def get_filtered_queryset(self, qs):
filters = {}
query_string = self.GET.get('query_string', None)
if query_string:
for item in query_string.split(":"):
k, v = item.split("=")
if k != "_to_field":
filters[smart_text(k)] = prepare_lookup_value(smart_text(k), smart_text(v))
return qs.filter(**filters)
def get_queryset(self):
qs = self.model._default_manager.get_queryset()
qs = self.get_filtered_queryset(qs)
return qs
def get_data(self):
obj_id = self.GET['object_id']
data = []
if obj_id:
try:
obj = self.get_queryset().get(pk=obj_id)
data.append({"value": obj_id, "label": get_label(obj), "dropdown_label": get_dropdown_label(obj), "selected_fk_display": get_selected_fk_display(obj), "selected_m2m_display": get_selected_m2m_display(obj)})
except (self.model.DoesNotExist, ValueError):
data.append({"value": obj_id, "label": _("?"), "dropdown_label": _("?"), "selected_fk_display": _("?"), "selected_m2m_display": _("?")})
return data
@never_cache
def get(self, request, *args, **kwargs):
self.check_user_permission()
self.GET = self.request.GET
if self.request_is_valid():
self.get_model()
if self.model is not None:
data = self.get_data()
if data:
return ajax_response(data)
data = [{"value": None, "label": ""}]
return ajax_response(data)
class CustomM2MLookup(CustomRelatedLookup):
"M2M Lookup"
def get_data(self):
obj_ids = self.GET['object_id'].split(',')
data = []
for obj_id in (i for i in obj_ids if i):
try:
obj = self.get_queryset().get(pk=obj_id)
data.append({"value": obj_id, "label": get_label(obj), "dropdown_label": get_dropdown_label(obj), "selected_fk_display": get_selected_fk_display(obj), "selected_m2m_display": get_selected_m2m_display(obj)})
except (self.model.DoesNotExist, ValueError):
data.append({"value": obj_id, "label": _("?"), "dropdown_label": _("?"), "selected_fk_display": _("?"), "selected_m2m_display": _("?")})
return data
class CustomAutocompleteLookup(CustomRelatedLookup):
"AutocompleteLookup"
def request_is_valid(self):
return 'term' in self.GET and 'app_label' in self.GET and 'model_name' in self.GET
def get_searched_queryset(self, qs):
model = self.model
term = self.GET["term"]
try:
term = model.autocomplete_term_adjust(term)
except AttributeError:
pass
search_fields = get_autocomplete_search_fields(self.model)
if search_fields:
for word in term.split():
search = [models.Q(**{smart_text(item): smart_text(word)}) for item in search_fields]
search_qs = QuerySet(model)
search_qs.query.select_related = qs.query.select_related
search_qs = search_qs.filter(reduce(operator.or_, search))
qs &= search_qs
else:
qs = model.objects.none()
return qs
def get_final_ordering(self, model, previous_lookup_parts=None):
"""
This recursive function returns the final lookups
for the default ordering of a model.
Considering the models below, `get_final_ordering(Book)` will return
`['-type__name', 'name']` instead of the simple `['-type', 'name']`
one would get using `Book._meta.ordering`.
class BookType(Model):
name = CharField(max_length=50)
class Meta:
ordering = ['name']
class Book(Model):
name = CharField(max_length=50)
type = ForeignKey(BookType)
class Meta:
ordering = ['-type', 'name']
"""
ordering = []
for lookup in model._meta.ordering:
opts = model._meta
for part in lookup.lstrip('-').split(LOOKUP_SEP):
field = opts.get_field(part)
if field.is_relation:
opts = field.rel.to._meta
if previous_lookup_parts is not None:
lookup = previous_lookup_parts + LOOKUP_SEP + lookup
if field.is_relation:
ordering.extend(self.get_final_ordering(opts.model, lookup))
else:
ordering.append(lookup)
return ordering
def get_queryset(self):
qs = super(CustomAutocompleteLookup, self).get_queryset()
qs = self.get_filtered_queryset(qs)
qs = self.get_searched_queryset(qs)
if connection.vendor == 'postgresql':
ordering = self.get_final_ordering(self.model)
distinct_columns = [o.lstrip('-') for o in ordering]
pk_name = self.model._meta.pk.name
if pk_name not in distinct_columns:
distinct_columns.append(pk_name)
return qs.order_by(*ordering).distinct(*distinct_columns)
return qs.distinct()
def get_data(self):
return [{"value": f.pk, "label": get_label(f), "dropdown_label": get_dropdown_label(f), "selected_fk_display": get_selected_fk_display(f), "selected_m2m_display": get_selected_m2m_display(f)} for f in self.get_queryset()[:AUTOCOMPLETE_LIMIT]]
@never_cache
def get(self, request, *args, **kwargs):
self.check_user_permission()
self.GET = self.request.GET
if self.request_is_valid():
self.get_model()
data = self.get_data()
if data:
return ajax_response(data)
# overcomplicated label translation
label = ungettext('%(counter)s result', '%(counter)s results', 0) % {'counter': 0}
data = [{"value": None, "label": label}]
return ajax_response(data)