gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeseries head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.summary import summary
class _NoStatePredictOutput(export_lib.PredictOutput):
  """A `PredictOutput` that omits model-state tensors from its signature.

  State inputs are an implementation detail of stateful filtering; exported
  stateless signatures should not advertise them as receivable tensors.
  """

  def as_signature_def(self, receiver_tensors):
    stateless_receivers = {}
    for tensor_name, tensor in receiver_tensors.items():
      # Drop any receiver whose name marks it as model state.
      if tensor_name.startswith(feature_keys.State.STATE_PREFIX):
        continue
      stateless_receivers[tensor_name] = tensor
    return super(_NoStatePredictOutput, self).as_signature_def(
        receiver_tensors=stateless_receivers)
class TimeSeriesRegressionHead(head_lib._Head):  # pylint:disable=protected-access
  """Determines input and output signatures for a time series model."""

  def __init__(self,
               model,
               state_manager,
               optimizer,
               input_statistics_generator=None,
               name=None):
    """Creates a `_Head` for time series regression.

    Args:
      model: A model for time series regression.
      state_manager: A state manager.
      optimizer: An optimizer.
      input_statistics_generator: A input statistics generator.
      name: An optional name for the model.
    """
    self.model = model
    self.state_manager = state_manager
    self.optimizer = optimizer
    self.input_statistics_generator = input_statistics_generator
    self._name = name

  @property
  def name(self):
    return self._name

  # TODO(terrytangyuan): consolidate `model_outputs` and `_Head.LossSpec`
  # once `_Head.create_loss` becomes extendable
  def create_loss(self, features, mode, logits=None, labels=None):
    """See `_Head`.

    `logits` and `labels` are unused; losses for time series models come from
    the state manager's filtering over `features` rather than from a logits
    tensor.
    """
    model_outputs = self.state_manager.define_loss(
        self.model, features, mode)
    # Report the loss under the standard Estimator metric key so it shows up
    # in TensorBoard alongside canned heads.
    summary.scalar(
        head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
        model_outputs.loss)
    return model_outputs

  @property
  def logits_dimension(self):
    """See `_Head`."""
    return 1

  def _train_ops(self, features):
    """Add training ops to the graph."""
    mode = estimator_lib.ModeKeys.TRAIN
    with variable_scope.variable_scope(
        "model",
        # Use ResourceVariables to avoid race conditions.
        use_resource=True):
      model_outputs = self.create_loss(features, mode)
    train_op = optimizers.optimize_loss(
        model_outputs.loss,
        global_step=training_util.get_global_step(),
        optimizer=self.optimizer,
        # Learning rate is set in the Optimizer object
        learning_rate=None)
    return estimator_lib.EstimatorSpec(
        loss=model_outputs.loss,
        mode=mode,
        train_op=train_op)

  def _evaluate_ops(self, features):
    """Add ops for evaluation (aka filtering) to the graph."""
    mode = estimator_lib.ModeKeys.EVAL
    with variable_scope.variable_scope("model", use_resource=True):
      model_outputs = self.create_loss(features, mode)
    metrics = {}
    # Just output in-sample predictions for the last chunk seen
    for prediction_key, prediction_value in model_outputs.predictions.items():
      metrics[prediction_key] = _identity_metric_single(prediction_key,
                                                        prediction_value)
    metrics[feature_keys.FilteringResults.TIMES] = _identity_metric_single(
        feature_keys.FilteringResults.TIMES, model_outputs.prediction_times)
    # State is a nested structure, so it gets one identity metric per leaf.
    metrics[feature_keys.FilteringResults.STATE_TUPLE] = (
        _identity_metric_nested(feature_keys.FilteringResults.STATE_TUPLE,
                                model_outputs.end_state))
    return estimator_lib.EstimatorSpec(
        loss=model_outputs.loss,
        mode=mode,
        eval_metric_ops=metrics,
        predictions={})

  def _predict_ops(self, features):
    """Add ops for prediction to the graph."""
    with variable_scope.variable_scope("model", use_resource=True):
      prediction = self.model.predict(features=features)
    # Echo the query times back so predictions can be aligned by the caller.
    prediction[feature_keys.PredictionResults.TIMES] = features[
        feature_keys.PredictionFeatures.TIMES]
    return estimator_lib.EstimatorSpec(
        predictions=prediction, mode=estimator_lib.ModeKeys.PREDICT)

  def _serving_ops(self, features):
    """Add ops for serving to the graph."""
    with variable_scope.variable_scope("model", use_resource=True):
      prediction_outputs = self.model.predict(features=features)
    with variable_scope.variable_scope("model", reuse=True):
      filtering_outputs = self.create_loss(
          features, estimator_lib.ModeKeys.EVAL)
    with variable_scope.variable_scope("model", reuse=True):
      no_state_features = {
          k: v for k, v in features.items()
          if not k.startswith(feature_keys.State.STATE_PREFIX)}
      # Ignore any state management when cold-starting. The model's default
      # start state is replicated across the batch.
      cold_filtering_outputs = self.model.define_loss(
          features=no_state_features, mode=estimator_lib.ModeKeys.EVAL)
    return estimator_lib.EstimatorSpec(
        mode=estimator_lib.ModeKeys.PREDICT,
        export_outputs={
            feature_keys.SavedModelLabels.PREDICT:
                export_lib.PredictOutput(prediction_outputs),
            feature_keys.SavedModelLabels.FILTER:
                export_lib.PredictOutput(
                    state_to_dictionary(filtering_outputs.end_state)),
            feature_keys.SavedModelLabels.COLD_START_FILTER:
                _NoStatePredictOutput(
                    state_to_dictionary(cold_filtering_outputs.end_state))
        },
        # Likely unused, but it is necessary to return `predictions` to satisfy
        # the Estimator's error checking.
        predictions={})

  def _convert_feature_to_tensor(self, name, value):
    """Casts features to the correct dtype based on their name."""
    if name in [
        feature_keys.TrainEvalFeatures.TIMES,
        feature_keys.PredictionFeatures.TIMES
    ]:
      return math_ops.cast(value, dtypes.int64)
    if name == feature_keys.TrainEvalFeatures.VALUES:
      return math_ops.cast(value, self.model.dtype)
    if name == feature_keys.PredictionFeatures.STATE_TUPLE:
      return value  # Correct dtypes are model-dependent
    return ops.convert_to_tensor(value)

  def _gather_state(self, features):
    """Returns `features` with state packed, indicates if packing was done."""
    prefixed_state_re = re.compile(r"^" + feature_keys.State.STATE_PREFIX +
                                   r"_(\d+)$")
    numbered_state = []
    for key, tensor in features.items():
      search_result = prefixed_state_re.search(key)
      if search_result:
        numbered_state.append((int(search_result.group(1)), key, tensor))
    if not numbered_state:
      return features, False
    features = features.copy()
    for _, key, _ in numbered_state:
      del features[key]
    # Fix: in Python 3 a sort `key` callable receives the whole
    # (number, key, tensor) tuple as one argument, so the previous
    # `lambda number, *_: number` bound the entire tuple to `number` and
    # sorted tuple-lexicographically. Sort explicitly by the parsed number.
    numbered_state.sort(key=lambda item: item[0])
    features[feature_keys.State.STATE_TUPLE] = nest.pack_sequence_as(
        structure=self.model.get_start_state(),
        flat_sequence=[tensor for _, _, tensor in numbered_state])
    return features, True

  def create_estimator_spec(self, features, mode, labels=None):
    """Performs basic error checking and returns an EstimatorSpec."""
    with ops.name_scope(self._name, "head"):
      if labels:
        raise ValueError(
            "The model received a `labels` dictionary, which is "
            "not supported. Pass '{}' and '{}' as "
            "features.".format(feature_keys.TrainEvalFeatures.TIMES,
                               feature_keys.TrainEvalFeatures.VALUES))
      del labels
      features = {
          name: self._convert_feature_to_tensor(name=name, value=value)
          for name, value in features.items()
      }
      if self.input_statistics_generator is not None:
        # Only update the running statistics while training; EVAL/PREDICT read
        # the stored values.
        input_statistics = self.input_statistics_generator.initialize_graph(
            features, update_statistics=(mode == estimator_lib.ModeKeys.TRAIN))
      else:
        input_statistics = None
      self.model.initialize_graph(input_statistics=input_statistics)
      # _gather_state requires the model to have its graph initialized (so it
      # has access to the structure of the model's state)
      features, passed_flat_state = self._gather_state(features)
      if (mode == estimator_lib.ModeKeys.TRAIN or
          mode == estimator_lib.ModeKeys.EVAL):
        _check_train_eval_features(features, self.model)
      elif mode == estimator_lib.ModeKeys.PREDICT:
        _check_predict_features(features)
      else:
        raise ValueError("Unknown mode '{}' passed to model_fn.".format(mode))
      self.state_manager.initialize_graph(
          model=self.model, input_statistics=input_statistics)
      if mode == estimator_lib.ModeKeys.TRAIN:
        return self._train_ops(features)
      elif mode == estimator_lib.ModeKeys.EVAL:
        return self._evaluate_ops(features)
      elif mode == estimator_lib.ModeKeys.PREDICT and not passed_flat_state:
        return self._predict_ops(features)
      elif mode == estimator_lib.ModeKeys.PREDICT and passed_flat_state:
        # The mode is PREDICT, but we're actually in export_savedmodel for
        # serving. We want to return two graphs: one for filtering (state + data
        # -> state) and one for predicting (state -> prediction).
        return self._serving_ops(features)
class OneShotPredictionHead(TimeSeriesRegressionHead):
  """A time series head which exports a single stateless serving signature.

  The serving default signature exported by this head expects `times`, `values`,
  and any exogenous features, but no state. `values` has shape `[batch_size,
  filter_length, num_features]` and `times` has shape `[batch_size,
  total_length]`, where `total_length > filter_length`. Any exogenous features
  must have their shapes prefixed by the shape of the `times` feature.

  When serving, first performs filtering on the series up to `filter_length`
  starting from the default start state for the model, then computes predictions
  on the remainder of the series, returning them.

  Model state is neither accepted nor returned, so filtering must be performed
  each time predictions are requested when using this head.
  """

  def _serving_ops(self, features):
    """Add ops for serving to the graph."""
    with variable_scope.variable_scope("model", use_resource=True):
      filtering_features = {}
      prediction_features = {}
      # The filtering window length is taken from the second dimension of
      # `values`; `times` and exogenous features are longer and get split at
      # this boundary into a filtering part and a prediction part.
      values_length = array_ops.shape(
          features[feature_keys.FilteringFeatures.VALUES])[1]
      for key, value in features.items():
        if key == feature_keys.State.STATE_TUPLE:
          # Ignore state input. The model's default start state is replicated
          # across the batch.
          continue
        if key == feature_keys.FilteringFeatures.VALUES:
          # `values` covers only the filtering window, so no split is needed.
          filtering_features[key] = value
        else:
          filtering_features[key] = value[:, :values_length]
          prediction_features[key] = value[:, values_length:]
      # Filter from the model's default start state ("cold start") to obtain
      # an end state, which seeds the subsequent prediction.
      cold_filtering_outputs = self.model.define_loss(
          features=filtering_features, mode=estimator_lib.ModeKeys.EVAL)
      prediction_features[feature_keys.State.STATE_TUPLE] = (
          cold_filtering_outputs.end_state)
    with variable_scope.variable_scope("model", reuse=True):
      prediction_outputs = self.model.predict(
          features=prediction_features)
    return estimator_lib.EstimatorSpec(
        mode=estimator_lib.ModeKeys.PREDICT,
        export_outputs={
            feature_keys.SavedModelLabels.PREDICT:
                _NoStatePredictOutput(prediction_outputs),
        },
        # Likely unused, but it is necessary to return `predictions` to satisfy
        # the Estimator's error checking.
        predictions={})
def _check_feature_shapes_compatible_with(features,
compatible_with_name,
compatible_with_value,
ignore=None):
"""Checks all features are compatible with the given time-like feature."""
if ignore is None:
ignore = set()
for name, value in features.items():
if name in ignore:
continue
feature_shape = value.get_shape()
if feature_shape.ndims is None:
continue
if feature_shape.ndims < 2:
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"(got rank {} for feature '{}')").format(feature_shape.ndims, name))
if not feature_shape[:2].is_compatible_with(
compatible_with_value.get_shape()):
raise ValueError(
("Features must have shape (batch dimension, window size, ...) "
"where batch dimension and window size match the "
"'{times_feature}' feature (got shape {feature_shape} for "
"feature '{feature_name}' but shape {times_shape} for feature "
"'{times_feature}')").format(
times_feature=compatible_with_name,
feature_shape=feature_shape,
feature_name=name,
times_shape=compatible_with_value.get_shape()))
def _check_predict_features(features):
  """Raises errors if features are not suitable for prediction.

  Requires the `times` and `state` features to be present, and `times` to be
  statically compatible with shape (batch, window). All remaining features
  (except the model-dependent state tuple) must share `times`'s leading
  dimensions.
  """
  for required_key in (feature_keys.PredictionFeatures.TIMES,
                       feature_keys.PredictionFeatures.STATE_TUPLE):
    if required_key not in features:
      raise ValueError("Expected a '{}' feature for prediction.".format(
          required_key))
  times_feature = features[feature_keys.PredictionFeatures.TIMES]
  times_shape = times_feature.get_shape()
  if not times_shape.is_compatible_with([None, None]):
    raise ValueError(
        ("Expected shape (batch dimension, window size) for feature '{}' "
         "(got shape {})").format(feature_keys.PredictionFeatures.TIMES,
                                  times_shape))
  _check_feature_shapes_compatible_with(
      features=features,
      compatible_with_name=feature_keys.PredictionFeatures.TIMES,
      compatible_with_value=times_feature,
      # State shapes are model-dependent, so they are not checked here.
      ignore={feature_keys.PredictionFeatures.STATE_TUPLE})
def _check_train_eval_features(features, model):
  """Raise errors if features are not suitable for training/evaluation.

  Args:
    features: Mapping from feature name to Tensor. Must contain the `times`
      and `values` features.
    model: The model being trained/evaluated; `model.num_features` determines
      the expected last dimension of `values`.

  Raises:
    ValueError: If a required feature is missing or a feature's static shape
      is incompatible with the expected (batch, window, ...) layout.
  """
  if feature_keys.TrainEvalFeatures.TIMES not in features:
    raise ValueError("Expected a '{}' feature for training/evaluation.".format(
        feature_keys.TrainEvalFeatures.TIMES))
  if feature_keys.TrainEvalFeatures.VALUES not in features:
    raise ValueError("Expected a '{}' feature for training/evaluation.".format(
        feature_keys.TrainEvalFeatures.VALUES))
  times_feature = features[feature_keys.TrainEvalFeatures.TIMES]
  if not times_feature.get_shape().is_compatible_with([None, None]):
    raise ValueError(
        ("Expected shape (batch dimension, window size) for feature '{}' "
         "(got shape {})").format(feature_keys.TrainEvalFeatures.TIMES,
                                  times_feature.get_shape()))
  values_feature = features[feature_keys.TrainEvalFeatures.VALUES]
  if not values_feature.get_shape().is_compatible_with(
      [None, None, model.num_features]):
    raise ValueError(
        ("Expected shape (batch dimension, window size, {num_features}) "
         "for feature '{feature_name}', since the model was configured "
         "with num_features={num_features} (got shape {got_shape})").format(
            num_features=model.num_features,
            feature_name=feature_keys.TrainEvalFeatures.VALUES,
            # Fix: report the offending `values` shape, not the `times` shape.
            got_shape=values_feature.get_shape()))
  _check_feature_shapes_compatible_with(
      features=features,
      compatible_with_name=feature_keys.TrainEvalFeatures.TIMES,
      compatible_with_value=times_feature,
      ignore=set([
          feature_keys.State.STATE_TUPLE  # Model-dependent shapes
      ]))
def _identity_metric_single(name, input_tensor):
  """A metric which takes on its last updated value.

  This keeps evaluation metrics in sync with one another, since update ops are
  run separately from their result Tensors. Simply returning (input_tensor,
  no_op) as a metric with a value but no update means that a metric will come
  from a different batch of data than metrics which cache values in a Variable
  (e.g. the default loss metric).

  Args:
    name: A name for the metric.
    input_tensor: Any Tensor.

  Returns:
    A tuple of (value, update_op).
  """
  # Local (non-trainable, non-saved) variable; its shape is unknown until the
  # first assignment, so shape validation is disabled on creation.
  metric_variable = variable_scope.variable(
      name="{}_identity_metric".format(name),
      initial_value=array_ops.zeros([], dtype=input_tensor.dtype),
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=False)
  # The update op overwrites the cached value with the current batch's tensor;
  # validate_shape=False allows the variable's shape to change on assignment.
  update_op = state_ops.assign(
      metric_variable, input_tensor, validate_shape=False)
  # This shape will be correct once the first update runs (but may be
  # incomplete, so is not helpful for initializing the variable).
  metric_variable.set_shape(input_tensor.get_shape())
  return (metric_variable.value(), update_op)
def _identity_metric_nested(name, input_tensors):
  """Create identity metrics for a nested tuple of Tensors.

  Returns a (packed value structure, grouped update op) pair mirroring the
  structure of `input_tensors`, built from one identity metric per leaf.
  """
  per_leaf_metrics = [
      _identity_metric_single(
          name="{}_{}".format(name, leaf_index), input_tensor=leaf)
      for leaf_index, leaf in enumerate(nest.flatten(input_tensors))]
  value_tensors = [value for value, _ in per_leaf_metrics]
  update_ops = [update for _, update in per_leaf_metrics]
  return (nest.pack_sequence_as(input_tensors, value_tensors),
          control_flow_ops.group(*update_ops))
def state_to_dictionary(state_tuple):
  """Flatten model state into a dictionary with string keys.

  Each leaf of `state_tuple` becomes an entry keyed by the state prefix plus
  its zero-padded position in the flattened structure.
  """
  return {
      "{}_{:02d}".format(feature_keys.State.STATE_PREFIX, index): leaf
      for index, leaf in enumerate(nest.flatten(state_tuple))}
| |
# pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCRoute(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    kind = 'route'

    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCRoute '''
        super(OCRoute, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
        self.config = config
        self._route = None

    @property
    def route(self):
        ''' property function for route; lazily fetches on first access '''
        if not self._route:
            self.get()
        return self._route

    @route.setter
    def route(self, data):
        ''' setter function for route '''
        self._route = data

    def exists(self):
        ''' return whether a route exists '''
        if self.route:
            return True

        return False

    def get(self):
        '''return route information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.route = Route(content=result['results'][0])
        elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
            # A missing route is not an error for our purposes; normalize the
            # result so callers see success with empty results.
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''delete the object'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the object'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object'''
        # need to update the tls information and the service name
        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed '''
        skip = []
        return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=self.verbose)

    @staticmethod
    def get_cert_data(path, content):
        '''get the data for a particular value

        Prefers reading from `path` when it exists and is readable; otherwise
        falls back to the literal `content`. Returns None if neither is set.
        '''
        if not path and not content:
            return None

        rval = None
        if path and os.path.exists(path) and os.access(path, os.R_OK):
            # Fix: use a context manager so the file handle is closed
            # deterministically instead of leaking until garbage collection.
            with open(path) as cert_file:
                rval = cert_file.read()
        elif content:
            rval = content

        return rval

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params, check_mode=False):
        ''' run the idempotent ansible code

        params comes from the ansible portion for this module
        files: a dictionary for the certificates
               {'cert': {'path': '',
                         'content': '',
                         'value': ''
                        }
               }
        check_mode: does the module support check mode. (module.check_mode)
        '''
        files = {'destcacert': {'path': params['dest_cacert_path'],
                                'content': params['dest_cacert_content'],
                                'value': None, },
                 'cacert': {'path': params['cacert_path'],
                            'content': params['cacert_content'],
                            'value': None, },
                 'cert': {'path': params['cert_path'],
                          'content': params['cert_content'],
                          'value': None, },
                 'key': {'path': params['key_path'],
                         'content': params['key_content'],
                         'value': None, }, }

        # Certificates are only meaningful when TLS terminates at the router.
        if params['tls_termination'] and params['tls_termination'].lower() != 'passthrough':  # E501
            for key, option in files.items():
                # The destination CA cert is only used for re-encryption.
                if key == 'destcacert' and params['tls_termination'] != 'reencrypt':
                    continue

                option['value'] = OCRoute.get_cert_data(option['path'], option['content'])  # E501

                if not option['value']:
                    return {'failed': True,
                            'msg': 'Verify that you pass a value for %s' % key}

        rconfig = RouteConfig(params['name'],
                              params['namespace'],
                              params['kubeconfig'],
                              files['destcacert']['value'],
                              files['cacert']['value'],
                              files['cert']['value'],
                              files['key']['value'],
                              params['host'],
                              params['tls_termination'],
                              params['service_name'],
                              params['wildcard_policy'],
                              params['weight'],
                              params['port'])

        oc_route = OCRoute(rconfig, verbose=params['debug'])

        state = params['state']

        api_rval = oc_route.get()

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False,
                    'results': api_rval['results'],
                    'state': 'list'}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_route.exists():

                if check_mode:
                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}  # noqa: E501

                api_rval = oc_route.delete()

                return {'changed': True, 'results': api_rval, 'state': "absent"}  # noqa: E501
            return {'changed': False, 'state': 'absent'}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_route.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}  # noqa: E501

                # Create it here
                api_rval = oc_route.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval, 'state': "present"}  # noqa: E501

                # return the created object
                api_rval = oc_route.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval, 'state': "present"}  # noqa: E501

                return {'changed': True, 'results': api_rval, 'state': "present"}  # noqa: E501

            ########
            # Update
            ########
            if oc_route.needs_update():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}  # noqa: E501

                api_rval = oc_route.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval, 'state': "present"}  # noqa: E501

                # return the created object
                api_rval = oc_route.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval, 'state': "present"}  # noqa: E501

                return {'changed': True, 'results': api_rval, 'state': "present"}  # noqa: E501

            return {'changed': False, 'results': api_rval, 'state': "present"}

        # catch all
        return {'failed': True, 'msg': "Unknown State passed"}
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from collections import namedtuple
from cryptography.fernet import Fernet
from parameterized import parameterized
from airflow.models import Connection, crypto
from tests.test_utils.config import conf_vars
ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"])
class TestConnection(unittest.TestCase):
    """Tests for `airflow.models.Connection`: Fernet encryption of the
    `extra` field and parsing of connection URIs."""

    def setUp(self):
        # Reset the module-level Fernet cache so each test re-reads the
        # fernet_key set via `conf_vars`.
        crypto._fernet = None

    def tearDown(self):
        crypto._fernet = None

    @conf_vars({('core', 'fernet_key'): ''})
    def test_connection_extra_no_encryption(self):
        """
        Tests extras on a new connection without encryption. The fernet key
        is set to a non-base64-encoded string and the extra is stored without
        encryption.
        """
        test_connection = Connection(extra='testextra')
        self.assertFalse(test_connection.is_extra_encrypted)
        self.assertEqual(test_connection.extra, 'testextra')

    @conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
    def test_connection_extra_with_encryption(self):
        """
        Tests extras on a new connection with encryption.
        """
        test_connection = Connection(extra='testextra')
        self.assertTrue(test_connection.is_extra_encrypted)
        self.assertEqual(test_connection.extra, 'testextra')

    def test_connection_extra_with_encryption_rotate_fernet_key(self):
        """
        Tests rotating encrypted extras.
        """
        key1 = Fernet.generate_key()
        key2 = Fernet.generate_key()

        with conf_vars({('core', 'fernet_key'): key1.decode()}):
            test_connection = Connection(extra='testextra')
            self.assertTrue(test_connection.is_extra_encrypted)
            self.assertEqual(test_connection.extra, 'testextra')
            self.assertEqual(Fernet(key1).decrypt(test_connection._extra.encode()), b'testextra')

        # Test decrypt of old value with new key
        with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
            crypto._fernet = None
            self.assertEqual(test_connection.extra, 'testextra')

            # Test decrypt of new value with new key
            test_connection.rotate_fernet_key()
            self.assertTrue(test_connection.is_extra_encrypted)
            self.assertEqual(test_connection.extra, 'testextra')
            self.assertEqual(Fernet(key2).decrypt(test_connection._extra.encode()), b'testextra')

    def test_connection_from_uri_without_extras(self):
        """A plain URI maps scheme/user/password/host/port/schema fields."""
        uri = 'scheme://user:password@host%2flocation:1234/schema'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertIsNone(connection.extra)

    def test_connection_from_uri_with_extras(self):
        """Query parameters become URL-decoded entries in extra_dejson."""
        uri = 'scheme://user:password@host%2flocation:1234/schema?' \
              'extra1=a%20value&extra2=%2fpath%2f'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
                                                       'extra2': '/path/'})

    def test_connection_from_uri_with_empty_extras(self):
        """An empty query-parameter value is preserved as an empty string."""
        uri = 'scheme://user:password@host%2flocation:1234/schema?' \
              'extra1=a%20value&extra2='
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
                                                       'extra2': ''})

    def test_connection_from_uri_with_colon_in_hostname(self):
        """Percent-encoded colons (%3a) in the host are decoded."""
        uri = 'scheme://user:password@host%2flocation%3ax%3ay:1234/schema?' \
              'extra1=a%20value&extra2=%2fpath%2f'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location:x:y')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)
        self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
                                                       'extra2': '/path/'})

    def test_connection_from_uri_with_encoded_password(self):
        """Percent-encoded characters in the password are decoded."""
        uri = 'scheme://user:password%20with%20space@host%2flocation%3ax%3ay:1234/schema'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location:x:y')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password with space')
        self.assertEqual(connection.port, 1234)

    def test_connection_from_uri_with_encoded_user(self):
        """Percent-encoded characters in the login are decoded."""
        uri = 'scheme://domain%2fuser:password@host%2flocation%3ax%3ay:1234/schema'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host/location:x:y')
        self.assertEqual(connection.schema, 'schema')
        self.assertEqual(connection.login, 'domain/user')
        self.assertEqual(connection.password, 'password')
        self.assertEqual(connection.port, 1234)

    def test_connection_from_uri_with_encoded_schema(self):
        """Percent-encoded characters in the schema path are decoded."""
        uri = 'scheme://user:password%20with%20space@host:1234/schema%2ftest'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host')
        self.assertEqual(connection.schema, 'schema/test')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password with space')
        self.assertEqual(connection.port, 1234)

    def test_connection_from_uri_no_schema(self):
        """A URI without a path yields an empty-string schema."""
        uri = 'scheme://user:password%20with%20space@host:1234'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host')
        self.assertEqual(connection.schema, '')
        self.assertEqual(connection.login, 'user')
        self.assertEqual(connection.password, 'password with space')
        self.assertEqual(connection.port, 1234)

    def test_connection_from_uri_with_underscore(self):
        """Hyphenated URI schemes are normalized to underscored conn types."""
        uri = 'google-cloud-platform://?extra__google_cloud_platform__key_' \
              'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope=' \
              'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra' \
              '__google_cloud_platform__project=airflow'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'google_cloud_platform')
        self.assertEqual(connection.host, '')
        self.assertEqual(connection.schema, '')
        self.assertEqual(connection.login, None)
        self.assertEqual(connection.password, None)
        self.assertEqual(connection.extra_dejson, dict(
            extra__google_cloud_platform__key_path='/keys/key.json',
            extra__google_cloud_platform__project='airflow',
            extra__google_cloud_platform__scope='https://www.googleapis.com/'
                                                'auth/cloud-platform'))

    def test_connection_from_uri_without_authinfo(self):
        """A URI without user:password yields None login and password."""
        uri = 'scheme://host:1234'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, 'host')
        self.assertEqual(connection.schema, '')
        self.assertEqual(connection.login, None)
        self.assertEqual(connection.password, None)
        self.assertEqual(connection.port, 1234)

    def test_connection_from_uri_with_path(self):
        """A percent-encoded path-style host (e.g. a socket dir) is decoded."""
        uri = 'scheme://%2FTmP%2F:1234'
        connection = Connection(uri=uri)
        self.assertEqual(connection.conn_type, 'scheme')
        self.assertEqual(connection.host, '/TmP/')
        self.assertEqual(connection.schema, '')
        self.assertEqual(connection.login, None)
        self.assertEqual(connection.password, None)
        self.assertEqual(connection.port, 1234)

    @parameterized.expand(
        [
            (
                "http://:password@host:80/database",
                ConnectionParts(
                    conn_type="http", login='', password="password", host="host", port=80, schema="database"
                ),
            ),
            (
                "http://user:@host:80/database",
                ConnectionParts(
                    conn_type="http", login="user", password=None, host="host", port=80, schema="database"
                ),
            ),
            (
                "http://user:password@/database",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="", port=None, schema="database"
                ),
            ),
            (
                "http://user:password@host:80/",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="host", port=80, schema=""
                ),
            ),
            (
                "http://user:password@/",
                ConnectionParts(
                    conn_type="http", login="user", password="password", host="", port=None, schema=""
                ),
            ),
            (
                "postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
                ConnectionParts(
                    conn_type="postgres",
                    login="user",
                    password="password",
                    host="/tmp/z6rqdzqh/example:west1:testdb",
                    port=None,
                    schema="testdb",
                ),
            ),
            (
                "postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
                ConnectionParts(
                    conn_type="postgres",
                    login="user",
                    password=None,
                    host="/tmp/z6rqdzqh/example:europe-west1:testdb",
                    port=None,
                    schema="testdb",
                ),
            ),
            (
                "postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
                ConnectionParts(
                    conn_type="postgres",
                    login=None,
                    password=None,
                    host="/tmp/z6rqdzqh/example:europe-west1:testdb",
                    port=None,
                    schema="",
                ),
            ),
        ]
    )
    def test_connection_from_with_auth_info(self, uri, uri_parts):
        """Parameterized check of edge-case auth/host combinations in URIs."""
        connection = Connection(uri=uri)

        self.assertEqual(connection.conn_type, uri_parts.conn_type)
        self.assertEqual(connection.login, uri_parts.login)
        self.assertEqual(connection.password, uri_parts.password)
        self.assertEqual(connection.host, uri_parts.host)
        self.assertEqual(connection.port, uri_parts.port)
        self.assertEqual(connection.schema, uri_parts.schema)
| |
import logging
import sys
import os.path
import json
from pysam import AlignmentFile
from PyMaSC import entrypoint, logging_version
from PyMaSC.utils.parsearg import get_plot_parser
from PyMaSC.utils.logfmt import set_rootlogger
from PyMaSC.utils.calc import filter_chroms
from PyMaSC.pymasc import prepare_output, PLOTFILE_SUFFIX
from PyMaSC.output.stats import load_stats, output_stats, STATSFILE_SUFFIX
from PyMaSC.output.table import (load_cc, load_masc, load_nreads_table, output_cc, output_mscc,
CCOUTPUT_SUFFIX, MSCCOUTPUT_SUFFIX, NREADOUTPUT_SUFFIX)
from PyMaSC.handler.result import CCResult
from PyMaSC.output.figure import plot_figures
logger = logging.getLogger(__name__)
def _complete_path_arg(args, attr, val):
if not getattr(args, attr) and os.path.exists(val):
setattr(args, attr, val)
def _parse_args():
    """Parse and validate pymasc-plot command line arguments.

    Completes individual file-path arguments from the common ``--statfile``
    prefix, validates that every required input file exists, and sets up
    logging.  All validation failures exit through ``parser.error``
    (which raises SystemExit).

    Returns:
        argparse.Namespace: the validated arguments.
    """
    parser = get_plot_parser()
    args = parser.parse_args()

    # Derive per-file paths from --statfile for any argument not set explicitly.
    if args.statfile:
        for attr, suffix in zip(("stats", "cc", "masc", "nreads"),
                                (STATSFILE_SUFFIX, CCOUTPUT_SUFFIX, MSCCOUTPUT_SUFFIX, NREADOUTPUT_SUFFIX)):
            _complete_path_arg(args, attr, args.statfile + suffix)

    # Required: stats, nreads, and at least one of cc/masc.
    # (parser.error never returns, so if/elif chains here behave like plain ifs.)
    if not args.stats:
        parser.error("Statistics file path is not specified.")
    if not args.nreads:
        parser.error("# of reads table file path is not specified.")
    elif not args.cc and not args.masc:
        parser.error("Neither cross-correlation table file path nor mappability "
                     "sensitive cross-correlation table file path is specified.")

    # Each supplied file must exist on disk.
    if not os.path.exists(args.stats):
        parser.error("Statistics file path does not exist: '{}'".format(args.stats))
    if not os.path.exists(args.nreads):
        parser.error("# of reads table file path does not exist: '{}'".format(args.nreads))
    elif all((args.cc and not os.path.exists(args.cc),
              args.masc and not os.path.exists(args.masc))):
        parser.error("Neither cross-correlation table file path '{}' nor "
                     "mappability sensitive cross-correlation table file path "
                     "'{}' exists.".format(args.cc, args.masc))
    if args.cc:
        if not os.path.exists(args.cc):
            parser.error("Cross-correlation table file path does not exist: "
                         "'{}'".format(args.cc))
        elif not args.sizes or not os.path.exists(args.sizes):
            parser.error("Please specify a chromosome sizes file using -s/--sizes option.")
    if args.masc:
        if not os.path.exists(args.masc):
            # BUGFIX: this message previously interpolated args.cc, reporting
            # the wrong file when the MSCC table was missing.
            parser.error("Mappability sensitive cross-correlation table file path "
                         "does not exist: '{}'".format(args.masc))
        else:
            # Normalize a BigWig-style path into the companion JSON path.
            if args.mappability_stats and not args.mappability_stats.endswith(".json"):
                setattr(args, "mappability_stats",
                        os.path.splitext(args.mappability_stats)[0] + "_mappability.json")
            if not args.mappability_stats or not os.path.exists(args.mappability_stats):
                parser.error("Please specify a JSON file which generated by PyMaSC "
                             "for a BigWig file using -m/--mappability-stats option.")

    # "all" expands to every overwrite target.
    if "all" in args.force_overwrite:
        args.force_overwrite = ("stats", "cc", "mscc")

    # Configure logging before any further work produces output.
    set_rootlogger(args.color, args.log_level)
    logging_version(logger)

    return args
@entrypoint(logger)
def main():
    """Regenerate PyMaSC output tables and plots from previously saved results."""
    args = _parse_args()
    read_len = _prepare_stats(args)
    (ref2cc, ref2genomelen, ref2masc, ref2mappable_len, references,
     (forward_sum, reverse_sum, mappable_forward_sum, mappable_reverse_sum)) = _load_tables(args)
    # Keep only the chromosomes selected by the user's chromosome filter.
    references = filter_chroms(references, args.chromfilter)
    checked_suffixes = _prepare_outputs(args)
    # Rebuild the cross-correlation result object from the loaded tables.
    ccr = CCResult(
        args.smooth_window, args.chi2_pval, args.mask_size, args.bg_avr_width,
        args.library_length,
        read_len=read_len,
        references=references,
        ref2genomelen=ref2genomelen,
        ref2forward_sum=forward_sum,
        ref2reverse_sum=reverse_sum,
        ref2cc=ref2cc,
        ref2mappable_len=ref2mappable_len,
        mappable_ref2forward_sum=mappable_forward_sum,
        mappable_ref2reverse_sum=mappable_reverse_sum,
        ref2masc=ref2masc
    )
    # Emit only the tables whose overwrite check passed in _prepare_outputs.
    for outputfunc, suffix in zip((output_stats, output_cc, output_mscc),
                                  (STATSFILE_SUFFIX, CCOUTPUT_SUFFIX, MSCCOUTPUT_SUFFIX)):
        if suffix in checked_suffixes:
            outputfunc(os.path.join(args.outdir, args.name), ccr)
    # The figure file is regenerated unconditionally.
    plot_figures(os.path.join(args.outdir, args.name + PLOTFILE_SUFFIX), ccr)
def _prepare_stats(args):
    """Load the statistics file and extract name, library length and read length.

    Fills ``args.library_length`` and ``args.name`` from the stats file when
    the user did not supply them, and returns the mandatory read length.
    Exits with status 1 when the file cannot be read or required attributes
    are missing.
    """
    try:
        attrs = load_stats(args.stats, ("name", "library_len", "read_len"))
    except IOError:
        logger.critical("Failed to load stats.")
        sys.exit(1)
    # A command-line value wins over the stored library length.
    if not args.library_length and "library_len" in attrs:
        args.library_length = attrs["library_len"]
    if "read_len" not in attrs:
        logger.critical("Mandatory attribute 'Read length' not found in '{}'.".format(args.stats))
        sys.exit(1)
    read_len = attrs["read_len"]
    # Prefer the stored analysis name; fall back to -n/--name.
    if "name" in attrs:
        args.name = attrs.pop("name")
    if not args.name:
        logger.critical("Mandatory attribute 'Name' not found in '{}'. "
                        "Set name manually with -n/--name option.".format(args.stats))
        sys.exit(1)
    return read_len
def _load_table(path, load_fun):
try:
return load_fun(path)
except (IOError, KeyError, IndexError, StopIteration):
logger.critical("Failed to load tables.")
sys.exit(1)
def _load_tables(args):
    """Load CC/MSCC tables, genome/mappable lengths and read-count tables.

    Returns a 6-tuple::

        (cc_table, ref2genomelen, masc_table, ref2mappable_len,
         sorted_common_references, nreads_dicts)

    Exits the program when a file fails to load or a table references a
    chromosome missing from its companion length file.
    """
    if args.cc:
        cc_table = _load_table(args.cc, load_cc)
        cc_references = set(cc_table.keys())
        ref2genomelen = _load_chrom_sizes(args.sizes)
        for ref in cc_references:
            if ref not in ref2genomelen:
                logger.critical("Reference '{}' not found in '{}'.".format(ref, args.sizes))
                sys.exit(1)
    else:
        cc_table = ref2genomelen = None
        cc_references = set()

    if args.masc:
        masc_table = _load_table(args.masc, load_masc)
        masc_references = set(masc_table.keys())
        try:
            ref2mappable_len = _load_mappable_lengths(args.mappability_stats)
        except json.JSONDecodeError as e:
            # BUGFIX: previously caught json.JSONDecoder (the decoder class,
            # not an exception type), which turned a broken JSON file into a
            # TypeError instead of this error report.
            logger.critical("Failed to load '{}':".format(args.mappability_stats))
            logger.error(e.msg)
            sys.exit(1)
        for ref in masc_references:
            if ref not in ref2mappable_len:
                logger.critical("Reference '{}' not found in '{}'.".format(ref, args.mappability_stats))
                sys.exit(1)
    else:
        masc_table = ref2mappable_len = None
        masc_references = set()

    dicts = forward_sum, reverse_sum, mappable_forward_sum, mappable_reverse_sum = _load_table(args.nreads, load_nreads_table)
    if forward_sum and reverse_sum:
        nreads_refs = set(forward_sum.keys())
        assert nreads_refs == set(reverse_sum.keys())
    else:
        nreads_refs = set(mappable_forward_sum.keys())
        assert nreads_refs == set(mappable_reverse_sum.keys())
    # "whole" is the genome-wide aggregate row, not a chromosome.
    nreads_refs.discard("whole")

    # Warn (but continue) when chromosome name sets disagree across tables;
    # downstream processing uses the common subset only.
    refsets = [s for s in (cc_references, masc_references) if s]
    union = nreads_refs.union(*refsets)
    intersection = nreads_refs.intersection(*refsets)
    if union != intersection:
        logger.warning("Chromosome names in tables are unmatched.")
        logger.warning("Trying use common names anyway: {}".format(intersection))

    return cc_table, ref2genomelen, masc_table, ref2mappable_len, sorted(intersection), dicts
def _prepare_outputs(args):
    """Decide which output files may be written and prepare the output dir.

    Skips any output whose path would overwrite its own input file unless
    the corresponding force-overwrite flag was given.  Returns the list of
    output-file suffixes to generate (the plot file is always included).
    """
    check_suffixes = [PLOTFILE_SUFFIX]
    output_base = os.path.join(args.outdir, args.name)
    # BUGFIX: the force-overwrite option for the MSCC table is named "mscc"
    # (matching the "all" expansion in _parse_args), not "masc"; the old
    # "masc" key could never match, so --force-overwrite never applied to
    # the MSCC output.
    for source, suffix, force_overwrite in zip(
            (args.stats, args.cc, args.masc),
            (STATSFILE_SUFFIX, CCOUTPUT_SUFFIX, MSCCOUTPUT_SUFFIX),
            tuple(n in args.force_overwrite for n in ("stats", "cc", "mscc"))):
        if source and _check_overwrite(source, os.path.normpath(output_base + suffix), force_overwrite):
            check_suffixes.append(suffix)
    prepare_output([None], [args.name], args.outdir, check_suffixes)
    return check_suffixes
def _check_overwrite(source, output, force_overwrite):
if os.path.normpath(source) == output:
if force_overwrite:
logger.warning("Overwrite option was specified. '{}' will be overwritten!".format(output))
return True
else:
logger.warning("Prevent to overwrite input stats file '{}', "
"output stats file will be skipped.".format(output))
return False
return True
def _load_chrom_sizes(path):
    """Map reference names to lengths from a BAM/SAM header or a sizes file.

    First tries to read `path` as an alignment file header via pysam; when
    pysam rejects the file (ValueError), falls back to parsing it as a
    UCSC-style tab-separated chrom.sizes text file (name, length, and
    optionally further columns which are ignored).
    """
    try:
        with AlignmentFile(path) as f:
            return {r: l for r, l in zip(f.references, f.lengths)}
    except ValueError:
        ref2len = {}
        with open(path) as f:
            for line in f:
                # BUGFIX: the old unpack `chrom, length, _ = l.split('\t', 2)`
                # required at least three columns and raised ValueError on
                # standard two-column chrom.sizes files; only the first two
                # columns are meaningful here.
                fields = line.rstrip("\n").split("\t")
                if len(fields) < 2:
                    continue  # tolerate blank / malformed lines
                ref2len[fields[0]] = int(fields[1])
        return ref2len
def _load_mappable_lengths(path):
with open(path) as f:
return json.load(f)["references"]
| |
"""
Rest API for Home Assistant.
For more details about the RESTful API, please refer to the documentation at
https://home-assistant.io/developers/api/
"""
import asyncio
import json
import logging
from aiohttp import web
import async_timeout
import homeassistant.core as ha
import homeassistant.remote as rem
from homeassistant.bootstrap import ERROR_LOG_FILENAME
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP, EVENT_TIME_CHANGED,
HTTP_BAD_REQUEST, HTTP_CREATED, HTTP_NOT_FOUND,
MATCH_ALL, URL_API, URL_API_COMPONENTS,
URL_API_CONFIG, URL_API_DISCOVERY_INFO, URL_API_ERROR_LOG,
URL_API_EVENTS, URL_API_SERVICES,
URL_API_STATES, URL_API_STATES_ENTITY, URL_API_STREAM, URL_API_TEMPLATE,
__version__)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.state import AsyncTrackStates
from homeassistant.helpers import template
from homeassistant.components.http import HomeAssistantView
DOMAIN = 'api'
DEPENDENCIES = ['http']
STREAM_PING_PAYLOAD = "ping"
STREAM_PING_INTERVAL = 50 # seconds
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """Register the API with the HTTP interface."""
    # One view class per REST resource; each HomeAssistantView subclass
    # declares its own url and name attributes.
    hass.http.register_view(APIStatusView)
    hass.http.register_view(APIEventStream)
    hass.http.register_view(APIConfigView)
    hass.http.register_view(APIDiscoveryView)
    hass.http.register_view(APIStatesView)
    hass.http.register_view(APIEntityStateView)
    hass.http.register_view(APIEventListenersView)
    hass.http.register_view(APIEventView)
    hass.http.register_view(APIServicesView)
    hass.http.register_view(APIDomainServicesView)
    hass.http.register_view(APIComponentsView)
    hass.http.register_view(APITemplateView)
    # Serve the error log file directly.  NOTE(review): the final False is
    # forwarded to register_static_path — confirm its meaning against the
    # http component's signature.
    hass.http.register_static_path(
        URL_API_ERROR_LOG, hass.config.path(ERROR_LOG_FILENAME), False)
    return True
class APIStatusView(HomeAssistantView):
    """View reporting that the API endpoint is alive."""
    url = URL_API
    name = "api:status"
    @ha.callback
    def get(self, request):
        """Retrieve if API is running."""
        # Reaching this handler at all means the API is up.
        return self.json_message('API running.')
class APIEventStream(HomeAssistantView):
    """View to handle EventStream requests."""
    url = URL_API_STREAM
    name = "api:stream"
    @asyncio.coroutine
    def get(self, request):
        """Provide a streaming interface for the event bus."""
        # pylint: disable=no-self-use
        hass = request.app['hass']
        # Sentinel queued to tell the writer loop below to stop.
        stop_obj = object()
        # Queue decoupling the event-bus listener from the HTTP writer.
        to_write = asyncio.Queue(loop=hass.loop)
        restrict = request.query.get('restrict')
        if restrict:
            # Always include the stop event so the stream shuts down cleanly
            # even when the client restricted the event types.
            restrict = restrict.split(',') + [EVENT_HOMEASSISTANT_STOP]
        @asyncio.coroutine
        def forward_events(event):
            """Forward events to the open request."""
            # Time ticks are too chatty to stream.
            if event.event_type == EVENT_TIME_CHANGED:
                return
            if restrict and event.event_type not in restrict:
                return
            _LOGGER.debug('STREAM %s FORWARDING %s', id(stop_obj), event)
            if event.event_type == EVENT_HOMEASSISTANT_STOP:
                data = stop_obj
            else:
                data = json.dumps(event, cls=rem.JSONEncoder)
            yield from to_write.put(data)
        response = web.StreamResponse()
        response.content_type = 'text/event-stream'
        yield from response.prepare(request)
        unsub_stream = hass.bus.async_listen(MATCH_ALL, forward_events)
        try:
            _LOGGER.debug('STREAM %s ATTACHED', id(stop_obj))
            # Fire off one message so browsers fire open event right away
            yield from to_write.put(STREAM_PING_PAYLOAD)
            while True:
                try:
                    with async_timeout.timeout(STREAM_PING_INTERVAL,
                                               loop=hass.loop):
                        payload = yield from to_write.get()
                    if payload is stop_obj:
                        break
                    # Server-sent-events framing: "data: ..." + blank line.
                    msg = "data: {}\n\n".format(payload)
                    _LOGGER.debug('STREAM %s WRITING %s', id(stop_obj),
                                  msg.strip())
                    response.write(msg.encode("UTF-8"))
                    yield from response.drain()
                except asyncio.TimeoutError:
                    # No event within the ping interval: queue a ping to keep
                    # the connection alive.
                    yield from to_write.put(STREAM_PING_PAYLOAD)
        except asyncio.CancelledError:
            # Client disconnected.
            _LOGGER.debug('STREAM %s ABORT', id(stop_obj))
        finally:
            # Always detach the bus listener, whatever ended the loop.
            _LOGGER.debug('STREAM %s RESPONSE CLOSED', id(stop_obj))
            unsub_stream()
class APIConfigView(HomeAssistantView):
    """View exposing the current core configuration."""
    url = URL_API_CONFIG
    name = "api:config"
    @ha.callback
    def get(self, request):
        """Get current configuration."""
        hass = request.app['hass']
        return self.json(hass.config.as_dict())
class APIDiscoveryView(HomeAssistantView):
    """View to provide discovery info."""
    # Discovery must be reachable before the client has credentials.
    requires_auth = False
    url = URL_API_DISCOVERY_INFO
    name = "api:discovery"
    @ha.callback
    def get(self, request):
        """Get discovery info."""
        config = request.app['hass'].config
        return self.json({
            'base_url': config.api.base_url,
            'location_name': config.location_name,
            'requires_api_password': config.api.api_password is not None,
            'version': __version__
        })
class APIStatesView(HomeAssistantView):
    """View returning all current entity states."""
    url = URL_API_STATES
    name = "api:states"
    @ha.callback
    def get(self, request):
        """Get current states."""
        hass = request.app['hass']
        return self.json(hass.states.async_all())
class APIEntityStateView(HomeAssistantView):
    """View to handle EntityState requests."""
    url = "/api/states/{entity_id}"
    name = "api:entity-state"
    @ha.callback
    def get(self, request, entity_id):
        """Retrieve state of entity."""
        state = request.app['hass'].states.get(entity_id)
        if state:
            return self.json(state)
        return self.json_message('Entity not found', HTTP_NOT_FOUND)
    @asyncio.coroutine
    def post(self, request, entity_id):
        """Update state of entity.

        Expects a JSON body with at least 'state'; 'attributes' and
        'force_update' are optional.  Creating a new entity returns 201,
        updating an existing one returns 200.
        """
        hass = request.app['hass']
        try:
            data = yield from request.json()
        except ValueError:
            return self.json_message('Invalid JSON specified',
                                     HTTP_BAD_REQUEST)
        new_state = data.get('state')
        if new_state is None:
            return self.json_message('No state specified', HTTP_BAD_REQUEST)
        attributes = data.get('attributes')
        force_update = data.get('force_update', False)
        # Must check before async_set so we know which status code applies.
        is_new_state = hass.states.get(entity_id) is None
        # Write state
        hass.states.async_set(entity_id, new_state, attributes, force_update)
        # Read the state back for our response
        status_code = HTTP_CREATED if is_new_state else 200
        resp = self.json(hass.states.get(entity_id), status_code)
        resp.headers.add('Location', URL_API_STATES_ENTITY.format(entity_id))
        return resp
    @ha.callback
    def delete(self, request, entity_id):
        """Remove entity."""
        if request.app['hass'].states.async_remove(entity_id):
            return self.json_message('Entity removed')
        return self.json_message('Entity not found', HTTP_NOT_FOUND)
class APIEventListenersView(HomeAssistantView):
    """View listing event types with their listener counts."""
    url = URL_API_EVENTS
    name = "api:event-listeners"
    @ha.callback
    def get(self, request):
        """Get event listeners."""
        hass = request.app['hass']
        return self.json(async_events_json(hass))
class APIEventView(HomeAssistantView):
    """View to handle Event requests."""
    url = '/api/events/{event_type}'
    name = "api:event"
    @asyncio.coroutine
    def post(self, request, event_type):
        """Fire events.

        The optional JSON body becomes the event data and must be an object.
        """
        body = yield from request.text()
        event_data = json.loads(body) if body else None
        if event_data is not None and not isinstance(event_data, dict):
            return self.json_message('Event data should be a JSON object',
                                     HTTP_BAD_REQUEST)
        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        if event_type == ha.EVENT_STATE_CHANGED and event_data:
            for key in ('old_state', 'new_state'):
                state = ha.State.from_dict(event_data.get(key))
                if state:
                    event_data[key] = state
        # Mark the event as remote-originated so consumers can tell it apart
        # from events fired inside this instance.
        request.app['hass'].bus.async_fire(event_type, event_data,
                                           ha.EventOrigin.remote)
        return self.json_message("Event {} fired.".format(event_type))
class APIServicesView(HomeAssistantView):
    """View listing all registered services per domain."""
    url = URL_API_SERVICES
    name = "api:services"
    @ha.callback
    def get(self, request):
        """Get registered services."""
        hass = request.app['hass']
        return self.json(async_services_json(hass))
class APIDomainServicesView(HomeAssistantView):
    """View to handle DomainServices requests."""
    url = "/api/services/{domain}/{service}"
    name = "api:domain-services"
    @asyncio.coroutine
    def post(self, request, domain, service):
        """Call a service.

        Returns a list of changed states.
        """
        hass = request.app['hass']
        body = yield from request.text()
        data = json.loads(body) if body else None
        # AsyncTrackStates collects every state change that occurs while the
        # service call (blocking=True) is running.
        with AsyncTrackStates(hass) as changed_states:
            yield from hass.services.async_call(domain, service, data, True)
        return self.json(changed_states)
class APIComponentsView(HomeAssistantView):
    """View listing the currently loaded components."""
    url = URL_API_COMPONENTS
    name = "api:components"
    @ha.callback
    def get(self, request):
        """Get current loaded components."""
        hass = request.app['hass']
        return self.json(hass.config.components)
class APITemplateView(HomeAssistantView):
    """View to handle template rendering requests."""
    url = URL_API_TEMPLATE
    name = "api:template"
    @asyncio.coroutine
    def post(self, request):
        """Render a template.

        Expects a JSON body with 'template' and optional 'variables'.
        """
        try:
            data = yield from request.json()
            tpl = template.Template(data['template'], request.app['hass'])
            return tpl.async_render(data.get('variables'))
        except (ValueError, TemplateError) as ex:
            # Malformed JSON (ValueError) and template evaluation problems
            # are both reported as 400.
            return self.json_message('Error rendering template: {}'.format(ex),
                                     HTTP_BAD_REQUEST)
def async_services_json(hass):
    """Generate services data to JSONify."""
    registry = hass.services.async_services()
    return [
        {"domain": domain, "services": services}
        for domain, services in registry.items()
    ]
def async_events_json(hass):
    """Generate event data to JSONify."""
    listeners = hass.bus.async_listeners()
    return [
        {"event": event, "listener_count": count}
        for event, count in listeners.items()
    ]
| |
# -*- coding: utf-8 -*-
# vim:tabstop=4:expandtab:sw=4:softtabstop=4
import datetime
from suds.client import Client
from suds.cache import ObjectCache
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from billing import Gateway
from billing.models import PaylaneTransaction,PaylaneAuthorization
from billing.utils.credit_card import CreditCard,InvalidCard,Visa,MasterCard
from billing.utils.paylane import PaylaneError
from billing.signals import transaction_was_successful,transaction_was_unsuccessful
class PaylaneGateway(Gateway):
    """Payment gateway for Paylane's SOAP "Direct" API (via suds).

    Every card operation persists a PaylaneTransaction row and emits the
    billing success/failure signals; authorization-based operations also
    persist a PaylaneAuthorization row linked to that transaction.
    """
    default_currency = "EUR"
    supported_cardtypes = [Visa, MasterCard]
    supported_countries = ['PT', ]
    homepage_url = 'http://www.paylane.com/'
    display_name = 'Paylane'

    def __init__(self):
        # WSDL location and suds cache directory are overridable through
        # Django settings; credentials are mandatory outside of test mode.
        wsdl = getattr(settings, 'PAYLANE_WSDL', 'https://direct.paylane.com/wsdl/production/Direct.wsdl')
        wsdl_cache = getattr(settings, 'SUDS_CACHE_DIR', '/tmp/suds')
        if self.test_mode:
            username = getattr(settings, 'PAYLANE_USERNAME', '')
            password = getattr(settings, 'PAYLANE_PASSWORD', '')
        else:
            username = settings.PAYLANE_USERNAME
            password = settings.PAYLANE_PASSWORD
        self.client = Client(wsdl, username=username, password=password,
                             cache=ObjectCache(location=wsdl_cache, days=15))

    def _validate(self, card):
        """Raise InvalidCard unless `card` is a valid CreditCard.

        Also zero-pads the expiry month, as Paylane expects two digits.
        """
        if not isinstance(card, CreditCard):
            raise InvalidCard('credit_card not an instance of CreditCard')
        if not self.validate_card(card):
            raise InvalidCard('Invalid Card')
        card.month = '%02d' % card.month

    def _multi_sale_params(self, money, credit_card, customer, product_description, capture_later):
        """Build the ns0:multi_sale_params structure shared by authorize()
        and purchase()."""
        params = self.client.factory.create('ns0:multi_sale_params')
        params['payment_method'] = {}
        params['payment_method']['card_data'] = {}
        params['payment_method']['card_data']['card_number'] = credit_card.number
        params['payment_method']['card_data']['card_code'] = credit_card.verification_value
        params['payment_method']['card_data']['expiration_month'] = credit_card.month
        params['payment_method']['card_data']['expiration_year'] = credit_card.year
        params['payment_method']['card_data']['name_on_card'] = '%s %s' % (credit_card.first_name, credit_card.last_name)
        params['capture_later'] = capture_later
        # The 'customer' member is pre-created by the suds factory, so its
        # sub-fields can be assigned directly.
        params['customer']['name'] = customer.name
        params['customer']['email'] = customer.email
        params['customer']['ip'] = customer.ip_address
        params['customer']['address']['street_house'] = customer.address.street_house
        params['customer']['address']['city'] = customer.address.city
        if customer.address.state:
            params['customer']['address']['state'] = customer.address.state
        params['customer']['address']['zip'] = customer.address.zip_code
        params['customer']['address']['country_code'] = customer.address.country_code
        params['amount'] = money
        params['currency_code'] = self.default_currency
        params['product'] = {}
        params['product']['description'] = product_description
        return params

    def _record_transaction(self, amount, customer_name, customer_email, product, res):
        """Persist and return a PaylaneTransaction row mirroring `res`."""
        transaction = PaylaneTransaction()
        transaction.amount = amount
        transaction.customer_name = customer_name
        transaction.customer_email = customer_email
        transaction.product = product
        transaction.success = hasattr(res, 'OK')
        transaction.save()
        return transaction

    def _paylane_error(self, res):
        """Build a PaylaneError from a reply carrying an ERROR member."""
        return PaylaneError(getattr(res.ERROR, 'error_number'),
                            getattr(res.ERROR, 'error_description'),
                            getattr(res.ERROR, 'processor_error_number', ''),
                            getattr(res.ERROR, 'processor_error_description', ''))

    def authorize(self, money, credit_card, options=None):
        """Authorization for a future capture transaction"""
        self._validate(credit_card)
        customer = options['customer']
        product = options['product']
        params = self._multi_sale_params(money, credit_card, customer,
                                         product.description, capture_later=True)
        res = self.client.service.multiSale(params)
        transaction = self._record_transaction(money, customer.name,
                                               customer.email,
                                               product.description, res)
        if hasattr(res, 'OK'):
            status = 'SUCCESS'
            authz = PaylaneAuthorization()
            authz.sale_authorization_id = res.OK.id_sale_authorization
            authz.transaction = transaction
            authz.first_authorization = True
            authz.save()
            response = {'transaction': transaction, 'authorization': authz}
            # NOTE: signal type 'recurring' is kept from the original code;
            # authorize() doubles as the first step of recurring billing.
            transaction_was_successful.send(sender=self, type='recurring', response=response)
        else:
            status = 'FAILURE'
            response = {'error': self._paylane_error(res), 'transaction': transaction}
            transaction_was_unsuccessful.send(sender=self, type='recurring', response=response)
        return {'status': status, 'response': response}

    def capture(self, money, authorization, options=None):
        """Capture all funds from a previously authorized transaction"""
        product = options['product']
        res = self.client.service.captureSale(id_sale_authorization=authorization.sale_authorization_id,
                                              amount=money,
                                              description=product)
        # The new transaction row copies the original authorization's data
        # (including its amount), matching the original implementation.
        previous = authorization.transaction
        transaction = self._record_transaction(previous.amount, previous.customer_name,
                                               previous.customer_email, previous.product, res)
        if hasattr(res, 'OK'):
            status = 'SUCCESS'
            authz = PaylaneAuthorization()
            authz.sale_authorization_id = authorization.sale_authorization_id
            authz.transaction = transaction
            authz.save()
            response = {'transaction': transaction, 'authorization': authz}
            transaction_was_successful.send(sender=self, type='bill_recurring', response=response)
        else:
            status = 'FAILURE'
            response = {'error': self._paylane_error(res), 'transaction': transaction}
            transaction_was_unsuccessful.send(sender=self, type='bill_recurring', response=response)
        return {'status': status, 'response': response}

    def purchase(self, money, credit_card, options=None):
        """One go authorize and capture transaction"""
        self._validate(credit_card)
        customer = options['customer']
        product = options['product']
        # Unlike authorize(), options['product'] itself is the description.
        params = self._multi_sale_params(money, credit_card, customer,
                                         product, capture_later=False)
        res = self.client.service.multiSale(params)
        transaction = self._record_transaction(money, customer.name,
                                               customer.email, product, res)
        if hasattr(res, 'OK'):
            status = 'SUCCESS'
            response = {'transaction': transaction}
            transaction_was_successful.send(sender=self, type='purchase', response=response)
        else:
            status = 'FAILURE'
            response = {'error': self._paylane_error(res), 'transaction': transaction}
            transaction_was_unsuccessful.send(sender=self, type='purchase', response=response)
        return {'status': status, 'response': response}

    def recurring(self, money, credit_card, options=None):
        """Setup a recurring transaction"""
        return self.authorize(money, credit_card, options)

    def void(self, identification, options=None):
        """Null/Blank/Delete a previous transaction"""
        res = self.client.service.closeSaleAuthorization(id_sale_authorization=identification)
        if hasattr(res, 'OK'):
            return {'status': 'SUCCESS'}
        # No transaction row is recorded for void, so the failure response
        # carries only the error.
        return {'status': 'FAILURE',
                'response': {'error': self._paylane_error(res),
                             }
                }

    def bill_recurring(self, amount, authorization, description):
        """ Debit a recurring transaction payment, eg. monthly subscription.

            Use the result of recurring() as the paylane_recurring parameter.
            If this transaction is successful, use it's response as input for the
            next bill_recurring() call.
        """
        processing_date = datetime.datetime.today().strftime("%Y-%m-%d")
        # NOTE(review): resale_by_authorization is passed the authorization
        # object itself (as in the original code) — confirm whether the API
        # expects a boolean flag here instead.
        res = self.client.service.resale(id_sale=authorization.sale_authorization_id, amount=amount,
                                         currency=self.default_currency, description=description,
                                         processing_date=processing_date,
                                         resale_by_authorization=authorization)
        previous = authorization.transaction
        transaction = self._record_transaction(previous.amount, previous.customer_name,
                                               previous.customer_email, previous.product, res)
        if hasattr(res, 'OK'):
            status = 'SUCCESS'
            authz = PaylaneAuthorization()
            authz.sale_authorization_id = authorization.sale_authorization_id
            authz.transaction = transaction
            authz.save()
            response = {'transaction': transaction, 'authorization': authz}
            transaction_was_successful.send(sender=self, type='bill_recurring', response=response)
        else:
            status = 'FAILURE'
            response = {'error': self._paylane_error(res), 'transaction': transaction}
            transaction_was_unsuccessful.send(sender=self, type='bill_recurring', response=response)
        return {'status': status, 'response': response}
| |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from base64 import b64encode
import sys
import shutil
from zope.interface import implements
from twisted.internet import reactor, defer
from twisted.python import log, failure, runtime
from buildslave.interfaces import ISlaveCommand
from buildslave import runprocess
from buildslave.exceptions import AbandonChain
from buildslave.commands import utils
from buildslave import util
# this used to be a CVS $-style "Revision" auto-updated keyword, but since I
# moved to Darcs as the primary repository, this is updated manually each
# time this file is changed. The last cvs_ver that was here was 1.51 .
command_version = "2.13"
# version history:
# >=1.17: commands are interruptable
# >=1.28: Arch understands 'revision', added Bazaar
# >=1.33: Source classes understand 'retry'
# >=1.39: Source classes correctly handle changes in branch (except Git)
# Darcs accepts 'revision' (now all do but Git) (well, and P4Sync)
# Arch/Baz should accept 'build-config'
# >=1.51: (release 0.7.3)
# >= 2.1: SlaveShellCommand now accepts 'initial_stdin', 'keep_stdin_open',
# and 'logfiles'. It now sends 'log' messages in addition to
# stdout/stdin/header/rc. It acquired writeStdin/closeStdin methods,
# but these are not remotely callable yet.
# (not externally visible: ShellCommandPP has writeStdin/closeStdin.
# ShellCommand accepts new arguments (logfiles=, initialStdin=,
# keepStdinOpen=) and no longer accepts stdin=)
# (release 0.7.4)
# >= 2.2: added monotone, uploadFile, and downloadFile (release 0.7.5)
# >= 2.3: added bzr (release 0.7.6)
# >= 2.4: Git understands 'revision' and branches
# >= 2.5: workaround added for remote 'hg clone --rev REV' when hg<0.9.2
# >= 2.6: added uploadDirectory
# >= 2.7: added usePTY option to SlaveShellCommand
# >= 2.8: added username and password args to SVN class
# >= 2.9: add depth arg to SVN class
# >= 2.10: CVS can handle 'extra_options' and 'export_options'
# >= 2.11: Arch, Bazaar, and Monotone removed
# >= 2.12: SlaveShellCommand no longer accepts 'keep_stdin_open'
# >= 2.13: SlaveFileUploadCommand supports option 'keepstamp'
class Command:
implements(ISlaveCommand)
"""This class defines one command that can be invoked by the build master.
The command is executed on the slave side, and always sends back a
completion message when it finishes. It may also send intermediate status
as it runs (by calling builder.sendStatus). Some commands can be
interrupted (either by the build master or a local timeout), in which
case the step is expected to complete normally with a status message that
indicates an error occurred.
These commands are used by BuildSteps on the master side. Each kind of
BuildStep uses a single Command. The slave must implement all the
Commands required by the set of BuildSteps used for any given build:
this is checked at startup time.
All Commands are constructed with the same signature:
c = CommandClass(builder, stepid, args)
where 'builder' is the parent SlaveBuilder object, and 'args' is a
dict that is interpreted per-command.
The setup(args) method is available for setup, and is run from __init__.
The Command is started with start(). This method must be implemented in a
subclass, and it should return a Deferred. When your step is done, you
should fire the Deferred (the results are not used). If the command is
interrupted, it should fire the Deferred anyway.
While the command runs. it may send status messages back to the
buildmaster by calling self.sendStatus(statusdict). The statusdict is
interpreted by the master-side BuildStep however it likes.
A separate completion message is sent when the deferred fires, which
indicates that the Command has finished, but does not carry any status
data. If the Command needs to return an exit code of some sort, that
should be sent as a regular status message before the deferred is fired .
Once builder.commandComplete has been run, no more status messages may be
sent.
If interrupt() is called, the Command should attempt to shut down as
quickly as possible. Child processes should be killed, new ones should
not be started. The Command should send some kind of error status update,
then complete as usual by firing the Deferred.
.interrupted should be set by interrupt(), and can be tested to avoid
sending multiple error status messages.
If .running is False, the bot is shutting down (or has otherwise lost the
connection to the master), and should not send any status messages. This
is checked in Command.sendStatus .
"""
# builder methods:
# sendStatus(dict) (zero or more)
# commandComplete() or commandInterrupted() (one, at end)
debug = False
interrupted = False
running = False # set by Builder, cleared on shutdown or when the
# Deferred fires
_reactor = reactor
def __init__(self, builder, stepId, args):
self.builder = builder
self.stepId = stepId # just for logging
self.args = args
self.startTime = None
self.setup(args)
def setup(self, args):
"""Override this in a subclass to extract items from the args dict."""
pass
def doStart(self):
self.running = True
self.startTime = util.now(self._reactor)
d = defer.maybeDeferred(self.start)
def commandComplete(res):
self.sendStatus({"elapsed": util.now(self._reactor) - self.startTime})
self.running = False
return res
d.addBoth(commandComplete)
return d
def start(self):
"""Start the command. This method should return a Deferred that will
fire when the command has completed. The Deferred's argument will be
ignored.
This method should be overridden by subclasses."""
raise NotImplementedError, "You must implement this in a subclass"
def sendStatus(self, status):
"""Send a status update to the master."""
if self.debug:
log.msg("sendStatus", status)
if not self.running:
log.msg("would sendStatus but not .running")
return
self.builder.sendUpdate(status)
def doInterrupt(self):
self.running = False
self.interrupt()
def interrupt(self):
"""Override this in a subclass to allow commands to be interrupted.
May be called multiple times, test and set self.interrupted=True if
this matters."""
pass
# utility methods, mostly used by SlaveShellCommand and the like
def _abandonOnFailure(self, rc):
if type(rc) is not int:
log.msg("weird, _abandonOnFailure was given rc=%s (%s)" % \
(rc, type(rc)))
assert isinstance(rc, int)
if rc != 0:
raise AbandonChain(rc)
return rc
    def _sendRC(self, res):
        # Report a successful exit to the master.  The rc is hard-coded to
        # 0 regardless of 'res'; failures are expected to have been routed
        # through _abandonOnFailure/_checkAbandoned before reaching this
        # callback.  NOTE(review): returns None, ending the value part of
        # any callback chain it is attached to -- confirm callers expect that.
        self.sendStatus({'rc': 0})
def _checkAbandoned(self, why):
log.msg("_checkAbandoned", why)
why.trap(AbandonChain)
log.msg(" abandoning chain", why.value)
self.sendStatus({'rc': why.value.args[0]})
return None
class SourceBaseCommand(Command):
"""Abstract base class for Version Control System operations (checkout
and update). This class extracts the following arguments from the
dictionary received from the master:
- ['workdir']: (required) the subdirectory where the buildable sources
should be placed
- ['mode']: one of update/copy/clobber/export, defaults to 'update'
- ['revision']: (required) If not None, this is an int or string which indicates
which sources (along a time-like axis) should be used.
It is the thing you provide as the CVS -r or -D
argument.
- ['patch']: If not None, this is a tuple of (striplevel, patch)
which contains a patch that should be applied after the
checkout has occurred. Once applied, the tree is no
longer eligible for use with mode='update', and it only
makes sense to use this in conjunction with a
['revision'] argument. striplevel is an int, and patch
is a string in standard unified diff format. The patch
will be applied with 'patch -p%d <PATCH', with
STRIPLEVEL substituted as %d. The command will fail if
the patch process fails (rejected hunks).
- ['timeout']: seconds of silence tolerated before we kill off the
command
- ['maxTime']: seconds before we kill off the command
- ['retry']: If not None, this is a tuple of (delay, repeats)
which means that any failed VC updates should be
reattempted, up to REPEATS times, after a delay of
DELAY seconds. This is intended to deal with slaves
that experience transient network failures.
"""
sourcedata = ""
    def setup(self, args):
        """Extract the common VC arguments from the master-supplied dict.

        VC-specific subclasses extend this (and must upcall) to pull out
        additional entries.
        """
        # if we need to parse the output, use this environment. Otherwise
        # command output will be in whatever the buildslave's native language
        # has been set to.
        self.env = os.environ.copy()
        self.env['LC_MESSAGES'] = "C"
        self.workdir = args['workdir']  # required: no default
        self.mode = args.get('mode', "update")
        self.revision = args.get('revision')
        self.patch = args.get('patch')
        self.timeout = args.get('timeout', 120)
        self.maxTime = args.get('maxTime', None)
        self.retry = args.get('retry')
        self._commandPaths = {}  # cache: command name -> absolute path
        # VC-specific subclasses should override this to extract more args.
        # Make sure to upcall!
def getCommand(self, name):
"""Wrapper around utils.getCommand that will output a resonable
error message and raise AbandonChain if the command cannot be
found"""
if name not in self._commandPaths:
try:
self._commandPaths[name] = utils.getCommand(name)
except RuntimeError:
self.sendStatus({'stderr' : "could not find '%s'\n" % name})
self.sendStatus({'stderr' : "PATH is '%s'\n" % os.environ.get('PATH', '')})
raise AbandonChain(-1)
return self._commandPaths[name]
    def start(self):
        """Run the whole source step: choose update vs. fresh checkout,
        optionally copy into the workdir, then apply any patch.

        Returns a Deferred that fires after the final 'rc' status has been
        sent (or the chain has been abandoned).
        """
        # NOTE(review): self.header is expected to be provided by the
        # VC-specific subclass -- confirm before reusing this base alone.
        self.sendStatus({'header': "starting " + self.header + "\n"})
        self.command = None
        # self.srcdir is where the VC system should put the sources
        if self.mode == "copy":
            self.srcdir = "source" # hardwired directory name, sorry
        else:
            self.srcdir = self.workdir
        # each source directory gets its own sourcedata file, keyed by the
        # base64 of the directory name
        self.sourcedatafile = os.path.join(self.builder.basedir,
                                           ".buildbot-sourcedata-" + b64encode(self.srcdir))

        # upgrade older versions to the new sourcedata location
        old_sd_path = os.path.join(self.builder.basedir, self.srcdir, ".buildbot-sourcedata")
        if os.path.exists(old_sd_path) and not os.path.exists(self.sourcedatafile):
            os.rename(old_sd_path, self.sourcedatafile)

        # also upgrade versions that didn't include the encoded version of the
        # source directory
        old_sd_path = os.path.join(self.builder.basedir, ".buildbot-sourcedata")
        if os.path.exists(old_sd_path) and not os.path.exists(self.sourcedatafile):
            os.rename(old_sd_path, self.sourcedatafile)

        d = defer.succeed(None)
        self.maybeClobber(d)
        if not (self.sourcedirIsUpdateable() and self.sourcedataMatches()):
            # the directory cannot be updated, so we have to clobber it.
            # Perhaps the master just changed modes from 'export' to
            # 'update'.
            d.addCallback(self.doClobber, self.srcdir)

        d.addCallback(self.doVC)

        if self.mode == "copy":
            d.addCallback(self.doCopy)
        if self.patch:
            d.addCallback(self.doPatch)
        d.addCallbacks(self._sendRC, self._checkAbandoned)
        return d
def maybeClobber(self, d):
# do we need to clobber anything?
if self.mode in ("copy", "clobber", "export"):
d.addCallback(self.doClobber, self.workdir)
def interrupt(self):
self.interrupted = True
if self.command:
self.command.kill("command interrupted")
    def doVC(self, res):
        """Perform the actual VC operation: in-place update when possible
        (with clobber-and-retry fallback), otherwise a full checkout
        (with retry).  Afterwards record got_revision and the sourcedata.
        """
        if self.interrupted:
            raise AbandonChain(1)
        if self.sourcedirIsUpdateable() and self.sourcedataMatches():
            d = self.doVCUpdate()
            d.addBoth(self.maybeDoVCFallback)
        else:
            d = self.doVCFull()
            d.addBoth(self.maybeDoVCRetry)
        # order matters: police the rc, then report the revision, then
        # persist sourcedata only after a successful operation
        d.addCallback(self._abandonOnFailure)
        d.addCallback(self._handleGotRevision)
        d.addCallback(self.writeSourcedata)
        return d
def sourcedataMatches(self):
try:
olddata = self.readSourcedata()
if olddata != self.sourcedata:
return False
except IOError:
return False
return True
def sourcedirIsPatched(self):
return os.path.exists(os.path.join(self.builder.basedir,
self.workdir,
".buildbot-patched"))
def _handleGotRevision(self, res):
d = defer.maybeDeferred(self.parseGotRevision)
d.addCallback(lambda got_revision:
self.sendStatus({'got_revision': got_revision}))
return d
def parseGotRevision(self):
"""Override this in a subclass. It should return a string that
represents which revision was actually checked out, or a Deferred
that will fire with such a string. If, in a future build, you were to
pass this 'got_revision' string in as the 'revision' component of a
SourceStamp, you should wind up with the same source code as this
checkout just obtained.
It is probably most useful to scan self.command.stdout for a string
of some sort. Be sure to set keepStdout=True on the VC command that
you run, so that you'll have something available to look at.
If this information is unavailable, just return None."""
return None
def readSourcedata(self):
"""
Read the sourcedata file and return its contents
@returns: source data
@raises: IOError if the file does not exist
"""
return open(self.sourcedatafile, "r").read()
def writeSourcedata(self, res):
open(self.sourcedatafile, "w").write(self.sourcedata)
return res
def sourcedirIsUpdateable(self):
"""Returns True if the tree can be updated."""
raise NotImplementedError("this must be implemented in a subclass")
def doVCUpdate(self):
"""Returns a deferred with the steps to update a checkout."""
raise NotImplementedError("this must be implemented in a subclass")
def doVCFull(self):
"""Returns a deferred with the steps to do a fresh checkout."""
raise NotImplementedError("this must be implemented in a subclass")
    def maybeDoVCFallback(self, rc):
        """addBoth handler for doVCUpdate: pass a successful rc through,
        otherwise clobber the source dir and fall back to a full checkout.
        """
        if type(rc) is int and rc == 0:
            return rc
        if self.interrupted:
            raise AbandonChain(1)
        # allow AssertionErrors to fall through, for benefit of the tests; for
        # all other errors, carry on to try the fallback
        if isinstance(rc, failure.Failure) and rc.check(AssertionError):
            return rc
        # Let VCS subclasses have an opportunity to handle
        # unrecoverable errors without having to clobber the repo
        self.maybeNotDoVCFallback(rc)
        msg = "update failed, clobbering and trying again"
        self.sendStatus({'header': msg + "\n"})
        log.msg(msg)
        d = self.doClobber(None, self.srcdir)
        d.addCallback(self.doVCFallback2)
        return d
def doVCFallback2(self, res):
msg = "now retrying VC operation"
self.sendStatus({'header': msg + "\n"})
log.msg(msg)
d = self.doVCFull()
d.addBoth(self.maybeDoVCRetry)
d.addCallback(self._abandonOnFailure)
return d
def maybeNotDoVCFallback(self, rc):
"""Override this in a subclass if you want to detect unrecoverable
checkout errors where clobbering the repo wouldn't help, and stop
the current VC chain before it clobbers the repo for future builds.
Use 'raise AbandonChain' to pass up a halt if you do detect such."""
pass
    def maybeDoVCRetry(self, res):
        """We get here somewhere after a VC chain has finished. res could
        be::

         - 0: the operation was successful
         - nonzero: the operation failed. retry if possible
         - AbandonChain: the operation failed, someone else noticed. retry.
         - Failure: some other exception, re-raise
        """
        if isinstance(res, failure.Failure):
            if self.interrupted:
                return res # don't re-try interrupted builds
            res.trap(AbandonChain)
        else:
            if type(res) is int and res == 0:
                return res
            if self.interrupted:
                raise AbandonChain(1)
        # if we get here, we should retry, if possible
        if self.retry:
            delay, repeats = self.retry
            if repeats >= 0:
                # consume one attempt, then schedule a clobber + full
                # checkout after 'delay' seconds via the reactor
                self.retry = (delay, repeats-1)
                msg = ("update failed, trying %d more times after %d seconds"
                       % (repeats, delay))
                self.sendStatus({'header': msg + "\n"})
                log.msg(msg)
                d = defer.Deferred()
                # we are going to do a full checkout, so a clobber is
                # required first
                self.doClobber(d, self.workdir)
                if self.srcdir:
                    self.doClobber(d, self.srcdir)
                d.addCallback(lambda res: self.doVCFull())
                d.addBoth(self.maybeDoVCRetry)
                self._reactor.callLater(delay, d.callback, None)
                return d
        return res
    def doClobber(self, dummy, dirname, chmodDone=False):
        """Delete <basedir>/<dirname> entirely.

        Returns a Deferred firing with the removal's result code.  On
        POSIX an external 'rm -rf' subprocess is used (interruptible); on
        other platforms a blocking rmtree.  'chmodDone' marks the second
        pass after a chmod fix-up, so we don't loop forever.
        """
        # TODO: remove the old tree in the background
##         workdir = os.path.join(self.builder.basedir, self.workdir)
##         deaddir = self.workdir + ".deleting"
##         if os.path.isdir(workdir):
##             try:
##                 os.rename(workdir, deaddir)
##                 # might fail if deaddir already exists: previous deletion
##                 # hasn't finished yet
##                 # start the deletion in the background
##                 # TODO: there was a solaris/NetApp/NFS problem where a
##                 # process that was still running out of the directory we're
##                 # trying to delete could prevent the rm-rf from working. I
##                 # think it stalled the rm, but maybe it just died with
##                 # permission issues. Try to detect this.
##                 os.commands("rm -rf %s &" % deaddir)
##             except:
##                 # fall back to sequential delete-then-checkout
##                 pass
        d = os.path.join(self.builder.basedir, dirname)
        if runtime.platformType != "posix":
            # if we're running on w32, use rmtree instead. It will block,
            # but hopefully it won't take too long.
            utils.rmdirRecursive(d)
            return defer.succeed(0)
        command = ["rm", "-rf", d]
        c = runprocess.RunProcess(self.builder, command, self.builder.basedir,
                                  sendRC=0, timeout=self.timeout, maxTime=self.maxTime,
                                  usePTY=False)

        self.command = c
        # sendRC=0 means the rm command will send stdout/stderr to the
        # master, but not the rc=0 when it finishes. That job is left to
        # _sendRC
        d = c.start()
        # The rm -rf may fail if there is a left-over subdir with chmod 000
        # permissions. So if we get a failure, we attempt to chmod suitable
        # permissions and re-try the rm -rf.
        if chmodDone:
            d.addCallback(self._abandonOnFailure)
        else:
            d.addCallback(lambda rc: self.doClobberTryChmodIfFail(rc, dirname))
        return d
def doClobberTryChmodIfFail(self, rc, dirname):
assert isinstance(rc, int)
if rc == 0:
return defer.succeed(0)
# Attempt a recursive chmod and re-try the rm -rf after.
command = ["chmod", "-Rf", "u+rwx", os.path.join(self.builder.basedir, dirname)]
if sys.platform.startswith('freebsd'):
# Work around a broken 'chmod -R' on FreeBSD (it tries to recurse into a
# directory for which it doesn't have permission, before changing that
# permission) by running 'find' instead
command = ["find", os.path.join(self.builder.basedir, dirname),
'-exec', 'chmod', 'u+rwx', '{}', ';' ]
c = runprocess.RunProcess(self.builder, command, self.builder.basedir,
sendRC=0, timeout=self.timeout, maxTime=self.maxTime,
usePTY=False)
self.command = c
d = c.start()
d.addCallback(self._abandonOnFailure)
d.addCallback(lambda dummy: self.doClobber(dummy, dirname, True))
return d
    def doCopy(self, res):
        """Copy the pristine 'source' tree into the workdir (mode='copy').

        Uses 'cp -R -P -p' on POSIX; elsewhere falls back to a blocking
        shutil.copytree.  Returns a Deferred firing with the result code.
        """
        # now copy tree to workdir
        fromdir = os.path.join(self.builder.basedir, self.srcdir)
        todir = os.path.join(self.builder.basedir, self.workdir)
        if runtime.platformType != "posix":
            self.sendStatus({'header': "Since we're on a non-POSIX platform, "
            "we're not going to try to execute cp in a subprocess, but instead "
            "use shutil.copytree(), which will block until it is complete.  "
            "fromdir: %s, todir: %s\n" % (fromdir, todir)})
            shutil.copytree(fromdir, todir)
            return defer.succeed(0)

        if not os.path.exists(os.path.dirname(todir)):
            os.makedirs(os.path.dirname(todir))
        if os.path.exists(todir):
            # I don't think this happens, but just in case..
            log.msg("cp target '%s' already exists -- cp will not do what you think!" % todir)

        command = ['cp', '-R', '-P', '-p', fromdir, todir]
        c = runprocess.RunProcess(self.builder, command, self.builder.basedir,
                                  sendRC=False, timeout=self.timeout, maxTime=self.maxTime,
                                  usePTY=False)
        self.command = c
        d = c.start()
        d.addCallback(self._abandonOnFailure)
        return d
def doPatch(self, res):
patchlevel = self.patch[0]
diff = self.patch[1]
root = None
if len(self.patch) >= 3:
root = self.patch[2]
command = [
utils.getCommand("patch"),
'-p%d' % patchlevel,
'--remove-empty-files',
'--force',
'--forward',
'-i', '.buildbot-diff',
]
dir = os.path.join(self.builder.basedir, self.workdir)
# Mark the directory so we don't try to update it later, or at least try
# to revert first.
open(os.path.join(dir, ".buildbot-patched"), "w").write("patched\n")
# write the diff to a file, for reading later
open(os.path.join(dir, ".buildbot-diff"), "w").write(diff)
# Update 'dir' with the 'root' option. Make sure it is a subdirectory
# of dir.
if (root and
os.path.abspath(os.path.join(dir, root)
).startswith(os.path.abspath(dir))):
dir = os.path.join(dir, root)
# now apply the patch
c = runprocess.RunProcess(self.builder, command, dir,
sendRC=False, timeout=self.timeout,
maxTime=self.maxTime, usePTY=False)
self.command = c
d = c.start()
# clean up the temp file
def cleanup(x):
try:
os.unlink(os.path.join(dir, ".buildbot-diff"))
except:
pass
return x
d.addBoth(cleanup)
d.addCallback(self._abandonOnFailure)
return d
    def setFileContents(self, filename, contents):
        """Put the given C{contents} in C{filename}; this is a bit more
        succinct than opening, writing, and closing, and has the advantage of
        being patchable in tests. Note that the enclosing directory is
        not automatically created, nor is this an "atomic" overwrite."""
        f = open(filename, 'w')
        f.write(contents)
        f.close()
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import ddt
import mock
from oslo_serialization import jsonutils
from six.moves import http_client
import webob
from cinder.api import extensions
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
from cinder import context
import cinder.db
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder import volume
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
    # Stub for cinder.db.snapshot_metadata_update: ignores its arguments
    # and returns the canned three-key metadata dict.
    return fake_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
                                                metadata, delete):
    # Stub for cinder.db.snapshot_metadata_update: returns the canned dict
    # used by the mixed-case-keys tests.
    return fake_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
    # Stub for cinder.db.snapshot_metadata_update used by the update_all
    # tests: returns the canned replacement metadata.
    return fake_new_snapshot_metadata()
def fake_snapshot_metadata():
    """Canned snapshot metadata: three lowercase key/value pairs."""
    return {"key%d" % i: "value%d" % i for i in range(1, 4)}
def fake_snapshot_metadata_insensitive():
    """Canned metadata: three lowercase pairs plus one uppercase key."""
    metadata = {"key%d" % i: "value%d" % i for i in range(1, 4)}
    metadata["KEY4"] = "value4"
    return metadata
def fake_new_snapshot_metadata():
    """Canned replacement metadata used by the update_all tests."""
    return {'key10': 'value10', 'KEY20': 'value20', 'key99': 'value99'}
def return_snapshot(context, snapshot_id):
    """Stub for cinder.db.snapshot_get: a fixed available snapshot dict."""
    snapshot = {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
                'status': 'available',
                'name': 'fake',
                'metadata': {}}
    return snapshot
# First argument needs to be self to receive the context argument in the right
# variable, as this'll be used to replace the original API.get method which
# receives self as the first argument.
def fake_get(self, context, *args, **kwargs):
    """Return a canned, available volume object.

    Replaces volume.api.API.get, hence the explicit 'self' first
    parameter so 'context' lands in the right variable.
    """
    vol = dict(
        id=fake.VOLUME_ID,
        name='fake',
        size=100,
        host='fake-host',
        status='available',
        encryption_key_id=None,
        volume_type_id=None,
        migration_status=None,
        availability_zone='fake-zone',
        attach_status=fields.VolumeAttachStatus.DETACHED,
        metadata={},
    )
    return fake_volume.fake_volume_obj(context, **vol)
def return_snapshot_nonexistent(context, snapshot_id):
    # Stub for cinder.db.snapshot_get that behaves as if the snapshot
    # does not exist.
    raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
@ddt.ddt
class SnapshotMetaDataTest(test.TestCase):
    def setUp(self):
        """Wire up the mocks and create one snapshot for the tests to use."""
        super(SnapshotMetaDataTest, self).setUp()
        self.volume_api = cinder.volume.api.API()
        # route volume/snapshot lookups through the canned stubs above
        self.mock_object(volume.api.API, 'get', fake_get)
        self.mock_object(cinder.db, 'snapshot_get', return_snapshot)
        self.mock_object(self.volume_api, 'update_snapshot_metadata')

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
        self.controller = snapshot_metadata.Controller()
        self.req_id = str(uuid.uuid4())
        self.url = '/v2/%s/snapshots/%s/metadata' % (
            fake.PROJECT_ID, self.req_id)

        snap = {"volume_size": 100,
                "volume_id": fake.VOLUME_ID,
                "display_name": "Volume Test Name",
                "display_description": "Volume Test Desc",
                "availability_zone": "zone1:host1",
                "host": "fake-host",
                "metadata": {}}
        body = {"snapshot": snap}
        # create a snapshot up front so the metadata endpoints have a target
        req = fakes.HTTPRequest.blank('/v2/snapshots')
        self.snapshot_controller.create(req, body)
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_index(self, snapshot_get_by_id):
        """GET of the metadata index returns every key/value pair."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_obj['metadata'] = {'key1': 'value1',
                                    'key2': 'value2',
                                    'key3': 'value3'}
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url)
        res_dict = self.controller.index(req, self.req_id)

        expected = {
            'metadata': {
                'key1': 'value1',
                'key2': 'value2',
                'key3': 'value3',
            },
        }
        self.assertEqual(expected, res_dict)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_index_nonexistent_snapshot(self, snapshot_get_by_id):
snapshot_get_by_id.side_effect = \
exception.SnapshotNotFound(snapshot_id=self.req_id)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(exception.SnapshotNotFound,
self.controller.index, req, self.url)
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_index_no_data(self, snapshot_get_by_id):
        """Index of a snapshot with no metadata returns an empty dict."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url)
        res_dict = self.controller.index(req, self.req_id)
        expected = {'metadata': {}}
        self.assertEqual(expected, res_dict)
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_show(self, snapshot_get_by_id):
        """GET of a single key returns a one-entry 'meta' dict."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_obj['metadata'] = {'key2': 'value2'}
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url + '/key2')
        res_dict = self.controller.show(req, self.req_id, 'key2')
        expected = {'meta': {'key2': 'value2'}}
        self.assertEqual(expected, res_dict)
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_show_nonexistent_snapshot(self, snapshot_get_by_id):
        """Show on a missing snapshot propagates SnapshotNotFound."""
        snapshot_get_by_id.side_effect = \
            exception.SnapshotNotFound(snapshot_id=self.req_id)

        req = fakes.HTTPRequest.blank(self.url + '/key2')
        self.assertRaises(exception.SnapshotNotFound,
                          self.controller.show, req, self.req_id, 'key2')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_show_meta_not_found(self, snapshot_get_by_id):
        """Show of a key absent from the metadata raises the 404 variant."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url + '/key6')
        self.assertRaises(exception.SnapshotMetadataNotFound,
                          self.controller.show, req, self.req_id, 'key6')
    @mock.patch('cinder.db.snapshot_metadata_delete')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_delete(self, snapshot_get_by_id, snapshot_metadata_delete):
        """DELETE of an existing key succeeds with 200 OK."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_obj['metadata'] = {'key2': 'value2'}
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url + '/key2')
        req.method = 'DELETE'
        res = self.controller.delete(req, self.req_id, 'key2')

        self.assertEqual(http_client.OK, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.mock_object(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(exception.SnapshotNotFound,
self.controller.delete, req, self.req_id, 'key1')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_delete_meta_not_found(self, snapshot_get_by_id):
        """DELETE of a key absent from the metadata raises the 404 variant."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url + '/key6')
        req.method = 'DELETE'
        self.assertRaises(exception.SnapshotMetadataNotFound,
                          self.controller.delete, req, self.req_id, 'key6')
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Volume.get_by_id')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_create(self, snapshot_get_by_id, volume_get_by_id,
                    snapshot_update):
        """POST of a metadata dict echoes it back on success."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        fake_volume_obj = fake_volume.fake_volume_obj(ctx)
        snapshot_get_by_id.return_value = snapshot_obj
        volume_get_by_id.return_value = fake_volume_obj

        self.mock_object(cinder.db, 'snapshot_metadata_update',
                         return_create_snapshot_metadata)

        req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"metadata": {"key1": "value1",
                             "key2": "value2",
                             "key3": "value3"}}
        req.body = jsonutils.dump_as_bytes(body)
        res_dict = self.controller.create(req, self.req_id, body)
        self.assertEqual(body, res_dict)
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_create_with_keys_in_uppercase_and_lowercase(
            self, snapshot_get_by_id, snapshot_update):
        """Case-duplicate keys collapse to the set the server stored."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        # if the keys in uppercase_and_lowercase, should return the one
        # which server added
        self.mock_object(cinder.db, 'snapshot_metadata_update',
                         return_create_snapshot_metadata_insensitive)

        req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"metadata": {"key1": "value1",
                             "KEY1": "value1",
                             "key2": "value2",
                             "KEY2": "value2",
                             "key3": "value3",
                             "KEY4": "value4"}}
        expected = {"metadata": {"key1": "value1",
                                 "key2": "value2",
                                 "key3": "value3",
                                 "KEY4": "value4"}}
        req.body = jsonutils.dump_as_bytes(body)
        res_dict = self.controller.create(req, self.req_id, body)
        self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.mock_object(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.SnapshotNotFound,
self.controller.create, req, self.req_id, body)
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_update_all(self, snapshot_get_by_id, snapshot_update):
        """PUT of a full metadata dict echoes the stored result back."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': []
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        self.mock_object(cinder.db, 'snapshot_metadata_update',
                         return_new_snapshot_metadata)
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'PUT'
        req.content_type = "application/json"
        expected = {
            'metadata': {
                'key10': 'value10',
                'key99': 'value99',
                'KEY20': 'value20',
            },
        }
        req.body = jsonutils.dump_as_bytes(expected)
        res_dict = self.controller.update_all(req, self.req_id, expected)

        self.assertEqual(expected, res_dict)
@mock.patch('cinder.db.snapshot_update',
return_value={'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20'})
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_all_with_keys_in_uppercase_and_lowercase(
self, snapshot_get_by_id, snapshot_update):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dump_as_bytes(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_update_all_empty_container(self, snapshot_get_by_id,
                                        snapshot_update):
        """PUT of an empty metadata dict clears and echoes it back."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': []
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        self.mock_object(cinder.db, 'snapshot_metadata_update',
                         return_value={})
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'PUT'
        req.content_type = "application/json"
        expected = {'metadata': {}}
        req.body = jsonutils.dump_as_bytes(expected)
        res_dict = self.controller.update_all(req, self.req_id, expected)

        self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dump_as_bytes(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.mock_object(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.SnapshotNotFound,
self.controller.update_all, req, '100', body)
    @mock.patch('cinder.db.snapshot_metadata_update', return_value=dict())
    @mock.patch('cinder.db.snapshot_update')
    @mock.patch('cinder.objects.Snapshot.get_by_id')
    def test_update_item(self, snapshot_get_by_id,
                         snapshot_update, snapshot_metadata_update):
        """PUT of a single key echoes its 'meta' dict back."""
        snapshot = {
            'id': self.req_id,
            'expected_attrs': ['metadata']
        }
        ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
        snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
        snapshot_get_by_id.return_value = snapshot_obj

        req = fakes.HTTPRequest.blank(self.url + '/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dump_as_bytes(body)
        req.headers["content-type"] = "application/json"

        res_dict = self.controller.update(req, self.req_id, 'key1', body)
        expected = {'meta': {'key1': 'value1'}}
        self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.mock_object(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v2/%s/snapshots/asdf/metadata/key1' % fake.PROJECT_ID)
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.SnapshotNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
@mock.patch('cinder.db.sqlalchemy.api._snapshot_get')
@mock.patch('cinder.db.snapshot_metadata_update', autospec=True)
def test_update_item_empty_key(self, metadata_update, snapshot_get):
snapshot_get.return_value = fake_get
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_item_key_too_long(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_item_value_too_long(self, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
@ddt.data({"meta": {"key1": "value1", "key2": "value2"}},
{"meta": {"key1": None}})
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_update_invalid_metadata(self, body, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
@ddt.data({"metadata": {"a" * 260: "value1"}},
{"metadata": {"key": "v" * 260}},
{"metadata": {"": "value1"}},
{"metadata": {"key": None}})
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_invalid_metadata_items_on_create(self, data, snapshot_get_by_id):
snapshot = {
'id': self.req_id,
'expected_attrs': ['metadata']
}
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)
snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot)
snapshot_get_by_id.return_value = snapshot_obj
self.mock_object(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
exc = webob.exc.HTTPBadRequest
if (len(list(data['metadata'].keys())[0]) > 255 or
(list(data['metadata'].values())[0] is not None and
len(list(data['metadata'].values())[0]) > 255)):
exc = webob.exc.HTTPRequestEntityTooLarge
req.body = jsonutils.dump_as_bytes(data)
self.assertRaises(exc, self.controller.create, req, self.req_id, data)
| |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transport adapter for Requests."""
from __future__ import absolute_import
import functools
import logging
import numbers
import os
import time
try:
import requests
except ImportError as caught_exc: # pragma: NO COVER
import six
six.raise_from(
ImportError(
"The requests library is not installed, please install the "
"requests package to use the requests transport."
),
caught_exc,
)
import requests.adapters # pylint: disable=ungrouped-imports
import requests.exceptions # pylint: disable=ungrouped-imports
from requests.packages.urllib3.util.ssl_ import ( # type: ignore
create_urllib3_context,
) # pylint: disable=ungrouped-imports
import six # pylint: disable=ungrouped-imports
from google.auth import environment_vars
from google.auth import exceptions
from google.auth import transport
import google.auth.transport._mtls_helper
from google.oauth2 import service_account
_LOGGER = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = 120 # in seconds
class _Response(transport.Response):
    """Adapter exposing a raw :class:`requests.Response` via the transport API.

    Args:
        response (requests.Response): The raw Requests response to wrap.
    """

    def __init__(self, response):
        self._wrapped = response

    @property
    def status(self):
        """int: The HTTP status code of the response."""
        return self._wrapped.status_code

    @property
    def headers(self):
        """Mapping[str, str]: The HTTP response headers."""
        return self._wrapped.headers

    @property
    def data(self):
        """bytes: The raw response body."""
        return self._wrapped.content
class TimeoutGuard(object):
    """Context manager raising an error if its suite runs past a deadline.

    Args:
        timeout (Union[None, Union[float, Tuple[float, float]]]):
            Maximum number of seconds the guarded suite may take before a
            timeout error is raised on exit. A tuple is interpreted with the
            smaller value acting as the deadline. ``None`` disables the
            check entirely.
        timeout_error_type (Optional[Exception]):
            Error type raised when the deadline is hit. Defaults to
            :class:`requests.exceptions.Timeout`.
    """

    def __init__(self, timeout, timeout_error_type=requests.exceptions.Timeout):
        self._timeout = timeout
        self._timeout_error_type = timeout_error_type
        # Updated on __exit__ with how much of the budget is left, so callers
        # can chain several guarded suites against one overall deadline.
        self.remaining_timeout = timeout

    def __enter__(self):
        self._start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_value:
            # An exception is already propagating; let it bubble up untouched.
            return
        if self._timeout is None:
            # No deadline configured, nothing to enforce.
            return

        elapsed = time.time() - self._start
        if isinstance(self._timeout, numbers.Number):
            self.remaining_timeout = self._timeout - elapsed
            expired = self.remaining_timeout <= 0
        else:
            self.remaining_timeout = tuple(t - elapsed for t in self._timeout)
            expired = min(self.remaining_timeout) <= 0

        if expired:
            raise self._timeout_error_type()
class Request(transport.Request):
    """Requests request adapter.

    Used internally for making requests using various transports in a
    consistent way. When working through :class:`AuthorizedSession` there is
    no need to construct or use this class directly.

    It can be handy for manually refreshing a
    :class:`~google.auth.credentials.Credentials` instance::

        import google.auth.transport.requests
        import requests

        request = google.auth.transport.requests.Request()
        credentials.refresh(request)

    Args:
        session (requests.Session): An instance :class:`requests.Session` used
            to make HTTP requests. If not specified, a session will be created.

    .. automethod:: __call__
    """

    def __init__(self, session=None):
        # Lazily create a session when the caller did not supply one.
        self.session = session if session else requests.Session()

    def __call__(
        self,
        url,
        method="GET",
        body=None,
        headers=None,
        timeout=_DEFAULT_TIMEOUT,
        **kwargs
    ):
        """Make an HTTP request using requests.

        Args:
            url (str): The URI to be requested.
            method (str): The HTTP method to use for the request. Defaults
                to 'GET'.
            body (bytes): The payload or body in HTTP request.
            headers (Mapping[str, str]): Request headers.
            timeout (Optional[int]): The number of seconds to wait for a
                response from the server. If not specified or if None, the
                requests default timeout will be used.
            kwargs: Additional arguments passed through to the underlying
                requests :meth:`~requests.Session.request` method.

        Returns:
            google.auth.transport.Response: The HTTP response.

        Raises:
            google.auth.exceptions.TransportError: If any exception occurred.
        """
        try:
            _LOGGER.debug("Making request: %s %s", method, url)
            raw_response = self.session.request(
                method, url, data=body, headers=headers, timeout=timeout, **kwargs
            )
        except requests.exceptions.RequestException as caught_exc:
            # Normalize any requests-level failure into a TransportError while
            # preserving the original exception as the cause.
            new_exc = exceptions.TransportError(caught_exc)
            six.raise_from(new_exc, caught_exc)
        else:
            return _Response(raw_response)
class _MutualTlsAdapter(requests.adapters.HTTPAdapter):
    """
    A TransportAdapter that enables mutual TLS.

    Args:
        cert (bytes): client certificate in PEM format
        key (bytes): client private key in PEM format

    Raises:
        ImportError: if certifi or pyOpenSSL is not installed
        OpenSSL.crypto.Error: if client cert or key is invalid
    """

    def __init__(self, cert, key):
        import certifi
        from OpenSSL import crypto
        import urllib3.contrib.pyopenssl  # type: ignore

        urllib3.contrib.pyopenssl.inject_into_urllib3()

        pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

        # Separate (but identically configured) contexts are used for the
        # pool manager and the proxy manager; build both via one helper
        # instead of duplicating the setup inline.
        cafile = certifi.where()
        self._ctx_poolmanager = self._build_mtls_context(x509, pkey, cafile)
        self._ctx_proxymanager = self._build_mtls_context(x509, pkey, cafile)

        super(_MutualTlsAdapter, self).__init__()

    @staticmethod
    def _build_mtls_context(x509, pkey, cafile):
        """Create an SSL context preloaded with the client cert and key.

        Args:
            x509 (OpenSSL.crypto.X509): parsed client certificate.
            pkey (OpenSSL.crypto.PKey): parsed client private key.
            cafile (str): path to the CA bundle used for verification.

        Returns:
            The configured urllib3 SSL context.
        """
        ctx = create_urllib3_context()
        ctx.load_verify_locations(cafile=cafile)
        # NOTE: after inject_into_urllib3(), `_ctx` appears to be the
        # underlying PyOpenSSL context that accepts cert/key objects
        # directly — confirm against the pinned urllib3 version.
        ctx._ctx.use_certificate(x509)
        ctx._ctx.use_privatekey(pkey)
        return ctx

    def init_poolmanager(self, *args, **kwargs):
        # Force every pooled connection to present the client certificate.
        kwargs["ssl_context"] = self._ctx_poolmanager
        super(_MutualTlsAdapter, self).init_poolmanager(*args, **kwargs)

    def proxy_manager_for(self, *args, **kwargs):
        # Same client certificate for connections made through a proxy.
        kwargs["ssl_context"] = self._ctx_proxymanager
        return super(_MutualTlsAdapter, self).proxy_manager_for(*args, **kwargs)
class AuthorizedSession(requests.Session):
    """A Requests Session class with credentials.
    This class is used to perform requests to API endpoints that require
    authorization::
        from google.auth.transport.requests import AuthorizedSession
        authed_session = AuthorizedSession(credentials)
        response = authed_session.request(
            'GET', 'https://www.googleapis.com/storage/v1/b')
    The underlying :meth:`request` implementation handles adding the
    credentials' headers to the request and refreshing credentials as needed.
    This class also supports mutual TLS via :meth:`configure_mtls_channel`
    method. In order to use this method, the `GOOGLE_API_USE_CLIENT_CERTIFICATE`
    environment variable must be explicitly set to ``true``, otherwise it does
    nothing. Assume the environment is set to ``true``, the method behaves in the
    following manner:
    If client_cert_callback is provided, client certificate and private
    key are loaded using the callback; if client_cert_callback is None,
    application default SSL credentials will be used. Exceptions are raised if
    there are problems with the certificate, private key, or the loading process,
    so it should be called within a try/except block.
    First we set the environment variable to ``true``, then create an :class:`AuthorizedSession`
    instance and specify the endpoints::
        regular_endpoint = 'https://pubsub.googleapis.com/v1/projects/{my_project_id}/topics'
        mtls_endpoint = 'https://pubsub.mtls.googleapis.com/v1/projects/{my_project_id}/topics'
        authed_session = AuthorizedSession(credentials)
    Now we can pass a callback to :meth:`configure_mtls_channel`::
        def my_cert_callback():
            # some code to load client cert bytes and private key bytes, both in
            # PEM format.
            some_code_to_load_client_cert_and_key()
            if loaded:
                return cert, key
            raise MyClientCertFailureException()
        # Always call configure_mtls_channel within a try/except block.
        try:
            authed_session.configure_mtls_channel(my_cert_callback)
        except:
            # handle exceptions.
        if authed_session.is_mtls:
            response = authed_session.request('GET', mtls_endpoint)
        else:
            response = authed_session.request('GET', regular_endpoint)
    You can alternatively use application default SSL credentials like this::
        try:
            authed_session.configure_mtls_channel()
        except:
            # handle exceptions.
    Args:
        credentials (google.auth.credentials.Credentials): The credentials to
            add to the request.
        refresh_status_codes (Sequence[int]): Which HTTP status codes indicate
            that credentials should be refreshed and the request should be
            retried.
        max_refresh_attempts (int): The maximum number of times to attempt to
            refresh the credentials and retry the request.
        refresh_timeout (Optional[int]): The timeout value in seconds for
            credential refresh HTTP requests.
        auth_request (google.auth.transport.requests.Request):
            (Optional) An instance of
            :class:`~google.auth.transport.requests.Request` used when
            refreshing credentials. If not passed,
            an instance of :class:`~google.auth.transport.requests.Request`
            is created.
        default_host (Optional[str]): A host like "pubsub.googleapis.com".
            This is used when a self-signed JWT is created from service
            account credentials.
    """
    def __init__(
        self,
        credentials,
        refresh_status_codes=transport.DEFAULT_REFRESH_STATUS_CODES,
        max_refresh_attempts=transport.DEFAULT_MAX_REFRESH_ATTEMPTS,
        refresh_timeout=None,
        auth_request=None,
        default_host=None,
    ):
        super(AuthorizedSession, self).__init__()
        self.credentials = credentials
        self._refresh_status_codes = refresh_status_codes
        self._max_refresh_attempts = max_refresh_attempts
        self._refresh_timeout = refresh_timeout
        # Flipped to True by configure_mtls_channel() when a client cert is
        # successfully mounted.
        self._is_mtls = False
        self._default_host = default_host
        if auth_request is None:
            self._auth_request_session = requests.Session()
            # Using an adapter to make HTTP requests robust to network errors.
            # This adapter retries HTTP requests when network errors occur
            # and the requests seem safely retryable.
            retry_adapter = requests.adapters.HTTPAdapter(max_retries=3)
            self._auth_request_session.mount("https://", retry_adapter)
            # Do not pass `self` as the session here, as it can lead to
            # infinite recursion.
            auth_request = Request(self._auth_request_session)
        else:
            # Caller supplied the auth request; nothing of ours to close later.
            self._auth_request_session = None
        # Request instance used by internal methods (for example,
        # credentials.refresh).
        self._auth_request = auth_request
        # https://google.aip.dev/auth/4111
        # Attempt to use self-signed JWTs when a service account is used.
        if isinstance(self.credentials, service_account.Credentials):
            self.credentials._create_self_signed_jwt(
                "https://{}/".format(self._default_host) if self._default_host else None
            )
    def configure_mtls_channel(self, client_cert_callback=None):
        """Configure the client certificate and key for SSL connection.
        The function does nothing unless `GOOGLE_API_USE_CLIENT_CERTIFICATE` is
        explicitly set to `true`. In this case if client certificate and key are
        successfully obtained (from the given client_cert_callback or from application
        default SSL credentials), a :class:`_MutualTlsAdapter` instance will be mounted
        to "https://" prefix.
        Args:
            client_cert_callback (Optional[Callable[[], (bytes, bytes)]]):
                The optional callback returns the client certificate and private
                key bytes both in PEM format.
                If the callback is None, application default SSL credentials
                will be used.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS channel
                creation failed for any reason.
        """
        use_client_cert = os.getenv(
            environment_vars.GOOGLE_API_USE_CLIENT_CERTIFICATE, "false"
        )
        if use_client_cert != "true":
            self._is_mtls = False
            return
        # pyOpenSSL is an optional dependency; surface a missing install as a
        # MutualTLSChannelError rather than a bare ImportError.
        try:
            import OpenSSL
        except ImportError as caught_exc:
            new_exc = exceptions.MutualTLSChannelError(caught_exc)
            six.raise_from(new_exc, caught_exc)
        try:
            (
                self._is_mtls,
                cert,
                key,
            ) = google.auth.transport._mtls_helper.get_client_cert_and_key(
                client_cert_callback
            )
            if self._is_mtls:
                mtls_adapter = _MutualTlsAdapter(cert, key)
                self.mount("https://", mtls_adapter)
        except (
            exceptions.ClientCertError,
            ImportError,
            OpenSSL.crypto.Error,
        ) as caught_exc:
            new_exc = exceptions.MutualTLSChannelError(caught_exc)
            six.raise_from(new_exc, caught_exc)
    def request(
        self,
        method,
        url,
        data=None,
        headers=None,
        max_allowed_time=None,
        timeout=_DEFAULT_TIMEOUT,
        **kwargs
    ):
        """Implementation of Requests' request.
        Args:
            method (str): The HTTP method to use.
            url (str): The URI to be requested.
            data: The request payload, passed through to requests.
            headers (Optional[Mapping[str, str]]): Request headers; a copy is
                modified with the credentials' headers before sending.
            timeout (Optional[Union[float, Tuple[float, float]]]):
                The amount of time in seconds to wait for the server response
                with each individual request. Can also be passed as a tuple
                ``(connect_timeout, read_timeout)``. See :meth:`requests.Session.request`
                documentation for details.
            max_allowed_time (Optional[float]):
                If the method runs longer than this, a ``Timeout`` exception is
                automatically raised. Unlike the ``timeout`` parameter, this
                value applies to the total method execution time, even if
                multiple requests are made under the hood.
                Mind that it is not guaranteed that the timeout error is raised
                at ``max_allowed_time``. It might take longer, for example, if
                an underlying request takes a lot of time, but the request
                itself does not timeout, e.g. if a large file is being
                transmitted. The timeout error will be raised after such
                request completes.
        """
        # pylint: disable=arguments-differ
        # Requests has a ton of arguments to request, but only two
        # (method, url) are required. We pass through all of the other
        # arguments to super, so no need to exhaustively list them here.
        # Use a kwarg for this instead of an attribute to maintain
        # thread-safety.
        _credential_refresh_attempt = kwargs.pop("_credential_refresh_attempt", 0)
        # Make a copy of the headers. They will be modified by the credentials
        # and we want to pass the original headers if we recurse.
        request_headers = headers.copy() if headers is not None else {}
        # Do not apply the timeout unconditionally in order to not override the
        # _auth_request's default timeout.
        auth_request = (
            self._auth_request
            if timeout is None
            else functools.partial(self._auth_request, timeout=timeout)
        )
        remaining_time = max_allowed_time
        with TimeoutGuard(remaining_time) as guard:
            self.credentials.before_request(auth_request, method, url, request_headers)
        remaining_time = guard.remaining_timeout
        with TimeoutGuard(remaining_time) as guard:
            response = super(AuthorizedSession, self).request(
                method,
                url,
                data=data,
                headers=request_headers,
                timeout=timeout,
                **kwargs
            )
        remaining_time = guard.remaining_timeout
        # If the response indicated that the credentials needed to be
        # refreshed, then refresh the credentials and re-attempt the
        # request.
        # A stored token may expire between the time it is retrieved and
        # the time the request is made, so we may need to try twice.
        if (
            response.status_code in self._refresh_status_codes
            and _credential_refresh_attempt < self._max_refresh_attempts
        ):
            _LOGGER.info(
                "Refreshing credentials due to a %s response. Attempt %s/%s.",
                response.status_code,
                _credential_refresh_attempt + 1,
                self._max_refresh_attempts,
            )
            # Do not apply the timeout unconditionally in order to not override the
            # _auth_request's default timeout.
            auth_request = (
                self._auth_request
                if timeout is None
                else functools.partial(self._auth_request, timeout=timeout)
            )
            with TimeoutGuard(remaining_time) as guard:
                self.credentials.refresh(auth_request)
            remaining_time = guard.remaining_timeout
            # Recurse. Pass in the original headers, not our modified set, but
            # do pass the adjusted max allowed time (i.e. the remaining total time).
            return self.request(
                method,
                url,
                data=data,
                headers=headers,
                max_allowed_time=remaining_time,
                timeout=timeout,
                _credential_refresh_attempt=_credential_refresh_attempt + 1,
                **kwargs
            )
        return response
    @property
    def is_mtls(self):
        """Indicates if the created SSL channel is mutual TLS."""
        return self._is_mtls
    def close(self):
        """Close this session and the internally-created auth session, if any."""
        if self._auth_request_session is not None:
            self._auth_request_session.close()
        super(AuthorizedSession, self).close()
| |
# -*- coding: utf-8 -*-
import datetime
import functools
import operator
import re
import pytz
from dateutil import parser
from django.utils import timezone
from nose.tools import * # flake8: noqa
from rest_framework import serializers as ser
from unittest import TestCase
from tests.base import ApiTestCase
from api.base.filters import ListFilterMixin
import api.base.filters as filters
from api.base.exceptions import (
InvalidFilterError,
InvalidFilterOperator,
InvalidFilterComparisonType,
InvalidFilterMatchType,
)
from api.base.serializers import RelationshipField
class FakeSerializer(ser.Serializer):
    """Serializer stub exposing one field of each filterable type."""

    filterable_fields = (
        'id',
        'string_field',
        'second_string_field',
        'list_field',
        'date_field',
        'int_field',
        'bool_field',
        'relationship_field',
    )

    id = ser.CharField()
    string_field = ser.CharField()
    second_string_field = ser.CharField()
    list_field = ser.ListField()
    date_field = ser.DateField()
    datetime_field = ser.DateTimeField()
    int_field = ser.IntegerField()
    float_field = ser.FloatField()
    # Named differently from the model attribute so tests can exercise
    # resolution of the field's `source`.
    bool_field = ser.BooleanField(source='foobar')
    relationship_field = RelationshipField(related_view='fake', related_view_kwargs={})
class FakeRecord(object):
    """Plain object standing in for a model instance in filter tests.

    Fixes two default-argument pitfalls in the original version:
    ``timezone.now()`` was evaluated in the default-argument list, freezing a
    single import-time timestamp shared by every instance, and
    ``list_field or [1, 2, 3]`` silently replaced an explicitly passed empty
    list with the default.
    """

    def __init__(
        self,
        _id=None,
        string_field='foo',
        second_string_field='bar',
        list_field=None,
        date_field=None,
        datetime_field=None,
        int_field=42,
        float_field=41.99999,
        foobar=True
    ):
        self._id = _id
        self.string_field = string_field
        self.second_string_field = second_string_field
        # Honor an explicit empty list; only None gets the default.
        self.list_field = list_field if list_field is not None else [1, 2, 3]
        # Fresh timestamps per record instead of one shared import-time value.
        self.date_field = date_field if date_field is not None else timezone.now()
        self.datetime_field = (
            datetime_field if datetime_field is not None else timezone.now()
        )
        self.int_field = int_field
        self.float_field = float_field
        # bool_field on FakeSerializer maps to this attribute via source='foobar'.
        self.foobar = foobar
class FakeListView(ListFilterMixin):
    # Minimal view: only serializer_class is needed by the filter-mixin tests.
    serializer_class = FakeSerializer
class TestFilterMixin(ApiTestCase):
    """Tests for query-param parsing and value conversion in ListFilterMixin."""
    def setUp(self):
        super(TestFilterMixin, self).setUp()
        # Fresh view per test; only serializer_class matters for parsing.
        self.view = FakeListView()
    def test_parse_query_params_default_operators(self):
        # Each field type gets a sensible default operator when none is given.
        query_params = {
            'filter[string_field]': 'foo',
            'filter[list_field]': 'bar',
            'filter[int_field]': '42',
            'filter[bool_field]': 'false',
        }
        fields = self.view.parse_query_params(query_params)
        assert_in('string_field', fields['filter[string_field]'])
        assert_equal(fields['filter[string_field]']['string_field']['op'], 'icontains')
        assert_in('list_field', fields['filter[list_field]'])
        assert_equal(fields['filter[list_field]']['list_field']['op'], 'contains')
        assert_in('int_field', fields['filter[int_field]'])
        assert_equal(fields['filter[int_field]']['int_field']['op'], 'eq')
        assert_in('bool_field', fields['filter[bool_field]'])
        assert_equal(fields['filter[bool_field]']['bool_field']['op'], 'eq')
    def test_parse_query_params_casts_values(self):
        # Raw query-string values are cast to the serializer field's type.
        query_params = {
            'filter[string_field]': 'foo',
            'filter[list_field]': 'bar',
            'filter[int_field]': '42',
            'filter[bool_field]': 'false',
        }
        fields = self.view.parse_query_params(query_params)
        assert_in('string_field', fields['filter[string_field]'])
        assert_equal(fields['filter[string_field]']['string_field']['value'], 'foo')
        assert_in('list_field', fields['filter[list_field]'])
        assert_equal(fields['filter[list_field]']['list_field']['value'], 'bar')
        assert_in('int_field', fields['filter[int_field]'])
        assert_equal(fields['filter[int_field]']['int_field']['value'], 42)
        assert_in('bool_field', fields.get('filter[bool_field]'))
        assert_equal(fields['filter[bool_field]']['bool_field']['value'], False)
    def test_parse_query_params_uses_field_source_attribute(self):
        # bool_field is declared with source='foobar' on FakeSerializer.
        query_params = {
            'filter[bool_field]': 'false',
        }
        fields = self.view.parse_query_params(query_params)
        parsed_field = fields['filter[bool_field]']['bool_field']
        assert_equal(parsed_field['source_field_name'], 'foobar')
        assert_equal(parsed_field ['value'], False)
        assert_equal(parsed_field ['op'], 'eq')
    def test_parse_query_params_generalizes_dates(self):
        # A bare date expands to a [start, start + 1 day) range filter.
        query_params = {
            'filter[date_field]': '2014-12-12'
        }
        fields = self.view.parse_query_params(query_params)
        start = parser.parse('2014-12-12').replace(tzinfo=pytz.utc)
        stop = start + datetime.timedelta(days=1)
        # NOTE: dict.iteritems() is Python 2 only.
        for key, field_name in fields.iteritems():
            for match in field_name['date_field']:
                if match['op'] == 'gte':
                    assert_equal(match['value'], start)
                elif match['op'] == 'lt':
                    assert_equal(match['value'], stop)
                else:
                    self.fail()
    def test_parse_query_params_comparable_field(self):
        # Numeric fields accept explicit comparison operators.
        query_params = {
            'filter[int_field][gt]': 42,
            'filter[int_field][lte]': 9000
        }
        fields = self.view.parse_query_params(query_params)
        for key, field_name in fields.iteritems():
            if field_name['int_field']['op'] == 'gt':
                assert_equal(field_name['int_field']['value'], 42)
            elif field_name['int_field']['op'] == 'lte':
                assert_equal(field_name['int_field']['value'], 9000)
            else:
                self.fail()
    def test_parse_query_params_matchable_field(self):
        # String fields accept explicit match operators.
        query_params = {
            'filter[string_field][contains]': 'foo',
            'filter[string_field][icontains]': 'bar'
        }
        fields = self.view.parse_query_params(query_params)
        for key, field_name in fields.iteritems():
            if field_name['string_field']['op'] == 'contains':
                assert_equal(field_name['string_field']['value'], 'foo')
            elif field_name['string_field']['op'] == 'icontains':
                assert_equal(field_name['string_field']['value'], 'bar')
            else:
                self.fail()
    def test_parse_query_params_raises_InvalidFilterError_bad_field(self):
        # 'fake' is not in FakeSerializer.filterable_fields.
        query_params = {
            'filter[fake]': 'foo'
        }
        with assert_raises(InvalidFilterError):
            self.view.parse_query_params(query_params)
    def test_parse_query_params_raises_InvalidFilterComparisonType(self):
        # gt/lt style comparisons are invalid on string fields.
        query_params = {
            'filter[string_field][gt]': 'foo'
        }
        with assert_raises(InvalidFilterComparisonType):
            self.view.parse_query_params(query_params)
    def test_parse_query_params_raises_InvalidFilterMatchType(self):
        # contains/icontains matching is invalid on date fields.
        query_params = {
            'filter[date_field][icontains]': '2015'
        }
        with assert_raises(InvalidFilterMatchType):
            self.view.parse_query_params(query_params)
    def test_parse_query_params_raises_InvalidFilterOperator(self):
        query_params = {
            'filter[int_field][bar]': 42
        }
        with assert_raises(InvalidFilterOperator):
            self.view.parse_query_params(query_params)
    def test_InvalidFilterOperator_parameterizes_valid_operators(self):
        # The error message should enumerate the operators valid for the field.
        query_params = {
            'filter[int_field][bar]': 42
        }
        try:
            self.view.parse_query_params(query_params)
        except InvalidFilterOperator as err:
            ops = re.search(r'one of (?P<ops>.+)\.$', err.detail).groupdict()['ops']
            assert_equal(ops, "gt, gte, lt, lte, eq, ne")
        query_params = {
            'filter[string_field][bar]': 'foo'
        }
        try:
            self.view.parse_query_params(query_params)
        except InvalidFilterOperator as err:
            ops = re.search(r'one of (?P<ops>.+)\.$', err.detail).groupdict()['ops']
            assert_equal(ops, "contains, icontains, eq, ne")
    def test_parse_query_params_supports_multiple_filters(self):
        # NOTE(review): the duplicate dict key below collapses to a single
        # entry ('bar' wins), so only one filter actually reaches the parser.
        query_params = {
            'filter[string_field]': 'foo',
            'filter[string_field]': 'bar',
        }
        # FIXME: This test may only be checking one field
        fields = self.view.parse_query_params(query_params)
        assert_in('string_field', fields.get('filter[string_field]'))
        for key, field_name in fields.iteritems():
            assert_in(field_name['string_field']['value'], ('foo', 'bar'))
    def test_convert_value_bool(self):
        # The string 'true' converts to the boolean True for bool fields.
        value = 'true'
        field = FakeSerializer._declared_fields['bool_field']
        value = self.view.convert_value(value, field)
        assert_true(isinstance(value, bool))
        assert_true(value)
    def test_convert_value_date(self):
        # Date strings convert to tz-aware UTC datetimes.
        value = '2014-12-12'
        field = FakeSerializer._declared_fields['date_field']
        value = self.view.convert_value(value, field)
        assert_true(isinstance(value, datetime.datetime))
        assert_equal(value, parser.parse('2014-12-12').replace(tzinfo=pytz.utc))
    def test_convert_value_int(self):
        value = '9000'
        field = FakeSerializer._declared_fields['int_field']
        value = self.view.convert_value(value, field)
        assert_equal(value, 9000)
    def test_convert_value_float(self):
        value = '42'
        # NOTE(review): orig_type is computed but never asserted against.
        orig_type = type(value)
        field = FakeSerializer._declared_fields['float_field']
        value = self.view.convert_value(value, field)
        assert_equal(value, 42.0)
    def test_convert_value_null_for_list(self):
        # The literal string 'null' converts to an empty list for list fields.
        value = 'null'
        field = FakeSerializer._declared_fields['list_field']
        value = self.view.convert_value(value, field)
        assert_equal(value, [])
    def test_multiple_filter_params_bad_filter(self):
        # Any unknown field in a comma-separated filter key invalidates it.
        query_params = {
            'filter[string_field, not_a_field]': 'test'
        }
        with assert_raises(InvalidFilterError):
            self.view.parse_query_params(query_params)
    def test_bad_filter_operator(self):
        query_params = {
            'filter[relationship_field][invalid]': 'false',
        }
        with assert_raises(InvalidFilterOperator):
            self.view.parse_query_params(query_params)
class TestListFilterMixin(ApiTestCase):
    """Tests for ListFilterMixin.get_filtered_queryset over in-memory records."""
    def setUp(self):
        super(TestListFilterMixin, self).setUp()
        self.view = FakeListView()
    def test_get_filtered_queryset_for_list_field_converts_to_lowercase(self):
        # List-field matching is case-insensitive: 'FOO' matches 'fOO'/'Foo'.
        field_name = 'list_field'
        params = {
            'value': 'FOO',
            'source_field_name': field_name
        }
        default_queryset = [
            FakeRecord(_id=1, list_field=['fOO', 'Foo', 'Bar', 'baR']),
            FakeRecord(_id=2, list_field=['Foo', 'Bax']),
            FakeRecord(_id=3, list_field=['Bar', 'baR', 'bat'])
        ]
        filtered = self.view.get_filtered_queryset(field_name, params, default_queryset)
        for record in filtered:
            assert_not_equal(record._id, 3)
        for id in (1, 2):
            assert_in(id, [f._id for f in filtered])
    def test_get_filtered_queryset_for_list_respects_special_case_of_ids_being_list(self):
        # bool_field filters through its source attribute 'foobar'.
        field_name = 'bool_field'
        params = {
            'value': True,
            'op': 'eq',
            'source_field_name': 'foobar'
        }
        default_queryset = [
            FakeRecord(_id=1, foobar=True),
            FakeRecord(_id=2, foobar=True),
            FakeRecord(_id=3, foobar=False)
        ]
        filtered = self.view.get_filtered_queryset(field_name, params, default_queryset)
        for record in filtered:
            assert_not_equal(record._id, 3)
        for id in (1, 2):
            assert_in(id, [f._id for f in filtered])
    def test_get_filtered_queryset_for_list_respects_id_always_being_list(self):
        # The 'id' field uses the 'in' operator and matches string ids exactly.
        field_name = 'id'
        params = {
            'value': '2',
            'op': 'in',
            'source_field_name': '_id'
        }
        default_queryset = [
            FakeRecord(_id='1', foobar=True),
            FakeRecord(_id='2', foobar=True),
            FakeRecord(_id='3', foobar=False)
        ]
        filtered = self.view.get_filtered_queryset(field_name, params, default_queryset)
        for record in filtered:
            assert_equal(record._id, '2')
        for id in ('1', '3'):
            assert_not_in(id, [f._id for f in filtered])
    def test_parse_query_params_uses_field_source_attribute(self):
        # NOTE(review): duplicates the same-named test on TestFilterMixin.
        query_params = {
            'filter[bool_field]': 'false',
        }
        fields = self.view.parse_query_params(query_params)
        parsed_field = fields['filter[bool_field]']['bool_field']
        assert_equal(parsed_field['source_field_name'], 'foobar')
        assert_equal(parsed_field ['value'], False)
        assert_equal(parsed_field ['op'], 'eq')
class TestODMOrderingFilter(ApiTestCase):
    """Tests for filters.sort_multiple used via Python 2's sorted(cmp=...)."""
    class query:
        # Simple record with only a sortable title.
        title = ' '
        def __init__(self, title):
            self.title = title
        def __str__(self):
            return self.title
    class query_with_num:
        # Record with a title plus a numeric tiebreaker field.
        title = ' '
        number = 0
        def __init__(self, title, number):
            self.title = title
            self.number = number
        def __str__(self):
            return self.title
    def test_filter_queryset_forward(self):
        # NOTE: the cmp= keyword of sorted() exists only on Python 2.
        query_to_be_sorted = [self.query(x) for x in 'NewProj Zip Proj Activity'.split()]
        sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['title']))
        sorted_output = [str(i) for i in sorted_query]
        assert_equal(sorted_output, ['Activity', 'NewProj', 'Proj', 'Zip'])
    def test_filter_queryset_forward_duplicate(self):
        # Duplicate titles stay adjacent in ascending order.
        query_to_be_sorted = [self.query(x) for x in 'NewProj Activity Zip Activity'.split()]
        sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['title']))
        sorted_output = [str(i) for i in sorted_query]
        assert_equal(sorted_output, ['Activity', 'Activity', 'NewProj', 'Zip'])
    def test_filter_queryset_reverse(self):
        # A leading '-' reverses the ordering on that field.
        query_to_be_sorted = [self.query(x) for x in 'NewProj Zip Proj Activity'.split()]
        sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['-title']))
        sorted_output = [str(i) for i in sorted_query]
        assert_equal(sorted_output, ['Zip', 'Proj', 'NewProj', 'Activity'])
    def test_filter_queryset_reverse_duplicate(self):
        query_to_be_sorted = [self.query(x) for x in 'NewProj Activity Zip Activity'.split()]
        sorted_query = sorted(query_to_be_sorted, cmp=filters.sort_multiple(['-title']))
        sorted_output = [str(i) for i in sorted_query]
        assert_equal(sorted_output, ['Zip', 'NewProj', 'Activity', 'Activity'])
    def test_filter_queryset_handles_multiple_fields(self):
        # Ties on 'title' are broken by descending 'number'.
        objs = [self.query_with_num(title='NewProj', number=10),
                self.query_with_num(title='Zip', number=20),
                self.query_with_num(title='Activity', number=30),
                self.query_with_num(title='Activity', number=40)]
        actual = [x.number for x in sorted(objs, cmp=filters.sort_multiple(['title', '-number']))]
        assert_equal(actual, [40, 30, 10, 20])
class TestQueryPatternRegex(TestCase):
    """Tests for the filter query-string regex and the field-name sub-regex.

    QUERY_PATTERN must accept ``filter[a,b,...]`` forms (optionally with a
    space after each comma) and reject trailing commas, blank fields, bad
    separators, and non-alphanumeric field names.
    """

    def setUp(self):
        super(TestQueryPatternRegex, self).setUp()
        self.filter_regex = FakeListView.QUERY_PATTERN
        self.filter_fields = FakeListView.FILTER_FIELDS

    def test_single_field_filter(self):
        filter_str = 'filter[name]'
        match = self.filter_regex.match(filter_str)
        fields = match.groupdict()['fields']
        field_names = re.findall(self.filter_fields, fields)
        assert_equal(fields, 'name')
        assert_equal(field_names[0], 'name')

    def test_double_field_filter(self):
        filter_str = 'filter[name,id]'
        match = self.filter_regex.match(filter_str)
        fields = match.groupdict()['fields']
        field_names = re.findall(self.filter_fields, fields)
        assert_equal(fields, 'name,id')
        assert_equal(field_names[0], 'name')
        assert_equal(field_names[1], 'id')

    def test_multiple_field_filter(self):
        filter_str = 'filter[name,id,another,field,here]'
        match = self.filter_regex.match(filter_str)
        fields = match.groupdict()['fields']
        field_names = re.findall(self.filter_fields, fields)
        assert_equal(fields, 'name,id,another,field,here')
        # Fixed: was the deprecated nose alias ``assert_equals``; use the
        # same ``assert_equal`` helper as the rest of this class.
        assert_equal(len(field_names), 5)

    def test_single_field_filter_end_comma(self):
        filter_str = 'filter[name,]'
        match = self.filter_regex.match(filter_str)
        assert_false(match)

    def test_multiple_field_filter_end_comma(self):
        filter_str = 'filter[name,id,]'
        match = self.filter_regex.match(filter_str)
        assert_false(match)

    def test_multiple_field_filter_with_spaces(self):
        # A single space after each comma is tolerated.
        filter_str = 'filter[name, id]'
        match = self.filter_regex.match(filter_str)
        fields = match.groupdict()['fields']
        field_names = re.findall(self.filter_fields, fields)
        assert_equal(fields, 'name, id')
        assert_equal(field_names[0], 'name')
        assert_equal(field_names[1], 'id')

    def test_multiple_field_filter_with_blank_field(self):
        filter_str = 'filter[name, , id]'
        match = self.filter_regex.match(filter_str)
        assert_false(match)

    def test_multiple_field_filter_non_match(self):
        # ';' is not a valid field separator.
        filter_str = 'filter[name; id]'
        match = self.filter_regex.match(filter_str)
        assert_false(match)

    def test_single_field_filter_non_match(self):
        # Deliberately misspelled prefix: only 'filter[...]' may match.
        filter_str = 'fitler[name]'
        match = self.filter_regex.match(filter_str)
        assert_false(match)

    def test_single_field_non_alphanumeric_character(self):
        # Fixed: the original used the misspelled prefix 'fitler[<name>]',
        # so the non-match was caused by the prefix rather than by the
        # non-alphanumeric characters this test is meant to exercise.
        filter_str = 'filter[<name>]'
        match = self.filter_regex.match(filter_str)
        assert_false(match)
| |
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc # noqa
from tempest.api.share import base
from tempest import config_share as config
from tempest import test
CONF = config.CONF
class ShareIpRulesForNFSNegativeTest(base.BaseSharesTest):
    """Negative tests for IP access rules on an NFS share.

    Each test submits a malformed IP/CIDR target (or an invalid access
    level) and expects the API to reject it with BadRequest. The CIFS
    subclass below reruns the same tests by overriding ``protocol``.
    """
    protocol = "nfs"

    @classmethod
    def resource_setup(cls):
        super(ShareIpRulesForNFSNegativeTest, cls).resource_setup()
        # Skip unless the protocol is enabled AND supports IP rules.
        if not (cls.protocol in CONF.share.enable_protocols and
                cls.protocol in CONF.share.enable_ip_rules_for_protocols):
            msg = "IP rule tests for %s protocol are disabled" % cls.protocol
            raise cls.skipException(msg)
        # create share
        cls.share = cls.create_share(cls.protocol)
        # create snapshot
        cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"])

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_1(self):
        # Octet out of range (256 > 255).
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.256")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_2(self):
        # Non-numeric octet.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.1.1.-")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_3(self):
        # CIDR prefix length greater than 32.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.4/33")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_4(self):
        # Wildcard octet is not accepted.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.*")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_5(self):
        # Wildcard combined with a prefix length.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.*/23")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_6(self):
        # '|' is not a valid address/prefix separator.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.1|23")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_7(self):
        # Negative prefix length.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.1/-1")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_target_8(self):
        # Empty prefix length after the slash.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "ip", "1.2.3.1/")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_with_wrong_level(self):
        # 'su' is not a valid access level value.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"],
                          'ip',
                          '2.2.2.2',
                          'su')

    @test.attr(type=["negative", "gate", ])
    def test_create_duplicate_of_ip_rule(self):
        # test data
        access_type = "ip"
        access_to = "1.2.3.4"
        # create rule
        rule = self.shares_client.create_access_rule(
            self.share["id"], access_type, access_to)
        self.shares_client.wait_for_access_rule_status(
            self.share["id"], rule["id"], "active")
        # try create duplicate of rule
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], access_type, access_to)
class ShareIpRulesForCIFSNegativeTest(ShareIpRulesForNFSNegativeTest):
    """Rerun the negative IP-rule tests against a CIFS share."""
    protocol = "cifs"
class ShareUserRulesForNFSNegativeTest(base.BaseSharesTest):
    """Negative tests for user (name-based) access rules on an NFS share."""
    protocol = "nfs"

    @classmethod
    def resource_setup(cls):
        super(ShareUserRulesForNFSNegativeTest, cls).resource_setup()
        # Skip unless the protocol is enabled AND supports user rules.
        if not (cls.protocol in CONF.share.enable_protocols and
                cls.protocol in CONF.share.enable_user_rules_for_protocols):
            msg = "USER rule tests for %s protocol are disabled" % cls.protocol
            raise cls.skipException(msg)
        # create share
        cls.share = cls.create_share(cls.protocol)
        # create snapshot
        cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"])

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_with_wrong_input_2(self):
        # NOTE(review): identical body to ..._wrong_input_1 below ("try+");
        # one of the two was presumably meant to cover a different invalid
        # character — confirm and differentiate.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "user",
                          "try+")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_with_empty_key(self):
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "user", "")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_with_too_little_key(self):
        # User names shorter than the minimum length are rejected.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "user", "abc")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_with_too_big_key(self):
        # 33 characters exceeds the maximum user name length.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "user", "a" * 33)

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_with_wrong_input_1(self):
        # '+' is not a valid user-name character.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "user",
                          "try+")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_to_snapshot(self):
        # Access rules apply to shares, not snapshots.
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_access_rule,
                          self.snap["id"],
                          access_type="user",
                          access_to="fakeuser")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_user_with_wrong_share_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_access_rule,
                          "wrong_share_id",
                          access_type="user",
                          access_to="fakeuser")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_with_wrong_level(self):
        # 'su' is not a valid access level value.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"],
                          'user',
                          CONF.share.username_for_user_rules,
                          'su')
class ShareUserRulesForCIFSNegativeTest(ShareUserRulesForNFSNegativeTest):
    """Rerun the negative user-rule tests against a CIFS share."""
    protocol = "cifs"
class ShareRulesNegativeTest(base.BaseSharesTest):
    """Negative access-rule tests that do not depend on rule type/protocol."""
    # Tests independent from rule type and share protocol

    @classmethod
    def resource_setup(cls):
        super(ShareRulesNegativeTest, cls).resource_setup()
        # Run only if at least one protocol supports IP or user rules.
        if not (any(p in CONF.share.enable_ip_rules_for_protocols
                    for p in cls.protocols) or
                any(p in CONF.share.enable_user_rules_for_protocols
                    for p in cls.protocols)):
            cls.message = "Rule tests are disabled"
            raise cls.skipException(cls.message)
        # create share
        cls.share = cls.create_share()
        # create snapshot
        cls.snap = cls.create_snapshot_wait_for_active(cls.share["id"])

    @test.attr(type=["negative", "gate", ])
    def test_delete_access_rule_with_wrong_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_access_rule,
                          self.share["id"], "wrong_rule_id")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_type(self):
        # "wrong_type" is not a recognized access type.
        self.assertRaises(lib_exc.BadRequest,
                          self.shares_client.create_access_rule,
                          self.share["id"], "wrong_type", "1.2.3.4")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_with_wrong_share_id(self):
        # Only the share id is passed; presumably access type/target have
        # defaults in the client — TODO confirm against shares_client.
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_access_rule,
                          "wrong_share_id")

    @test.attr(type=["negative", "gate", ])
    def test_create_access_rule_ip_to_snapshot(self):
        # Snapshot ids are not valid targets for access rules.
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.create_access_rule,
                          self.snap["id"])
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags

# --- I/O locations ---
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
    "train_data", None,
    "Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "Analogy questions. "
    "See README.md for how to get 'questions-words.txt'.")

# --- Model hyperparameters ---
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
                     "Numbers of training examples each step processes "
                     "(no minibatching).")

# --- Execution / data-processing options ---
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")

# --- Interactive debugging ---
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
    "model.nearby([b'proton', b'elephant', b'maxwell'])")

FLAGS = flags.FLAGS
class Options(object):
    """Hyperparameters and file locations for the word2vec model.

    All values are read from the module-level FLAGS object; the save
    directory is created on construction if it does not yet exist.
    """

    def __init__(self):
        # Model options: embedding dimension.
        self.emb_dim = FLAGS.embedding_size

        # Training options.
        self.train_data = FLAGS.train_data            # training text file
        self.num_samples = FLAGS.num_neg_samples      # negative samples/example
        self.learning_rate = FLAGS.learning_rate      # initial learning rate
        # After this many epochs the learning rate has decayed linearly to
        # zero and training stops.
        self.epochs_to_train = FLAGS.epochs_to_train
        self.concurrent_steps = FLAGS.concurrent_steps
        self.batch_size = FLAGS.batch_size            # examples per step
        # Words predicted to the left and right of the target word.
        self.window_size = FLAGS.window_size
        # Minimum occurrences for a word to enter the vocabulary.
        self.min_count = FLAGS.min_count
        # Subsampling threshold for frequent words.
        self.subsample = FLAGS.subsample

        # Where summaries/checkpoints/vocab are written.
        self.save_path = FLAGS.save_path
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

        # Eval options: the analogy-question file.
        self.eval_data = FLAGS.eval_data
class Word2Vec(object):
  """Word2Vec model (Skipgram).

  Builds the training graph (via custom skipgram/neg_train ops), an analogy
  evaluation graph, and helpers for training, evaluation, and inspection.
  """

  def __init__(self, options, session):
    self._options = options
    self._session = session
    self._word2id = {}   # word -> vocabulary id
    self._id2word = []   # vocabulary id -> word (index is the id)
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()

  def read_analogies(self):
    """Reads through the analogy question file.

    Returns:
      questions: a [n, 4] numpy array containing the analogy question's
                 word ids.
      questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
      for line in analogy_f:
        if line.startswith(b":"):  # Skip comments.
          continue
        words = line.strip().lower().split(b" ")
        ids = [self._word2id.get(w.strip()) for w in words]
        # Skip any question with an out-of-vocabulary word.
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

  def build_graph(self):
    """Build the model graph."""
    opts = self._options
    # The training data. A text file.
    (words, counts, words_per_epoch, current_epoch, total_words_processed,
     examples, labels) = word2vec.skipgram(filename=opts.train_data,
                                           batch_size=opts.batch_size,
                                           window_size=opts.window_size,
                                           min_count=opts.min_count,
                                           subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)

    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i

    # Declare all variables we need.
    # Input words embedding: [vocab_size, emb_dim]
    w_in = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size,
             opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
        name="w_in")
    # Output words embedding: [vocab_size, emb_dim]
    # (the original comment here wrongly said "Global step").
    w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
    # Global step: scalar, i.e., shape [].
    global_step = tf.Variable(0, name="global_step")

    # Linear learning rate decay toward zero over the full training run.
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    lr = opts.learning_rate * tf.maximum(
        0.0001,
        1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)

    # Training nodes: bump the step, then apply one true-SGD update via the
    # custom neg_train op.
    inc = global_step.assign_add(1)
    with tf.control_dependencies([inc]):
      train = word2vec.neg_train(w_in,
                                 w_out,
                                 examples,
                                 labels,
                                 lr,
                                 vocab_count=opts.vocab_counts.tolist(),
                                 num_negative_samples=opts.num_samples)

    self._w_in = w_in
    self._examples = examples
    self._labels = labels
    self._lr = lr
    self._train = train
    self.global_step = global_step
    self._epoch = current_epoch
    self._words = total_words_processed

  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
        f.write("%s %d\n" % (vocab_word,
                             opts.vocab_counts[i]))

  def build_eval_graph(self):
    """Build the evaluation graph."""
    # Eval graph
    opts = self._options

    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c. E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.

    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]

    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._w_in, 1)

    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs

    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)

    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)

    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)

    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, opts.vocab_size))

    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx

    # Properly initialize all variables.
    tf.global_variables_initializer().run()

    self.saver = tf.train.Saver()

  def _train_thread_body(self):
    """Run training steps until the input pipeline advances one epoch."""
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break

  def train(self):
    """Train the model for one epoch using concurrent worker threads."""
    opts = self._options

    initial_epoch, initial_words = self._session.run([self._epoch, self._words])

    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)

    # Report progress every 5 seconds until the epoch counter advances.
    last_words, last_time = initial_words, time.time()
    while True:
      time.sleep(5)  # Reports our progress once a while.
      (epoch, step, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
                                                                    lr, rate),
            end="")
      sys.stdout.flush()
      if epoch != initial_epoch:
        break

    for t in workers:
      t.join()

  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions."""
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx

  def eval(self):
    """Evaluate analogy questions and reports accuracy."""
    # How many questions we get right at precision@1.
    correct = 0

    try:
      total = self._analogy_questions.shape[0]
    except AttributeError:
      # read_analogies() must be called before eval().
      raise AttributeError("Need to read analogy questions.")

    start = 0
    while start < total:
      limit = start + 2500
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))

  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3.

    Prints (and returns) the best predicted word that is not one of the
    query words; prints "unknown" and returns None when every candidate is
    among the query words.
    """
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        print(c)
        # BUG FIX: previously execution fell through and printed "unknown"
        # unconditionally, even after printing a valid prediction.
        return c
    print("unknown")
    return None

  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print("\n%s\n=====================================" % (words[i]))
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
  """Open an interactive IPython session for debugging/development."""
  import IPython
  user_ns = dict(local_ns) if local_ns else {}
  # globals() is applied last, so module-level names win over any
  # same-named entries in local_ns (matches the original update order).
  user_ns.update(globals())
  IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
  """Train a word2vec model."""
  # All three paths are mandatory; bail out early if any is missing.
  if not (FLAGS.train_data and FLAGS.eval_data and FLAGS.save_path):
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device("/cpu:0"):
      model = Word2Vec(opts, session)
      model.read_analogies()  # Read analogy questions
    for _ in xrange(opts.epochs_to_train):
      model.train()  # Process one epoch
      model.eval()  # Eval analogies.
    # Perform a final save.
    model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy(b'france', b'paris', b'russia')
      # [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
      _start_shell(locals())
| |
###########################################################################
# Joshua R. Boverhof, LBNL
# See Copyright for copyright notice!
# $Id: WSresource.py 1439 2008-01-11 00:47:15Z boverhof $
###########################################################################
import sys, warnings
# twisted & related imports
from zope.interface import classProvides, implements, Interface
from twisted.python import log, failure
from twisted.web.error import NoResource
from twisted.web.server import NOT_DONE_YET
import twisted.web.http
import twisted.web.resource
# ZSI imports
from ZSI import _get_element_nsuri_name, EvaluateException, ParseException
from ZSI.parse import ParsedSoap
from ZSI.writer import SoapWriter
from ZSI import fault
# WS-Address related imports
from ZSI.address import Address
from ZSI.ServiceContainer import WSActionException
from interfaces import CheckInputArgs, HandlerChainInterface, CallbackChainInterface,\
DataHandler
class LoggingHandlerChain:
    """Synchronous handler chain with per-hop debug logging.

    Each handler may transform the message; the terminal callback
    (handlercb) produces the response object, which processResponse
    serializes to a string.
    """

    @CheckInputArgs(CallbackChainInterface, HandlerChainInterface)
    def __init__(self, cb, *handlers):
        self.handlercb = cb
        self.handlers = handlers
        # Debug logging only when at least one log observer is attached.
        self.debug = len(log.theLogPublisher.observers) > 0

    def processRequest(self, arg, **kw):
        verbose = self.debug
        if verbose:
            log.msg('--->PROCESS REQUEST: %s' % arg, debug=1)
        for handler in self.handlers:
            if verbose:
                log.msg('\t%s handler: %s' % (arg, handler), debug=1)
            arg = handler.processRequest(arg, **kw)
        return self.handlercb.processRequest(arg, **kw)

    def processResponse(self, arg, **kw):
        verbose = self.debug
        if verbose:
            log.msg('===>PROCESS RESPONSE: %s' % str(arg), debug=1)
        if arg is None:
            return None
        for handler in self.handlers:
            if verbose:
                log.msg('\t%s handler: %s' % (arg, handler), debug=1)
            arg = handler.processResponse(arg, **kw)
        serialized = str(arg)
        if verbose:
            log.msg(serialized, debug=1)
        return serialized
#
# Stability: Unstable
#
class DefaultCallbackHandler:
    """Terminal chain callback: dispatches a parsed SOAP request to the
    resource's ``soap_<operation>`` method and returns its response object."""
    classProvides(CallbackChainInterface)

    @classmethod
    def processRequest(cls, ps, **kw):
        """invokes callback that should return a (request,response) tuple.
        representing the SOAP request and response respectively.
        ps -- ParsedSoap instance representing HTTP Body.
        request -- twisted.web.server.Request
        """
        resource = kw['resource']
        request = kw['request']
        # The method name is derived from the local name of the SOAP body's
        # root element: soap_<localname>.
        method = getattr(resource, 'soap_%s' %
                         _get_element_nsuri_name(ps.body_root)[-1])
        try:
            req_pyobj,rsp_pyobj = method(ps, request=request)
        except TypeError, ex:
            # Bound method did not unpack into the required (request,
            # response) pair.
            log.err(
                'ERROR: service %s is broken, method MUST return request, response'\
                % cls.__name__
            )
            raise
        except Exception, ex:
            log.err('failure when calling bound method')
            raise
        # Only the response half of the pair is propagated down the chain.
        return rsp_pyobj
class WSAddressHandler:
    """General WS-Address handler. This implementation depends on a
    'wsAction' dictionary in the service stub which contains keys to
    WS-Action values.

    Implementation saves state on request response flow, so using this
    handle is not reliable if execution is deferred between proceesRequest
    and processResponse.

    TODO: sink this up with wsdl2dispatch
    TODO: reduce coupling with WSAddressCallbackHandler.
    """
    implements(HandlerChainInterface)

    def processRequest(self, ps, **kw):
        # TODO: Clean this up
        resource = kw['resource']
        # 'root' maps (nsuri, name) of the body root to an operation name.
        d = getattr(resource, 'root', None)
        key = _get_element_nsuri_name(ps.body_root)
        if d is None or d.has_key(key) is False:
            raise RuntimeError,\
                'Error looking for key(%s) in root dictionary(%s)' %(key, str(d))
        # Saved for processResponse — see the statefulness caveat above.
        self.op_name = d[key]
        self.address = address = Address()
        address.parse(ps)
        action = address.getAction()
        if not action:
            raise WSActionException('No WS-Action specified in Request')
        request = kw['request']
        http_headers = request.getAllHeaders()
        soap_action = http_headers.get('soapaction')
        # If a SOAPAction HTTP header is present it must agree with the
        # WS-Address Action value (quotes stripped).
        if soap_action and soap_action.strip('\'"') != action:
            raise WSActionException(\
                'SOAP Action("%s") must match WS-Action("%s") if specified.'\
                %(soap_action,action)
            )
        # Save WS-Address in ParsedSoap instance.
        ps.address = address
        return ps

    def processResponse(self, sw, **kw):
        # No response body (one-way operation): clear saved state.
        if sw is None:
            self.address = None
            return
        request, resource = kw['request'], kw['resource']
        if isinstance(request, twisted.web.http.Request) is False:
            # NOTE(review): the bare name 'http' is not bound in this module,
            # so this error path would raise NameError rather than the
            # intended TypeError — confirm and use twisted.web.http.Request.
            raise TypeError, '%s instance expected' %http.Request
        d = getattr(resource, 'wsAction', None)
        key = self.op_name
        if d is None or d.has_key(key) is False:
            # NOTE(review): WSActionNotSpecified is not imported here; this
            # path would raise NameError — verify the intended exception
            # class and import it.
            raise WSActionNotSpecified,\
                'Error looking for key(%s) in wsAction dictionary(%s)' %(key, str(d))
        addressRsp = Address(action=d[key])
        # Pick the response URL scheme from the transport's TLS flag.
        if request.transport.TLS == 0:
            addressRsp.setResponseFromWSAddress(\
                self.address, 'http://%s:%d%s' %(
                    request.host.host, request.host.port, request.path)
            )
        else:
            addressRsp.setResponseFromWSAddress(\
                self.address, 'https://%s:%d%s' %(
                    request.host.host, request.host.port, request.path)
            )
        addressRsp.serialize(sw, typed=False)
        self.address = None
        return sw
class WSAddressCallbackHandler:
    """Terminal chain callback for WS-Address services: dispatches to the
    resource's ``wsa_<operation>`` method, passing the parsed WS-Address."""
    classProvides(CallbackChainInterface)

    @classmethod
    def processRequest(cls, ps, **kw):
        """invokes callback that should return a (request,response) tuple.
        representing the SOAP request and response respectively.
        ps -- ParsedSoap instance representing HTTP Body.
        request -- twisted.web.server.Request
        """
        resource = kw['resource']
        request = kw['request']
        method = getattr(resource, 'wsa_%s' %
                         _get_element_nsuri_name(ps.body_root)[-1])
        # TODO: grab ps.address, clean this up.
        try:
            req_pyobj,rsp_pyobj = method(ps, ps.address, request=request)
        except TypeError, ex:
            # NOTE(review): 'self' is not defined inside this classmethod;
            # this error path would raise NameError — should be cls.__name__.
            log.err(
                'ERROR: service %s is broken, method MUST return request, response'\
                %self.__class__.__name__
            )
            raise
        except Exception, ex:
            log.err('failure when calling bound method')
            raise
        return rsp_pyobj
class DeferHandlerChain:
    """Handler chain executed asynchronously on a twisted Deferred.

    processRequest queues each handler's processRequest, the terminal
    callback, each handler's processResponse, and finally str() onto one
    Deferred, then schedules it via reactor.callLater and returns it.
    """

    @CheckInputArgs(CallbackChainInterface, HandlerChainInterface)
    def __init__(self, cb, *handlers):
        self.handlercb = cb
        self.handlers = handlers
        self.debug = len(log.theLogPublisher.observers) > 0

    def processRequest(self, arg, **kw):
        from twisted.internet import reactor
        from twisted.internet.defer import Deferred
        verbose = self.debug
        if verbose:
            log.msg('--->DEFER PROCESS REQUEST: %s' % arg, debug=1)
        chain = Deferred()
        for handler in self.handlers:
            if verbose:
                log.msg('\t%s handler: %s' % (arg, handler), debug=1)
                log.msg('\thandler callback: %s' % handler.processRequest)
            chain.addCallback(handler.processRequest, **kw)
        chain.addCallback(self.handlercb.processRequest, **kw)
        # Kick off the chain shortly after returning control to the reactor.
        reactor.callLater(.0001, chain.callback, arg)
        if verbose:
            log.msg('===>DEFER PROCESS RESPONSE: %s' % str(arg), debug=1)
        for handler in self.handlers:
            if verbose:
                log.msg('\t%s handler: %s' % (arg, handler), debug=1)
            chain.addCallback(handler.processResponse, **kw)
        chain.addCallback(str)
        return chain

    def processResponse(self, arg, **kw):
        # Response processing is already queued on the Deferred above.
        return arg
class DefaultHandlerChainFactory:
    """Factory for the default chain: LoggingHandlerChain wired to the
    plain SOAP callback and the data (de)serialization handler."""
    protocol = LoggingHandlerChain

    @classmethod
    def newInstance(cls):
        """Return a fresh handler-chain instance."""
        return cls.protocol(DefaultCallbackHandler, DataHandler)
class DefaultHandlerChain:
    """Synchronous handler chain (same contract as LoggingHandlerChain):
    handlers transform the message, the terminal callback builds the
    response, and processResponse serializes it to a string."""

    @CheckInputArgs(CallbackChainInterface, HandlerChainInterface)
    def __init__(self, cb, *handlers):
        self.handlercb = cb
        self.handlers = handlers
        # Debug output only when a twisted log observer is registered.
        self.debug = len(log.theLogPublisher.observers) > 0

    def processRequest(self, arg, **kw):
        if self.debug:
            log.msg('--->PROCESS REQUEST: %s' % arg, debug=1)
        for hop in self.handlers:
            if self.debug:
                log.msg('\t%s handler: %s' % (arg, hop), debug=1)
            arg = hop.processRequest(arg, **kw)
        return self.handlercb.processRequest(arg, **kw)

    def processResponse(self, arg, **kw):
        if self.debug:
            log.msg('===>PROCESS RESPONSE: %s' % str(arg), debug=1)
        if arg is None:
            return None
        for hop in self.handlers:
            if self.debug:
                log.msg('\t%s handler: %s' % (arg, hop), debug=1)
            arg = hop.processResponse(arg, **kw)
        text = str(arg)
        if self.debug:
            log.msg(text, debug=1)
        return text
class WSAddressHandlerChainFactory:
    """Factory for a WS-Addressing-aware handler chain.

    Same shape as DefaultHandlerChainFactory but inserts a
    WSAddressHandler() into the chain.
    """
    protocol = DefaultHandlerChain
    @classmethod
    def newInstance(cls):
        """Return a new handler chain with WS-Addressing support."""
        return cls.protocol(WSAddressCallbackHandler, DataHandler,
            WSAddressHandler())
class WSResource(twisted.web.resource.Resource, object):
    """Twisted web resource that serves SOAP POST requests via a
    request/response handler chain.

    class variables:
        encoding -- charset advertised in the Content-Type header;
            None means plain "text/xml" with no charset parameter
        factory -- handler chain factory; its "newInstance" classmethod
            must return an object with processRequest/processResponse
    """
    encoding = "UTF-8"
    factory = DefaultHandlerChainFactory
    def __init__(self):
        """Plain Resource initialization; no child resources are added."""
        twisted.web.resource.Resource.__init__(self)
    def _writeResponse(self, response, request, status=200):
        """Write a complete HTTP reply and finish the request.
        request -- request message
        response --- response message (the serialized reply body)
        status -- HTTP Status
        """
        request.setResponseCode(status)
        if self.encoding is not None:
            mimeType = 'text/xml; charset="%s"' % self.encoding
        else:
            mimeType = "text/xml"
        request.setHeader("Content-Type", mimeType)
        request.setHeader("Content-Length", str(len(response)))
        request.write(response)
        request.finish()
    def _writeFault(self, fail, request):
        """Serialize a twisted Failure as a SOAP Fault, HTTP status 500.
        fail -- twisted Failure wrapping the exception and traceback
        request -- request message
        """
        response = fault.FaultFromException(fail.value, False, fail.tb).AsSOAP()
        self._writeResponse(response, request, status=500)
    def render_POST(self, request):
        """Dispatch Method called by twisted render, creates a
        request/response handler chain.
        request -- twisted.web.server.Request
        """
        from twisted.internet.defer import maybeDeferred
        chain = self.factory.newInstance()
        data = request.content.read()
        # maybeDeferred lets the chain be synchronous or Deferred-based;
        # errors anywhere in the chain fall through to _writeFault.
        d = maybeDeferred(chain.processRequest, data, request=request, resource=self)
        d.addCallback(chain.processResponse, request=request, resource=self)
        d.addCallback(self._writeResponse, request)
        d.addErrback(self._writeFault, request)
        # the reply is written asynchronously by the callbacks above
        return NOT_DONE_YET
| |
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
# Case normalization: Mac (HFS) filenames compare case-insensitively.
def normcase(path):
    """Return *path* lower-cased (dummy on Posix, meaningful here)."""
    return path.lower()
def isabs(s):
    """Return true if a path is absolute.

    A path is relative when it starts with a colon or contains no colon
    at all; otherwise the text before the first colon is a volume name
    and the path is absolute.
    """
    if ':' not in s:
        return False
    return not s.startswith(':')
def join(s, *p):
    """Join path components, Mac style.

    Each non-absolute component is appended to the running result with a
    single colon separator; an absolute component restarts the result.

    Bug fix: the emptiness test must be on the *running* result ``path``,
    not the original first argument ``s``.  With ``not s`` a falsy first
    argument made every subsequent component replace the result instead
    of being appended (CPython's macpath.join tests ``not path``).
    """
    path = s
    for t in p:
        if (not path) or isabs(t):
            path = t
            continue
        if t[:1] == ':':
            t = t[1:]
        if ':' not in path:
            path = ':' + path
        if path[-1:] != ':':
            path = path + ':'
        path = path + t
    return path
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
def splitext(p):
    """Split a path into (root, ext) with root + ext == p.

    ext starts at the last dot of the last pathname component; it is ''
    when there is no such dot (or the dot belongs to an earlier
    component).
    """
    dot = p.rfind('.')
    colon = p.rfind(':')
    if dot > colon:
        return p[:dot], p[dot:]
    return p, ''
def splitdrive(p):
    """Return ('', p): Mac paths have no DOS-style drive component.

    The volume name is deliberately NOT treated as a drive -- it lacks
    DOS drive-letter semantics such as a per-drive current directory.
    """
    return '', p
# Convenience wrappers around split().
def dirname(s):
    """Return the directory portion of *s*."""
    return split(s)[0]
def basename(s):
    """Return the final, colon-free component of *s*."""
    return split(s)[1]
def ismount(s):
    """True when *s* names a volume root: absolute with an empty tail."""
    if not isabs(s):
        return False
    head_tail = split(s)
    return len(head_tail) == 2 and head_tail[1] == ''
def isdir(s):
    """Return true if the pathname refers to an existing directory."""
    try:
        mode = os.stat(s).st_mode
    except os.error:
        return 0
    return S_ISDIR(mode)
# Thin wrappers over os.stat() for common file metadata queries.
def getsize(filename):
    """Return the size of a file in bytes, reported by os.stat()."""
    return os.stat(filename).st_size
def getmtime(filename):
    """Return the last modification time of a file, reported by os.stat()."""
    return os.stat(filename).st_mtime
def getatime(filename):
    """Return the last access time of a file, reported by os.stat()."""
    return os.stat(filename).st_atime
def islink(s):
    """Return true if the pathname refers to a symbolic link.

    Resolution goes through the Carbon alias manager; any failure
    (including Carbon being unavailable) reports "not a link".
    """
    try:
        from Carbon import File as _carbon_file
        return _carbon_file.ResolveAliasFile(s, 0)[2]
    except:
        return False
def isfile(s):
    """Return true if the pathname refers to an existing regular file."""
    try:
        mode = os.stat(s).st_mode
    except os.error:
        return False
    return S_ISREG(mode)
def getctime(filename):
    """Return the creation time of a file, reported by os.stat().

    (On POSIX systems st_ctime is the metadata-change time instead.)
    """
    return os.stat(filename).st_ctime
def exists(s):
    """Test whether a path exists.  Returns False for broken symbolic links."""
    try:
        os.stat(s)
    except os.error:
        return False
    return True
# lstat never follows the final link, so this reports True even for a
# broken symlink.  Whether stat/lstat differ on the Mac is moot; using
# lstat is safe in any case.
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links."""
    try:
        os.lstat(path)
    except os.error:
        return False
    return True
# Return the longest prefix of all list elements.
def commonprefix(m):
    """Given a list of pathnames, returns the longest common leading component.

    Only the lexicographic min and max need to be compared: any prefix
    shared by those two strings is shared by every string in between.
    """
    if not m:
        return ''
    s1 = min(m)
    s2 = max(m)
    n = min(len(s1), len(s2))
    # range() instead of the Python-2-only xrange(): this block otherwise
    # works unchanged on Python 3, where xrange raises NameError.
    for i in range(n):
        if s1[i] != s2[i]:
            return s1[:i]
    return s1[:n]
def expandvars(path):
    """No-op kept for interface compatibility with other platforms."""
    return path
def expanduser(path):
    """No-op kept for interface compatibility with other platforms."""
    return path
class norm_error(Exception):
    """Path cannot be normalized"""

def normpath(s):
    """Normalize a pathname.  Will return the same result for
    equivalent paths.

    Collapses "name::" (parent-directory) steps and strips a redundant
    trailing colon; raises norm_error when "::" appears immediately
    after the volume name, where no parent directory exists.
    """
    if ":" not in s:
        return ":"+s
    comps = s.split(":")
    i = 1
    while i < len(comps)-1:
        if comps[i] == "" and comps[i-1] != "":
            if i > 1:
                # "name::" -> drop "name" and the empty component
                del comps[i-1:i+1]
                i = i - 1
            else:
                # Bug fix: instantiation-style raise works on Python 2
                # AND 3; the old `raise norm_error, msg` statement is a
                # SyntaxError on Python 3.
                raise norm_error('Cannot use :: immediately after volume name')
        else:
            i = i + 1
    s = ":".join(comps)
    # remove trailing ":" except for ":" and "Volume:"
    if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
        s = s[:-1]
    return s
def walk(top, func, arg):
    """Directory tree walk with callback function.

    Calls func(arg, dirname, fnames) for top and then, recursively, for
    every subdirectory (excluding '.' and '..').  func may prune the walk
    by editing fnames in place (del / slice assignment); only the names
    that remain are descended into.  arg is passed through untouched and
    commonly carries a filter pattern or an accumulator (None is fine).
    Unreadable directories are skipped silently; links are not followed.
    """
    try:
        entries = os.listdir(top)
    except os.error:
        return
    func(arg, top, entries)
    for entry in entries:
        full = join(top, entry)
        if isdir(full) and not islink(full):
            walk(full, func, arg)
def abspath(path):
    """Return an absolute, normalized version of *path*."""
    if isabs(path):
        return normpath(path)
    return normpath(join(os.getcwd(), path))
# realpath is a no-op on systems without islink support
def realpath(path):
    """Resolve aliases in every component of *path* via Carbon.

    Falls back to the plain absolute path when the Carbon toolbox is
    unavailable (i.e. everywhere but classic MacPython).
    """
    path = abspath(path)
    try:
        import Carbon.File
    except ImportError:
        return path
    if not path:
        return path
    components = path.split(':')
    resolved = components[0] + ':'
    for comp in components[1:]:
        resolved = join(resolved, comp)
        # resolve the alias at each step so later components attach to
        # the real location
        resolved = Carbon.File.FSResolveAliasFile(resolved, 1)[0].as_pathname()
    return resolved
supports_unicode_filenames = False
| |
"""
Data loading script for pascal voc2007
=====================================
Load VOC dataset to pytorch torch.utils.data.Dataset class for further
training and processing.
*Author*: Yu Zhang, Northwestern Polytechnical University
"""
import os
import shutil
import pandas as pd
import torch
import numpy as np
from bs4 import BeautifulSoup
from torch.utils.data import Dataset, DataLoader
from skimage import io, transform
import random
import torchvision.transforms as transforms
def select_validation(val_size, root_dir='/home/zhangyu/data/VOC2007/'):
    """Split `val_size` random images out of the VOC annotations.

    Copies every xml annotation into `train_annos`, then moves the
    randomly chosen files into a freshly created `val_annos` directory.

    Args:
        val_size: number of images to reserve for validation (capped at
            the number of entries in val.txt).
        root_dir: VOC2007 root directory.  Parameterized so the split is
            not tied to one machine; the default preserves the original
            behavior.
    """
    train_annos = os.path.join(root_dir, 'train_annos')
    val_annos = os.path.join(root_dir, 'val_annos')
    # fails if val_annos already exists -- the split must be fresh
    os.mkdir(val_annos)
    xmls_dir = os.path.join(root_dir, 'Annotations')
    txtfile = os.path.join(root_dir, 'ImageSets/Main/val.txt')
    df = pd.read_csv(txtfile, dtype=str, names=['filename'])
    # cap at the population size so random.sample cannot raise
    mask = random.sample(range(df.shape[0]), min(val_size, df.shape[0]))
    val_files = df['filename'].values[mask]
    shutil.copytree(xmls_dir, train_annos)
    for filename in val_files:
        xmlname = filename + '.xml'
        shutil.move(os.path.join(train_annos, xmlname),
                    os.path.join(val_annos, xmlname))
def training_set_feature(set_dir):
    """Compute image mean and std for the training set.

    NOTE(review): stub -- the body was never implemented; calling this
    currently does nothing and returns None.
    """
def fold_files(foldname):
    """List a folder whose files all share one extension.

    Returns (extension, basenames-without-extension); returns None when
    the folder is empty.
    """
    entries = os.listdir(foldname)
    if not entries:
        return None
    ext = entries[0].split('.')[-1]
    suffix = '.' + ext
    stems = [entry.replace(suffix, '') for entry in entries]
    return ext, stems
def load_annotation(xmlFile):
    """Read the annotations for one image from its VOC xml file.

    Returns a dict: {'filename': <image file name>,
                     'annotation': [{'object_name': str,
                                     'location': np.array([xmin, ymin,
                                                           xmax, ymax])},
                                    ...]}
    """
    with open(xmlFile) as f:
        xml = f.readlines()
    # strip tabs so BeautifulSoup sees one flat markup string
    xml = ''.join([line.strip('\t') for line in xml])
    anno = BeautifulSoup(xml, "html5lib")
    anno_dic = {}
    fname = anno.findChild('filename').contents[0]
    anno_dic['filename'] = fname
    objs = anno.findAll('object')
    # print('Number of objects:', len(objs))
    objects = []
    for obj in objs:
        obj_name = obj.findChild('name').contents[0]
        # first <bndbox> child holds the box corners as text nodes
        bbox = obj.findChildren('bndbox')[0]
        xmin = int(bbox.findChildren('xmin')[0].contents[0])
        ymin = int(bbox.findChildren('ymin')[0].contents[0])
        xmax = int(bbox.findChildren('xmax')[0].contents[0])
        ymax = int(bbox.findChildren('ymax')[0].contents[0])
        obj_dic = {'object_name': obj_name,
                   'location': np.array([xmin, ymin, xmax, ymax])}
        objects.append(obj_dic)
    anno_dic['annotation'] = objects
    return anno_dic
def get_cls_cord_from_anno(anno_dic, cls_name):
    """Collect every bounding box of class `cls_name` from one annotation.

    Returns None when the class is absent, the 1-D location array for a
    single occurrence, and a vstack-ed (k, 4) array for k > 1 boxes.
    """
    cords = None
    for obj in anno_dic['annotation']:
        if obj['object_name'] != cls_name:
            continue
        if cords is None:
            cords = obj['location']
        else:
            cords = np.vstack((cords, obj['location']))
    return cords
def load_class(xmlFile):
    """Build a 20-dim multi-hot VOC class vector from one xml file.

    Only class presence is read; bounding boxes are ignored.
    """
    all_cls = np.asarray(['aeroplane', 'bicycle', 'bird', 'boat',
                          'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                          'diningtable', 'dog', 'horse', 'motorbike',
                          'person', 'pottedplant', 'sheep', 'sofa',
                          'train', 'tvmonitor'])
    with open(xmlFile) as f:
        raw = f.readlines()
    markup = ''.join([line.strip('\t') for line in raw])
    soup = BeautifulSoup(markup, "html5lib")
    # print('Number of objects:', len(soup.findAll('object')))
    labels = np.zeros(all_cls.size)
    for obj in soup.findAll('object'):
        name = obj.findChild('name').contents[0]
        labels[np.where(all_cls == name)[0][0]] = 1
    return labels
class VOCDataset(Dataset):
    """PASCAL VOC2007 dataset exposed as a torch Dataset.

    Each item is a dict with the image file name, the original (H, W)
    size, the raw image array and a 20-dim multi-hot class vector
    (no bounding boxes).
    """
    def __init__(self, xmlsPath, imgDir, transform=None):
        """
        Args
            xmlsPath: Path to xml files with image annotations, one xml file per image
            imgDir: Directory with all the images
            transform: optional callable applied to each sample dict
        """
        # the image list is derived from the annotation folder contents
        _, self.imgList = fold_files(xmlsPath)
        self.xmlsPath = xmlsPath
        self.imgDir = imgDir
        self.transform = transform
    def __len__(self):
        return len(self.imgList)
    def __getitem__(self, idx):
        imgName = self.imgList[idx] + '.jpg'
        img = io.imread(os.path.join(self.imgDir, imgName))
        # original spatial size (H, W), kept alongside the sample
        im_sz = np.array(list(img.shape[:2]))
        cls = load_class(os.path.join(
            self.xmlsPath, ''.join([self.imgList[idx], '.xml'])))
        sample = {'filename':imgName, 'sz': im_sz, 'image': img, 'class': cls}
        if self.transform:
            sample = self.transform(sample)
        return sample
class Rescale(object):
    """Rescale the image in a sample to a given size (no-bbox variant).

    Args:
        output_size (int or tuple): target size.  A tuple is used as-is;
            an int resizes the smaller image edge to that value while
            preserving the aspect ratio.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        image = sample['image']
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            short = self.output_size
            if h > w:
                new_h, new_w = short * h / w, short
            else:
                new_h, new_w = short, short * w / h
        else:
            new_h, new_w = self.output_size
        resized = transform.resize(image, (int(new_h), int(new_w)),
                                   mode='constant')
        return {'filename': sample['filename'], 'image': resized,
                'class': sample['class'], 'sz': sample['sz']}
class ToTensor(object):
    """Convert the sample's ndarrays to torch Tensors (no-bbox variant)."""
    def __call__(self, sample):
        # torch wants channel-first layout: H x W x C -> C x H x W
        chw = sample['image'].transpose((2, 0, 1))
        return {'filename': sample['filename'],
                'sz': sample['sz'],
                'image': torch.from_numpy(chw),
                'class': torch.from_numpy(sample['class'])}
class Normalize(object):
    """Normalize the sample image channel-wise via torchvision."""
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, sample):
        normalizer = transforms.Normalize(self.mean, self.std)
        return {'filename': sample['filename'],
                'image': normalizer(sample['image']),
                'class': sample['class'],
                'sz': sample['sz']}
class Augmentation(object):
    """Random image augmentation: crop (with padding) plus horizontal flip.

    Applied with probability 0.7; otherwise the sample passes through
    unchanged.  Class labels and recorded size are never modified.
    """
    def __call__(self, sample):
        image, cls, filename, sz = sample['image'], sample['class'], \
                                   sample['filename'], sample['sz']
        # randomly choose whether do augmentation
        if random.random() < 0.7:
            h, w = image.shape[:2]
            # convert to PIL.Image for crop and flip
            topil = transforms.ToPILImage()
            image = topil(image)
            # crop 20-30 px off each dimension, pad 0-4 px first
            crop_size = random.randint(20, 30)
            pad_size = random.randint(0, 4)
            crop = transforms.RandomCrop(size=[h-crop_size, w-crop_size],
                                         padding=pad_size)
            image = crop(image)
            flip = transforms.RandomHorizontalFlip()
            image = flip(image)
            # back to an ndarray for the rest of the pipeline
            image = np.array(image.convert('RGB'))
        return {'filename': filename, 'image': image, 'class': cls, 'sz': sz}
class RescaleBox(object):
    """Rescale the image in a sample to a given size, sample with bbox GT

    Args:
        output_size (int or tuple): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size
    def __call__(self, sample):
        # sample = deepcopy(sample)
        image, annos = sample['image'], sample['info']
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            # int: resize the shorter edge, preserve aspect ratio
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h, new_w = int(new_h), int(new_w)
        image = transform.resize(image, (new_h, new_w), mode='constant')
        objs = annos['annotation']
        for index, obj in enumerate(objs):
            obj_loc = obj['location']
            # scale each corner by the same factor as the image and clamp
            # into [1, new_size] (coordinates appear to be 1-based here;
            # NOTE(review): confirm against the VOC annotation convention)
            obj_loc[0] = max(int(obj_loc[0] * (new_w / w)), 1)
            obj_loc[2] = min(int(obj_loc[2] * (new_w / w)), new_w)
            obj_loc[1] = max(int(obj_loc[1] * (new_h / h)), 1)
            obj_loc[3] = min(int(obj_loc[3] * (new_h / h)), new_h)
            annos['annotation'][index]['location'] = obj_loc
        return {'image': image, 'info': annos}
class ToTensorBox(object):
    """Convert ndarrays in sample to Tensors (bbox variant)."""
    def __call__(self, sample):
        # numpy H x W x C -> torch C x H x W
        chw = sample['image'].transpose((2, 0, 1))
        info = sample['info']
        # convert each box's location array in place
        for entry in info['annotation']:
            entry['location'] = torch.from_numpy(entry['location'])
        return {'image': torch.from_numpy(chw), 'info': info}
| |
"""Control tasks execution order"""
import fnmatch
from collections import deque
from collections import OrderedDict
import re
from .exceptions import InvalidTask, InvalidCommand, InvalidDodoFile
from .cmdparse import TaskParse, CmdOption
from .task import Task, DelayedLoaded
from .loader import generate_tasks
class RegexGroup(object):
    '''Helper to keep track of all delayed-tasks which regexp target
    matches the target specified from command line.
    '''
    def __init__(self, target, tasks):
        # target name specified in command line
        self.target = target
        # set of delayed-task names (str) that might still build the target
        self.tasks = tasks
        # keep track if the target was already found (set by the dispatcher)
        self.found = False
class TaskControl(object):
    """Manages tasks inter-relationship

    There are 3 phases
    1) the constructor gets a list of tasks and do initialization
    2) 'process' the command line options for tasks are processed
    3) 'task_dispatcher' dispatch tasks to runner

    Process dependencies and targets to find out the order tasks
    should be executed. Also apply filter to exclude tasks from
    execution. And parse task cmd line options.

    @ivar tasks: (dict) Key: task name ([taskgen.]name)
                 Value: L{Task} instance
    @ivar targets: (dict) Key: fileName
                   Value: task_name
    """
    def __init__(self, task_list, auto_delayed_regex=False):
        self.tasks = OrderedDict()
        self.targets = {}
        # when True, delayed tasks without an explicit target_regex are
        # also candidates when matching command-line targets
        self.auto_delayed_regex = auto_delayed_regex
        # name of task in order to be executed
        # this the order as in the dodo file. the real execution
        # order might be different if the dependencies require so.
        self._def_order = []
        # list of tasks selected to be executed
        self.selected_tasks = None
        # sanity check and create tasks dict
        for task in task_list:
            # task must be a Task
            if not isinstance(task, Task):
                msg = "Task must an instance of Task class. %s"
                raise InvalidTask(msg % (task.__class__))
            # task name must be unique
            if task.name in self.tasks:
                msg = "Task names must be unique. %s"
                raise InvalidDodoFile(msg % task.name)
            self.tasks[task.name] = task
            self._def_order.append(task.name)
        # expand wild-card task-dependencies
        for task in self.tasks.values():
            for pattern in task.wild_dep:
                task.task_dep.extend(self._get_wild_tasks(pattern))
        self._check_dep_names()
        self.set_implicit_deps(self.targets, task_list)
    def _check_dep_names(self):
        """check if user input task_dep or setup_task that doesnt exist"""
        # check task-dependencies exist.
        for task in self.tasks.values():
            for dep in task.task_dep:
                if dep not in self.tasks:
                    msg = "%s. Task dependency '%s' does not exist."
                    raise InvalidTask(msg% (task.name, dep))
            for setup_task in task.setup_tasks:
                if setup_task not in self.tasks:
                    msg = "Task '%s': invalid setup task '%s'."
                    raise InvalidTask(msg % (task.name, setup_task))
    @staticmethod
    def set_implicit_deps(targets, task_list):
        """set/add task_dep based on file_dep on a target from another task
        @param targets: (dict) fileName -> task_name
        @param task_list: (list - Task) task with newly added file_dep
        """
        # 1) create a dictionary associating every target->task. where the task
        # builds that target.
        for task in task_list:
            for target in task.targets:
                if target in targets:
                    msg = ("Two different tasks can't have a common target." +
                           "'%s' is a target for %s and %s.")
                    raise InvalidTask(msg % (target, task.name,
                                             targets[target]))
                targets[target] = task.name
        # 2) now go through all dependencies and check if they are target from
        # another task.
        # FIXME - when used with delayed tasks needs to check if
        # any new target matches any old file_dep.
        for task in task_list:
            TaskControl.add_implicit_task_dep(targets, task, task.file_dep)
    @staticmethod
    def add_implicit_task_dep(targets, task, deps_list):
        """add implicit task_dep for `task` for newly added `file_dep`
        @param targets: (dict) fileName -> task_name
        @param task: (Task) task with newly added file_dep
        @param deps_list: (list - str): list of file_dep for task
        """
        for dep in deps_list:
            if (dep in targets and targets[dep] not in task.task_dep):
                task.task_dep.append(targets[dep])
    def _get_wild_tasks(self, pattern):
        """get list of tasks that match pattern"""
        wild_list = []
        for t_name in self._def_order:
            if fnmatch.fnmatch(t_name, pattern):
                wild_list.append(t_name)
        return wild_list
    def _process_filter(self, task_selection):
        """process cmd line task options
        [task_name [-task_opt [opt_value]] ...] ...
        @param task_selection: list of strings with task names/params or target
        @return list of task names. Expanding glob and removed params
        """
        filter_list = []
        def add_filtered_task(seq, f_name):
            """add task to list `filter_list` and set task.options from params
            @return list - str: elements of `seq` not yet processed
            """
            filter_list.append(f_name)
            # only tasks specified by name can contain parameters
            if f_name in self.tasks:
                # parse task_selection
                the_task = self.tasks[f_name]
                # remaining items are other tasks not positional options
                taskcmd = TaskParse([CmdOption(opt) for opt in the_task.params])
                the_task.options, seq = taskcmd.parse(seq)
                # if task takes positional parameters set all as pos_arg_val
                if the_task.pos_arg is not None:
                    the_task.pos_arg_val = seq
                    seq = []
            return seq
        # process...
        seq = task_selection[:]
        # process cmd_opts until nothing left
        while seq:
            f_name = seq.pop(0)  # always start with a task/target name
            # select tasks by task-name pattern
            if '*' in f_name:
                for task_name in self._get_wild_tasks(f_name):
                    add_filtered_task((), task_name)
            else:
                seq = add_filtered_task(seq, f_name)
        return filter_list
    def _filter_tasks(self, task_selection):
        """Select tasks specified by filter.
        @param task_selection: list of strings with task names/params or target
        @return (list) of string. where elements are task name.
        """
        selected_task = []
        filter_list = self._process_filter(task_selection)
        for filter_ in filter_list:
            # by task name
            if filter_ in self.tasks:
                selected_task.append(filter_)
                continue
            # by target
            if filter_ in self.targets:
                selected_task.append(self.targets[filter_])
                continue
            # if can not find name check if it is a sub-task of a delayed
            basename = filter_.split(':', 1)[0]
            if basename in self.tasks:
                loader = self.tasks[basename].loader
                if not loader:
                    raise InvalidCommand(not_found=filter_)
                loader.basename = basename
                # placeholder task; the loader creates the real one later
                self.tasks[filter_] = Task(filter_, None, loader=loader)
                selected_task.append(filter_)
                continue
            # check if target matches any regex
            delayed_matched = []  # list of Task
            for task in list(self.tasks.values()):
                if not task.loader:
                    continue
                if task.name.startswith('_regex_target'):
                    continue
                if task.loader.target_regex:
                    if re.match(task.loader.target_regex, filter_):
                        delayed_matched.append(task)
                elif self.auto_delayed_regex:
                    delayed_matched.append(task)
            delayed_matched_names = [t.name for t in delayed_matched]
            regex_group = RegexGroup(filter_, set(delayed_matched_names))
            # create extra tasks to load delayed tasks matched by regex
            for task in delayed_matched:
                loader = task.loader
                loader.basename = task.name
                name = '{}_{}:{}'.format('_regex_target', filter_, task.name)
                loader.regex_groups[name] = regex_group
                self.tasks[name] = Task(name, None,
                                        loader=loader,
                                        file_dep=[filter_])
                selected_task.append(name)
            if not delayed_matched:
                # not found
                raise InvalidCommand(not_found=filter_)
        return selected_task
    def process(self, task_selection):
        """
        @param task_selection: list of strings with task names/params
        @return (list - string) each element is the name of a task
        """
        # execute only tasks in the filter in the order specified by filter
        if task_selection is not None:
            self.selected_tasks = self._filter_tasks(task_selection)
        else:
            # if no filter is defined execute all tasks
            # in the order they were defined.
            self.selected_tasks = self._def_order
    def task_dispatcher(self):
        """return a TaskDispatcher generator
        """
        assert self.selected_tasks is not None, \
            "must call 'process' before this"
        return TaskDispatcher(self.tasks, self.targets, self.selected_tasks)
class ExecNode(object):
    """Each task will have an instance of this

    This is used to keep track of waiting events and the generator for
    dep nodes

    @ivar run_status (str): contains the result of Dependency.get_status().status
            modified by runner, value can be:
           - None: not processed yet
           - run: task is selected to be executed (it might be running or
                   waiting for setup)
           - ignore: task wont be executed (user forced deselect)
           - up-to-date: task wont be executed (no need)
           - done: task finished its execution
    """
    def __init__(self, task, parent):
        self.task = task
        # list of dependencies not processed by _add_task yet
        self.task_dep = task.task_dep[:]
        self.calc_dep = task.calc_dep.copy()
        # ancestors are used to detect cyclic references.
        # it does not contain a list of tasks that depends on this node
        # for that check the attribute waiting_me
        self.ancestors = []
        if parent:
            self.ancestors.extend(parent.ancestors)
        self.ancestors.append(task.name)
        # Wait for a task to be selected to its execution
        # checking if it is up-to-date
        self.wait_select = False
        # Wait for a task to finish its execution
        self.wait_run = set()  # task names
        self.wait_run_calc = set()  # task names
        self.waiting_me = set()  # ExecNode
        self.run_status = None
        # all ancestors that failed
        self.bad_deps = []
        self.ignored_deps = []
        # generator from TaskDispatcher._add_task
        self.generator = None
    def reset_task(self, task, generator):
        """reset task & generator after task is created by its own `loader`"""
        self.task = task
        self.task_dep = task.task_dep[:]
        self.calc_dep = task.calc_dep.copy()
        self.generator = generator
    def parent_status(self, parent_node):
        # record failed / ignored ancestors so the runner can report them
        if parent_node.run_status == 'failure':
            self.bad_deps.append(parent_node)
        elif parent_node.run_status == 'ignore':
            self.ignored_deps.append(parent_node)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.task.name)
    def step(self):
        """get node's next step (None once the generator is exhausted)"""
        try:
            return next(self.generator)
        except StopIteration:
            return None
def no_none(decorated):
    """decorator for a generator to discard/filter-out None values

    Fix: apply functools.wraps so the wrapper keeps the decorated
    generator's __name__/__doc__ (useful for debugging/repr); the
    filtering behavior is unchanged.
    """
    from functools import wraps
    @wraps(decorated)
    def _func(*args, **kwargs):
        """wrap generator"""
        for value in decorated(*args, **kwargs):
            if value is not None:
                yield value
    return _func
class TaskDispatcher(object):
"""Dispatch another task to be selected/executed, mostly handle with MP
Note that a dispatched task might not be ready to be executed.
"""
    def __init__(self, tasks, targets, selected_tasks):
        # all known tasks: task name -> Task
        self.tasks = tasks
        # fileName -> task_name (the task that builds the file)
        self.targets = targets
        self.nodes = {}  # key task-name, value: ExecNode
        # queues
        self.waiting = set()  # of ExecNode
        self.ready = deque()  # of ExecNode
        # generator that actually yields tasks to the runner
        self.generator = self._dispatcher_generator(selected_tasks)
    def _gen_node(self, parent, task_name):
        """return ExecNode for task_name if not created yet

        Returns None when the node already exists (callers filter None
        out); raises InvalidDodoFile when the existing node closes a
        dependency cycle through `parent`.
        """
        node = self.nodes.get(task_name, None)
        # first time, create node
        if node is None:
            node = ExecNode(self.tasks[task_name], parent)
            node.generator = self._add_task(node)
            self.nodes[task_name] = node
            return node
        # detect cyclic/recursive dependencies
        if parent and task_name in parent.ancestors:
            msg = "Cyclic/recursive dependencies for task %s: [%s]"
            cycle = " -> ".join(parent.ancestors + [task_name])
            raise InvalidDodoFile(msg % (task_name, cycle))
    def _node_add_wait_run(self, node, task_list, calc=False):
        """updates node.wait_run (or node.wait_run_calc when calc=True)
        @param node (ExecNode)
        @param task_list (list - str) tasks that node should wait for
        @param calc (bool) task_list is for calc_dep
        """
        # wait_for: contains tasks that `node` needs to wait for and
        # were not executed yet.
        wait_for = set()
        for name in task_list:
            dep_node = self.nodes[name]
            if (not dep_node) or dep_node.run_status in (None, 'run'):
                wait_for.add(name)
            else:
                # if dep task was already executed:
                # a) set parent status
                node.parent_status(dep_node)
                # b) update dependencies from calc_dep results
                if calc:
                    self._process_calc_dep_results(dep_node, node)
        # update ExecNode setting parent/dependent relationship
        for name in wait_for:
            self.nodes[name].waiting_me.add(node)
        if calc:
            node.wait_run_calc.update(wait_for)
        else:
            node.wait_run.update(wait_for)
    @no_none
    def _add_task(self, node):
        """@return a generator that produces:
             - ExecNode for task dependencies
             - 'wait' to wait for an event (i.e. a dep task run)
             - Task when ready to be dispatched to runner (run or be selected)
             - None values are of no interest and are filtered out
               by the decorator no_none

            note that after a 'wait' is sent it is the responsibility of the
            caller to ensure the current ExecNode cleared all its waiting
            before calling `next()` again on this generator
        """
        this_task = node.task
        # skip this task if task belongs to a regex_group that already
        # executed the task used to build the given target
        if this_task.loader:
            regex_group = this_task.loader.regex_groups.get(this_task.name, None)
            if regex_group and regex_group.found:
                return
        # add calc_dep & task_dep until all processed
        # calc_dep may add more deps so need to loop until nothing left
        while True:
            calc_dep_list = list(node.calc_dep)
            node.calc_dep.clear()
            task_dep_list = node.task_dep[:]
            node.task_dep = []
            for calc_dep in calc_dep_list:
                yield self._gen_node(node, calc_dep)
            self._node_add_wait_run(node, calc_dep_list, calc=True)
            # add task_dep
            for task_dep in task_dep_list:
                yield self._gen_node(node, task_dep)
            self._node_add_wait_run(node, task_dep_list)
            # do not wait until all possible task_dep are created
            if (node.calc_dep or node.task_dep):
                continue # pragma: no cover # coverage cant catch this #198
            elif (node.wait_run or node.wait_run_calc):
                yield 'wait'
            else:
                break
        # generate tasks from a DelayedLoader
        if this_task.loader:
            ref = this_task.loader.creator
            to_load = this_task.loader.basename or this_task.name
            this_loader = self.tasks[to_load].loader
            if this_loader and not this_loader.created:
                new_tasks = generate_tasks(to_load, ref(), ref.__doc__)
                TaskControl.set_implicit_deps(self.targets, new_tasks)
                for nt in new_tasks:
                    if not nt.loader:
                        nt.loader = DelayedLoaded
                    self.tasks[nt.name] = nt
            # check itself for implicit dep (used by regex_target)
            TaskControl.add_implicit_task_dep(
                self.targets, this_task, this_task.file_dep)
            # remove file_dep since generated tasks are not required
            # to really create the target (support multiple matches)
            if regex_group:
                this_task.file_dep = {}
                if regex_group.target in self.targets:
                    regex_group.found = True
                else:
                    regex_group.tasks.remove(this_task.loader.basename)
                    if len(regex_group.tasks) == 0:
                        # In case no task is left, we cannot find a task
                        # generating this target. Print an error message!
                        raise InvalidCommand(not_found=regex_group.target)
            # mark this loader to not be executed again
            this_task.loader.created = True
            this_task.loader = DelayedLoaded
            # this task was placeholder to execute the loader
            # now it needs to be re-processed with the real task
            yield "reset generator"
            assert False, "This generator can not be used again"
        # add itself
        yield this_task
        # tasks that contain setup-tasks need to be yielded twice
        if this_task.setup_tasks:
            # run_status None means task is waiting for other tasks
            # in order to check if up-to-date. so it needs to wait
            # before scheduling its setup-tasks.
            if node.run_status is None:
                node.wait_select = True
                yield "wait"
            # if this task should run, so schedule setup-tasks before itself
            if node.run_status == 'run':
                for setup_task in this_task.setup_tasks:
                    yield self._gen_node(node, setup_task)
                self._node_add_wait_run(node, this_task.setup_tasks)
                if node.wait_run:
                    yield 'wait'
                # re-send this task after setup_tasks are sent
                yield this_task
def _get_next_node(self, ready, tasks_to_run):
"""get ExecNode from (in order):
.1 ready
.2 tasks_to_run (list in reverse order)
"""
if ready:
return ready.popleft()
# get task group from tasks_to_run
while tasks_to_run:
task_name = tasks_to_run.pop()
node = self._gen_node(None, task_name)
if node:
return node
def _update_waiting(self, processed):
    """Update the 'ready' and 'waiting' queues after a node is processed.

    @param processed: (ExecNode) node just processed by the runner, or None
    """
    # no task processed, just ignore
    if processed is None:
        return

    node = processed

    # if node was waiting select must only receive select event
    if node.wait_select:
        self.ready.append(node)
        self.waiting.remove(node)
        node.wait_select = False
        # status == run means this was not just select completed:
        # the task still has to execute, so do not notify waiters yet
        # NOTE(review): nesting reconstructed from mangled indentation
        if node.run_status == 'run':
            return

    # notify every node that was waiting on this one
    for waiting_node in node.waiting_me:
        waiting_node.parent_status(node)

        # is_ready indicates if node.generator can be invoked again
        task_name = node.task.name

        # node wait_run will be ready if there are nothing left to wait
        if task_name in waiting_node.wait_run:
            waiting_node.wait_run.remove(task_name)
            is_ready = not (waiting_node.wait_run or
                            waiting_node.wait_run_calc)
        # node wait_run_calc
        else:
            assert task_name in waiting_node.wait_run_calc
            waiting_node.wait_run_calc.remove(task_name)
            # calc_dep might add new deps that can be run without
            # waiting for the completion of the remaining deps
            is_ready = True
            self._process_calc_dep_results(node, waiting_node)

        # this node can be further processed
        if is_ready and (waiting_node in self.waiting):
            self.ready.append(waiting_node)
            self.waiting.remove(waiting_node)
def _process_calc_dep_results(self, node, waiting_node):
    """Apply values produced by a finished calc_dep task to its waiter.

    @param node: the ExecNode of the completed calc_dep task
    @param waiting_node: the ExecNode whose task consumes the values
    """
    # refresh this task dependencies with values got from calc_dep
    values = node.task.values
    # remember how many task_dep entries existed before the update, so
    # only the newly added ones are scheduled below
    len_task_deps = len(waiting_node.task.task_dep)
    old_calc_dep = waiting_node.task.calc_dep.copy()
    waiting_node.task.update_deps(values)
    TaskControl.add_implicit_task_dep(
        self.targets, waiting_node.task,
        values.get('file_dep', []))

    # update node's list of non-processed dependencies
    new_task_dep = waiting_node.task.task_dep[len_task_deps:]
    waiting_node.task_dep.extend(new_task_dep)
    new_calc_dep = waiting_node.task.calc_dep - old_calc_dep
    waiting_node.calc_dep.update(new_calc_dep)
def _dispatcher_generator(self, selected_tasks):
    """Return a generator dispatching tasks to the runner.

    Protocol: the runner sends back the processed ExecNode (or None) on
    each `yield`; the generator yields either an ExecNode to execute,
    or the string "hold on" when everything is blocked waiting.
    """
    # each selected task will create a tree (from dependencies) of
    # tasks to be processed
    tasks_to_run = list(reversed(selected_tasks))
    node = None  # current active ExecNode

    while True:
        # get current node
        if not node:
            node = self._get_next_node(self.ready, tasks_to_run)
            if not node:
                if self.waiting:
                    # all tasks are waiting, hold on
                    processed = (yield "hold on")
                    self._update_waiting(processed)
                    continue
                # we are done!
                return

        # get next step from current node
        next_step = node.step()

        # got None, nothing left for this generator
        if next_step is None:
            node = None
            continue

        # got a task, send ExecNode to runner
        if isinstance(next_step, Task):
            processed = (yield self.nodes[next_step.name])
            self._update_waiting(processed)

        # got new ExecNode, add to ready_queue
        elif isinstance(next_step, ExecNode):
            self.ready.append(next_step)

        # node just performed a delayed creation of tasks, restart
        elif next_step == "reset generator":
            node.reset_task(self.tasks[node.task.name],
                            self._add_task(node))

        # got 'wait', add ExecNode to waiting queue
        else:
            assert next_step == "wait"
            self.waiting.add(node)
            node = None
| |
import json
import os
import pydoc
from eg import color
from eg import substitute
# The file name suffix expected for example files.
EXAMPLE_FILE_SUFFIX = '.md'

# Version of eg itself.
# Also bump in setup.py.
VERSION = '0.1.4'

# Flags for showing where the examples for commands are coming from.
# '+' marks a command with only custom examples; '*' marks one with both
# custom and default examples (see get_list_of_all_supported_commands).
FLAG_ONLY_CUSTOM = '+'
FLAG_CUSTOM_AND_DEFAULT = '*'

# This flag indicates that we should use the fallback pager (pydoc.pager)
# rather than piping through an external pager command.
FLAG_FALLBACK = 'pydoc.pager'

# The name of the file storing mappings of aliases to programs with entries.
ALIAS_FILE_NAME = 'aliases.json'
def handle_program(program, config):
    """Look up the examples for `program` and display them via the pager.

    Prints a not-found message when neither default nor custom examples
    exist for the (alias-resolved) program.
    """
    # aliases take precedence, so resolve them before looking for files
    resolved_program = get_resolved_program(program, config)

    default_file_path = None
    if has_default_entry_for_program(resolved_program, config):
        default_file_path = get_file_path_for_program(
            resolved_program, config.examples_dir)

    custom_file_path = None
    if has_custom_entry_for_program(resolved_program, config):
        custom_file_path = get_file_path_for_program(
            resolved_program, config.custom_dir)

    # Handle the case where we have nothing for them.
    if default_file_path is None and custom_file_path is None:
        print(
            'No entry found for ' +
            program +
            '. Run `eg --list` to see all available entries.'
        )
        return

    raw_contents = get_contents_from_files(default_file_path, custom_file_path)
    formatted_contents = get_formatted_contents(
        raw_contents,
        use_color=config.use_color,
        color_config=config.color_config,
        squeeze=config.squeeze,
        subs=config.subs,
    )
    page_string(formatted_contents, config.pager_cmd)
def get_file_path_for_program(program, dir_to_search):
    """Return the expected path of the example file for `program`.

    The path is where the file *would* live; it is not guaranteed to
    exist. `dir_to_search` must be a fully expanded directory path (no
    `~` or environment variables) and must not be None.

    Raises TypeError when dir_to_search is None.
    """
    if dir_to_search is None:
        raise TypeError('examples_dir cannot be None')
    return os.path.join(dir_to_search, program + EXAMPLE_FILE_SUFFIX)
def has_default_entry_for_program(program, config):
    """Return True if has standard examples for program, else False."""
    # no examples directory configured means no default examples at all
    if not config.examples_dir:
        return False
    candidate = get_file_path_for_program(program, config.examples_dir)
    return os.path.isfile(candidate)
def has_custom_entry_for_program(program, config):
    """Return True if has custom examples for a program, else false."""
    # no custom directory configured means no custom examples at all
    if not config.custom_dir:
        return False
    candidate = get_file_path_for_program(program, config.custom_dir)
    return os.path.isfile(candidate)
def get_contents_from_files(default_file_path, custom_file_path):
    """Concatenate the contents of the custom and default example files.

    Either path may be None/falsy, in which case that file is skipped.
    Custom content, when present, is placed before the default content.
    """
    pieces = []
    if custom_file_path:
        pieces.append(_get_contents_of_file(custom_file_path))
    if default_file_path:
        pieces.append(_get_contents_of_file(default_file_path))
    return ''.join(pieces)
def page_string(str_to_page, pager_cmd):
    """
    Page str_to_page via the pager.
    """
    # The default command is `less -R`. If the user kept the default but
    # doesn't actually have less installed, piping would fail, so fall back
    # to pydoc.pager (which performs smarter capability checks than we want
    # to replicate) both when no command is configured and when the
    # explicit fallback flag is set.
    use_fallback = pager_cmd is None or pager_cmd == FLAG_FALLBACK
    try:
        if use_fallback:
            pydoc.pager(str_to_page)
        else:
            # Otherwise, obey the user.
            pydoc.pipepager(str_to_page, cmd=pager_cmd)
    except KeyboardInterrupt:
        # a user quitting the pager mid-stream is not an error
        pass
def _get_contents_of_file(path):
"""Get the contents of the file at path. The file must exist."""
with open(path, 'r') as f:
result = f.read()
return result
def _is_example_file(file_name):
    """
    True if the file_name is an example file, else False.

    An example file is any file ending in EXAMPLE_FILE_SUFFIX ('.md').
    This filters out other files living in the same directory, such as
    the aliases.json file.
    """
    return file_name.endswith(EXAMPLE_FILE_SUFFIX)
def get_list_of_all_supported_commands(config):
    """
    Generate a list of all the commands that have examples known to eg. The
    format of the list is the command names. The fact that there are examples
    for 'cp', for example, would mean that 'cp' was in the list.

    The format of the list contains additional information to say if there
    are only default examples, only custom examples, or both (matching
    FLAG_ONLY_CUSTOM = '+' and FLAG_CUSTOM_AND_DEFAULT = '*'):

        cp      (only default)
        cp +    (only custom)
        cp *    (default and custom)

    Aliases are shown as alias -> resolved, with resolved having its '*' or
    '+' as expected. Aliases that shadow custom-only file names are expected
    to be shown instead of the custom file names. This is intentional, as
    that is the behavior for file resolution--an alias will hide a custom
    file.
    """
    # Collect raw directory listings; missing/unset dirs contribute nothing.
    default_files = []
    custom_files = []

    if config.examples_dir and os.path.isdir(config.examples_dir):
        default_files = os.listdir(config.examples_dir)
    if config.custom_dir and os.path.isdir(config.custom_dir):
        custom_files = os.listdir(config.custom_dir)

    # Now filter so we only have example files, not things like aliases.json.
    default_files = [path for path in default_files if _is_example_file(path)]
    custom_files = [path for path in custom_files if _is_example_file(path)]

    def get_without_suffix(file_name):
        """
        Return the file name without the suffix, or the file name itself
        if it does not have the suffix.
        """
        return file_name.split(EXAMPLE_FILE_SUFFIX)[0]

    default_files = [get_without_suffix(f) for f in default_files]
    custom_files = [get_without_suffix(f) for f in custom_files]

    set_default_commands = set(default_files)
    set_custom_commands = set(custom_files)

    alias_dict = get_alias_dict(config)

    # Partition commands by where their examples come from.
    both_defined = set_default_commands & set_custom_commands
    only_default = set_default_commands - set_custom_commands
    only_custom = set_custom_commands - set_default_commands

    all_commands = both_defined | only_default | only_custom

    # Build the flagged display string for every concrete command.
    command_to_rep = {}
    for command in all_commands:
        rep = None
        if command in both_defined:
            rep = command + ' ' + FLAG_CUSTOM_AND_DEFAULT
        elif command in only_default:
            rep = command
        elif command in only_custom:
            rep = command + ' ' + FLAG_ONLY_CUSTOM
        else:
            raise NameError('command not in known set: ' + str(command))
        command_to_rep[command] = rep

    result = []
    all_commands_and_aliases = all_commands.union(alias_dict.keys())
    for command in all_commands_and_aliases:
        if command in alias_dict:
            # aliases get precedence
            target = alias_dict[command]
            rep_of_target = command_to_rep[target]
            result.append(command + ' -> ' + rep_of_target)
        else:
            rep = command_to_rep[command]
            result.append(rep)

    result.sort()
    return result
def get_squeezed_contents(contents):
    """
    Squeeze the contents by removing blank lines between definition and example
    and remove duplicate blank lines except between sections.
    """
    # Remove the blank line between a definition and its example code.
    # NOTE(review): the exact run of spaces inside these literals encodes the
    # example-code indentation and may have been mangled in transit --
    # confirm against the example file format before changing.
    line_between_example_code = substitute.Substitution(
        '\n\n ',
        '\n ',
        True
    )
    # Collapse triple blank lines between examples into double ones.
    lines_between_examples = substitute.Substitution('\n\n\n', '\n\n', True)
    # Keep sections slightly more separated than individual examples.
    lines_between_sections = substitute.Substitution('\n\n\n\n', '\n\n\n', True)

    result = contents
    result = line_between_example_code.apply_and_get_result(result)
    result = lines_between_examples.apply_and_get_result(result)
    result = lines_between_sections.apply_and_get_result(result)
    return result
def get_colorized_contents(contents, color_config):
    """Colorize the contents based on the color_config."""
    colorizer = color.EgColorizer(color_config)
    return colorizer.colorize_text(contents)
def get_substituted_contents(contents, substitutions):
    """
    Apply each Substitution in `substitutions` to `contents`, in order,
    feeding the output of one substitution into the next, and return the
    final result.
    """
    current = contents
    for substitution in substitutions:
        current = substitution.apply_and_get_result(current)
    return current
def get_formatted_contents(raw_contents, use_color, color_config, squeeze, subs):
    """
    Apply formatting to raw_contents and return the result. Formatting is
    applied in the order: color, squeeze, subs. Each stage runs only when
    its corresponding flag/argument is truthy.
    """
    formatted = raw_contents
    if use_color:
        formatted = get_colorized_contents(formatted, color_config)
    if squeeze:
        formatted = get_squeezed_contents(formatted)
    if subs:
        formatted = get_substituted_contents(formatted, subs)
    return formatted
def get_resolved_program(program, config_obj):
    """
    Take a program that may be an alias for another program and return the
    resolved program.

    It only ever resolves a single level of aliasing, so does not support
    aliasing to an alias. Returns the original program if the program is
    not an alias.
    """
    # dict.get falls back to the program itself when it is not an alias
    return get_alias_dict(config_obj).get(program, program)
def get_alias_dict(config_obj):
    """
    Return a dictionary consisting of all aliases known to eg.

    The format is {'alias': 'resolved_program'}. Returns an empty dict
    when no examples directory is configured or the aliases file does
    not exist.
    """
    if not config_obj.examples_dir:
        return {}

    alias_file_path = _get_alias_file_path(config_obj)
    if not os.path.isfile(alias_file_path):
        return {}

    return json.loads(_get_contents_of_file(alias_file_path))
def _get_alias_file_path(config_obj):
    """
    Return the file path for the aliases dict.

    The aliases file (ALIAS_FILE_NAME, 'aliases.json') lives directly
    inside the configured examples directory.
    """
    return os.path.join(config_obj.examples_dir, ALIAS_FILE_NAME)
| |
import mock
from django.conf import settings
from django.contrib.messages.storage.fallback import FallbackStorage
from django.http import QueryDict
from django.core.urlresolvers import reverse
from django.utils.importlib import import_module
from accounts.tests.test_routes import UserProfileTestCase,\
UserProfileMerchantTestCase
from accounts.views import MerchantRegisterView, MerchantVerifyView, \
MerchantResendOtpView, MerchantConfirmView
class TestMerchantView(UserProfileMerchantTestCase):
    """Tests covering merchant registration, OTP verification and OTP resend.

    The nexmo SMS gateway and pyotp verification are mocked throughout so
    no network traffic occurs. The request/session/messages boilerplate
    that was previously repeated for every request is factored into
    `_prepare_request`.
    """

    # Dotted path of the SMS send function that the views call.
    NEXMO_SEND = 'nexmo.libpynexmo.nexmomessage.NexmoMessage.send_request'
    # Payload shape the views treat as a successful send.
    NEXMO_SUCCESS = {"return": {"return": "return"}}

    def _prepare_request(self, request, with_messages=True):
        """Attach the test user, a fresh session and (optionally) a message
        storage to a RequestFactory request, mimicking the middleware stack.
        """
        request.user = self.user
        engine = import_module(settings.SESSION_ENGINE)
        request.session = engine.SessionStore(None)
        if with_messages:
            setattr(request, '_messages', FallbackStorage(request))
        return request

    def test_unapproved_merchant_redirected(self):
        response = self.client.get(reverse('account_merchant'))
        self.assertEqual(response.status_code, 302)

    def test_user_register_as_merchant(self):
        data = {
            'name': 'yourname',
            'country': 2,
            'location': 84,
            'telephone': '12345678901',
            'intlnumber': '+2342345678901',
            'email': 'youremail',
            'address': 'youraddress',
            'slug': 'yourslug',
        }
        otp_data = {'token': 123456}

        # test response without adding logo field to request
        with mock.patch(self.NEXMO_SEND) as mock_send_request:
            mock_send_request.return_value = self.NEXMO_SUCCESS
            request = self._prepare_request(self.factory.post(
                reverse('account_merchant_register'), data))
            response = MerchantRegisterView.as_view()(request)
            self.assertEqual(response.status_code, 200)

        # test with addition of logo field to request
        data['logo'] = 'iu_0jdfj.png'
        with mock.patch(self.NEXMO_SEND) as mock_send_request:
            mock_send_request.return_value = self.NEXMO_SUCCESS
            request = self._prepare_request(self.factory.post(
                reverse('account_merchant_register'), data))
            response = MerchantRegisterView.as_view()(request)
            self.assertEqual(response.status_code, 200)

        # test failed OTP send (gateway returns a falsy value)
        with mock.patch(self.NEXMO_SEND) as mock_send_request:
            mock_send_request.return_value = False
            request = self._prepare_request(self.factory.post(
                reverse('account_merchant_register'), data))
            response = MerchantRegisterView.as_view()(request)
            self.assertEqual(response.status_code, 200)

        # test that OTP number is verified (no message storage needed here)
        with mock.patch('pyotp.TOTP.verify') as mock_verify:
            mock_verify.return_value = True
            request = self._prepare_request(
                self.factory.post(
                    reverse('account_merchant_verify'), otp_data),
                with_messages=False)
            response = MerchantVerifyView.as_view()(request)
            self.assertEqual(response.status_code, 302)

        # test OTP resend view
        with mock.patch(self.NEXMO_SEND) as mock_send_request:
            mock_send_request.return_value = self.NEXMO_SUCCESS
            request = self._prepare_request(self.factory.get(
                reverse('account_merchant_resendotp'), data))
            response = MerchantResendOtpView.as_view()(request)
            self.assertEqual(response.status_code, 302)
class TestOTPVerification(UserProfileTestCase):
    """Regular (non-merchant) users must be redirected away from the OTP
    verification and confirmation pages."""

    def test_otp_form_not_shown__to_user(self):
        verify_response = self.client.get(reverse('account_merchant_verify'))
        self.assertEquals(verify_response.status_code, 302)

    def test_confirmation_page_cannot_be_viewed_by_user(self):
        confirm_request = self.factory.get(
            reverse('account_merchant_confirm'))
        confirm_request.user = self.user
        session_engine = import_module(settings.SESSION_ENGINE)
        confirm_request.session = session_engine.SessionStore(None)
        confirm_request._messages = FallbackStorage(confirm_request)
        response = MerchantConfirmView.as_view()(confirm_request)
        self.assertEquals(response.status_code, 302)
class TestMerchantConfirmationView(UserProfileMerchantTestCase):
    """An approved merchant is allowed to view the confirmation page."""

    def test_confirmation_page_can_be_viewed_by_merchant(self):
        page_request = self.factory.get(reverse('account_merchant_confirm'))
        page_request.user = self.user
        session_engine = import_module(settings.SESSION_ENGINE)
        page_request.session = session_engine.SessionStore(None)
        page_request._messages = FallbackStorage(page_request)
        response = MerchantConfirmView.as_view()(page_request)
        self.assertEquals(response.status_code, 200)
class TransactionsViewTestCase(UserProfileTestCase):
    """Suite of tests for transaction view"""

    def _history_url(self, page):
        """Build the account-history URL carrying a `pg` query parameter."""
        params = QueryDict('', mutable=True)
        params.update({'pg': page})
        return '{base_url}?{querystring}'.format(
            base_url=reverse('account_history'),
            querystring=params.urlencode(),
        )

    def test_transaction_history_cannot_be_viewed_by_unauthenticated_users(
            self):
        self.client.logout()
        response = self.client.get(reverse('account_history'))
        self.assertEquals(response.status_code, 302)

    def test_transaction_history_without_page_number(self):
        response = self.client.get(reverse('account_history'))
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.context['transactions'].number, 1)

    def test_transaction_history_with_page_number_in_url(self):
        response = self.client.get(self._history_url(1))
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.context['transactions'].number, 1)

    def test_transaction_history_with_empty_page(self):
        # requesting a page beyond the end still renders page 1
        response = self.client.get(self._history_url(2))
        self.assertEquals(response.status_code, 200)
        self.assertEquals(response.context['transactions'].number, 1)
| |
"""Script Tool.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 Lewis Baker, Stuart McMahon.
@license: Licensed under the MIT license.
"""
import os.path
from cake.target import Target, FileTarget, getPaths, getTask
from cake.library import Tool
from cake.script import Script
class ScriptTool(Tool):
    """Tool that provides utilities for performing Script operations.

    NOTE: this module is Python 2 code (it uses `basestring` and the
    `except Exc, e` syntax further down).
    """

    def __init__(self, *args, **kwargs):
        Tool.__init__(self, *args, **kwargs)
        # Scripts already pulled in via include(), keyed by the normalised
        # absolute path of the included script, so each is included once.
        self._included = {}

    @property
    def path(self):
        """The path of the currently executing script.
        """
        return Script.getCurrent().path

    @property
    def dir(self):
        """The path of the directory of the currently executing script.
        """
        return Script.getCurrent().dir

    @property
    def variant(self):
        """The Variant the currently executing script is being built with.
        """
        return Script.getCurrent().variant
def setResult(self, **kwargs):
    """Export a result from this script that other scripts can import.

    Other scripts can use getResult(script, name) to get the result
    exported by the other script calling setResult(name=result).

    @param kwargs: name=value pairs to export from the current script.
    """
    Script.getCurrent().setResult(**kwargs)

def getResult(self, script, name, *args, **kwargs):
    """Get a placeholder value that will yield the result of another
    script once that other script has finished executing.

    @param script: Path of the script whose result to fetch.
    @type script: string
    @param name: Name of the result exported by that script.
    @type name: C{str}
    """
    return self.get(script).getResult(name, *args, **kwargs)

def getTarget(self, name):
    """Get the named ScriptTarget for this script.

    @param name: The name of the target to get.
    @type name: C{str}
    """
    return Script.getCurrent().getTarget(name)

def getDefaultTarget(self):
    """Get the default ScriptTarget of the currently executing script."""
    return Script.getCurrent().getDefaultTarget()

def addDefaultTarget(self, target):
    """Add a target to be built when the default target for this script is built.

    @param target: The target to be built.
    @type target: L{Target} or L{AsyncResult} that yields a L{Target}
    """
    Script.getCurrent().getDefaultTarget().addTarget(target)

def addDefaultTargets(self, targets):
    """Add a collection of targets to the default script target.

    The default script target is the target that is built when the user
    executes the current build script without specifying a particular target name.

    @param targets: A collection of targets to be built.
    @type targets: A list of L{Target} or L{AsyncResult} yielding a list of L{Target}
    """
    Script.getCurrent().getDefaultTarget().addTargets(targets)

def addTarget(self, name, target):
    """Add a target to the named script target.

    The specified target will be built whenever the named script target is
    built.

    @param name: The name of the script-target to add the target to.
    @type name: C{str}
    @param target: A target to build when the named script-target is
    built.
    @type target: L{Target} or L{AsyncResult} yielding a L{Target}
    """
    Script.getCurrent().getTarget(name).addTarget(target)

def addTargets(self, name, targets):
    """Add a collection of targets to the named script target.

    The specified targets will be built whenever the named script target is
    requested to be built.

    @param name: The name of the script-target to add the targets to.
    @type name: C{str}
    @param targets: The targets to build when the named script-target is
    built.
    @type targets: list of L{Target} or L{AsyncResult} yielding L{Target}
    """
    Script.getCurrent().getTarget(name).addTargets(targets)
def get(self, script, keywords={}, useContext=None, configScript=None, configScriptName=None):
    """Get another script to use in referencing targets.

    @param script: Path of the script to load.
    @type script: string
    @param keywords: A set of keywords used to find the variant the script
    will be executed with. The variant is looked up in the script's configuration.
    NOTE(review): mutable default argument -- only safe while findVariant
    does not mutate it.
    @type keywords: dictionary of string -> string
    @param useContext: If False or if None and either configScript or configScriptName
    are not None then lookup the corresponding configuration script starting from the
    script's path, if True then use the current configuration/variant.
    @type useContext: bool or None
    @param configScript: The path of the configuration script to use to execute the script.
    Ignored if useContext is True.
    @type configScript: string or None
    @param configScriptName: If not None and configScript is None then find the
    configuration script with this name starting the search at the script's path.
    Ignored if useContext is True.
    @type configScriptName: string or None
    @return: A L{ScriptProxy} wrapping the executed script.
    """
    # basestring: Python 2 only.
    if not isinstance(script, basestring):
        raise ValueError("'script' must be a string")

    script = self.configuration.basePath(script)

    # Default behaviour: stay in the current context unless an explicit
    # configuration script (or name) was requested.
    if useContext is None:
        useContext = configScript is None and configScriptName is None

    if useContext:
        # Use the current configuration and lookup the variant relative
        # to the current variant.
        baseVariant = Script.getCurrent().variant
        variant = self.configuration.findVariant(keywords, baseVariant=baseVariant)
        return ScriptProxy(self.configuration.execute(path=script, variant=variant))
    else:
        # Re-evaluate the configuration to execute the script with.
        # Uses the keywords specified to find the variant in the variants
        # defined in that configuration.
        path = self.configuration.abspath(script)
        if configScript is None:
            configuration = self.engine.findConfiguration(
                path=path,
                configScriptName=configScriptName,
            )
        else:
            configuration = self.engine.getConfiguration(
                path=self.configuration.abspath(configScript),
            )
        variant = configuration.findVariant(keywords)
        return ScriptProxy(configuration.execute(path=path, variant=variant))
def cwd(self, *args):
    """Return the path prefixed with the this script's directory.

    Examples::
    env.cwd("a") -> "{cwd}/a"
    env.cwd(["a", "b", "c"]) -> ["{cwd}/a", "{cwd}/b", "{cwd}/c"]

    @param args: The arguments that need to have the prefix added.
    @type args: string or list(string)
    @return: The path prefixed with this script's directory.
    @rtype: string or list(string)
    """
    script = Script.getCurrent()
    return script.cwd(*args)

def include(self, scripts):
    """Include another script within the context of the currently
    executing script.

    A given script will only be included once.

    @param scripts: A path or sequence of paths of scripts to include.
    @type scripts: string or sequence of string
    """
    basePath = self.configuration.basePath

    scripts = basePath(scripts)

    include = self._include
    # basestring: Python 2 only -- a single path is included directly,
    # any other iterable is treated as a sequence of paths.
    if isinstance(scripts, basestring):
        include(scripts)
    else:
        for path in scripts:
            include(path)
def _include(self, path):
    """Include another script for execution within this script's context.

    A script will only be included once within a given context.

    @param path: The path of the file to include.
    @type path: string
    """
    path = os.path.normpath(path)
    # De-duplicate on the normalised absolute path; normcase makes the
    # check case-insensitive on case-insensitive file systems.
    normalisedPath = os.path.normcase(self.configuration.abspath(path))
    if normalisedPath in self._included:
        return

    currentScript = Script.getCurrent()
    # The included script shares this script's variant, task and tools so
    # that it executes as if its contents were part of the current script.
    includedScript = Script(
        path=path,
        variant=currentScript.variant,
        engine=currentScript.engine,
        configuration=currentScript.configuration,
        task=currentScript.task,
        tools=currentScript.tools,
        parent=currentScript,
    )
    self._included[normalisedPath] = includedScript
    try:
        includedScript.execute()
    except IOError, e:  # Python 2 'except ..., e' syntax
        # Report the failure together with the chain of including scripts.
        currentScript.engine.raiseError(
            ("Failed to include cake script %s: %s\n" % (path, str(e))) +
            "".join(" from " + s.path + "\n" for s in currentScript.getAncestors()))
def execute(self, scripts, **keywords):
    """Execute another script as a background task.

    Executes the other script using the current script's configuration
    but potentially a different build variant.

    If you need to execute a script using a different configuration
    then use the 'executeNoContext' method instead.

    @param scripts: A path or sequence of paths of scripts to execute.
    @type scripts: string or sequence of string
    @param keywords: Variant keywords, resolved relative to the current
    script's variant.

    @return: A Script object or sequence of Script objects that can be used
    to determine what scripts will be executed. The script's task will
    complete when the script has finished executing.
    @rtype: L{Script} or C{list} of L{Script}
    """
    basePath = self.configuration.basePath

    scripts = basePath(scripts)

    script = Script.getCurrent()
    configuration = script.configuration
    variant = configuration.findVariant(keywords, baseVariant=script.variant)
    execute = configuration.execute

    def _execute(path):
        # Execute one script and hook its default target into ours, so it
        # is built whenever this script's default target is built.
        script = execute(path, variant)
        self.addDefaultTarget(script.getDefaultTarget())
        return ScriptProxy(script)

    # basestring: Python 2 only.
    if isinstance(scripts, basestring):
        return _execute(scripts)
    else:
        return [_execute(path) for path in scripts]
def run(self, func, args=None, targets=None, sources=[]):
    """Execute the specified python function as a task.

    Only executes the function after the sources have been built and only
    if the target exists, args is the same as last run and the sources
    haven't changed.

    @param func: The callable to execute as the task body.
    @param args: Extra values taking part in the up-to-date check; a change
    relative to the previous run forces a rebuild.
    @param targets: Optional paths of the files produced by func.
    @param sources: Targets/paths that must be built before func runs.
    NOTE(review): mutable default argument ([]) -- safe only while it is
    never mutated here.

    @note: I couldn't think of a better class to put this function in so
    for now it's here although it doesn't really belong.
    """
    engine = self.engine
    configuration = self.configuration
    basePath = configuration.basePath

    targets = basePath(targets)
    sources = basePath(sources)

    def _run():
        sourcePaths = getPaths(sources)
        if targets:
            buildArgs = (args, sourcePaths)
            try:
                _, reason = configuration.checkDependencyInfo(
                    targets[0],
                    buildArgs,
                )
                if reason is None:
                    # Up to date
                    return
                engine.logger.outputDebug(
                    "reason",
                    "Building '%s' because '%s'\n" % (targets[0], reason),
                )
            except EnvironmentError:
                # No usable previous dependency info: treat as out of date.
                pass

        try:
            result = func()
        except Exception:
            # Record every declared target as failed before re-raising.
            if targets:
                append = engine.failedTargets.append
                for t in targets:
                    append(t)
            raise

        if targets:
            # Persist dependency info so the next run can skip the task
            # when nothing changed.
            newDependencyInfo = configuration.createDependencyInfo(
                targets=targets,
                args=buildArgs,
                dependencies=sourcePaths,
            )
            configuration.storeDependencyInfo(newDependencyInfo)

        return result

    if self.enabled:
        task = engine.createTask(_run)
        # Only start after all sources have finished building.
        task.lazyStartAfter(getTask(sources))
    else:
        task = None

    currentScript = Script.getCurrent()
    if targets is not None:
        targets = [FileTarget(path=t, task=task) for t in targets]
        currentScript.getDefaultTarget().addTargets(targets)
        return targets
    else:
        target = Target(task)
        currentScript.getDefaultTarget().addTarget(target)
        return target
class ScriptProxy:
    """Proxy class for a Script instance that limits what
    you can do with a script.

    This object is returned from ScriptTool.get() and ScriptTool.execute().
    """

    def __init__(self, script):
        # The wrapped Script instance; kept private to restrict the API.
        self._script = script

    def execute(self):
        """Executing a script is shorthand for adding its default
        targets to your default targets. This means it will only actually
        run if the current script's default target is built.
        """
        Script.getCurrent().getDefaultTarget().addTarget(
            self._script.getDefaultTarget())

    def getTarget(self, name):
        """Get the named target of a script.

        @param name: The name of the target.
        @type name: C{str}
        @return: A target corresponding to the named target defined
        by this script.
        @rtype: L{ScriptTarget}
        """
        return self._script.getTarget(name)

    def getDefaultTarget(self):
        """Get the default target for this script.

        This is the target that will be built if the script is built
        from the command-line without specifying a particular named target.
        @rtype: L{ScriptTarget}
        """
        return self._script.getDefaultTarget()

    def getResult(self, name):
        """Get the result output of a script.

        This will be an AsyncResult value that allows one script to consume
        a programmatic output from another script.
        @rtype: L{ScriptResult}
        """
        return self._script.getResult(name)
| |
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes that build dictionaries.
The "pair" is a sub-structure of the dictionary, representing a key/value pair
that is the child of the dictionary creation.
"""
from nuitka import Constants
from nuitka.PythonVersions import python_version
from .NodeBases import (
ExpressionChildrenHavingBase,
SideEffectsFromChildrenMixin
)
from .NodeMakingHelpers import (
makeConstantReplacementNode,
makeStatementOnlyNodesFromExpressions,
wrapExpressionWithNodeSideEffects,
wrapExpressionWithSideEffects
)
class ExpressionKeyValuePair(SideEffectsFromChildrenMixin,
                             ExpressionChildrenHavingBase):
    """One key/value pair of a dictionary creation expression."""

    kind = "EXPRESSION_KEY_VALUE_PAIR"

    # NOTE(review): the original code chose this tuple through an
    # "if python_version < 350" check whose two branches were byte-for-byte
    # identical, so the version split was dead code and has been collapsed.
    # CPython before 3.5 evaluated the value before the key in dict displays
    # (bpo-11205), so if a version-dependent child order was ever intended
    # here it was lost upstream and must be reintroduced deliberately.
    named_children = (
        "key",
        "value"
    )

    def __init__(self, key, value, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self,
            values     = {
                "key"   : key,
                "value" : value
            },
            source_ref = source_ref
        )

    getKey = ExpressionChildrenHavingBase.childGetter("key")
    getValue = ExpressionChildrenHavingBase.childGetter("value")

    def computeExpression(self, constraint_collection):
        """Propagate exceptions raised by the key or value child.

        Children are visited in named_children order, key first. A raising
        key replaces the pair directly; a raising value is wrapped with the
        key node so the key's side effects are preserved.
        """
        sub_expressions = self.getVisitableNodes()

        if sub_expressions[0].willRaiseException(BaseException):
            child_name = sub_expressions[0].getChildName()

            return sub_expressions[0], "new_raise", "Dictionary %s raises exception" % child_name

        if sub_expressions[1].willRaiseException(BaseException):
            child_name = sub_expressions[1].getChildName()

            result = wrapExpressionWithNodeSideEffects(
                new_node = sub_expressions[1],
                old_node = sub_expressions[0]
            )

            return result, "new_raise", "Dictionary %s raises exception" % child_name

        return self, None, None

    def mayRaiseException(self, exception_type):
        # The pair raises if computing the key raises, the key is not known
        # to be hashable (dict insertion would then raise), or computing
        # the value raises.
        key = self.getKey()

        return key.mayRaiseException(exception_type) or \
               key.isKnownToBeHashable() is not True or \
               self.getValue().mayRaiseException(exception_type)
class ExpressionMakeDict(SideEffectsFromChildrenMixin,
                         ExpressionChildrenHavingBase):
    """Dictionary creation node, built from key/value "pairs" children."""
    kind = "EXPRESSION_MAKE_DICT"
    named_children = (
        "pairs",
    )
    def __init__(self, pairs, lazy_order, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self,
            values = {
                "pairs" : tuple(pairs),
            },
            source_ref = source_ref
        )
        # lazy_order: forwarded to Constants.createConstantDict; presumably
        # controls whether key order must be preserved — TODO confirm.
        self.lazy_order = lazy_order
    def getDetails(self):
        return {
            "lazy_order" : self.lazy_order
        }
    getPairs = ExpressionChildrenHavingBase.childGetter("pairs")
    def computeExpression(self, constraint_collection):
        """Optimize the dict creation.

        Replaces the node with a raise (keeping earlier pairs as side
        effects) when a pair must raise, or with a constant dict when
        all keys and values are hashable constants.
        """
        pairs = self.getPairs()
        for count, pair in enumerate(pairs):
            if pair.willRaiseException(BaseException):
                # Later elements have no side effects after the element that
                # raised the exception.
                result = wrapExpressionWithSideEffects(
                    side_effects = pairs[:count],
                    new_node = pair,
                    old_node = self
                )
                return result, "new_raise", "Dict creation raises exception"
        for pair in pairs:
            key = pair.getKey()
            # TODO: Mutable key should cause an exception raise to be produced.
            if not key.isExpressionConstantRef() or not key.isKnownToBeHashable():
                return self, None, None
            value = pair.getValue()
            if not value.isExpressionConstantRef():
                return self, None, None
        # Every key and value is a hashable constant: fold the whole
        # creation into one constant dictionary node.
        constant_value = Constants.createConstantDict(
            keys = [
                pair.getKey().getConstant()
                for pair in
                pairs
            ],
            values = [
                pair.getValue().getConstant()
                for pair in
                pairs
            ],
            lazy_order = self.lazy_order
        )
        new_node = makeConstantReplacementNode(
            constant = constant_value,
            node = self
        )
        return new_node, "new_constant", """\
Created dictionary found to be constant."""
    def mayRaiseException(self, exception_type):
        # Raises iff any pair may raise (including unhashable keys).
        for pair in self.getPairs():
            if pair.mayRaiseException(exception_type):
                return True
        return False
    def mayHaveSideEffectsBool(self):
        # Truth-testing a freshly created dict cannot have side effects.
        return False
    def isKnownToBeIterable(self, count):
        return count is None or count == len(self.getPairs())
    def getIterationLength(self):
        return len(self.getPairs())
    def canPredictIterationValues(self):
        # Dictionaries are fully predictable, pylint: disable=R0201
        return True
    def getIterationValue(self, count):
        # Iterating a dictionary yields its keys.
        return self.getPairs()[ count ].getKey()
    def getTruthValue(self):
        return self.getIterationLength() > 0
    def mayBeNone(self):
        return False
    def isMapping(self):
        # Dictionaries are always mappings, but this is a virtual method,
        # pylint: disable=R0201
        return True
    def isMappingWithConstantStringKeys(self):
        for pair in self.getPairs():
            key = pair.getKey()
            if not key.isExpressionConstantRef() or not key.isStringConstant():
                return False
        return True
    def getMappingStringKeyPairs(self):
        # Only meaningful when isMappingWithConstantStringKeys() is True.
        return [
            (
                pair.getKey().getConstant(),
                pair.getValue()
            )
            for pair in
            self.getPairs()
        ]
    def getMappingPairs(self):
        return self.getPairs()
    # TODO: Missing computeExpressionIter1 here. For now it would require us to
    # add lots of temporary variables for keys, which then becomes the tuple,
    # but for as long as we don't have efficient forward propagation of these,
    # we won't do that. Otherwise we loose execution order of values with them
    # remaining as side effects. We could limit ourselves to cases where
    # isMappingWithConstantStringKeys is true, or keys had no side effects, but
    # that feels wasted effort as we are going to have full propagation.
    def computeExpressionDrop(self, statement, constraint_collection):
        # Dict whose result is unused: only the pairs' side effects remain.
        expressions = []
        for pair in self.getPairs():
            expressions.extend(pair.extractSideEffects())
        result = makeStatementOnlyNodesFromExpressions(
            expressions = expressions
        )
        # NOTE(review): message says "sequence" although this is a dict
        # creation — wording only, behavior unaffected.
        return result, "new_statements", """\
Removed sequence creation for unused sequence."""
    def computeExpressionIter1(self, iter_node, constraint_collection):
        return self, None, None
        # TODO: This ought to be possible. Only difficulty is to
        # preserve order of evaluation, by making values a side
        # effect of the keys.
        # return iter_node, "new_expression", """\
        # Iteration over dict reduced to tuple."""
| |
import sys
import itertools
import logging
import re
import copy
import collections
from column import Column, ColumnName, AmbiguousColumnNameError
from expression import BooleanExpression
def dedupe_with_order(dupes):
    """Return a copy of *dupes* with duplicates removed, first-seen order kept."""
    seen = set()
    unique = []
    for item in dupes:
        if item in seen:
            continue
        seen.add(item)
        unique.append(item)
    return unique
class Table(object):
    """Translate abstract data-manipulation operations to commands that perform them.

    A Table is a virtual representation of data. Operations on Tables are accumulated and
    optimized, but a Table cannot execute its own commands. In order to retrieve the data
    represented by a Table, a second party must execute the Table's commands.
    """
    VALID_IDENTIFIER_REGEX = '^[a-zA-Z_][a-zA-Z0-9_.]*$'
    LOG = logging.getLogger(__name__)

    def __str__(self):
        return self.name

    def __init__(self,
                 name, delimiter=',', cmd=None, columns=None, offset=None, alias=None):
        """
        :param name: file path ('-' for stdin) or logical name of the table
        :param delimiter: column delimiter; defaults to ','
        :param cmd: optional initial command that materializes this table
        :param columns: list of Column objects (or raw names) on this table
        :param offset: number of header lines to skip when reading the file
        :param alias: alternative name used to qualify column references
        """
        self.name = name
        self.delimiter = delimiter
        # Fix: identity comparison with None (PEP 8), not equality.
        self.cmds = [] if cmd is None else [cmd]
        self.columns = columns
        self.offset = offset
        self.alias = alias
        self.sorted_by = []
        self.outfile_name = "{0}.out".format(name)

    @property
    def column_idxs(self):
        """Dictionary of column index lists keyed by Column."""
        return self._compute_column_indices()

    @property
    def column_name_idxs(self):
        """Dictionary of column index lists keyed by ColumnName."""
        return self._compute_column_name_indices()

    def _compute_column_indices(self):
        """Return a dictionary of column index lists keyed by Column."""
        idxs = {}
        for i, c in enumerate(self.columns):
            try:
                idxs[c].append(i)
            except KeyError:
                idxs[c] = [i]
        self.LOG.debug('{0} computed column indices {1}'.format(self, idxs))
        return idxs

    def _compute_column_name_indices(self):
        """Return a dictionary of column index lists keyed by ColumnName."""
        idxs = {}
        for i, c in enumerate(self.columns):
            for column_name in c.names:
                try:
                    idxs[column_name].append(i)
                except KeyError:
                    idxs[column_name] = [i]
        self.LOG.debug('{0} computed column name indices {1}'.format(self, idxs))
        return idxs

    @classmethod
    def from_file_path(cls, file_path, columns=None, delimiter=',', alias=None):
        """Given the path to a file, return an instance of a Table representing that file.

        :param file_path: a string containing the path to the file
        :param columns: an exhaustive list of column names or Column objects on this table
        :param delimiter: the column delimiter for this table; defaults to ','
        :param alias: alternative name for qualifying columns; defaults to file_path
        """
        if file_path == '-':
            # '-' means read from stdin; header comes from the stream.
            columns = columns or cls._parse_column_names(sys.stdin, delimiter)
        else:
            with open(file_path) as f:
                columns = columns or cls._parse_column_names(f, delimiter)
        alias = alias or file_path
        column_qualifiers = [file_path.lower(), alias.lower()]
        for idx, col in enumerate(columns):
            if not isinstance(col, Column):
                columns[idx] = Column(col, qualifiers=column_qualifiers)
        # offset=1 skips the header line when the file is read.
        return cls(file_path, delimiter, None, columns, 1, alias)

    @classmethod
    def from_cmd(cls, name, cmd, columns, delimiter=','):
        """Given a command, instantiate a Table representing the output of that command.

        :param name: the name of the table
        :param cmd: a string of commands whose execution materializes this table
        :param columns: an exhaustive list of column names or Column objects on this table
        :param delimiter: the column delimiter for this table; defaults to ','
        """
        column_qualifiers = [name.lower()]
        for idx, col in enumerate(columns):
            if not isinstance(col, Column):
                columns[idx] = Column(col, qualifiers=column_qualifiers)
        return cls(name, delimiter, cmd, columns)

    @staticmethod
    def _parse_column_names(table_file, delimiter):
        """Return a list of column headers found in the first line of a file."""
        head = table_file.readline().rstrip()
        return head.split(delimiter)

    def order_columns(self, column_names_in_order, drop_other_columns=False):
        """Rearrange and subset the columns of this Table.

        Appends an awk reordering command unless the requested order is
        already in effect.
        """
        columns_in_order = [self.get_column_for_name(n) for n in column_names_in_order]
        if (columns_in_order == self.columns) or (
                columns_in_order == self.columns[0:len(columns_in_order)] and not drop_other_columns):
            self.LOG.debug('Columns already in order {0}'.format(self.columns))
            return
        self.LOG.debug('Current column order of {0} is {1}'.format(self.name, self.columns))
        self.LOG.debug('Reordering {0} columns to {1}'.format(self.name, columns_in_order))
        reordered_col_idxs = [self.column_idxs[col][0] for col in columns_in_order]
        unchanged_col_idxs = [
            self.column_idxs[col][0] for col in self.columns
            if not any([c.match(col) for c in columns_in_order])]
        col_idxs = reordered_col_idxs
        if not drop_other_columns:
            col_idxs += unchanged_col_idxs
        # awk fields are 1-based, hence idx + 1.
        reorder_cmd = "awk -F'{0}' 'OFS=\"{0}\" {{ print {1} }}'".format(
            self.delimiter, ','.join('$' + str(idx + 1) for idx in col_idxs))
        # reorder and re-alias the Columns on this Table
        self.columns = [copy.deepcopy(self.columns[idx]) for idx in col_idxs]
        for column, alias in zip(self.columns, column_names_in_order):
            column.alias = alias
        self.cmds.append(reorder_cmd)

    def is_sorted_by(self, sort_order_indices):
        """Return true if this Table's rows are sorted by columns at the given indices."""
        if len(self.sorted_by) < len(sort_order_indices):
            return False
        for sort_idx, column_idx in enumerate(sort_order_indices):
            if not (self.sorted_by[sort_idx].match(self.columns[column_idx])):
                return False
        return True

    def sort(self, sort_by):
        """Sort the rows of this Table by the given columns or column names."""
        deduped_sort_by = dedupe_with_order(sort_by)
        columns_to_sort_by = [
            self.get_column_for_name(c) if isinstance(c, ColumnName) else c
            for c in deduped_sort_by
        ]
        # if this table is already sorted by the requested sort order, do nothing
        if columns_to_sort_by == self.sorted_by[0:len(columns_to_sort_by)]:
            return None
        self.LOG.debug('Sorting {0} by {1}'.format(self.name, columns_to_sort_by))
        column_idxs_to_sort_by = [self.column_idxs[col][0] for col in columns_to_sort_by]
        # sort(1) keys are 1-based "-k start,end" pairs.
        sort_key_params = ' -k '.join(
            ','.join([str(idx + 1), str(idx + 1)]) for idx in column_idxs_to_sort_by)
        sort_cmd = 'sort -t{0} -k {1}'.format(self.delimiter, sort_key_params)
        self.sorted_by = columns_to_sort_by
        self.cmds.append(sort_cmd)

    def subset_rows(self, conditions):
        """Subset the rows of this Table to rows that satisfy the given conditions."""
        conditions_list = [
            cond.args_with_operator() if isinstance(cond, BooleanExpression) else cond
            for cond in conditions
        ]
        # interleave 'and' between the conditions, dropping the trailing one
        and_conditions_list = [
            i for pair in zip(conditions_list, ['and'] * len(conditions_list)) for i in pair
        ][:-1]
        condition_str = self.get_awk_statement(and_conditions_list)
        if not condition_str:
            self.LOG.debug('Empty condition string so not subsetting columns on {0}'.format(
                self.name))
            return
        columns = ','.join(['$' + str(self.column_idxs[c][0] + 1) for c in self.columns])
        awk_cmd = "awk -F'{0}' 'OFS=\"{0}\" {{ if ({1}) {{ print {2} }} }}'".format(
            self.delimiter, condition_str, columns)
        self.cmds.append(awk_cmd)

    def get_awk_statement(self, conditions):
        """Given a list of 'and', 'or', Expressions, and nested lists of the same, return the
        equivalent conditional Awk string.
        """
        operator_map = {'or': '||', 'and': '&&'}
        string_parts = []
        for term in conditions:
            # NOTE: basestring / collections.Iterable mark this module as
            # Python 2; kept for compatibility with the rest of the file.
            if isinstance(term, basestring):
                string_parts.append(operator_map[term])
            elif isinstance(term, collections.Iterable):
                # nested condition group: parenthesize recursively
                string_parts.append('(' + self.get_awk_statement(term) + ')')
            else:
                expr_parts = []
                for operand in (term.left_operand, term.right_operand, ):
                    if isinstance(operand, ColumnName):
                        # translate a column reference to its awk field
                        ordinal = self.column_idxs[self.get_column_for_name(operand)][0] + 1
                        expr_parts.append('$' + str(ordinal))
                    else:
                        expr_parts.append(operand)
                string_parts.append(
                    ' '.join((str(expr_parts[0]), term.operator, str(expr_parts[1]), ))
                )
        return ' '.join(string_parts)

    def get_cmd_str(self, output_column_names=False):
        """Return a string of commands whose output is the contents of this Table.

        :param output_column_names: when True, prefix an echo of the header row
        """
        cmds = self.cmds
        if self.name == '-':
            data_path = ''
        else:
            data_path = self.name
        if self.offset:
            # skip the header line(s) before any accumulated commands
            cmds = ['tail -n+{0} {1}'.format(self.offset + 1, data_path)] + cmds
        cmd_str = ' | '.join(cmds)
        # write column names
        if output_column_names:
            cmd_str = 'echo "{0}"; '.format(
                ','.join([str(col) for col in self.columns])
            ) + cmd_str
        return cmd_str

    def set_column_aliases(self, column_names):
        """Re-alias this Table's columns, pairwise with column_names."""
        for col, col_name in zip(self.columns, column_names):
            col.alias = col_name

    def get_column_for_name(self, column_name):
        """Return the unique Column on this table that matches the given ColumnName.

        If more than one Column on this table matches the given ColumnName, raise an
        AmbiguousColumnNameError. Returns None when nothing matches.
        """
        matched_columns = []
        for table_column in self.columns:
            if column_name.match(*table_column.names):
                matched_columns.append(table_column)
        if len(matched_columns) == 0:
            return None
        elif len(matched_columns) > 1:
            raise AmbiguousColumnNameError(column_name, matched_columns)
        else:
            return matched_columns[0]

    def sample_rows(self, sample_size, random_seed=None):
        """Append a reservoir-sampling awk command keeping sample_size rows.

        :param random_seed: seed for awk's srand(); defaults to shell $RANDOM
        """
        sample_cmd = """awk -v seed={0} -v n={1} '
        BEGIN {{ srand(seed) }}
        NR <= n {{ reservoir[NR] = $0 }}
        NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}}
        END {{ for (key in reservoir) {{ print reservoir[key] }}}}'""".format(
            random_seed if random_seed is not None else '$RANDOM',
            sample_size
        )
        self.cmds.append(sample_cmd)
| |
import os
import argparse
import glob
from PIL import Image
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
import torch
import torch.nn as nn
import torch.optim as optim
# Command-line options for the toy CNF (continuous normalizing flow) demo.
parser = argparse.ArgumentParser()
parser.add_argument('--adjoint', action='store_true')
parser.add_argument('--viz', action='store_true')
parser.add_argument('--niters', type=int, default=1000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--num_samples', type=int, default=512)
parser.add_argument('--width', type=int, default=64)
parser.add_argument('--hidden_dim', type=int, default=32)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--train_dir', type=str, default=None)
parser.add_argument('--results_dir', type=str, default="./results")
args = parser.parse_args()

# Choose the ODE solver: the adjoint method trades compute for O(1) memory
# in the backward pass.
if args.adjoint:
    from torchdiffeq import odeint_adjoint as odeint
else:
    from torchdiffeq import odeint
class CNF(nn.Module):
    """Continuous normalizing flow dynamics f(z(t), t).

    Adapted from the NumPy implementation at:
    https://gist.github.com/rtqichen/91924063aa4cc95e7ef30b3a5491cc52
    """

    def __init__(self, in_out_dim, hidden_dim, width):
        super().__init__()
        self.in_out_dim = in_out_dim
        self.hidden_dim = hidden_dim
        self.width = width
        self.hyper_net = HyperNetwork(in_out_dim, hidden_dim, width)

    def forward(self, t, states):
        # states = (z, logp_z); both evolve jointly under the ODE.
        z, logp_z = states[0], states[1]
        n_samples = z.shape[0]

        # Gradients are needed even inside torch.no_grad() contexts,
        # because the density change requires autograd through f.
        with torch.set_grad_enabled(True):
            z.requires_grad_(True)

            weights, biases, gates = self.hyper_net(t)

            stacked = torch.unsqueeze(z, 0).repeat(self.width, 1, 1)
            hidden = torch.tanh(torch.matmul(stacked, weights) + biases)
            dz_dt = torch.matmul(hidden, gates).mean(0)

            # Instantaneous change of variables: d log p / dt = -tr(df/dz).
            dlogp_z_dt = -trace_df_dz(dz_dt, z).view(n_samples, 1)

        return (dz_dt, dlogp_z_dt)
def trace_df_dz(f, z):
    """Calculates the trace of the Jacobian df/dz via autograd.

    Stolen from: https://github.com/rtqichen/ffjord/blob/master/lib/layers/odefunc.py#L13
    """
    trace = 0.
    for dim in range(z.shape[1]):
        # d f[:, dim] / d z, keeping the graph for higher-order gradients
        grads = torch.autograd.grad(f[:, dim].sum(), z, create_graph=True)[0]
        trace += grads.contiguous()[:, dim].contiguous()
    return trace.contiguous()
class HyperNetwork(nn.Module):
    """Hyper-network allowing f(z(t), t) to change with time.

    Adapted from the NumPy implementation at:
    https://gist.github.com/rtqichen/91924063aa4cc95e7ef30b3a5491cc52
    """

    def __init__(self, in_out_dim, hidden_dim, width):
        super().__init__()
        blocksize = width * in_out_dim
        self.fc1 = nn.Linear(1, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        # one flat output holding W, U, G blocks plus the biases
        self.fc3 = nn.Linear(hidden_dim, 3 * blocksize + width)
        self.in_out_dim = in_out_dim
        self.hidden_dim = hidden_dim
        self.width = width
        self.blocksize = blocksize

    def forward(self, t):
        # Map the scalar time to one flat parameter vector.
        hidden = torch.tanh(self.fc1(t.reshape(1, 1)))
        hidden = torch.tanh(self.fc2(hidden))
        flat = self.fc3(hidden).reshape(-1)

        # Slice the flat vector into per-unit weight blocks.
        size = self.blocksize
        W = flat[:size].reshape(self.width, self.in_out_dim, 1)
        U = flat[size:2 * size].reshape(self.width, 1, self.in_out_dim)
        G = flat[2 * size:3 * size].reshape(self.width, 1, self.in_out_dim)
        B = flat[3 * size:].reshape(self.width, 1, 1)

        # Gate U with a sigmoid of G before returning.
        return [W, B, U * torch.sigmoid(G)]
class RunningAverageMeter(object):
    """Exponential moving average of a scalar, plus its latest value."""

    def __init__(self, momentum=0.99):
        # momentum: weight of history in the moving average
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all history."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold a new observation into the running average."""
        if self.val is None:
            # first observation seeds the average directly
            self.avg = val
        else:
            self.avg = self.avg * self.momentum + val * (1 - self.momentum)
        self.val = val
def get_batch(num_samples):
    """Sample a batch of 2-D points from two concentric circles.

    Returns (x, logp_diff_t1): the points and a zero-initialized
    log-density-change column, both float32 on the module-level device.
    """
    samples, _ = make_circles(n_samples=num_samples, noise=0.06, factor=0.5)
    x = torch.tensor(samples).type(torch.float32).to(device)
    logp_diff_t1 = torch.zeros(num_samples, 1).type(torch.float32).to(device)
    return (x, logp_diff_t1)
if __name__ == '__main__':
    # Integration interval of the ODE: t0 (data side) .. t1 (base side).
    t0 = 0
    t1 = 10
    device = torch.device('cuda:' + str(args.gpu)
                          if torch.cuda.is_available() else 'cpu')

    # model
    func = CNF(in_out_dim=2, hidden_dim=args.hidden_dim, width=args.width).to(device)
    optimizer = optim.Adam(func.parameters(), lr=args.lr)
    # Base distribution p(z(t1)): a tight isotropic 2-D Gaussian.
    p_z0 = torch.distributions.MultivariateNormal(
        loc=torch.tensor([0.0, 0.0]).to(device),
        covariance_matrix=torch.tensor([[0.1, 0.0], [0.0, 0.1]]).to(device)
    )
    loss_meter = RunningAverageMeter()

    # Optionally resume training from a checkpoint in --train_dir.
    if args.train_dir is not None:
        if not os.path.exists(args.train_dir):
            os.makedirs(args.train_dir)
        ckpt_path = os.path.join(args.train_dir, 'ckpt.pth')
        if os.path.exists(ckpt_path):
            checkpoint = torch.load(ckpt_path)
            func.load_state_dict(checkpoint['func_state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            print('Loaded ckpt from {}'.format(ckpt_path))

    try:
        for itr in range(1, args.niters + 1):
            optimizer.zero_grad()

            x, logp_diff_t1 = get_batch(args.num_samples)

            # Integrate backwards (t1 -> t0) so data maps to the base density.
            z_t, logp_diff_t = odeint(
                func,
                (x, logp_diff_t1),
                torch.tensor([t1, t0]).type(torch.float32).to(device),
                atol=1e-5,
                rtol=1e-5,
                method='dopri5',
            )

            z_t0, logp_diff_t0 = z_t[-1], logp_diff_t[-1]

            # Change-of-variables log-likelihood; minimize its negative mean.
            logp_x = p_z0.log_prob(z_t0).to(device) - logp_diff_t0.view(-1)
            loss = -logp_x.mean(0)

            loss.backward()
            optimizer.step()

            loss_meter.update(loss.item())

            print('Iter: {}, running avg loss: {:.4f}'.format(itr, loss_meter.avg))

    except KeyboardInterrupt:
        # Ctrl-C saves a checkpoint instead of losing progress.
        if args.train_dir is not None:
            ckpt_path = os.path.join(args.train_dir, 'ckpt.pth')
            torch.save({
                'func_state_dict': func.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, ckpt_path)
            print('Stored ckpt at {}'.format(ckpt_path))
    print('Training complete after {} iters.'.format(itr))

    if args.viz:
        viz_samples = 30000
        viz_timesteps = 41
        target_sample, _ = get_batch(viz_samples)

        if not os.path.exists(args.results_dir):
            os.makedirs(args.results_dir)
        with torch.no_grad():
            # Generate evolution of samples
            z_t0 = p_z0.sample([viz_samples]).to(device)
            logp_diff_t0 = torch.zeros(viz_samples, 1).type(torch.float32).to(device)

            # Forward integration (t0 -> t1) from the base distribution.
            z_t_samples, _ = odeint(
                func,
                (z_t0, logp_diff_t0),
                torch.tensor(np.linspace(t0, t1, viz_timesteps)).to(device),
                atol=1e-5,
                rtol=1e-5,
                method='dopri5',
            )

            # Generate evolution of density
            x = np.linspace(-1.5, 1.5, 100)
            y = np.linspace(-1.5, 1.5, 100)
            points = np.vstack(np.meshgrid(x, y)).reshape([2, -1]).T

            z_t1 = torch.tensor(points).type(torch.float32).to(device)
            logp_diff_t1 = torch.zeros(z_t1.shape[0], 1).type(torch.float32).to(device)

            # Backward integration of the grid to evaluate its density.
            z_t_density, logp_diff_t = odeint(
                func,
                (z_t1, logp_diff_t1),
                torch.tensor(np.linspace(t1, t0, viz_timesteps)).to(device),
                atol=1e-5,
                rtol=1e-5,
                method='dopri5',
            )

            # Create plots for each timestep
            for (t, z_sample, z_density, logp_diff) in zip(
                    np.linspace(t0, t1, viz_timesteps),
                    z_t_samples, z_t_density, logp_diff_t
            ):
                fig = plt.figure(figsize=(12, 4), dpi=200)
                plt.tight_layout()
                plt.axis('off')
                plt.margins(0, 0)
                fig.suptitle(f'{t:.2f}s')

                ax1 = fig.add_subplot(1, 3, 1)
                ax1.set_title('Target')
                ax1.get_xaxis().set_ticks([])
                ax1.get_yaxis().set_ticks([])
                ax2 = fig.add_subplot(1, 3, 2)
                ax2.set_title('Samples')
                ax2.get_xaxis().set_ticks([])
                ax2.get_yaxis().set_ticks([])
                ax3 = fig.add_subplot(1, 3, 3)
                ax3.set_title('Log Probability')
                ax3.get_xaxis().set_ticks([])
                ax3.get_yaxis().set_ticks([])

                ax1.hist2d(*target_sample.detach().cpu().numpy().T, bins=300, density=True,
                           range=[[-1.5, 1.5], [-1.5, 1.5]])

                ax2.hist2d(*z_sample.detach().cpu().numpy().T, bins=300, density=True,
                           range=[[-1.5, 1.5], [-1.5, 1.5]])

                # Grid log-density: base log-prob minus accumulated change.
                logp = p_z0.log_prob(z_density) - logp_diff.view(-1)
                ax3.tricontourf(*z_t1.detach().cpu().numpy().T,
                                np.exp(logp.detach().cpu().numpy()), 200)

                plt.savefig(os.path.join(args.results_dir, f"cnf-viz-{int(t*1000):05d}.jpg"),
                            pad_inches=0.2, bbox_inches='tight')
                plt.close()

            # Stitch the per-timestep frames into one animated GIF.
            img, *imgs = [Image.open(f) for f in sorted(glob.glob(os.path.join(args.results_dir, f"cnf-viz-*.jpg")))]
            img.save(fp=os.path.join(args.results_dir, "cnf-viz.gif"), format='GIF', append_images=imgs,
                     save_all=True, duration=250, loop=0)

        print('Saved visualization animation at {}'.format(os.path.join(args.results_dir, "cnf-viz.gif")))
| |
import re
class ChildSetupVerify:
    """hold onto method to verify ChildSubset or SingleChild setup
    arguments
    """

    @staticmethod
    def _verify_identifier_args(identifier):
        """Validate the identifier dict used to configure child matching.

        tag and tag_regex must map to non-empty strings, attrib_regex to
        a non-empty dict; tag_regex and attrib_regex entries are later
        used for regex matching. Raises KeyError for missing/unknown/
        conflicting keys, ValueError for bad values.
        """
        expected = {'tag': str, 'tag_regex': str, 'attrib_regex': dict}
        supplied = set(identifier.keys())
        if not supplied:
            raise KeyError('Expected either tag/tag_regex and/or attrib_regex')
        extras = supplied - set(expected.keys())
        if extras:
            raise KeyError('Unexpected keys found:' + str(extras))
        if {'tag', 'tag_regex'} <= supplied:
            raise KeyError('Cannot specify both tag and tag_regex matching')
        for key in supplied:
            required_type = expected[key]
            value = identifier[key]
            if not value or not isinstance(value, required_type):
                raise ValueError(
                    str(key) + ' should be non-empty and have value of type ' +
                    str(required_type)
                )
class ChildSubsetSimplified(ChildSetupVerify):
    """Provide simplified access to specific child elements through
    regex matching of descriptors such as tag, attributes, or a
    combination thereof.

    To match a tag (or tags), pass a regular expression string that
    fully matches the desired tag(s), e.g. 'node|cloud'. To match a set
    of attributes, pass a dictionary containing regexes to fully match
    the key(s) and value(s) of the element's attributes. For example:
    {'TEXT': '.*'} matches any element with a 'TEXT' attribute.
    {'.*': '.*flag.*'} matches any element with 'flag' in its value.
    {'COLOR': '.*'} matches anything with a 'COLOR' attribute.
    Any number of tag and attribute regexes may be combined; all
    descriptors must fully match for an element to qualify as part of
    this subset. Most useful for allowing access to child nodes.
    Provides slicing, removal, and appending.

    :param elementInstance: the linked element whose children will be
        available through this accessor
    :param identifier: tag / tag_regex / attrib_regex keyword arguments
        describing which children belong to this subset
    """
    def __init__(self, elementInstance, **identifier):
        # Raises KeyError/ValueError if identifier is malformed.
        self._verify_identifier_args(identifier)
        self.TAG = identifier.get('tag', None)
        self.TAG_REGEX = identifier.get('tag_regex', None)
        self.ATTRIB_REGEX = identifier.get('attrib_regex', {})
        self.parent = elementInstance
    @classmethod
    def setup(cls, **identifier):
        """Return getter and setter methods for self, such that returned
        functions can be used in defining a property of an element

        NOTE(review): a single subset instance is shared by every object
        the property is accessed on; `parent` is rebound on each access.
        """
        self = cls(None, **identifier)
        def getter(parent):
            self.parent = parent
            return self
        def setter(parent, iterable):
            self.parent = parent
            self[:] = iterable
        return getter, setter
    def append(self, element):
        # Appended element only shows up in this subset if it matches.
        self.parent.children.append(element)
    def remove(self, element):
        self.parent.children.remove(element)
    def __len__(self):
        return len(self[:])
    def __getitem__(self, index):
        if isinstance(index, int):  # speed shortcut
            for i, elem in enumerate(self):
                if i == index:
                    return elem
            raise IndexError('list index out of range')
        # slice index: materialize the matching children, then slice
        elements = [e for e in self]
        return elements[index]
    def __iter__(self):
        """Iterate through _parent's children, yielding children when
        they match tag/tag_regex and/or attrib_regex
        """
        for elem in self.parent.children:
            if self._element_matches(elem):
                yield elem
    def _element_matches(self, elem):
        """return true if element matches all identifier criteria,
        which can include tag, tag_regex, and attrib_regex
        """
        # full (not partial) regex match of one attribute key/value pair
        matches = lambda x, y, rx, ry: \
            re.fullmatch(rx, x) and re.fullmatch(ry, y)
        if self.TAG:
            if self.TAG != elem.tag:
                return False
        if self.TAG_REGEX:
            if not re.fullmatch(self.TAG_REGEX, elem.tag):
                return False
        for regK, regV in self.ATTRIB_REGEX.items():
            # each regex pair must match at least one attribute
            matching_attrib = [
                key for key, val in elem.attrib.items() \
                if matches(key, val, regK, regV)
            ]
            if not matching_attrib:
                return False
        return True
    def __setitem__(self, index, elem):
        """remove element(s), then re-appends after modification.
        Sloppy, but it works, and elements are reordered later anyways.
        What really matters is that the order of elements of the same
        tag are not altered. Note that this is very inefficient because
        the list is reconstructed each time a set-operation is applied
        """
        if isinstance(index, int):
            # single index: replace the matching child in place
            e = self[index]
            i = self.parent.children.index(e)
            self.parent.children[i] = elem
            return
        # slice: pull matching children out, apply the slice assignment
        # to that list, then re-append the results to the parent.
        subchildren = list(self)
        for element in subchildren:
            self.parent.children.remove(element)
        subchildren[index] = elem
        for element in subchildren:
            self.parent.children.append(element)
    def __delitem__(self, index):
        if isinstance(index, int):
            element = self[index]
            i = self.parent.children.index(element)
            del self.parent.children[i]
        elif isinstance(index, slice):
            # map subset positions back to indices in parent.children
            indices = []
            for element in self[index]:
                i = self.parent.children.index(element)
                indices.append(i)
            indices.sort()
            # delete indices from largest index to smallest
            for i in reversed(indices):
                del self.parent.children[i]
class ChildSubsetCompare:
    """implement methods for comparing lists"""

    def _assert_other_is_comparable(self, other):
        # Only another ChildSubsetSimplified or a plain list qualifies.
        if not isinstance(other, (ChildSubsetSimplified, list)):
            raise TypeError(
                'cannot compare: ' + str(type(self)) + str(type(other))
            )

    def _as_lists(self, other):
        """Validate *other* and return both sides materialized as lists."""
        self._assert_other_is_comparable(other)
        return list(self), list(other)

    def __lt__(self, other):
        mine, theirs = self._as_lists(other)
        return mine < theirs

    def __gt__(self, other):
        mine, theirs = self._as_lists(other)
        return mine > theirs

    def __le__(self, other):
        mine, theirs = self._as_lists(other)
        return mine <= theirs

    def __ge__(self, other):
        mine, theirs = self._as_lists(other)
        return mine >= theirs

    def __eq__(self, other):
        mine, theirs = self._as_lists(other)
        return mine == theirs

    def __ne__(self, other):
        mine, theirs = self._as_lists(other)
        return mine != theirs
class ChildSubset(ChildSubsetSimplified, ChildSubsetCompare):
    """Indexable, comparable view over the matching subset of an
    element's children: adds pop/extend on top of the simplified
    subset, plus list-style comparisons.

    :param element: the linked element whose children will be available
    through ElementAccessor
    :param descriptor: the list of specific descriptor of elements to
    group and provide access to.
    """

    def pop(self, index=-1):
        """Remove and return element in children list"""
        victim = list(self).pop(index)
        self.parent.children.remove(victim)
        return victim

    def extend(self, elements):
        """Append each of *elements* to the parent's children."""
        self.parent.children.extend(elements)
class SingleChild(ChildSetupVerify):
    """Provide get/set/del access to a single matching child within a
    parent element's children. It does not store the child itself:
    `setup` returns closures intended to be passed to property() (see
    Node.cloud for an example). The identifier (tag_regex and/or
    attrib_regex, in the same fashion as ChildSubset) selects which
    child the property addresses.
    """

    @classmethod
    def setup(cls, **identifier):
        cls._verify_identifier_args(identifier)

        def getter(parent):
            return parent.find(**identifier)

        def deleter(parent):
            found = parent.find(**identifier)
            if found is not None:
                parent.children.remove(found)

        def setter(parent, child):
            """replace or remove child. If child passed is None, the
            first matching child is deleted. Otherwise the matching
            child is replaced in place, or the new child is appended
            when no match exists.
            """
            if child is None:
                deleter(parent)
                return
            existing = parent.find(**identifier)
            if existing is None:
                parent.children.append(child)
                return
            parent.children[parent.children.index(existing)] = child

        return getter, setter, deleter
class SingleAttrib:
    """Build get/set/del functions for one entry of an element's attrib.

    e.g. Node defines text = property(*SingleAttrib.setup('TEXT', ''))
    to expose element.attrib['TEXT'], falling back to '' when absent and
    resetting to '' on delete. Here attrib_name = 'TEXT' and
    default_value = ''.
    """

    @staticmethod
    def setup(attrib_name, default_value):
        def read(element):
            return element.attrib.get(attrib_name, default_value)

        def write(element, value):
            element.attrib[attrib_name] = value

        def reset(element):
            # deletion restores the default rather than removing the key
            element.attrib[attrib_name] = default_value

        return read, write, reset
class Link:
    """link for a node. Reads and writes attrib['LINK'] on the attached
    node; assigning another node stores that node's attrib['ID'].
    """

    @staticmethod
    def setup(ElementClass):
        def read(parent):
            return parent.attrib.get('LINK')

        def write(parent, url):
            # linking a node object stores its ID rather than the object
            target = url.attrib.get('ID') if isinstance(url, ElementClass) else url
            parent.attrib['LINK'] = target

        def drop(parent):
            # write first so the key exists, then remove it — delete
            # therefore never raises KeyError for a missing link
            parent.attrib['LINK'] = None
            del parent.attrib['LINK']

        return read, write, drop
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import random
import numpy as np
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.utils.annotations import DeveloperAPI
@DeveloperAPI
class MultiAgentEpisode(object):
    """Tracks the current state of a (possibly multi-agent) episode.

    Attributes:
        new_batch_builder (func): Create a new MultiAgentSampleBatchBuilder.
        add_extra_batch (func): Return a built MultiAgentBatch to the sampler.
        batch_builder (obj): Batch builder for the current episode.
        total_reward (float): Summed reward across all agents in this episode.
        length (int): Length of this episode.
        episode_id (int): Unique id identifying this trajectory.
        agent_rewards (dict): Summed rewards broken down by agent.
        custom_metrics (dict): Dict where the you can add custom metrics.
        user_data (dict): Dict that you can use for temporary storage.

    Use case 1: Model-based rollouts in multi-agent:
        A custom compute_actions() function in a policy can inspect the
        current episode state and perform a number of rollouts based on the
        policies and state of other agents in the environment.

    Use case 2: Returning extra rollouts data.
        The model rollouts can be returned back to the sampler by calling:

        >>> batch = episode.new_batch_builder()
        >>> for each transition:
                batch.add_values(...)  # see sampler for usage
        >>> episode.extra_batches.add(batch.build_and_reset())
    """

    def __init__(self, policies, policy_mapping_fn, batch_builder_factory,
                 extra_batch_callback):
        self.new_batch_builder = batch_builder_factory
        self.add_extra_batch = extra_batch_callback
        self.batch_builder = batch_builder_factory()
        self.total_reward = 0.0
        self.length = 0
        # int(2e9): random.randrange() requires an integer argument -- a
        # float is deprecated since Python 3.10 and a TypeError from 3.12.
        self.episode_id = random.randrange(int(2e9))
        self.agent_rewards = defaultdict(float)
        self.custom_metrics = {}
        self.user_data = {}
        self._policies = policies
        self._policy_mapping_fn = policy_mapping_fn
        # Per-agent bookkeeping, all keyed by agent id.
        self._next_agent_index = 0
        self._agent_to_index = {}
        self._agent_to_policy = {}
        self._agent_to_rnn_state = {}
        self._agent_to_last_obs = {}
        self._agent_to_last_raw_obs = {}
        self._agent_to_last_info = {}
        self._agent_to_last_action = {}
        self._agent_to_last_pi_info = {}
        self._agent_to_prev_action = {}
        self._agent_reward_history = defaultdict(list)

    @DeveloperAPI
    def soft_reset(self):
        """Clears rewards and metrics, but retains RNN and other state.

        This is used to carry state across multiple logical episodes in the
        same env (i.e., if `soft_horizon` is set).
        """
        self.length = 0
        # See __init__: randrange() needs an int argument.
        self.episode_id = random.randrange(int(2e9))
        self.total_reward = 0.0
        self.agent_rewards = defaultdict(float)
        self._agent_reward_history = defaultdict(list)

    @DeveloperAPI
    def policy_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the policy for the specified agent.

        If the agent is new, the policy mapping fn will be called to bind the
        agent to a policy for the duration of the episode.
        """
        if agent_id not in self._agent_to_policy:
            self._agent_to_policy[agent_id] = self._policy_mapping_fn(agent_id)
        return self._agent_to_policy[agent_id]

    @DeveloperAPI
    def last_observation_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last observation for the specified agent."""
        return self._agent_to_last_obs.get(agent_id)

    @DeveloperAPI
    def last_raw_obs_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last un-preprocessed obs for the specified agent."""
        return self._agent_to_last_raw_obs.get(agent_id)

    @DeveloperAPI
    def last_info_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last info for the specified agent."""
        return self._agent_to_last_info.get(agent_id)

    @DeveloperAPI
    def last_action_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last action for the specified agent, or zeros."""
        if agent_id in self._agent_to_last_action:
            return _flatten_action(self._agent_to_last_action[agent_id])
        else:
            # No action taken yet: sample one only to learn the flattened
            # shape, then return zeros of that shape.
            policy = self._policies[self.policy_for(agent_id)]
            flat = _flatten_action(policy.action_space.sample())
            return np.zeros_like(flat)

    @DeveloperAPI
    def prev_action_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the previous action for the specified agent."""
        if agent_id in self._agent_to_prev_action:
            return _flatten_action(self._agent_to_prev_action[agent_id])
        else:
            # We're at t=0, so return all zeros.
            return np.zeros_like(self.last_action_for(agent_id))

    @DeveloperAPI
    def prev_reward_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the previous reward for the specified agent."""
        history = self._agent_reward_history[agent_id]
        if len(history) >= 2:
            return history[-2]
        else:
            # We're at t=0, so there is no previous reward, just return zero.
            return 0.0

    @DeveloperAPI
    def rnn_state_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last RNN state for the specified agent."""
        if agent_id not in self._agent_to_rnn_state:
            policy = self._policies[self.policy_for(agent_id)]
            self._agent_to_rnn_state[agent_id] = policy.get_initial_state()
        return self._agent_to_rnn_state[agent_id]

    @DeveloperAPI
    def last_pi_info_for(self, agent_id=_DUMMY_AGENT_ID):
        """Returns the last info object for the specified agent."""
        return self._agent_to_last_pi_info[agent_id]

    def _add_agent_rewards(self, reward_dict):
        # Accumulate per-agent (keyed by (agent_id, policy_id)) and total
        # rewards; None rewards (e.g. agent not stepped) are skipped.
        for agent_id, reward in reward_dict.items():
            if reward is not None:
                self.agent_rewards[agent_id,
                                   self.policy_for(agent_id)] += reward
                self.total_reward += reward
                self._agent_reward_history[agent_id].append(reward)

    def _set_rnn_state(self, agent_id, rnn_state):
        self._agent_to_rnn_state[agent_id] = rnn_state

    def _set_last_observation(self, agent_id, obs):
        self._agent_to_last_obs[agent_id] = obs

    def _set_last_raw_obs(self, agent_id, obs):
        self._agent_to_last_raw_obs[agent_id] = obs

    def _set_last_info(self, agent_id, info):
        self._agent_to_last_info[agent_id] = info

    def _set_last_action(self, agent_id, action):
        # Shift last -> prev before recording the new last action.
        if agent_id in self._agent_to_last_action:
            self._agent_to_prev_action[agent_id] = \
                self._agent_to_last_action[agent_id]
        self._agent_to_last_action[agent_id] = action

    def _set_last_pi_info(self, agent_id, pi_info):
        self._agent_to_last_pi_info[agent_id] = pi_info

    def _agent_index(self, agent_id):
        # Assign a stable, dense index to each agent on first sight.
        if agent_id not in self._agent_to_index:
            self._agent_to_index[agent_id] = self._next_agent_index
            self._next_agent_index += 1
        return self._agent_to_index[agent_id]
def _flatten_action(action):
    """Flatten a tuple/list action into a single 1-D array.

    Non-sequence actions are returned unchanged.
    """
    if not isinstance(action, (list, tuple)):
        return action
    # Promote scalar-ish components (no .shape, or rank 0) so that
    # np.concatenate has something with at least one axis to work on.
    parts = [
        a if hasattr(a, "shape") and len(a.shape) != 0
        else np.expand_dims(a, 1)
        for a in action
    ]
    return np.concatenate(parts, axis=0).flatten()
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swift tests """
from __future__ import print_function
import os
import copy
import logging
import errno
from six.moves import range
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
from swift.common.utils import Timestamp, NOTICE
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
from six.moves.http_client import HTTPException
from swift.common import storage_policy
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
VALID_EC_TYPES)
import functools
import six.moves.cPickle as pickle
from gzip import GzipFile
import mock as mocklib
import inspect
# MD5 digest of a zero-length body; useful constant for object tests.
EMPTY_ETAG = md5().hexdigest()

# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
    # never patch HASH_PATH_SUFFIX AGAIN!
    utils.HASH_PATH_SUFFIX = 'endcap'

# Preferred PyECLib erasure-coding backends, most preferred first.
EC_TYPE_PREFERENCE = [
    'liberasurecode_rs_vand',
    'jerasure_rs_vand',
]
# for/else: the else branch runs only if no preferred backend was found
# among the locally available VALID_EC_TYPES.
for eclib_name in EC_TYPE_PREFERENCE:
    if eclib_name in VALID_EC_TYPES:
        break
else:
    raise SystemExit('ERROR: unable to find suitable PyECLib type'
                     ' (none of %r found in %r)' % (
                         EC_TYPE_PREFERENCE,
                         VALID_EC_TYPES,
                     ))
# The loop variable deliberately leaks: first supported backend wins.
DEFAULT_TEST_EC_TYPE = eclib_name
def patch_policies(thing_or_policies=None, legacy_only=False,
                   with_ec_default=False, fake_ring_args=None):
    """Patch the global storage policies for a test class or method.

    Usable three ways: bare (``@patch_policies``), with an explicit policy
    collection (``@patch_policies(policies)``), or with ``legacy_only`` /
    ``with_ec_default`` to select one of the canned policy sets below.
    """
    if isinstance(thing_or_policies,
                  (Iterable, storage_policy.StoragePolicyCollection)):
        # Caller handed in an explicit set of policies.
        return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)

    if legacy_only:
        default_policies = [
            StoragePolicy(0, name='legacy', is_default=True),
        ]
        default_ring_args = [{}]
    elif with_ec_default:
        default_policies = [
            ECStoragePolicy(0, name='ec', is_default=True,
                            ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
                            ec_nparity=4, ec_segment_size=4096),
            StoragePolicy(1, name='unu'),
        ]
        default_ring_args = [{'replicas': 14}, {}]
    else:
        default_policies = [
            StoragePolicy(0, name='nulo', is_default=True),
            StoragePolicy(1, name='unu'),
        ]
        default_ring_args = [{}, {}]

    decorator = PatchPolicies(
        default_policies,
        fake_ring_args=fake_ring_args or default_ring_args)
    # Bare decoration passes the decorated thing straight in; otherwise we
    # hand the decorator back for later application.
    return decorator(thing_or_policies) if thing_or_policies else decorator
class PatchPolicies(object):
    """
    Swaps the module-global storage policy collection for the duration of a
    test class, test method, or ``with`` block.

    Why not mock.patch? In my case, when used as a decorator on the class it
    seemed to patch setUp at the wrong time (i.e. in setup the global wasn't
    patched yet)
    """

    def __init__(self, policies, fake_ring_args=None):
        # Accept either a ready-made collection or a plain list of policies.
        if isinstance(policies, storage_policy.StoragePolicyCollection):
            self.policies = policies
        else:
            self.policies = storage_policy.StoragePolicyCollection(policies)
        # One FakeRing-kwargs dict per policy; a None entry means "leave
        # that policy's object_ring alone".
        self.fake_ring_args = fake_ring_args or [None] * len(self.policies)

    def _setup_rings(self):
        """
        Our tests tend to use the policies rings like their own personal
        playground - which can be a problem in the particular case of a
        patched TestCase class where the FakeRing objects are scoped in the
        call to the patch_policies wrapper outside of the TestCase instance
        which can lead to some bled state.

        To help tests get better isolation without having to think about it,
        here we're capturing the args required to *build* a new FakeRing
        instances so we can ensure each test method gets a clean ring setup.

        The TestCase can always "tweak" these fresh rings in setUp - or if
        they'd prefer to get the same "reset" behavior with custom FakeRing's
        they can pass in their own fake_ring_args to patch_policies instead of
        setting the object_ring on the policy definitions.
        """
        for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
            if fake_ring_arg is not None:
                policy.object_ring = FakeRing(**fake_ring_arg)

    def __call__(self, thing):
        # Dispatch on what is being decorated: a TestCase class or a
        # bare test method.
        if isinstance(thing, type):
            return self._patch_class(thing)
        else:
            return self._patch_method(thing)

    def _patch_class(self, cls):
        """
        Creating a new class that inherits from decorated class is the more
        common way I've seen class decorators done - but it seems to cause
        infinite recursion when super is called from inside methods in the
        decorated class.
        """
        orig_setUp = cls.setUp
        orig_tearDown = cls.tearDown

        def setUp(cls_self):
            # Snapshot the real global so tearDown can restore it.
            self._orig_POLICIES = storage_policy._POLICIES
            # Guard against setUp running more than once per instance.
            if not getattr(cls_self, '_policies_patched', False):
                storage_policy._POLICIES = self.policies
                self._setup_rings()
                cls_self._policies_patched = True
            orig_setUp(cls_self)

        def tearDown(cls_self):
            orig_tearDown(cls_self)
            storage_policy._POLICIES = self._orig_POLICIES

        cls.setUp = setUp
        cls.tearDown = tearDown
        return cls

    def _patch_method(self, f):
        @functools.wraps(f)
        def mywrapper(*args, **kwargs):
            self._orig_POLICIES = storage_policy._POLICIES
            try:
                storage_policy._POLICIES = self.policies
                self._setup_rings()
                return f(*args, **kwargs)
            finally:
                # Restore even if the wrapped test raises.
                storage_policy._POLICIES = self._orig_POLICIES
        return mywrapper

    def __enter__(self):
        # Context-manager form: patch on entry, restore on exit.
        self._orig_POLICIES = storage_policy._POLICIES
        storage_policy._POLICIES = self.policies

    def __exit__(self, *args):
        storage_policy._POLICIES = self._orig_POLICIES
class FakeRing(Ring):
    # An in-memory Ring with fabricated devices; one device per replica.

    def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
                 base_port=1000):
        """
        :param part_power: make part calculation based on the path

        If you set a part_power when you setup your FakeRing the parts you get
        out of ring methods will actually be based on the path - otherwise we
        exercise the real ring code, but ignore the result and return 1.
        """
        # NOTE(review): Ring.__init__ is deliberately not called -
        # presumably to avoid loading a serialized ring file; all state is
        # fabricated below. Confirm against the Ring base class.
        self._base_port = base_port
        self.max_more_nodes = max_more_nodes
        self._part_shift = 32 - part_power
        # 9 total nodes (6 more past the initial 3) is the cap, no matter if
        # this is set higher, or R^2 for R replicas
        self.set_replicas(replicas)
        self._reload()

    def _reload(self):
        # Only records the "loaded at" time; there is no backing file.
        self._rtime = time.time()

    def set_replicas(self, replicas):
        # Rebuild the device list: one device per replica, with
        # deterministic ip/port/zone/region derived from the index.
        self.replicas = replicas
        self._devs = []
        for x in range(self.replicas):
            ip = '10.0.0.%s' % x
            port = self._base_port + x
            self._devs.append({
                'ip': ip,
                'replication_ip': ip,
                'port': port,
                'replication_port': port,
                'device': 'sd' + (chr(ord('a') + x)),
                'zone': x % 3,
                'region': x % 2,
                'id': x,
            })

    @property
    def replica_count(self):
        return self.replicas

    def _get_part_nodes(self, part):
        # Every partition maps to all devices; each node dict is copied and
        # annotated with its replica index.
        return [dict(node, index=i) for i, node in enumerate(list(self._devs))]

    def get_more_nodes(self, part):
        # Handoff nodes continue the numbering past the primary replicas,
        # up to max_more_nodes extras.
        for x in range(self.replicas, (self.replicas + self.max_more_nodes)):
            yield {'ip': '10.0.0.%s' % x,
                   'replication_ip': '10.0.0.%s' % x,
                   'port': self._base_port + x,
                   'replication_port': self._base_port + x,
                   'device': 'sda',
                   'zone': x % 3,
                   'region': x % 2,
                   'id': x}
def write_fake_ring(path, *devs):
    """
    Pretty much just a two node, two replica, 2 part power ring...

    Serializes a RingData to ``path`` as a gzipped pickle. Optional
    ``devs`` are two dicts of overrides applied to the two default devices.
    """
    dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
            'port': 6000}
    # BUG FIX: the second device must have its own unique id (1, matching
    # its index in the devs list) -- the partition table below assigns
    # partitions to dev ids 0 *and* 1, and both devices previously
    # claimed id 0.
    dev2 = {'id': 1, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
            'port': 6000}
    dev1_updates, dev2_updates = devs or ({}, {})
    dev1.update(dev1_updates)
    dev2.update(dev2_updates)

    # 2 replicas x 4 partitions, alternating between the two devices.
    replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
    devs = [dev1, dev2]
    part_shift = 30

    with closing(GzipFile(path, 'wb')) as f:
        pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
class FabricatedRing(Ring):
    """
    When a FakeRing just won't do - you can fabricate one to meet
    your tests needs.
    """

    def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
                 part_power=4):
        self.devices = devices
        self.nodes = nodes
        self.port = port
        # BUG FIX: honor the caller's replica count -- this was hard-coded
        # to 6, silently ignoring the ``replicas`` argument.
        self.replicas = replicas
        self.part_power = part_power
        self._part_shift = 32 - self.part_power
        self._reload()

    def _reload(self, *args, **kwargs):
        # Pretend the ring was loaded in the future so the base class never
        # tries to re-read it from disk.
        self._rtime = time.time() * 2
        if hasattr(self, '_replica2part2dev_id'):
            return
        # Fabricate the devices: ids are dense, ips cycle over ``nodes``.
        self._devs = [{
            'region': 1,
            'zone': 1,
            'weight': 1.0,
            'id': i,
            'device': 'sda%d' % i,
            'ip': '10.0.0.%d' % (i % self.nodes),
            'replication_ip': '10.0.0.%d' % (i % self.nodes),
            'port': self.port,
            'replication_port': self.port,
        } for i in range(self.devices)]
        # Assign devices to (replica, partition) slots round-robin.
        self._replica2part2dev_id = [
            [None] * 2 ** self.part_power
            for i in range(self.replicas)
        ]
        dev_ids = itertools.cycle(range(self.devices))
        for p in range(2 ** self.part_power):
            for r in range(self.replicas):
                self._replica2part2dev_id[r][p] = next(dev_ids)
class FakeMemcache(object):
    """A dict-backed stand-in for swift's memcache client."""

    def __init__(self):
        self.store = {}

    def get(self, key):
        try:
            return self.store[key]
        except KeyError:
            return None

    def keys(self):
        return self.store.keys()

    def set(self, key, value, time=0):
        # ``time`` (TTL) is accepted for interface parity but ignored.
        self.store[key] = value
        return True

    def incr(self, key, time=0):
        # Missing keys start from zero, matching memcached's swift wrapper.
        bumped = self.store.get(key, 0) + 1
        self.store[key] = bumped
        return bumped

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        # Locking always "succeeds" in the fake.
        yield True

    def delete(self, key):
        # Deleting a missing key is not an error.
        self.store.pop(key, None)
        return True
def readuntil2crlfs(fd):
    """Read from fd one character at a time until two consecutive CRLF
    pairs have been seen, returning everything read (terminator included).

    Raises ValueError if EOF arrives first.
    """
    collected = ''
    prev = ''
    pairs = 0
    while pairs < 2:
        ch = fd.read(1)
        if not ch:
            raise ValueError("didn't get two CRLFs; just got %r" % collected)
        collected += ch
        # A CR that doesn't directly follow an LF restarts the count, so
        # only back-to-back CRLFs terminate.
        if ch == '\r' and prev != '\n':
            pairs = 0
        if prev == '\r' and ch == '\n':
            pairs += 1
        prev = ch
    return collected
def connect_tcp(hostport):
    # Open and return a TCP connection to the (host, port) tuple; the
    # caller is responsible for closing the returned socket.
    rv = socket.socket()
    rv.connect(hostport)
    return rv
@contextmanager
def tmpfile(content):
    """Yield the path of a temporary file containing str(content).

    The file is removed when the context exits.
    """
    with NamedTemporaryFile('w', delete=False) as handle:
        handle.write(str(content))
        path = handle.name
    try:
        yield path
    finally:
        os.unlink(path)
# Fake xattr backing store, keyed by inode: {inode: {attr_name: value}}.
xattr_data = {}


def _get_inode(fd):
    """Return the inode for ``fd``, which may be an integer file
    descriptor, a file-like object, or a path string."""
    if isinstance(fd, int):
        return os.fstat(fd).st_ino
    try:
        fileno = fd.fileno()
    except AttributeError:
        # No fileno(): treat it as a filesystem path.
        return os.stat(fd).st_ino
    return os.fstat(fileno).st_ino
def _setxattr(fd, k, v):
    """Record attribute ``k=v`` for fd's inode in the fake xattr store."""
    inode = _get_inode(fd)
    # setdefault creates the per-inode dict on first use and mutates the
    # existing one thereafter.
    xattr_data.setdefault(inode, {})[k] = v
def _getxattr(fd, k):
    """Return attribute ``k`` for fd's inode from the fake xattr store.

    Raises IOError(ENODATA) like the real getxattr when the attribute is
    missing (or falsy).
    """
    value = xattr_data.get(_get_inode(fd), {}).get(k)
    if not value:
        raise IOError(errno.ENODATA, "Fake IOError")
    return value
# Monkey patch the xattr module so metadata reads/writes in tests go
# through the in-memory fakes above instead of real filesystem xattrs.
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
@contextmanager
def temptree(files, contents=''):
    """Yield a temporary directory populated with ``files``.

    ``contents`` is matched positionally against ``files``; missing entries
    default to the empty string. Absolute paths are re-rooted under the
    temp dir. The whole tree is removed when the context exits.
    """
    count = len(files)
    # Pad (or truncate) contents so there is exactly one entry per file.
    padded = (list(contents) + [''] * count)[:count]
    tempdir = mkdtemp()
    for rel_path, body in zip(files, padded):
        if os.path.isabs(rel_path):
            rel_path = '.' + rel_path
        target = os.path.join(tempdir, rel_path)
        parent = os.path.dirname(target)
        if not os.path.exists(parent):
            os.makedirs(parent)
        with open(target, 'w') as f:
            f.write(str(body))
    try:
        yield tempdir
    finally:
        rmtree(tempdir)
def with_tempdir(f):
    """
    Decorator to give a single test a tempdir as argument to test method.

    The tempdir is appended to the positional arguments and removed after
    the wrapped function returns (or raises).
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        tempdir = mkdtemp()
        try:
            return f(*(args + (tempdir,)), **kwargs)
        finally:
            rmtree(tempdir)
    return wrapped
class NullLoggingHandler(logging.Handler):
    # A logging handler that silently discards every record.
    def emit(self, record):
        pass
class UnmockTimeModule(object):
    """
    Even if a test mocks time.time - you can restore unmolested behavior
    in another module that imports time directly, by monkey patching its
    imported reference with an instance of this class. Looking up ``time``
    returns the genuine function captured at class-definition time; every
    other attribute proxies straight through to the real module.
    """

    # Captured before any test gets a chance to mock it.
    _orig_time = time.time

    def __getattribute__(self, name):
        if name != 'time':
            return getattr(time, name)
        return UnmockTimeModule._orig_time


# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()
class WARN_DEPRECATED(Exception):
    """Raised to flag use of a deprecated logging method."""

    def __init__(self, msg):
        # Forward to Exception so str(exc) and exc.args carry the message;
        # previously super().__init__ was never called and str(exc) was ''.
        super(WARN_DEPRECATED, self).__init__(msg)
        self.msg = msg
        print(self.msg)
class FakeLogger(logging.Logger, object):
    # a thread safe fake logger
    """Captures logging and StatsD calls for later assertions in tests.

    Captured data lives in two structures (reset by ``_clear``/``clear``):
    ``log_dict`` holds raw (args, kwargs) call tuples keyed by level name,
    and ``lines_dict`` holds formatted message lines keyed by level name.
    """

    def __init__(self, *args, **kwargs):
        self._clear()
        self.name = 'swift.unit.fake_logger'
        self.level = logging.NOTSET
        if 'facility' in kwargs:
            self.facility = kwargs['facility']
        self.statsd_client = None
        self.thread_locals = None
        self.parent = None

    # Maps a logging level to the log_dict key its calls are stored under.
    store_in = {
        logging.ERROR: 'error',
        logging.WARNING: 'warning',
        logging.INFO: 'info',
        logging.DEBUG: 'debug',
        logging.CRITICAL: 'critical',
        NOTICE: 'notice',
    }

    def warn(self, *args, **kwargs):
        # Deliberately fails fast so tests migrate to warning().
        raise WARN_DEPRECATED("Deprecated Method warn use warning instead")

    def notice(self, msg, *args, **kwargs):
        """
        Convenience function for syslog priority LOG_NOTICE. The python
        logging lvl is set to 25, just above info. SysLogHandler is
        monkey patched to map this log lvl to the LOG_NOTICE syslog
        priority.
        """
        self.log(NOTICE, msg, *args, **kwargs)

    def _log(self, level, msg, *args, **kwargs):
        # Capture the call, then defer to the real Logger machinery so
        # handle()/ _handle() also record the formatted line.
        store_name = self.store_in[level]
        cargs = [msg]
        if any(args):
            cargs.extend(args)
        captured = dict(kwargs)
        # A truthy non-tuple exc_info (e.g. True) means "current exception".
        if 'exc_info' in kwargs and \
                not isinstance(kwargs['exc_info'], tuple):
            captured['exc_info'] = sys.exc_info()
        self.log_dict[store_name].append((tuple(cargs), captured))
        super(FakeLogger, self)._log(level, msg, *args, **kwargs)

    def _clear(self):
        # Reset both capture structures.
        self.log_dict = defaultdict(list)
        self.lines_dict = {'critical': [], 'error': [], 'info': [],
                           'warning': [], 'debug': [], 'notice': []}

    clear = _clear  # this is a public interface

    def get_lines_for_level(self, level):
        # ``level`` is a lowercase name like 'error', not a numeric level.
        if level not in self.lines_dict:
            raise KeyError(
                "Invalid log level '%s'; valid levels are %s" %
                (level,
                 ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
        return self.lines_dict[level]

    def all_log_lines(self):
        # Only levels that actually captured something.
        return dict((level, msgs) for level, msgs in self.lines_dict.items()
                    if len(msgs) > 0)

    def _store_in(store_name):
        # Class-body helper (not a method): builds a stub that records its
        # calls under ``store_name`` in log_dict.
        def stub_fn(self, *args, **kwargs):
            self.log_dict[store_name].append((args, kwargs))
        return stub_fn

    # mock out the StatsD logging methods:
    update_stats = _store_in('update_stats')
    increment = _store_in('increment')
    decrement = _store_in('decrement')
    timing = _store_in('timing')
    timing_since = _store_in('timing_since')
    transfer_rate = _store_in('transfer_rate')
    set_statsd_prefix = _store_in('set_statsd_prefix')

    def get_increments(self):
        # The metric name is the first positional arg of each increment().
        return [call[0][0] for call in self.log_dict['increment']]

    def get_increment_counts(self):
        # Collapse the increment() calls into {metric: count}.
        counts = {}
        for metric in self.get_increments():
            if metric not in counts:
                counts[metric] = 0
            counts[metric] += 1
        return counts

    def setFormatter(self, obj):
        self.formatter = obj

    def close(self):
        self._clear()

    def set_name(self, name):
        # don't touch _handlers
        self._name = name

    # This object doubles as its own handler, so the locking-related
    # handler methods are no-ops.
    def acquire(self):
        pass

    def release(self):
        pass

    def createLock(self):
        pass

    def emit(self, record):
        pass

    def _handle(self, record):
        try:
            line = record.getMessage()
        except TypeError:
            # Surface bad %-format calls loudly instead of hiding them.
            print('WARNING: unable to format log message %r %% %r' % (
                record.msg, record.args))
            raise
        self.lines_dict[record.levelname.lower()].append(line)

    def handle(self, record):
        self._handle(record)

    def flush(self):
        pass

    def handleError(self, record):
        pass
class DebugLogger(FakeLogger):
    """A simple stdout logging version of FakeLogger"""

    def __init__(self, *args, **kwargs):
        FakeLogger.__init__(self, *args, **kwargs)
        # NOTE(review): the format uses %(server)s, so records are
        # presumably emitted with a ``server`` attribute - confirm callers.
        self.formatter = logging.Formatter(
            "%(server)s %(levelname)s: %(message)s")

    def handle(self, record):
        # Capture like FakeLogger, then echo the formatted line to stdout.
        self._handle(record)
        print(self.formatter.format(record))
class DebugLogAdapter(utils.LogAdapter):
    # Adapter that forwards the StatsD mock methods to the wrapped logger.

    def _send_to_logger(name):
        # Class-body helper (not a method): builds a forwarding stub.
        def stub_fn(self, *args, **kwargs):
            return getattr(self.logger, name)(*args, **kwargs)
        return stub_fn

    # delegate to FakeLogger's mocks
    update_stats = _send_to_logger('update_stats')
    increment = _send_to_logger('increment')
    decrement = _send_to_logger('decrement')
    timing = _send_to_logger('timing')
    timing_since = _send_to_logger('timing_since')
    transfer_rate = _send_to_logger('transfer_rate')
    set_statsd_prefix = _send_to_logger('set_statsd_prefix')

    def __getattribute__(self, name):
        # Anything not found on the adapter itself falls through to the
        # wrapped logger (looked up via __dict__ to avoid recursion).
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
    """get a named adapted debug logger"""
    # Wrap a stdout-echoing DebugLogger in the project's LogAdapter.
    return DebugLogAdapter(DebugLogger(), name)
# Keep a reference to the real handler class before any patching.
original_syslog_handler = logging.handlers.SysLogHandler


def fake_syslog_handler():
    # Copy the LOG_* facility/priority constants onto FakeLogger so it can
    # impersonate SysLogHandler, then install it in its place.
    for attr in dir(original_syslog_handler):
        if attr.startswith('LOG'):
            setattr(FakeLogger, attr,
                    copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
    FakeLogger.priority_map = \
        copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
    logging.handlers.SysLogHandler = FakeLogger


# Opt-in via the 'fake_syslog' flag in the unit_test config section.
if utils.config_true_value(
        get_config('unit_test').get('fake_syslog', 'False')):
    fake_syslog_handler()
class MockTrue(object):
    """
    A truthy chameleon: any attribute access or call on a MockTrue
    instance yields the instance itself, while comparisons and repr()
    make it indistinguishable from ``True``.

    >>> thing = MockTrue()
    >>> thing
    True
    >>> thing == True  # True == True
    True
    >>> thing == False  # True == False
    False
    >>> thing != True  # True != True
    False
    >>> thing != False  # True != False
    True
    >>> thing.attribute
    True
    >>> thing.method()
    True
    >>> thing.attribute.method()
    True
    >>> thing.method().attribute
    True
    """

    def __call__(self, *args, **kwargs):
        # Calls collapse back to the same instance, so chains never break.
        return self

    def __getattribute__(self, *args, **kwargs):
        # Every attribute lookup also yields the instance itself.
        return self

    def __repr__(*args, **kwargs):
        return repr(True)

    def __eq__(self, other):
        return other is True

    def __ne__(self, other):
        return other is not True
@contextmanager
def mock(update):
    """Temporarily set attributes given as dotted paths.

    ``update`` maps 'pkg.module.attr' -> replacement value. On exit every
    pre-existing attribute is restored and every attribute this context
    created is deleted.
    """
    restore = []
    remove = []
    for dotted, replacement in update.items():
        parts = dotted.split('.')
        attr = parts.pop()
        target = __import__(parts[0], fromlist=parts[1:])
        # Walk down to the object actually owning the attribute.
        for name in parts[1:]:
            target = getattr(target, name)
        if hasattr(target, attr):
            restore.append((target, attr, getattr(target, attr)))
        else:
            remove.append((target, attr))
        setattr(target, attr, replacement)
    try:
        yield True
    finally:
        for target, attr, original in restore:
            setattr(target, attr, original)
        for target, attr in remove:
            delattr(target, attr)
class FakeStatus(object):
    """
    This will work with our fake_http_connect, if you hand in one of these
    instead of a status int or status int tuple to the "codes" iter you can
    add some eventlet sleep to the expect and response stages of the
    connection.
    """

    def __init__(self, status, expect_sleep=None, response_sleep=None):
        """
        :param status: the response status int, or a tuple of
                       ([expect_status, ...], response_status)
        :param expect_sleep: float, time to eventlet sleep during expect, can
                             be a iter of floats
        :param response_sleep: float, time to eventlet sleep during response
        """
        # connect exception
        if isinstance(status, (Exception, eventlet.Timeout)):
            raise status
        if isinstance(status, tuple):
            # Caller spelled out the expect statuses explicitly.
            self.expect_status = list(status[:-1])
            self.status = status[-1]
            self.explicit_expect_list = True
        else:
            self.expect_status, self.status = ([], status)
            self.explicit_expect_list = False
        if not self.expect_status:
            # when a swift backend service returns a status before reading
            # from the body (mostly an error response) eventlet.wsgi will
            # respond with that status line immediately instead of 100
            # Continue, even if the client sent the Expect 100 header.
            # BufferedHttp and the proxy both see these error statuses
            # when they call getexpect, so our FakeConn tries to act like
            # our backend services and return certain types of responses
            # as expect statuses just like a real backend server would do.
            if self.status in (507, 412, 409):
                self.expect_status = [status]
            else:
                self.expect_status = [100, 100]

        # setup sleep attributes
        # Normalize expect_sleep into one (possibly None) entry per
        # expected status so get_expect_status can pop in lockstep.
        if not isinstance(expect_sleep, (list, tuple)):
            expect_sleep = [expect_sleep] * len(self.expect_status)
        self.expect_sleep_list = list(expect_sleep)
        while len(self.expect_sleep_list) < len(self.expect_status):
            self.expect_sleep_list.append(None)
        self.response_sleep = response_sleep

    def get_response_status(self):
        if self.response_sleep is not None:
            eventlet.sleep(self.response_sleep)
        # An explicit expect list must be fully drained before the
        # response; leftovers mean the test got the protocol wrong.
        if self.expect_status and self.explicit_expect_list:
            raise Exception('Test did not consume all fake '
                            'expect status: %r' % (self.expect_status,))
        if isinstance(self.status, (Exception, eventlet.Timeout)):
            raise self.status
        return self.status

    def get_expect_status(self):
        # Pop sleep and status together, one pair per getexpect() call.
        expect_sleep = self.expect_sleep_list.pop(0)
        if expect_sleep is not None:
            eventlet.sleep(expect_sleep)
        expect_status = self.expect_status.pop(0)
        if isinstance(expect_status, (Exception, eventlet.Timeout)):
            raise expect_status
        return expect_status
class SlowBody(object):
    """
    This will work with our fake_http_connect, if you hand in these
    instead of strings it will make reads take longer by the given
    amount. It should be a little bit easier to extend than the
    current slow kwarg - which inserts whitespace in the response.
    Also it should be easy to detect if you have one of these (or a
    subclass) for the body inside of FakeConn if we wanted to do
    something smarter than just duck-type the str/buffer api
    enough to get by.
    """

    def __init__(self, body, slowness):
        self.body = body
        self.slowness = slowness

    def slowdown(self):
        eventlet.sleep(self.slowness)

    def __getitem__(self, s):
        # Slicing yields another SlowBody so slowness survives chunking.
        return SlowBody(self.body[s], self.slowness)

    def __len__(self):
        return len(self.body)

    def __radd__(self, other):
        # Sleeps on concatenation - this is where read() pays the delay.
        self.slowdown()
        return other + self.body
def fake_http_connect(*code_iter, **kwargs):
    """Return a connect() function yielding one FakeConn per status code.

    ``code_iter`` supplies one status (int, tuple, FakeStatus, or
    exception) per expected connection. Behavior is tuned through kwargs
    (all optional), e.g. timestamps, etags, headers, expect_headers, body,
    body_iter, slow, slow_connect, raise_exc, raise_timeout_exc,
    missing_container, give_connect, give_send, give_content_type.
    """

    class FakeConn(object):
        # Duck-types enough of httplib's HTTPConnection/HTTPResponse for
        # the proxy and backend client code paths under test.

        def __init__(self, status, etag=None, body='', timestamp='1',
                     headers=None, expect_headers=None, connection_id=None,
                     give_send=None):
            if not isinstance(status, FakeStatus):
                status = FakeStatus(status)
            self._status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.sent = 0
            self.received = 0
            self.etag = etag
            self.body = body
            self.headers = headers or {}
            self.expect_headers = expect_headers or {}
            self.timestamp = timestamp
            self.connection_id = connection_id
            self.give_send = give_send
            # A list-valued 'slow' kwarg doles out one sleep per
            # connection, shared via the enclosing kwargs.
            if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                try:
                    self._next_sleep = kwargs['slow'].pop(0)
                except IndexError:
                    self._next_sleep = None
            # be nice to trixy bits with node_iter's
            eventlet.sleep()

        def getresponse(self):
            exc = kwargs.get('raise_exc')
            if exc:
                if isinstance(exc, (Exception, eventlet.Timeout)):
                    raise exc
                raise Exception('test')
            if kwargs.get('raise_timeout_exc'):
                raise eventlet.Timeout()
            self.status = self._status.get_response_status()
            return self

        def getexpect(self):
            # Model the 100-continue handshake as a nested FakeConn.
            expect_status = self._status.get_expect_status()
            headers = dict(self.expect_headers)
            if expect_status == 409:
                headers['X-Backend-Timestamp'] = self.timestamp
            response = FakeConn(expect_status,
                                timestamp=self.timestamp,
                                headers=headers)
            response.status = expect_status
            return response

        def getheaders(self):
            etag = self.etag
            if not etag:
                if isinstance(self.body, str):
                    etag = '"' + md5(self.body).hexdigest() + '"'
                else:
                    # md5 of '\n' - a fixed fake for non-str bodies.
                    etag = '"68b329da9893e34099c7d8ad5cb9c940"'

            headers = swob.HeaderKeyDict({
                'content-length': len(self.body),
                'content-type': 'x-application/test',
                'x-timestamp': self.timestamp,
                'x-backend-timestamp': self.timestamp,
                'last-modified': self.timestamp,
                'x-object-meta-test': 'testing',
                'x-delete-at': '9876543210',
                'etag': etag,
                'x-works': 'yes',
            })
            if self.status // 100 == 2:
                headers['x-account-container-count'] = \
                    kwargs.get('count', 12345)
            if not self.timestamp:
                # when timestamp is None, HeaderKeyDict raises KeyError
                headers.pop('x-timestamp', None)
            try:
                if next(container_ts_iter) is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            am_slow, value = self.get_slow()
            if am_slow:
                # Lie about the length so slow read()s of ' ' add up.
                headers['content-length'] = '4'
            # Caller-supplied headers override all of the defaults above.
            headers.update(self.headers)
            return headers.items()

        def get_slow(self):
            # Returns (should_be_slow, sleep_seconds) based on the 'slow'
            # kwarg, which may be a list (per-connection), a Number, or a
            # plain truthy flag.
            if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                if self._next_sleep is not None:
                    return True, self._next_sleep
                else:
                    return False, 0.01
            if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
                return True, kwargs['slow']
            return bool(kwargs.get('slow')), 0.1

        def read(self, amt=None):
            am_slow, value = self.get_slow()
            if am_slow:
                # Dribble out up to 4 single-space bytes before the body.
                if self.sent < 4:
                    self.sent += 1
                    eventlet.sleep(value)
                    return ' '
            rv = self.body[:amt]
            self.body = self.body[amt:]
            return rv

        def send(self, amt=None):
            if self.give_send:
                self.give_send(self.connection_id, amt)
            am_slow, value = self.get_slow()
            if am_slow:
                if self.received < 4:
                    self.received += 1
                    eventlet.sleep(value)

        def getheader(self, name, default=None):
            return swob.HeaderKeyDict(self.getheaders()).get(name, default)

        def close(self):
            pass

    # Per-connection iterators, consumed in lockstep by connect() below.
    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    if isinstance(kwargs.get('headers'), (list, tuple)):
        headers_iter = iter(kwargs['headers'])
    else:
        headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
    if isinstance(kwargs.get('expect_headers'), (list, tuple)):
        expect_headers_iter = iter(kwargs['expect_headers'])
    else:
        expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
                                   len(code_iter))

    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)
    conn_id_and_code_iter = enumerate(code_iter)
    static_body = kwargs.get('body', None)
    body_iter = kwargs.get('body_iter', None)
    if body_iter:
        body_iter = iter(body_iter)

    def connect(*args, **ckwargs):
        if kwargs.get('slow_connect', False):
            eventlet.sleep(0.1)
        if 'give_content_type' in kwargs:
            # args[6] is the headers dict in the http_connect signature.
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        i, status = next(conn_id_and_code_iter)
        if 'give_connect' in kwargs:
            give_conn_fn = kwargs['give_connect']
            # Only pass connection_id if the callback can accept it.
            argspec = inspect.getargspec(give_conn_fn)
            if argspec.keywords or 'connection_id' in argspec.args:
                ckwargs['connection_id'] = i
            give_conn_fn(*args, **ckwargs)
        etag = next(etag_iter)
        headers = next(headers_iter)
        expect_headers = next(expect_headers_iter)
        timestamp = next(timestamps_iter)

        # Non-positive status is shorthand for "connection failed".
        if status <= 0:
            raise HTTPException()
        if body_iter is None:
            body = static_body or ''
        else:
            body = next(body_iter)
        return FakeConn(status, etag, body=body, timestamp=timestamp,
                        headers=headers, expect_headers=expect_headers,
                        connection_id=i, give_send=kwargs.get('give_send'))

    # Exposed so tests can assert every fake status was consumed.
    connect.code_iter = code_iter

    return connect
@contextmanager
def mocked_http_conn(*args, **kwargs):
    """Patch bufferedhttp's raw connect with a fake_http_connect instance.

    All args/kwargs are forwarded to fake_http_connect. The yielded fake
    records each request dict on its ``requests`` attribute, and leftover
    unconsumed statuses raise AssertionError when the context exits.
    """
    requests = []

    def capture_requests(ip, port, method, path, headers, qs, ssl):
        req = {
            'ip': ip,
            'port': port,
            'method': method,
            'path': path,
            'headers': headers,
            'qs': qs,
            'ssl': ssl,
        }
        requests.append(req)
    # setdefault: an explicit give_connect from the caller wins.
    kwargs.setdefault('give_connect', capture_requests)
    fake_conn = fake_http_connect(*args, **kwargs)
    fake_conn.requests = requests
    with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
                       new=fake_conn):
        yield fake_conn
        left_over_status = list(fake_conn.code_iter)
        if left_over_status:
            raise AssertionError('left over status %r' % left_over_status)
def make_timestamp_iter():
    """Return an iterator of increasing Timestamps, one per whole second,
    starting from the current time."""
    start = int(time.time())
    return (Timestamp(t) for t in itertools.count(start))
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
import warnings
import django
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
from openstack_dashboard.static_settings import find_static_files # noqa
from openstack_dashboard.static_settings import get_staticfiles_dirs # noqa
# Trim warning output to "Category: message" (drops file/line noise).
warnings.formatwarning = lambda message, category, *args, **kwargs: \
    '%s: %s' % (category.__name__, message)
# Absolute path of the openstack_dashboard package directory.
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
BIN_DIR = os.path.abspath(os.path.join(ROOT_PATH, '..', 'bin'))
# Make the dashboard package importable when run from a source checkout.
if ROOT_PATH not in sys.path:
    sys.path.append(ROOT_PATH)
# Debug flags; local_settings.py (imported below) may override these.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_BRANDING = 'OpenStack Dashboard'
# URL prefix the dashboard is served under; normalized to end with '/' below.
WEBROOT = '/'
# The following default to None and are derived from WEBROOT/ROOT_PATH
# further down, after local_settings has had a chance to set them.
LOGIN_URL = None
LOGOUT_URL = None
LOGIN_REDIRECT_URL = None
STATIC_ROOT = None
STATIC_URL = None
ROOT_URLCONF = 'openstack_dashboard.urls'
# Horizon framework configuration; the *_files lists are populated by
# find_static_files() and plugin discovery further down.
HORIZON_CONFIG = {
    'user_home': 'openstack_dashboard.views.get_user_home',
    'ajax_queue_limit': 10,
    'auto_fade_alerts': {
        'delay': 3000,
        'fade_duration': 1500,
        'types': ['alert-success', 'alert-info']
    },
    'help_url': "http://docs.openstack.org",
    'exceptions': {'recoverable': exceptions.RECOVERABLE,
                   'not_found': exceptions.NOT_FOUND,
                   'unauthorized': exceptions.UNAUTHORIZED},
    'angular_modules': [],
    'js_files': [],
    'js_spec_files': [],
    'external_templates': [],
}
# Set to True to allow users to upload images to glance via Horizon server.
# When enabled, a file form field will appear on the create image form.
# See documentation for deployment considerations.
HORIZON_IMAGES_ALLOW_UPLOAD = True
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
OPENSTACK_IMAGE_BACKEND = {
    'image_formats': [
        ('', _('Select format')),
        ('aki', _('AKI - Amazon Kernel Image')),
        ('ami', _('AMI - Amazon Machine Image')),
        ('ari', _('ARI - Amazon Ramdisk Image')),
        ('docker', _('Docker')),
        ('iso', _('ISO - Optical Disk Image')),
        ('ova', _('OVA - Open Virtual Appliance')),
        ('qcow2', _('QCOW2 - QEMU Emulator')),
        ('raw', _('Raw')),
        ('vdi', _('VDI - Virtual Disk Image')),
        ('vhd', _('VHD - Virtual Hard Disk')),
        ('vmdk', _('VMDK - Virtual Machine Disk')),
    ]
}
# Request-processing middleware. Ordering is significant; version-dependent
# and Horizon-specific entries are appended below.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
# SessionAuthenticationMiddleware only exists on Django >= 1.8; older
# releases get XViewMiddleware instead.
if django.VERSION >= (1, 8, 0):
    MIDDLEWARE_CLASSES += (
        'django.contrib.auth.middleware.SessionAuthenticationMiddleware',)
else:
    MIDDLEWARE_CLASSES += ('django.middleware.doc.XViewMiddleware',)
MIDDLEWARE_CLASSES += (
    'horizon.middleware.HorizonMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'horizon.context_processors.horizon',
    'openstack_dashboard.context_processors.openstack',
)
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'horizon.loaders.TemplateLoader',
)
# May be extended with the custom theme's template dir further down.
TEMPLATE_DIRS = (
    os.path.join(ROOT_PATH, 'templates'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
# django-compressor: compile SCSS via django_pyscss, rewrite CSS url()s.
COMPRESS_PRECOMPILERS = (
    ('text/scss', 'django_pyscss.compressor.DjangoScssFilter'),
)
COMPRESS_CSS_FILTERS = (
    'compressor.filters.css_default.CssAbsoluteFilter',
)
COMPRESS_ENABLED = True
COMPRESS_OUTPUT_DIR = 'dashboard'
COMPRESS_CSS_HASHING_METHOD = 'hash'
COMPRESS_PARSER = 'compressor.parser.HtmlParser'
# Installed Django/dashboard applications; extended by the pluggable
# dashboard discovery and ADD_INSTALLED_APPS further down.
INSTALLED_APPS = [
    'openstack_dashboard',
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'django_pyscss',
    'openstack_dashboard.django_pyscss_fix',
    'compressor',
    'horizon',
    'openstack_auth',
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
AUTHENTICATION_BACKENDS = ('openstack_auth.backend.KeystoneBackend',)
AUTHENTICATION_URLS = ['openstack_auth.urls']
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Sessions are stored client-side in signed cookies; see
# SESSION_COOKIE_MAX_SIZE below for the size limit this implies.
SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies'
SESSION_COOKIE_HTTPONLY = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_SECURE = False
# Session timeout, in seconds.
SESSION_TIMEOUT = 1800
# A token can be near the end of validity when a page starts loading, and
# invalid during the rendering which can cause errors when a page load.
# TOKEN_TIMEOUT_MARGIN defines a time in seconds we retrieve from token
# validity to avoid this issue. You can adjust this time depending on the
# performance of the infrastructure.
TOKEN_TIMEOUT_MARGIN = 10
# When using cookie-based sessions, log error when the session cookie exceeds
# the following size (common browsers drop cookies above a certain size):
SESSION_COOKIE_MAX_SIZE = 4093
# when doing upgrades, it may be wise to stick to PickleSerializer
# NOTE(berendt): Check during the K-cycle if this variable can be removed.
# https://bugs.launchpad.net/horizon/+bug/1349463
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Languages offered in the UI language selector.
LANGUAGES = (
    ('de', 'German'),
    ('en', 'English'),
    ('en-au', 'Australian English'),
    ('en-gb', 'British English'),
    ('es', 'Spanish'),
    ('fr', 'French'),
    ('hi', 'Hindi'),
    ('ja', 'Japanese'),
    ('ko', 'Korean (Korea)'),
    ('nl', 'Dutch (Netherlands)'),
    ('pl', 'Polish'),
    ('pt-br', 'Portuguese (Brazil)'),
    ('ru', 'Russian'),
    ('sr', 'Serbian'),
    ('zh-cn', 'Simplified Chinese'),
    ('zh-tw', 'Chinese (Taiwan)'),
)
LANGUAGE_CODE = 'en'
LANGUAGE_COOKIE_NAME = 'horizon_language'
USE_I18N = True
USE_L10N = True
USE_TZ = True
OPENSTACK_KEYSTONE_DEFAULT_ROLE = '_member_'
DEFAULT_EXCEPTION_REPORTER_FILTER = 'horizon.exceptions.HorizonReporterFilter'
POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
POLICY_FILES = {
    'identity': 'keystone_policy.json',
    'compute': 'nova_policy.json',
    'volume': 'cinder_policy.json',
    'image': 'glance_policy.json',
    'orchestration': 'heat_policy.json',
    'network': 'neutron_policy.json',
    'telemetry': 'ceilometer_policy.json',
}
# Both may be supplied by local_settings; fallbacks are computed below.
SECRET_KEY = None
LOCAL_PATH = None
# Predefined security-group rule templates offered in the UI.
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': _('All TCP'),
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_udp': {
        'name': _('All UDP'),
        'ip_protocol': 'udp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_icmp': {
        'name': _('All ICMP'),
        'ip_protocol': 'icmp',
        'from_port': '-1',
        'to_port': '-1',
    },
}
# Extra apps a deployment wants prepended to INSTALLED_APPS (see below).
ADD_INSTALLED_APPS = []
# STATIC directory for custom theme, set as default.
# It can be overridden in local_settings.py
CUSTOM_THEME_PATH = 'static/themes/default'
# Deployment-specific overrides; everything after this point must tolerate
# values changed (or left unset) by local_settings.
try:
    from local.local_settings import * # noqa
except ImportError:
    logging.warning("No local_settings file found.")
# Derive URL settings from WEBROOT unless local_settings set them already.
if not WEBROOT.endswith('/'):
    WEBROOT += '/'
if LOGIN_URL is None:
    LOGIN_URL = WEBROOT + 'auth/login/'
if LOGOUT_URL is None:
    LOGOUT_URL = WEBROOT + 'auth/logout/'
if LOGIN_REDIRECT_URL is None:
    LOGIN_REDIRECT_URL = WEBROOT
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = WEBROOT + 'media/'
if STATIC_ROOT is None:
    STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
if STATIC_URL is None:
    STATIC_URL = WEBROOT + 'static/'
STATICFILES_DIRS = get_staticfiles_dirs(STATIC_URL)
# Absolute path of the active theme (CUSTOM_THEME_PATH is relative to the
# package root).
CUSTOM_THEME = os.path.join(ROOT_PATH, CUSTOM_THEME_PATH)
# If a custom template directory exists within our custom theme, then prepend
# it to our first-come, first-serve TEMPLATE_DIRS.
# Fixed: use the absolute CUSTOM_THEME here -- the existence check already
# uses it, and the previously-prepended relative CUSTOM_THEME_PATH only
# resolved when Django's working directory happened to be the package root.
if os.path.exists(os.path.join(CUSTOM_THEME, 'templates')):
    TEMPLATE_DIRS = \
        (os.path.join(CUSTOM_THEME, 'templates'),) + TEMPLATE_DIRS
# Only expose the subdirectory 'static' if it exists from a custom theme,
# allowing other logic to live with a theme that we might not want to expose
# statically
if os.path.exists(os.path.join(CUSTOM_THEME, 'static')):
    CUSTOM_THEME = os.path.join(CUSTOM_THEME, 'static')
STATICFILES_DIRS.append(
    ('custom', CUSTOM_THEME),
)
# populate HORIZON_CONFIG with auto-discovered JavaScript sources, mock files,
# specs files and external templates.
find_static_files(ROOT_PATH, HORIZON_CONFIG)
# Load the pluggable dashboard settings
import openstack_dashboard.enabled
import openstack_dashboard.local.enabled
from openstack_dashboard.utils import settings
INSTALLED_APPS = list(INSTALLED_APPS) # Make sure it's mutable
# Merge the enabled-dashboard definitions (shipped + local overrides) into
# HORIZON_CONFIG and INSTALLED_APPS in place.
settings.update_dashboards(
    [
        openstack_dashboard.enabled,
        openstack_dashboard.local.enabled,
    ],
    HORIZON_CONFIG,
    INSTALLED_APPS,
)
# Deployment-supplied extra apps are prepended to the app list.
INSTALLED_APPS[0:0] = ADD_INSTALLED_APPS
# Ensure that we always have a SECRET_KEY set, even when no local_settings.py
# file is present. See local_settings.py.example for full documentation on the
# horizon.utils.secret_key module and its use.
if not SECRET_KEY:
    if not LOCAL_PATH:
        LOCAL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  'local')
    from horizon.utils import secret_key
    SECRET_KEY = secret_key.generate_or_read_from_file(os.path.join(LOCAL_PATH,
                                                       '.secret_key_store'))
from openstack_dashboard import policy_backend
POLICY_CHECK_FUNCTION = policy_backend.check
# Add HORIZON_CONFIG to the context information for offline compression
COMPRESS_OFFLINE_CONTEXT = {
    'WEBROOT': WEBROOT,
    'STATIC_URL': STATIC_URL,
    'HORIZON_CONFIG': HORIZON_CONFIG,
}
if DEBUG:
    logging.basicConfig(level=logging.DEBUG)
# during django reloads and an active user is logged in, the monkey
# patch below will not otherwise be applied in time - resulting in developers
# appearing to be logged out. In typical production deployments this section
# below may be omitted, though it should not be harmful
from openstack_auth import utils as auth_utils
auth_utils.patch_middleware_get_user()
| |
import json
import logging
import math
from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPNotFound
from pyramid.response import Response
from pyramid.view import view_config
from recaptcha.client import captcha
from community_csdt.src.models.register import *
class RegisterView(object):
    """Pyramid view class backing the /register resource tree."""

    def __init__(self, context, request):
        """Bind the traversal context and the active request to the view."""
        self.context = context
        self.request = request
# Only users who are not logged in can access the following web page
def nonUsersOnlyAuthorization(self, session):
log = logging.getLogger('csdt')
log.info("register_view.nonUsersOnlyAuthorization()")
# Verifies that only the owner of the web page can perform the following task (Authorization)
if "user_id" in session:
log.warning("user is logged in")
raise HTTPForbidden()
return
# Only users who are owners of the particular web page can access it
def ownerOnlyAuthorization(self, session, user_id):
log = logging.getLogger('csdt')
log.info("register_view.ownerOnlyAuthorization()")
# Verifies that only the owner of the web page can perform the following task (Authorization)
if "user_id" not in session or "username" not in session or session["user_id"] != user_id:
log.warning("user_id is not in session or username is not in session or user_id is not the same as user_id in session")
raise HTTPForbidden()
return
# Only public users can access the particular web page
def publicUserOnlyAuthorization(self, session):
log = logging.getLogger('csdt')
log.info("register_view.publicUserOnlyAuthorization()")
# Verifies that only public users can access page
if "user_id" not in session or "username" not in session or "permissions" not in session or session["permissions"] == "s":
log.warning("user_id is not in session or user is a student")
raise HTTPForbidden()
return
# Verifies that the class_id is an actual class
def verifyClassExistance(self, class_id):
log = logging.getLogger('csdt')
log.info("register_view.verifyClassExistance()")
class_owner = self.context.getClassOwner(class_id)
if class_owner is None:
log.warning("class does not exist")
raise HTTPNotFound()
return class_owner
# Resource url = "/register" - Shows the register page, where the user can register as a public or as a student user
@view_config(context='community_csdt.src.models.register.register.Register', name='', renderer='register.options.mako')
def getRegister(self):
log = logging.getLogger('csdt')
log.info("register_view.getRegister()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
self.nonUsersOnlyAuthorization(session)
return {'session':session}
# Resource url = "/register/accounts/public" - Shows the first part of the public registration portion
@view_config(context='community_csdt.src.models.register.register_public.RegisterPublic', name='', renderer='public.registration.part.1.mako')
def getPublicRegistration(self):
log = logging.getLogger('csdt')
log.info("register_view.getPublicRegistration()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
self.nonUsersOnlyAuthorization(session)
return {'session':session}
# Resource url = "/register/accounts/public/forms" - Verifies that initial user registration conforms to standards. Sends user an email if success.
@view_config(context='community_csdt.src.models.register.register_public.RegisterPublic', name='forms', renderer='json', xhr=True)
def processPublicRegistration(self):
log = logging.getLogger('csdt')
log.debug("register_view.processPublicRegistration()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
if "first_name" not in self.request.params or "last_name" not in self.request.params or "email" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
self.nonUsersOnlyAuthorization(session)
host = self.request.host
recaptcha_private_key = '6Ldi2MYSAAAAALJ_KaLfTzOTAg5iNHqOmvmgaQOg'
captcha_result = captcha.submit(
self.request.params['recaptcha_challenge_field'],
self.request.params['recaptcha_response_field'],
recaptcha_private_key,
host,
)
if not captcha_result.is_valid:
error = captcha_result.error_code
json_hash = {'result': '-1', 'error':error}
json_dump = json.dumps(json_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
first_name = self.request.params['first_name']
last_name = self.request.params['last_name']
email = self.request.params['email']
user_id = self.context.doesEmailExist(email)
if user_id is not None:
json_hash = {'result': '-2'}
json_dump = json.dumps(json_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
plaintext = first_name + ";" + last_name + ";" + email
ciphertext = self.context.encryptUrlQuery(unicode(plaintext))
self.context.createConfirmationLetter(self.request.host_url, email, first_name, last_name, ciphertext)
json_hash = {'result': '0'}
json_dump = json.dumps(json_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
    # Resource url = "/register/accounts/public/new" - Shows the public registration page
    @view_config(context='community_csdt.src.models.register.register_public.RegisterPublic', name='new', renderer='public.registration.part.2.mako')
    def getRegisterPublicUser(self):
        """Render part two of public registration from an emailed link.

        The link carries the user's details encrypted in the ``value`` query
        parameter; they are decrypted, validated, and used to prefill the
        registration form.
        """
        log = logging.getLogger('csdt')
        log.info("register_view.getRegisterPublicUser()")
        log.debug("context = %s" % self.context)
        log.debug("request = %s" % self.request)
        session = self.request.session
        log.debug("request values:")
        for k, v in self.request.params.iteritems():
            log.debug("key = %s value = %s" % (k, v))
        log.debug("session values:")
        for k, v in session.iteritems():
            log.debug("key = %s value = %s" % (k, v))
        if "value" not in self.request.params:
            log.warning("request.params is missing a parameter that is essential")
            raise HTTPNotFound()
        self.nonUsersOnlyAuthorization(session)
        ciphertext = self.request.params["value"]
        log.debug("ciphertext = %s" % ciphertext)
        # An empty string signals a decryption failure in the model layer.
        plaintext = self.context.decryptUrlQuery(ciphertext)
        log.debug("plaintext = %s" % plaintext)
        if plaintext == "":
            log.warning("decryption function has raised an exception")
            msg = "There seems to be an error with the registration link. This can be due to your url being tampered with or because the link has expired. If the problem persists, please re-register for a new account."
            raise HTTPNotFound(body_template=msg)
        # NOTE(review): processPublicRegistration() encrypts only three
        # fields ("first;last;email"), yet four parts are required here and
        # indices 1-3 are read -- presumably decryptUrlQuery() prepends an
        # extra segment (token/timestamp). Confirm against the model
        # implementation.
        plain_list = plaintext.split(";")
        if len(plain_list) != 4:
            log.warning("plaintext has been tampered with")
            msg = "There seems to be an error with the registration link. It seems that the link has been tampered with."
            raise HTTPNotFound(body_template=msg)
        first_name = plain_list[1]
        last_name = plain_list[2]
        email = plain_list[3]
        # The link becomes stale once the address has an account.
        user_id = self.context.doesEmailExist(email)
        if user_id is not None:
            log.warning("email already exists")
            msg = "The registration link has expired. Please re-register for a new account."
            raise HTTPNotFound(body_template=msg)
        return {'full_url':self.request.url, 'email':email, 'first_name':first_name, 'last_name':last_name, 'session':session}
    # Resource url = "/register/accounts/public/new-forms" - Attempts to create a new user account.
    @view_config(context='community_csdt.src.models.register.register_public.RegisterPublic', name='new-forms', renderer='json', xhr=True)
    def processRegisterPublicUser(self):
        """Create a public account and log the new user in.

        JSON results: '0' success, '-1' captcha or account-creation failure,
        '-2' password mismatch, '-3' username taken, '-4' email taken.
        """
        log = logging.getLogger('csdt')
        log.info("register_view.processRegisterPublicUser()")
        log.debug("context = %s" % self.context)
        log.debug("request = %s" % self.request)
        session = self.request.session
        log.debug("request values:")
        for k, v in self.request.params.iteritems():
            log.debug("key = %s value = %s" % (k, v))
        log.debug("session values:")
        for k, v in session.iteritems():
            log.debug("key = %s value = %s" % (k, v))
        if "first_name" not in self.request.params or "last_name" not in self.request.params or "username" not in self.request.params or "password" not in self.request.params or "re_password" not in self.request.params or "email" not in self.request.params:
            log.warning("request.params is missing a parameter that is essential")
            raise HTTPNotFound()
        self.nonUsersOnlyAuthorization(session)
        host = self.request.host
        # NOTE(review): hard-coded reCAPTCHA private key, duplicated across
        # the registration views; should come from configuration.
        recaptcha_private_key = '6Ldi2MYSAAAAALJ_KaLfTzOTAg5iNHqOmvmgaQOg'
        captcha_result = captcha.submit(
            self.request.params['recaptcha_challenge_field'],
            self.request.params['recaptcha_response_field'],
            recaptcha_private_key,
            host,
        )
        if not captcha_result.is_valid:
            error = captcha_result.error_code
            json_hash = {'result': '-1', 'error':error}
            json_dump = json.dumps(json_hash)
            log.debug("json dump = %s" % json_dump)
            return Response(json_dump)
        first_name = self.request.params['first_name']
        last_name = self.request.params['last_name']
        username = self.request.params['username']
        password = self.request.params['password']
        re_password = self.request.params['re_password']
        email = self.request.params['email']
        if password != re_password:
            json_hash = {'result': '-2'}
            json_dump = json.dumps(json_hash)
            log.debug("json dump = %s" % json_dump)
            return Response(json_dump)
        user_id = self.context.doesUsernameExist(username)
        if user_id is not None:
            json_hash = {'result': '-3'}
            json_dump = json.dumps(json_hash)
            log.debug("json dump = %s" % json_dump)
            return Response(json_dump)
        user_id = self.context.doesEmailExist(email)
        if user_id is not None:
            json_hash = {'result': '-4'}
            json_dump = json.dumps(json_hash)
            log.debug("json dump = %s" % json_dump)
            return Response(json_dump)
        user_id = self.context.createPublicAccount(first_name, last_name, username, password, email)
        if user_id is None:
            json_hash = {'result': '-1'}
            json_dump = json.dumps(json_hash)
            log.debug("json dump = %s" % json_dump)
            return Response(json_dump)
        # Auto-login: populate the session from the new user's profile.
        user_info = self.context.getUserProfileInformation(user_id)
        session['user_id'] = str(user_id)
        session['first_name'] = user_info["first_name"]
        session['last_name'] = user_info["last_name"]
        session['email'] = user_info["email"]
        session['permissions'] = user_info["permissions"]
        session['username'] = user_info["username"]
        json_hash = {'result': '0', 'username': username}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
# Resource url = "/register/accounts/check-username?username=" - Checks if username exists in database
@view_config(context='community_csdt.src.models.register.register_account.RegisterAccount', name='check-username', renderer='json', xhr=True)
def checkUsername(self):
log = logging.getLogger('csdt')
log.info("register_view.checkUsername()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
if "username" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
self.nonUsersOnlyAuthorization(session)
username = self.request.params['username']
user_id = self.context.doesUsernameExist(username)
if user_id is not None:
json_hash = {'result': '-1'}
json_dump = json.dumps(json_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
json_hash = {'result': '0'}
json_dump = json.dumps(json_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
# Resource url = "/register/classes/all" - Shows a table of all visible and active classes for registration
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='all', renderer='registable.classes.mako')
def getAllRegistableClasses(self):
log = logging.getLogger('csdt')
log.info("register_view.getAllRegistableClasses()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
return {'session':session}
# Resource url = "/register/classes/all-tables" - Returns all visible and active classes for registration
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='all-tables', renderer='json', xhr=True)
def getAllRegistableClassesTable(self):
log = logging.getLogger('csdt')
log.info("register_view.getAllRegistableClassesTable()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
# JqGrid initialization of the table
if "rows" not in self.request.params or "sidx" not in self.request.params or "sord" not in self.request.params or "page" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
rows = int(self.request.params['rows'])
log.debug("rows = %d" % rows)
num_records = int(self.context.getNumOfRegistableClasses())
log.debug("num_records = %d" % num_records)
total_pages = int(math.ceil((num_records / float(rows))))
log.debug("total - number of pages = %d" % total_pages)
sort_name = self.request.params['sidx']
sort_order = self.request.params['sord']
page_loc = int(self.request.params['page'])
begin_index = (page_loc - 1) * rows
end_index = begin_index + rows
log.debug("begin index value = %d" % begin_index)
log.debug("end index value = %d" % end_index)
if end_index > num_records:
end_index = num_records
class_list = self.context.getAllRegistableClasses(sort_name, sort_order)
class_list = class_list[begin_index:end_index]
class_list_hash = {}
class_list_hash['page'] = page_loc
class_list_hash['total'] = total_pages
class_list_hash['records'] = num_records
class_list_hash['results'] = class_list
for k, v in class_list_hash.iteritems():
log.debug("key = %s value = %s" % (k, v))
json_dump = json.dumps(class_list_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
# Resource url = "/register/classes/all-registerable-tables?user_id=" - Returns all visible and active classes for registration by a user
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='all-registerable-tables', renderer='json', xhr=True)
def getOwnRegistableClassesTable(self):
log = logging.getLogger('csdt')
log.info("register_view.getOwnRegistableClassesTable()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
if "user_id" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
user_id = self.request.params['user_id']
self.ownerOnlyAuthorization(session, user_id)
# JqGrid initialization of the table
if "rows" not in self.request.params or "sidx" not in self.request.params or "sord" not in self.request.params or "page" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
rows = int(self.request.params['rows'])
log.debug("rows = %d" % rows)
num_records = int(self.context.getNumOfRegistableClasses())
log.debug("num_records = %d" % num_records)
total_pages = int(math.ceil((num_records / float(rows))))
log.debug("total - number of pages = %d" % total_pages)
sort_name = self.request.params['sidx']
sort_order = self.request.params['sord']
page_loc = int(self.request.params['page'])
begin_index = (page_loc - 1) * rows
end_index = begin_index + rows
log.debug("begin index value = %d" % begin_index)
log.debug("end index value = %d" % end_index)
if end_index > num_records:
end_index = num_records
class_list = self.context.getAllRegistableClassesForAUser(user_id, sort_name, sort_order)
class_list = class_list[begin_index:end_index]
class_list_hash = {}
class_list_hash['page'] = page_loc
class_list_hash['total'] = total_pages
class_list_hash['records'] = num_records
class_list_hash['results'] = class_list
for k, v in class_list_hash.iteritems():
log.debug("key = %s value = %s" % (k, v))
json_dump = json.dumps(class_list_hash)
log.debug("json dump = %s" % json_dump)
return Response(json_dump)
# Resource url = "/register/classes/sign-up?class_id=" - Shows the sign-up page to register for a particular class
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='sign-up', renderer='class.signup.mako')
def getRegisterClassSigup(self):
log = logging.getLogger('csdt')
log.info("register_view.getRegisterClassSigup()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
if "class_id" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
class_id = self.request.params['class_id']
class_owner = self.verifyClassExistance(class_id)
# Check if user has already registered for the particular class
if "user_id" in session and "username" in session:
result = self.context.isUserApartOfClass(session["user_id"], class_id)
if result is not None:
log.warning("user is not apart of the particular class")
raise HTTPNotFound()
class_info = self.context.getClassDescription(class_id)
if class_info is None:
log.warning("class does not exist")
raise HTTPNotFound()
return {'class_id':class_id, 'classname':class_info["classname"], 'full_url':self.request.url, 'owner':class_owner, 'path_url':self.request.path_url, 'session':session}
    # Resource url = "/register/classes/sign-up-forms?class_id=" - Checks if the password submitted by the user matches that of the class's
    @view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='sign-up-forms', renderer='json', xhr=True)
    def processRegisterClassSignup(self):
        """Check the class password and enroll (or pre-authorize) the user.

        Logged-in users are registered into the class immediately and their
        session's class list is updated; anonymous users are marked in the
        session as having passed the password check so the student
        registration flow can proceed. JSON results: '0' success, '-1'
        wrong password.
        """
        log = logging.getLogger('csdt')
        log.info("register_view.processRegisterClassSignup()")
        log.debug("context = %s" % self.context)
        log.debug("request = %s" % self.request)
        session = self.request.session
        log.debug("request values:")
        for k, v in self.request.params.iteritems():
            log.debug("key = %s value = %s" % (k, v))
        log.debug("session values:")
        for k, v in session.iteritems():
            log.debug("key = %s value = %s" % (k, v))
        if "class_id" not in self.request.params or "password" not in self.request.params:
            log.warning("request.params is missing a parameter that is essential")
            raise HTTPNotFound()
        class_id = self.request.params['class_id']
        # Called for its 404 side effect; the owner value is unused here.
        class_owner = self.verifyClassExistance(class_id)
        # Check if user has already registered for the particular class
        if "user_id" in session and "username" in session:
            result = self.context.isUserApartOfClass(session["user_id"], class_id)
            if result is not None:
                # NOTE(review): this warning text is inverted -- a non-None
                # result means the user IS already part of the class.
                log.warning("user is not apart of the particular class")
                raise HTTPNotFound()
        # Verifies that the given password matches the classroom's password
        password = self.request.params["password"]
        result = self.context.verifyClassPassword(class_id, password)
        if result is None:
            json_hash = {'result': '-1'}
            json_dump = json.dumps(json_hash)
            log.debug("json dump = %s" % json_dump)
            return Response(json_dump)
        # Registers the user into the new class and adds the newly registered class_id into the session variable
        if "user_id" in session and "username" in session:
            self.context.registerClass(session["user_id"], class_id)
            class_info = self.context.getClassDescription(class_id)
            if class_info is None:
                log.warning("class does not exist")
                raise HTTPNotFound()
            if "student_classes" in session:
                student_hash = session["student_classes"]
                student_hash[class_id] = class_info["classname"]
                session["student_classes"] = student_hash
            else:
                student_hash = {}
                student_hash[class_id] = class_info["classname"]
                session["student_classes"] = student_hash
            log.debug("session student_classes = %s" % session["student_classes"])
        else:
            # Verifies that unregistered user has passed the classroom password check
            session["registered_class_id"] = str(class_id)
            log.debug("session[%s] = %s" % ("registered_class_id", session["registered_class_id"]))
        json_hash = {'result': '0'}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
# Resource url = "/register/accounts/student/new?class_id=" - Shows the student registration page
@view_config(context='community_csdt.src.models.register.register_student.RegisterStudent', name='new', renderer='student.registration.mako')
def getStudentRegistration(self):
log = logging.getLogger('csdt')
log.info("register_view.getStudentRegistration()")
log.debug("context = %s" % self.context)
log.debug("request = %s" % self.request)
session = self.request.session
log.debug("request values:")
for k, v in self.request.params.iteritems():
log.debug("key = %s value = %s" % (k, v))
log.debug("session values:")
for k, v in session.iteritems():
log.debug("key = %s value = %s" % (k, v))
if "class_id" not in self.request.params:
log.warning("request.params is missing a parameter that is essential")
raise HTTPNotFound()
self.nonUsersOnlyAuthorization(session)
class_id = self.request.params['class_id']
class_owner = self.verifyClassExistance(class_id)
# Verifies that unregistered user has passed the classroom password check
if "registered_class_id" not in session or int(class_id) != int(session["registered_class_id"]):
log.warning("user has not passed classroom password verification")
raise HTTPNotFound()
class_info = self.context.getClassDescription(class_id)
if class_info is None:
log.warning("class does not exist")
raise HTTPNotFound()
return {'class_id':class_id, 'classname':class_info["classname"], 'full_url':self.request.url, 'owner':class_owner, 'path_url':self.request.path_url, 'session':session}
# Resource url = "/register/accounts/student/new-forms?class_id=" - Attempts to create a new user student account. Returns 0 if success.
@view_config(context='community_csdt.src.models.register.register_student.RegisterStudent', name='new-forms', renderer='json', xhr=True)
def processStudentRegistration(self):
    """Create a new student account inside a classroom (AJAX endpoint).

    JSON result codes: '0' success, '-1' captcha/creation failure,
    '-2' password mismatch, '-3' username already taken.
    Raises HTTPNotFound when any required parameter is missing.
    """
    log = logging.getLogger('csdt')
    log.info("register_view.processStudentRegistration()")
    log.debug("context = %s" % self.context)
    log.debug("request = %s" % self.request)
    session = self.request.session
    log.debug("request values:")
    for k, v in self.request.params.iteritems():
        log.debug("key = %s value = %s" % (k, v))
    log.debug("session values:")
    for k, v in session.iteritems():
        log.debug("key = %s value = %s" % (k, v))
    # BUGFIX: the recaptcha fields are read below but were not checked
    # here, so a request missing them raised KeyError (HTTP 500) instead
    # of the HTTPNotFound used for every other missing parameter.
    required = ("class_id", "first_name", "last_name", "username",
                "password", "re_password",
                "recaptcha_challenge_field", "recaptcha_response_field")
    if any(name not in self.request.params for name in required):
        log.warning("request.params is missing a parameter that is essential")
        raise HTTPNotFound()
    self.nonUsersOnlyAuthorization(session)
    class_id = self.request.params['class_id']
    # Raises if the class does not exist; owner is not needed here.
    class_owner = self.verifyClassExistance(class_id)
    host = self.request.host
    # NOTE(review): secret key hard-coded in source; should live in config.
    recaptcha_private_key = '6Ldi2MYSAAAAALJ_KaLfTzOTAg5iNHqOmvmgaQOg'
    captcha_result = captcha.submit(
        self.request.params['recaptcha_challenge_field'],
        self.request.params['recaptcha_response_field'],
        recaptcha_private_key,
        host,
    )
    if not captcha_result.is_valid:
        error = captcha_result.error_code
        json_hash = {'result': '-1', 'error': error}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    first_name = self.request.params['first_name']
    last_name = self.request.params['last_name']
    username = self.request.params['username']
    password = self.request.params['password']
    re_password = self.request.params['re_password']
    if password != re_password:
        json_hash = {'result': '-2'}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    user_id = self.context.doesUsernameExist(username)
    if user_id is not None:
        json_hash = {'result': '-3'}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    user_id = self.context.createStudentAccount(first_name, last_name, username, password, class_id)
    if user_id is None:
        json_hash = {'result': '-1'}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    # Log the new student in by populating the session from the profile.
    user_info = self.context.getUserProfileInformation(user_id)
    session['user_id'] = str(user_id)
    session['first_name'] = user_info["first_name"]
    session['last_name'] = user_info["last_name"]
    session['email'] = user_info["email"]
    session['permissions'] = user_info["permissions"]
    session['username'] = user_info["username"]
    class_info = self.context.getClassDescription(class_id)
    if class_info is None:
        log.warning("class does not exist")
        raise HTTPNotFound()
    student_hash = {}
    student_hash[class_id] = class_info["classname"]
    session["student_classes"] = student_hash
    json_hash = {'result': '0', 'username': username}
    json_dump = json.dumps(json_hash)
    log.debug("json dump = %s" % json_dump)
    return Response(json_dump)
# Resource url = "/register/classes/new" - Shows a registration form for the creation of a new class
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='new', renderer='create.class.mako')
def createClass(self):
    """Render the classroom-creation form for a logged-in (public) user."""
    log = logging.getLogger('csdt')
    log.info("register_view.createClass()")
    log.debug("context = %s" % self.context)
    log.debug("request = %s" % self.request)
    request = self.request
    session = request.session
    # Dump request/session state for debugging.
    log.debug("request values:")
    for key, value in request.params.iteritems():
        log.debug("key = %s value = %s" % (key, value))
    log.debug("session values:")
    for key, value in session.iteritems():
        log.debug("key = %s value = %s" % (key, value))
    self.publicUserOnlyAuthorization(session)
    return {'full_url': request.url, 'session': session}
# Resource url = "/register/classes/check-classname" - Checks if classname exists in database
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='check-classname', renderer='json', xhr=True)
def checkClassname(self):
    """AJAX check of classname availability: '-1' if taken, '0' if free."""
    log = logging.getLogger('csdt')
    log.info("register_view.checkClassname()")
    log.debug("context = %s" % self.context)
    log.debug("request = %s" % self.request)
    session = self.request.session
    # Dump request/session state for debugging.
    log.debug("request values:")
    for key, value in self.request.params.iteritems():
        log.debug("key = %s value = %s" % (key, value))
    log.debug("session values:")
    for key, value in session.iteritems():
        log.debug("key = %s value = %s" % (key, value))
    if "classname" not in self.request.params:
        log.warning("request.params is missing a parameter that is essential")
        raise HTTPNotFound()
    self.publicUserOnlyAuthorization(session)
    classname = self.request.params['classname']
    taken = self.context.doesClassnameExist(classname) is not None
    json_hash = {'result': '-1'} if taken else {'result': '0'}
    json_dump = json.dumps(json_hash)
    log.debug("json dump = %s" % json_dump)
    return Response(json_dump)
# Resource url = "/register/classes/new-forms" - Attempts to create a new class. Returns 0 if success.
@view_config(context='community_csdt.src.models.register.register_class.RegisterClass', name='new-forms', renderer='json', xhr=True)
def processCreateClassroom(self):
    """Create a new classroom owned by the current user (AJAX endpoint).

    JSON result codes: '0' success, '-1' captcha failure,
    '-2' password mismatch, '-3' classname already taken.
    Raises HTTPNotFound when any required parameter is missing.
    """
    log = logging.getLogger('csdt')
    log.info("register_view.processCreateClassroom()")
    log.debug("context = %s" % self.context)
    log.debug("request = %s" % self.request)
    session = self.request.session
    log.debug("request values:")
    for k, v in self.request.params.iteritems():
        log.debug("key = %s value = %s" % (k, v))
    log.debug("session values:")
    for k, v in session.iteritems():
        log.debug("key = %s value = %s" % (k, v))
    # BUGFIX: the recaptcha fields are read below but were not checked
    # here, so a request missing them raised KeyError (HTTP 500) instead
    # of the HTTPNotFound used for every other missing parameter.
    required = ("classname", "comment_flag_level", "password", "re_password",
                "recaptcha_challenge_field", "recaptcha_response_field")
    if any(name not in self.request.params for name in required):
        log.warning("request.params is missing a parameter that is essential")
        raise HTTPNotFound()
    self.publicUserOnlyAuthorization(session)
    host = self.request.host
    # NOTE(review): secret key hard-coded in source; should live in config.
    recaptcha_private_key = '6Ldi2MYSAAAAALJ_KaLfTzOTAg5iNHqOmvmgaQOg'
    captcha_result = captcha.submit(
        self.request.params['recaptcha_challenge_field'],
        self.request.params['recaptcha_response_field'],
        recaptcha_private_key,
        host,
    )
    if not captcha_result.is_valid:
        error = captcha_result.error_code
        json_hash = {'result': '-1', 'error': error}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    classname = self.request.params['classname']
    level = self.request.params['comment_flag_level']
    password = self.request.params['password']
    re_password = self.request.params['re_password']
    if password != re_password:
        json_hash = {'result': '-2'}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    if self.context.doesClassnameExist(classname) is not None:
        json_hash = {'result': '-3'}
        json_dump = json.dumps(json_hash)
        log.debug("json dump = %s" % json_dump)
        return Response(json_dump)
    class_id = self.context.createClass(session["user_id"], classname, password, level)
    # Merge the new class into the teacher's session map; reassign the key
    # so the session backend registers the change.
    teacher_hash = session.get("teacher_classes", {})
    teacher_hash[class_id] = classname
    session["teacher_classes"] = teacher_hash
    log.debug("session teacher_classes = %s" % session["teacher_classes"])
    json_hash = {'result': '0', 'owner': session['username']}
    json_dump = json.dumps(json_hash)
    log.debug("json dump = %s" % json_dump)
    return Response(json_dump)
| |
# -*- coding:utf-8 -*-
"""
tests.backend
~~~~~~~~~~~~~

Integration tests for the redis backend's collection-index, metadata and
cache operations (exercised through the ``arb`` fixture).

:copyright: (c) 2015 by Jason Lai.
:license: BSD, see LICENSE for more details.
"""
from faker import Factory
from functools import partial

# Module-wide fake-data generator shared by the tests below.
fake = Factory.create()
def test_redis_backend_basic(loop, arb, fake_manager, fake_coll):
    """Round-trip collection-index set/get, plus lookups of unknown names."""
    loop.run_until_complete(arb.init_connection())
    fake_manager.collmap = {'t1': fake_coll, 't2': fake_coll}

    async def _check_index_roundtrip():
        # Store one index entry per collection, then read each back.
        for coll_name, coll in fake_manager.collmap.items():
            await arb.set_collection_index(coll_name, coll)
        expected_klass = fake_coll.__class__.__name__
        for coll_name in fake_manager.collmap:
            stored = await arb.get_collection_index(coll_name)
            assert stored == [coll_name, expected_klass]
    loop.run_until_complete(_check_index_roundtrip())

    # ---------------------- check get all indexes ----------------------
    async def _check_all_indexes():
        indexes = await arb.get_collection_indexes()
        assert indexes == {'t1': '_t', 't2': '_t'}
        # if name not exist get_collection_index should return None
        assert await arb.get_collection_index('not-exists') is None
    loop.run_until_complete(_check_all_indexes())
def test_redis_backend_metadata(loop, arb, fake_coll):
    """Exercise metadata set / query / delete operations of the redis backend."""
    loop.run_until_complete(arb.init_connection())
    # 10 random tagging names and 10 (expire_time, timestamp) pairs.
    taggings = [fake.domain_name() for i in range(10)]
    ts_pairs = [(exp, exp-100) for exp in range(200, 300, 10)]
    first_ts, mid_ts, last_ts = ts_pairs[0][1], ts_pairs[4][1], ts_pairs[-1][1]
    args = ['hello', 'world', 42]
    # ---------------- check metadata set and query operation ----------------
    async def _routine_md_set_query():
        for i, pair in enumerate(ts_pairs, 1):
            exp, ts = pair
            for t in taggings:
                await arb.set_collection_metadata(fake_coll, t, exp, ts, *args)
            assert await arb.get_collection_length(fake_coll) == [i]
            # NOTE: ``t`` deliberately still holds the last tagging from above.
            rv = await arb.query_collection_metadata(fake_coll, t, 0, 1000)
            assert len(rv) == i
            assert rv[i-1] == ([exp] + args, ts)
            rv = await arb.query_collection_metadata_tagging(fake_coll, 0, 1000)
            assert len(rv) == i
            assert len(rv[ts]) == len(taggings)
            rv = await arb.query_collection_metadata_all(fake_coll, 0, 1000)
            assert len(rv) == i
            assert len(rv[ts]) == len(taggings)
            for info in rv[ts].values():
                assert info == [exp] + args
    loop.run_until_complete(_routine_md_set_query())
    # ------------------- check metadata delete operations -------------------
    async def _routine_md_del_ope():
        t = taggings[-1]
        # delete one tagging info in first ts
        await arb.del_collection_metadata_by_range(fake_coll, taggings[0],
                                                   first_ts, first_ts)
        rv = await arb.query_collection_metadata(fake_coll, t, 0, 1000)
        assert len(rv) == len(ts_pairs)
        rv = await arb.query_collection_metadata_tagging(fake_coll, 0, 1000)
        assert len(rv) == len(ts_pairs)
        assert len(rv[first_ts]) == len(taggings) - 1
        assert len(rv[last_ts]) == len(rv[mid_ts]) == len(taggings)
        assert await arb.get_collection_length(fake_coll) == [len(taggings)]
        # delete all the remaining taggings in first ts
        for t in taggings[1:]:
            await arb.del_collection_metadata_by_range(fake_coll, t,
                                                       first_ts, first_ts)
        rv = await arb.query_collection_metadata(fake_coll, t, 0, 1000)
        assert len(rv) == len(ts_pairs) - 1
        rv = await arb.query_collection_metadata_tagging(fake_coll, 0, 1000)
        assert len(rv) == len(ts_pairs) - 1
        assert first_ts not in rv
        assert len(rv[last_ts]) == len(rv[mid_ts]) == len(taggings)
        assert await arb.get_collection_length(fake_coll) == [len(taggings) - 1]
        # delete all taggings info in the last five ts
        for exp, ts in ts_pairs[-5:]:
            for t in taggings:
                await arb.del_collection_metadata_by_range(fake_coll, t, ts, ts)
        rv = await arb.query_collection_metadata(fake_coll, t, 0, 1000)
        assert len(rv) == len(ts_pairs) - 6
        rv = await arb.query_collection_metadata_tagging(fake_coll, 0, 1000)
        assert len(rv) == len(ts_pairs) - 6
        assert first_ts not in rv and last_ts not in rv
        assert len(rv[mid_ts]) == len(taggings)
        assert await arb.get_collection_length(fake_coll) == [len(taggings) - 6]
    loop.run_until_complete(_routine_md_del_ope())
    # ------------------ check no metadata exists situations ------------------
    async def _routine_md_final_ope():
        # delete a nonexistent ts
        await arb.del_collection_metadata_by_range(fake_coll, taggings[4],
                                                   9999, 9999)
        # delete a nonexistent tagging in mid_ts (twice: second is a no-op)
        await arb.del_collection_metadata_by_range(fake_coll, taggings[4],
                                                   mid_ts, mid_ts)
        await arb.del_collection_metadata_by_range(fake_coll, taggings[4],
                                                   mid_ts, mid_ts)
        # query a nonexistent ts range
        assert await arb.query_collection_metadata(fake_coll, mid_ts, 9999, 9999) is None
        assert await arb.query_collection_metadata_tagging(fake_coll, 9999, 9999) is None
        assert await arb.query_collection_metadata_all(fake_coll, 9999, 9999) is None
    loop.run_until_complete(_routine_md_final_ope())
async def _add_inc_coll_item(rb, coll, tagging, ts, value):
    """Register metadata for ``ts`` and stash ``value`` in the inc-coll cache."""
    field = _mk_inc_coll_field(tagging, ts)
    await rb.set_collection_metadata(coll, tagging, ts + 100, ts)
    await rb.inc_coll_cache_set(coll, field, value)
def _mk_inc_coll_field(tagging, ts):
field_key = '{}:{}'.format(ts, tagging)
return field_key
async def _assert_inc_coll_cache_size(rb, coll, cache_len, md_len):
    """Assert the backend reports the expected metadata and cache lengths."""
    md_actual, cache_actual = await rb.get_collection_length(
        coll, klass="IncreaseCollection")
    assert md_actual == md_len
    assert cache_actual == cache_len
def test_redis_backend_inc_coll(loop, arb, fake_coll):
    """Exercise IncreaseCollection cache add / get / delete operations."""
    loop.run_until_complete(arb.init_connection())
    tagging, other_tagging = 'day', 'for_diff'
    v = {i: i for i in range(20)}
    timestamps = [100, 110, 120, 130, 140]
    assert_cache_size = partial(_assert_inc_coll_cache_size, arb, fake_coll)
    # ---------------- check the operation of item adding ----------------
    async def _routine_add_ope():
        for ts in timestamps:
            await _add_inc_coll_item(arb, fake_coll, tagging, ts, v)
        # double adding for checking the logic of duplicate handling
        for ts in timestamps:
            await _add_inc_coll_item(arb, fake_coll, tagging, ts, v)
        # adding the other_tagging for the cache size check below
        for ts in timestamps:
            await _add_inc_coll_item(arb, fake_coll, other_tagging, ts, v)
        print('Success Adding datas...\n\n\n')
        # 2 taggings x 5 ts = 10 cache fields; 5 distinct metadata ts.
        await assert_cache_size(10, 5)
    loop.run_until_complete(_routine_add_ope())
    # ------------------ check the cache data get operations ------------------
    async def _routine_get_ope():
        fields = [_mk_inc_coll_field(tagging, ts) for ts in timestamps]
        rv = await arb.inc_coll_caches_get(fake_coll, *fields)
        for r in rv:
            assert r == v
        await arb.inc_coll_caches_del(fake_coll, *fields)
        rv = await arb.inc_coll_caches_get(fake_coll, *fields)
        for r in rv:
            assert r is None
        await assert_cache_size(5, 5)
        # if no fields specified
        assert await arb.inc_coll_caches_get(fake_coll) == []
    loop.run_until_complete(_routine_get_ope())
    # ---------------- check for the inc_coll_keys_delete ----------------
    async def _routine_del_ope():
        await assert_cache_size(5, 5)
        await arb.delete_collection_keys(fake_coll, klass="IncreaseCollection")
        await assert_cache_size(0, 0)
    loop.run_until_complete(_routine_del_ope())
def test_redis_backend_unique_count_coll(loop, arb, fake_coll):
    """Exercise unique-count collection cache set / get / pop / del operations."""
    loop.run_until_complete(arb.init_connection())
    items_num = 200
    tagging = 'day'
    # 200 unique uuid members stored under each timestamp.
    v = {fake.uuid4() for i in range(items_num)}
    timestamps = [100, 200, 300]
    # ----------- check the operation of item adding and getting ----------
    async def _routine_add_get_ope():
        for ts in timestamps:
            rv = await arb.uniq_count_coll_cache_set(fake_coll, ts, tagging, v)
            assert rv == items_num
            # re-adding the same members reports zero new items
            rv = await arb.uniq_count_coll_cache_set(fake_coll, ts, tagging, v)
            assert rv == 0
        rv = await arb.uniq_count_coll_cache_get(fake_coll, tagging, timestamps)
        for item in rv:
            assert item == v
            assert len(item) == items_num
        rv = await arb.uniq_count_coll_cache_get(fake_coll, tagging,
                                                 timestamps, count_only=True)
        for count in rv:
            assert count == items_num
    loop.run_until_complete(_routine_add_get_ope())
    # ---------------- check for the operation of deleting ----------------
    async def _routine_del_ope():
        rv = await arb.uniq_count_coll_cache_del(fake_coll, tagging,
                                                 timestamps[0:1])
        assert rv == 1
        rv = await arb.uniq_count_coll_cache_get(fake_coll, tagging,
                                                 timestamps[0:1])
        assert rv == [set()]
        rv = await arb.uniq_count_coll_cache_get(fake_coll, tagging,
                                                 timestamps[1:])
        for item in rv:
            assert item == v
            assert len(item) == items_num
        # uniq_count_coll_cache_pop 50 items
        rv = await arb.uniq_count_coll_cache_pop(fake_coll, tagging,
                                                 timestamps[1:], 50)
        for item in rv:
            assert len(item) == 50
        rv = await arb.uniq_count_coll_cache_get(fake_coll, tagging,
                                                 timestamps[1:])
        for item in rv:
            assert len(item) == items_num - 50
        # delete remaining items
        rv = await arb.uniq_count_coll_cache_del(fake_coll, tagging,
                                                 timestamps[1:])
        assert rv == 2
        rv = await arb.uniq_count_coll_cache_get(fake_coll, tagging,
                                                 timestamps)
        assert rv == [set(), set(), set()]
    loop.run_until_complete(_routine_del_ope())
def test_redis_backend_sorted_count_coll(loop, arb, fake_coll):
    """Exercise sorted-count collection cache set / get / del operations."""
    loop.run_until_complete(arb.init_connection())
    tagging = 'day'
    v = {fake.uuid4(): i for i in range(200)}
    # expected result: (member, score) pairs sorted by ascending score
    v2 = [(member, score) for member, score in v.items()]
    v2 = sorted(v2, key=lambda x: x[1])
    timestamps = [100, 200, 300]
    # ----------- check the operation of item adding and getting ----------
    async def _routine_add_get_ope():
        for ts in timestamps:
            rv = await arb.sorted_count_coll_cache_set(fake_coll, ts, tagging, v)
            assert rv == 200
        rv = await arb.sorted_count_coll_cache_get(fake_coll, tagging,
                                                   timestamps)
        for item in rv:
            assert item == v2
        # topN=100 keeps only the 100 highest-scored members
        rv = await arb.sorted_count_coll_cache_get(fake_coll, tagging,
                                                   timestamps, topN=100)
        for item in rv:
            assert item == v2[100:]
    loop.run_until_complete(_routine_add_get_ope())
    # ---------------- check for the operation of deleting ----------------
    async def _routine_del_ope():
        rv = await arb.sorted_count_coll_cache_del(fake_coll, tagging,
                                                   timestamps[0:1])
        assert rv == 1
        rv = await arb.sorted_count_coll_cache_get(fake_coll, tagging,
                                                   timestamps[0:1])
        assert rv == [[]]
        rv = await arb.sorted_count_coll_cache_get(fake_coll, tagging,
                                                   timestamps[1:])
        for item in rv:
            assert item == v2
        rv = await arb.sorted_count_coll_cache_del(fake_coll, tagging,
                                                   timestamps[1:])
        assert rv == 2
        rv = await arb.sorted_count_coll_cache_get(fake_coll, tagging,
                                                   timestamps)
        assert rv == [[], [], []]
    loop.run_until_complete(_routine_del_ope())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for requests_kerberos."""
from mock import Mock, patch
import requests
import kerberos
import requests_kerberos
import unittest
# kerberos.authGSSClientInit() is called with the service name (HTTP@FQDN) and
# returns 1 and a kerberos context object on success. Returns -1 on failure.
clientInit_complete = Mock(return_value=(1, "CTX"))
clientInit_error = Mock(return_value=(-1, "CTX"))

# kerberos.authGSSClientStep() is called with the kerberos context object
# returned by authGSSClientInit and the negotiate auth token provided in the
# http response's www-authenticate header. It returns 0 or 1 on success. 0
# indicates that authentication is progressing but not complete.
clientStep_complete = Mock(return_value=1)
clientStep_continue = Mock(return_value=0)
clientStep_error = Mock(return_value=-1)
clientStep_exception = Mock(side_effect=kerberos.GSSError)

# kerberos.authGSSClientResponse() is called with the kerberos context which
# was initially returned by authGSSClientInit and had been mutated by a call by
# authGSSClientStep. It returns a string.
clientResponse = Mock(return_value="GSSRESPONSE")

# Note: we're not using the @mock.patch decorator:
# > My only word of warning is that in the past, the patch decorator hides
# > tests when using the standard unittest library.
# > -- sigmavirus24 in https://github.com/requests/requests-kerberos/issues/1
class KerberosTestCase(unittest.TestCase):
    """Tests for HTTPKerberosAuth, driving the module-level kerberos mocks."""

    def setUp(self):
        """Setup."""
        # Reset call records so each test's assert_called_with is isolated.
        clientInit_complete.reset_mock()
        clientInit_error.reset_mock()
        clientStep_complete.reset_mock()
        clientStep_continue.reset_mock()
        clientStep_error.reset_mock()
        clientStep_exception.reset_mock()
        clientResponse.reset_mock()

    def tearDown(self):
        """Teardown."""
        pass

    def test_negotate_value_extraction(self):
        # Token after "negotiate" in www-authenticate is extracted.
        response = requests.Response()
        response.headers = {'www-authenticate': 'negotiate token'}
        self.assertEqual(
            requests_kerberos.kerberos_._negotiate_value(response),
            'token'
        )

    def test_negotate_value_extraction_none(self):
        # No www-authenticate header -> no negotiate value.
        response = requests.Response()
        response.headers = {}
        self.assertTrue(
            requests_kerberos.kerberos_._negotiate_value(response) is None
        )

    def test_generate_request_header(self):
        # Successful init + step -> header built from the GSS response.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_complete,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_continue):
            response = requests.Response()
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            auth = requests_kerberos.HTTPKerberosAuth()
            self.assertEqual(
                auth.generate_request_header(response),
                "Negotiate GSSRESPONSE"
            )
            clientInit_complete.assert_called_with("HTTP@www.example.org")
            clientStep_continue.assert_called_with("CTX", "token")
            clientResponse.assert_called_with("CTX")

    def test_generate_request_header_init_error(self):
        # Failed client init -> no header, and step/response never called.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_error,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_continue):
            response = requests.Response()
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            auth = requests_kerberos.HTTPKerberosAuth()
            self.assertEqual(
                auth.generate_request_header(response),
                None
            )
            clientInit_error.assert_called_with("HTTP@www.example.org")
            self.assertFalse(clientStep_continue.called)
            self.assertFalse(clientResponse.called)

    def test_generate_request_header_step_error(self):
        # Failed client step -> no header, and response never requested.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_complete,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_error):
            response = requests.Response()
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            auth = requests_kerberos.HTTPKerberosAuth()
            self.assertEqual(
                auth.generate_request_header(response),
                None
            )
            clientInit_complete.assert_called_with("HTTP@www.example.org")
            clientStep_error.assert_called_with("CTX", "token")
            self.assertFalse(clientResponse.called)

    def test_authenticate_user(self):
        # A 401 response is retried with an Authorization header; the retry
        # result is returned with the original response in its history.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_complete,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_continue):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken'}
            connection = Mock()
            connection.send = Mock(return_value=response_ok)
            raw = Mock()
            raw.release_conn = Mock(return_value=None)
            request = requests.Request()
            response = requests.Response()
            response.request = request
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            response.status_code = 401
            response.connection = connection
            response._content = ""
            response.raw = raw
            auth = requests_kerberos.HTTPKerberosAuth()
            r = auth.authenticate_user(response)
            self.assertTrue(response in r.history)
            self.assertEqual(r, response_ok)
            self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
            connection.send.assert_called_with(request)
            raw.release_conn.assert_called_with()
            clientInit_complete.assert_called_with("HTTP@www.example.org")
            clientStep_continue.assert_called_with("CTX", "token")
            clientResponse.assert_called_with("CTX")

    def test_handle_401(self):
        # handle_401 behaves like authenticate_user for a 401 response.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_complete,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_continue):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken'}
            connection = Mock()
            connection.send = Mock(return_value=response_ok)
            raw = Mock()
            raw.release_conn = Mock(return_value=None)
            request = requests.Request()
            response = requests.Response()
            response.request = request
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            response.status_code = 401
            response.connection = connection
            response._content = ""
            response.raw = raw
            auth = requests_kerberos.HTTPKerberosAuth()
            r = auth.handle_401(response)
            self.assertTrue(response in r.history)
            self.assertEqual(r, response_ok)
            self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
            connection.send.assert_called_with(request)
            raw.release_conn.assert_called_with()
            clientInit_complete.assert_called_with("HTTP@www.example.org")
            clientStep_continue.assert_called_with("CTX", "token")
            clientResponse.assert_called_with("CTX")

    def test_authenticate_server(self):
        # The server's negotiate token is verified via an extra client step.
        with patch.multiple('kerberos', authGSSClientStep=clientStep_complete):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken',
                                   'authorization': 'Negotiate GSSRESPONSE'
                                   }
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.context = {"www.example.org": "CTX"}
            result = auth.authenticate_server(response_ok)
            self.assertTrue(result)
            clientStep_complete.assert_called_with("CTX", "servertoken")

    def test_handle_other(self):
        # Non-401 responses pass through after mutual authentication.
        with patch('kerberos.authGSSClientStep', clientStep_complete):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken',
                                   'authorization': 'Negotiate GSSRESPONSE'
                                   }
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.context = {"www.example.org": "CTX"}
            r = auth.handle_other(response_ok)
            self.assertEqual(r, response_ok)
            clientStep_complete.assert_called_with("CTX", "servertoken")

    def test_handle_response_200(self):
        # A 200 with a valid server token is returned unchanged.
        with patch('kerberos.authGSSClientStep', clientStep_complete):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken',
                                   'authorization': 'Negotiate GSSRESPONSE'
                                   }
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.context = {"www.example.org": "CTX"}
            r = auth.handle_response(response_ok)
            self.assertEqual(r, response_ok)
            clientStep_complete.assert_called_with("CTX", "servertoken")

    def test_handle_response_200_mutual_auth_required_failure(self):
        # Required mutual auth with no server headers -> raises before any step.
        with patch('kerberos.authGSSClientStep', clientStep_error):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {}
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.context = {"www.example.org": "CTX"}
            self.assertRaises(requests_kerberos.MutualAuthenticationError,
                              auth.handle_response,
                              response_ok)
            self.assertFalse(clientStep_error.called)

    def test_handle_response_200_mutual_auth_required_failure_2(self):
        # GSSError during the verification step -> MutualAuthenticationError.
        with patch('kerberos.authGSSClientStep', clientStep_exception):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken',
                                   'authorization': 'Negotiate GSSRESPONSE'
                                   }
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.context = {"www.example.org": "CTX"}
            self.assertRaises(requests_kerberos.MutualAuthenticationError,
                              auth.handle_response,
                              response_ok)
            clientStep_exception.assert_called_with("CTX", "servertoken")

    def test_handle_response_200_mutual_auth_optional_hard_failure(self):
        # OPTIONAL mode still raises when the server token fails verification.
        with patch('kerberos.authGSSClientStep', clientStep_error):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken',
                                   'authorization': 'Negotiate GSSRESPONSE'
                                   }
            auth = requests_kerberos.HTTPKerberosAuth(requests_kerberos.OPTIONAL)
            auth.context = {"www.example.org": "CTX"}
            self.assertRaises(requests_kerberos.MutualAuthenticationError,
                              auth.handle_response,
                              response_ok)
            clientStep_error.assert_called_with("CTX", "servertoken")

    def test_handle_response_200_mutual_auth_optional_soft_failure(self):
        # OPTIONAL mode passes through when the server offered no token.
        with patch('kerberos.authGSSClientStep', clientStep_error):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            auth = requests_kerberos.HTTPKerberosAuth(requests_kerberos.OPTIONAL)
            auth.context = {"www.example.org": "CTX"}
            r = auth.handle_response(response_ok)
            self.assertEqual(r, response_ok)
            self.assertFalse(clientStep_error.called)

    def test_handle_response_500_mutual_auth_required_failure(self):
        # Required mutual auth on a non-2xx: response is replaced by a
        # sanitized copy (same status/metadata, no headers/content/cookies).
        with patch('kerberos.authGSSClientStep', clientStep_error):
            response_500 = requests.Response()
            response_500.url = "http://www.example.org/"
            response_500.status_code = 500
            response_500.headers = {}
            response_500.request = "REQUEST"
            response_500.connection = "CONNECTION"
            response_500._content = "CONTENT"
            response_500.encoding = "ENCODING"
            response_500.raw = "RAW"
            response_500.cookies = "COOKIES"
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.context = {"www.example.org": "CTX"}
            r = auth.handle_response(response_500)
            self.assertNotEqual(r, response_500)
            self.assertNotEqual(r.headers, response_500.headers)
            self.assertEqual(r.status_code, response_500.status_code)
            self.assertEqual(r.encoding, response_500.encoding)
            self.assertEqual(r.raw, response_500.raw)
            self.assertEqual(r.url, response_500.url)
            self.assertEqual(r.reason, response_500.reason)
            self.assertEqual(r.connection, response_500.connection)
            self.assertEqual(r.content, b'')
            self.assertNotEqual(r.cookies, response_500.cookies)
            self.assertFalse(clientStep_error.called)

    def test_handle_response_500_mutual_auth_optional_failure(self):
        # OPTIONAL mode returns the non-2xx response untouched.
        with patch('kerberos.authGSSClientStep', clientStep_error):
            response_500 = requests.Response()
            response_500.url = "http://www.example.org/"
            response_500.status_code = 500
            response_500.headers = {}
            response_500.request = "REQUEST"
            response_500.connection = "CONNECTION"
            response_500._content = "CONTENT"
            response_500.encoding = "ENCODING"
            response_500.raw = "RAW"
            response_500.cookies = "COOKIES"
            auth = requests_kerberos.HTTPKerberosAuth(requests_kerberos.OPTIONAL)
            auth.context = {"www.example.org": "CTX"}
            r = auth.handle_response(response_500)
            self.assertEqual(r, response_500)
            self.assertFalse(clientStep_error.called)

    def test_handle_response_401(self):
        # Full handle_response flow for a 401: retried request succeeds and
        # the follow-up response is routed through handle_other.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_complete,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_continue):
            response_ok = requests.Response()
            response_ok.url = "http://www.example.org/"
            response_ok.status_code = 200
            response_ok.headers = {'www-authenticate': 'negotiate servertoken'}
            connection = Mock()
            connection.send = Mock(return_value=response_ok)
            raw = Mock()
            raw.release_conn = Mock(return_value=None)
            request = requests.Request()
            response = requests.Response()
            response.request = request
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            response.status_code = 401
            response.connection = connection
            response._content = ""
            response.raw = raw
            auth = requests_kerberos.HTTPKerberosAuth()
            auth.handle_other = Mock(return_value=response_ok)
            r = auth.handle_response(response)
            self.assertTrue(response in r.history)
            auth.handle_other.assert_called_with(response_ok)
            self.assertEqual(r, response_ok)
            self.assertEqual(request.headers['Authorization'], 'Negotiate GSSRESPONSE')
            connection.send.assert_called_with(request)
            raw.release_conn.assert_called_with()
            clientInit_complete.assert_called_with("HTTP@www.example.org")
            clientStep_continue.assert_called_with("CTX", "token")
            clientResponse.assert_called_with("CTX")

    def test_generate_request_header_custom_service(self):
        # A custom service name is used in place of "HTTP" for client init.
        with patch.multiple('kerberos',
                            authGSSClientInit=clientInit_error,
                            authGSSClientResponse=clientResponse,
                            authGSSClientStep=clientStep_continue):
            response = requests.Response()
            response.url = "http://www.example.org/"
            response.headers = {'www-authenticate': 'negotiate token'}
            auth = requests_kerberos.HTTPKerberosAuth(service="barfoo")
            # NOTE(review): stray trailing comma below makes this a discarded
            # one-element tuple; harmless but likely unintended.
            auth.generate_request_header(response),
            clientInit_error.assert_called_with("barfoo@www.example.org")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
"""
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one or one-to-one
relation: ``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
If you're looking for ``ForwardOneToOneDescriptor``, use
``ForwardManyToOneDescriptor`` instead.
2. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
3. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
4. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from __future__ import unicode_literals
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor(object):
    """
    Accessor to the related object on the forward side of a many-to-one or
    one-to-one relation.
    In the example::
        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')
    ``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
    """

    def __init__(self, field_with_rel):
        # The ForeignKey (or OneToOneField) this descriptor manages.
        self.field = field_with_rel
        # Attribute name under which the fetched related instance is cached
        # on the model instance.
        self.cache_name = self.field.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception can't be created at initialization time since the
        # related model might not be resolved yet; `rel.model` might still be
        # a string model reference.
        # Subclassing AttributeError as well lets hasattr()/getattr() treat a
        # missing related object like a missing attribute.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.field.remote_field.model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        """Return True if the related object is already cached on `instance`."""
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        """Return a queryset over the related model, honoring manager hints."""
        manager = self.field.remote_field.model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.field.remote_field.model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        """Return the queryset and matching functions used by prefetch_related()."""
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        # Functions extracting the join values from a related object and from
        # one of the given instances, respectively.
        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}
        related_field = self.field.foreign_related_fields[0]

        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.remote_field.multiple:
            rel_obj_cache_name = self.field.remote_field.get_cache_name()
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """
        Get the related instance through the forward relation.
        With the example above, when getting ``child.parent``:
        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``instance_type`` in the ``Child`` class (we don't need it)
        """
        if instance is None:
            return self

        # The related instance is loaded from the database and then cached in
        # the attribute defined in self.cache_name. It can also be pre-cached
        # by the reverse accessor (ReverseOneToOneDescriptor).
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                # An unset foreign key value means there is no related object.
                rel_obj = None
            else:
                qs = self.get_queryset(instance=instance)
                qs = qs.filter(**self.field.get_reverse_related_filter(instance))
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                # If this is a one-to-one relation, set the reverse accessor
                # cache on the related object to the current instance to avoid
                # an extra SQL query if it's accessed later on.
                if not self.field.remote_field.multiple:
                    setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)

        if rel_obj is None and not self.field.null:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name)
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        """
        Set the related instance through the forward relation.
        With the example above, when setting ``child.parent = parent``:
        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``value`` in the ``parent`` instance on the right of the equal sign
        """
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' %
                (instance._meta.object_name, self.field.name)
            )
        elif value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.field.name,
                    self.field.remote_field.model._meta.object_name,
                )
            )
        elif value is not None:
            # Multi-database support: propagate a database assignment from one
            # side of the relation to the other, or verify the router allows
            # the relation when both sides already have a database.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.remote_field.get_cache_name(), None)

            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, None)

        # Set the values of the related field.
        else:
            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))

        # Set the related instance cache used by __get__ to avoid a SQL query
        # when accessing the attribute we just set.
        setattr(instance, self.cache_name, value)

        # If this is a one-to-one relation, set the reverse accessor cache on
        # the related object to the current instance to avoid an extra SQL
        # query if it's accessed later on.
        if value is not None and not self.field.remote_field.multiple:
            setattr(value, self.field.remote_field.get_cache_name(), instance)
class ReverseOneToOneDescriptor(object):
    """
    Accessor to the related object on the reverse side of a one-to-one
    relation.
    In the example::
        class Restaurant(Model):
            place = OneToOneField(Place, related_name='restaurant')
    ``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
    """

    def __init__(self, related):
        # The OneToOneRel instance describing the relation.
        self.related = related
        # Attribute name under which the fetched related instance is cached
        # on the model instance.
        self.cache_name = related.get_cache_name()

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception isn't created at initialization time for the sake of
        # consistency with `ForwardManyToOneDescriptor`.
        # Subclassing AttributeError as well lets hasattr()/getattr() treat a
        # missing related object like a missing attribute.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.related.related_model.DoesNotExist, AttributeError),
            {}
        )

    def is_cached(self, instance):
        """Return True if the related object is already cached on `instance`."""
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **hints):
        """Return a queryset over the related model, honoring manager hints."""
        manager = self.related.related_model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.related.related_model._base_manager
        return manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        """Return the queryset and matching functions used by prefetch_related()."""
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        # Match related objects to instances by (foreign key value -> pk).
        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = {instance_attr(inst): inst for inst in instances}
        query = {'%s__in' % self.related.field.name: instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in queryset:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """
        Get the related instance through the reverse relation.
        With the example above, when getting ``place.restaurant``:
        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``instance_type`` in the ``Place`` class (we don't need it)
        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        if instance is None:
            return self

        # The related instance is loaded from the database and then cached in
        # the attribute defined in self.cache_name. It can also be pre-cached
        # by the forward accessor (ForwardManyToOneDescriptor).
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                # An unsaved instance can't have a related object.
                rel_obj = None
            else:
                filter_args = self.related.field.get_forward_related_filter(instance)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**filter_args)
                except self.related.related_model.DoesNotExist:
                    rel_obj = None
                else:
                    # Set the forward accessor cache on the related object to
                    # the current instance to avoid an extra SQL query if it's
                    # accessed later on.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)

        if rel_obj is None:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (
                    instance.__class__.__name__,
                    self.related.get_accessor_name()
                )
            )
        else:
            return rel_obj

    def __set__(self, instance, value):
        """
        Set the related instance through the reverse relation.
        With the example above, when setting ``place.restaurant = restaurant``:
        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``value`` in the ``restaurant`` instance on the right of the equal sign
        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        # The similarity of the code below to the code in
        # ForwardManyToOneDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None:
            if self.related.field.null:
                # Update the cached related instance (if any) & clear the cache.
                try:
                    rel_obj = getattr(instance, self.cache_name)
                except AttributeError:
                    pass
                else:
                    delattr(instance, self.cache_name)
                    setattr(rel_obj, self.related.field.name, None)
            else:
                raise ValueError(
                    'Cannot assign None: "%s.%s" does not allow null values.' % (
                        instance._meta.object_name,
                        self.related.get_accessor_name(),
                    )
                )
        elif not isinstance(value, self.related.related_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        else:
            # Multi-database support: propagate a database assignment from one
            # side of the relation to the other, or verify the router allows
            # the relation when both sides already have a database.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

            related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
            # Set the value of the related field to the value of the related object's related field
            for index, field in enumerate(self.related.field.local_related_fields):
                setattr(value, field.attname, related_pk[index])

            # Set the related instance cache used by __get__ to avoid a SQL query
            # when accessing the attribute we just set.
            setattr(instance, self.cache_name, value)

            # Set the forward accessor cache on the related object to the current
            # instance to avoid an extra SQL query if it's accessed later on.
            setattr(value, self.related.field.get_cache_name(), instance)
class ReverseManyToOneDescriptor(object):
    """
    Accessor to the related objects manager on the reverse side of a
    many-to-one relation.
    In the example::
        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')
    ``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
    """

    def __init__(self, rel):
        # The ManyToOneRel instance describing the relation.
        self.rel = rel
        self.field = rel.field

    @cached_property
    def related_manager_cls(self):
        # Built lazily (and cached) so the related model's default manager
        # class is fully resolved before the manager subclass is created.
        return create_reverse_many_to_one_manager(
            self.rel.related_model._default_manager.__class__,
            self.rel,
        )

    def __get__(self, instance, instance_type=None):
        """
        Get the related objects through the reverse relation.
        With the example above, when getting ``parent.children``:
        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``instance_type`` in the ``Parent`` class (we don't need it)
        """
        if instance is None:
            return self
        return self.related_manager_cls(instance)

    def __set__(self, instance, value):
        """
        Set the related objects through the reverse relation.
        With the example above, when setting ``parent.children = children``:
        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``value`` in the ``children`` sequence on the right of the equal sign
        """
        manager = self.__get__(instance)
        manager.set(value)
def create_reverse_many_to_one_manager(superclass, rel):
    """
    Create a manager for the reverse side of a many-to-one relation.
    This manager subclasses another manager, generally the default manager of
    the related model, and adds behaviors specific to many-to-one relations.
    """

    class RelatedManager(superclass):
        def __init__(self, instance):
            super(RelatedManager, self).__init__()

            # The instance on the "one" side whose related objects this
            # manager exposes.
            self.instance = instance
            self.model = rel.related_model
            self.field = rel.field

            # Filters restricting every queryset to this instance's objects.
            self.core_filters = {self.field.name: instance}

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
            return manager_class(self.instance)
        do_not_call_in_templates = True

        def get_queryset(self):
            # Serve prefetched results if available; otherwise build a fresh
            # queryset filtered to this instance.
            try:
                return self.instance._prefetched_objects_cache[self.field.related_query_name()]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
                qs = super(RelatedManager, self).get_queryset()
                qs._add_hints(instance=self.instance)
                if self._db:
                    qs = qs.using(self._db)
                qs = qs.filter(**self.core_filters)
                # If any join value on the instance is unset (or '' on
                # backends that treat '' as NULL), nothing can be related.
                for field in self.field.foreign_related_fields:
                    val = getattr(self.instance, field.attname)
                    if val is None or (val == '' and empty_strings_as_null):
                        return qs.none()
                qs._known_related_objects = {self.field: {self.instance.pk: self.instance}}
                return qs

        def get_prefetch_queryset(self, instances, queryset=None):
            """Return the queryset and matching functions used by prefetch_related()."""
            if queryset is None:
                queryset = super(RelatedManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)

            rel_obj_attr = self.field.get_local_related_value
            instance_attr = self.field.get_foreign_related_value
            instances_dict = {instance_attr(inst): inst for inst in instances}
            query = {'%s__in' % self.field.name: instances}
            queryset = queryset.filter(**query)

            # Since we just bypassed this class' get_queryset(), we must manage
            # the reverse relation manually.
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, self.field.name, instance)
            cache_name = self.field.related_query_name()
            return queryset, rel_obj_attr, instance_attr, False, cache_name

        def add(self, *objs, **kwargs):
            """Add `objs` to this related set by pointing their FK at the instance."""
            bulk = kwargs.pop('bulk', True)
            objs = list(objs)
            db = router.db_for_write(self.model, instance=self.instance)

            def check_and_update_obj(obj):
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected, got %r" % (
                        self.model._meta.object_name, obj,
                    ))
                setattr(obj, self.field.name, self.instance)

            if bulk:
                # One UPDATE over all pks; requires every object to be saved
                # already and on the same database.
                pks = []
                for obj in objs:
                    check_and_update_obj(obj)
                    if obj._state.adding or obj._state.db != db:
                        raise ValueError(
                            "%r instance isn't saved. Use bulk=False or save "
                            "the object first." % obj
                        )
                    pks.append(obj.pk)
                self.model._base_manager.using(db).filter(pk__in=pks).update(**{
                    self.field.name: self.instance,
                })
            else:
                # Save each object individually inside one transaction.
                with transaction.atomic(using=db, savepoint=False):
                    for obj in objs:
                        check_and_update_obj(obj)
                        obj.save()
        add.alters_data = True

        def create(self, **kwargs):
            """Create a new related object with its FK preset to the instance."""
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            """get_or_create() scoped to this related set."""
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            """update_or_create() scoped to this related set."""
            kwargs[self.field.name] = self.instance
            db = router.db_for_write(self.model, instance=self.instance)
            return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
        update_or_create.alters_data = True

        # remove() and clear() are only provided if the ForeignKey can have a value of null.
        if rel.field.null:
            def remove(self, *objs, **kwargs):
                """Detach `objs` from this related set by nulling their FK."""
                if not objs:
                    return
                bulk = kwargs.pop('bulk', True)
                val = self.field.get_foreign_related_value(self.instance)
                old_ids = set()
                for obj in objs:
                    # Is obj actually part of this descriptor set?
                    if self.field.get_local_related_value(obj) == val:
                        old_ids.add(obj.pk)
                    else:
                        raise self.field.remote_field.model.DoesNotExist(
                            "%r is not related to %r." % (obj, self.instance)
                        )
                self._clear(self.filter(pk__in=old_ids), bulk)
            remove.alters_data = True

            def clear(self, **kwargs):
                """Detach every object in this related set by nulling its FK."""
                bulk = kwargs.pop('bulk', True)
                self._clear(self, bulk)
            clear.alters_data = True

            def _clear(self, queryset, bulk):
                # Shared implementation for remove() and clear().
                db = router.db_for_write(self.model, instance=self.instance)
                queryset = queryset.using(db)
                if bulk:
                    # `QuerySet.update()` is intrinsically atomic.
                    queryset.update(**{self.field.name: None})
                else:
                    with transaction.atomic(using=db, savepoint=False):
                        for obj in queryset:
                            setattr(obj, self.field.name, None)
                            obj.save(update_fields=[self.field.name])
            _clear.alters_data = True

        def set(self, objs, **kwargs):
            """Replace the contents of this related set with `objs`."""
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)

            bulk = kwargs.pop('bulk', True)
            clear = kwargs.pop('clear', False)

            if self.field.null:
                db = router.db_for_write(self.model, instance=self.instance)
                with transaction.atomic(using=db, savepoint=False):
                    if clear:
                        self.clear()
                        self.add(*objs, bulk=bulk)
                    else:
                        # Minimize churn: only remove objects that are no
                        # longer wanted and add the genuinely new ones.
                        old_objs = set(self.using(db).all())
                        new_objs = []
                        for obj in objs:
                            if obj in old_objs:
                                old_objs.remove(obj)
                            else:
                                new_objs.append(obj)
                        self.remove(*old_objs, bulk=bulk)
                        self.add(*new_objs, bulk=bulk)
            else:
                self.add(*objs, bulk=bulk)
        set.alters_data = True

    return RelatedManager
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
    """
    Accessor to the related objects manager on either side of a many-to-many
    relation.
    In the example::
        class Pizza(Model):
            toppings = ManyToManyField(Topping, related_name='pizzas')
    both ``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
    instances.
    Most of the work is delegated to a dynamically built manager class
    (``create_forward_many_to_many_manager()`` below); this class only decides
    which side of the relation the manager operates on.
    """

    def __init__(self, rel, reverse=False):
        super(ManyToManyDescriptor, self).__init__(rel)
        # Whether this descriptor sits on the reverse side of the relation.
        self.reverse = reverse

    @property
    def through(self):
        # Convenience access to the intermediate model
        # (e.g. ``Book.authors.through``) for inlines and raw queries.
        # Implemented as a property so the fully resolved model is returned.
        return self.rel.through

    @cached_property
    def related_manager_cls(self):
        # Pick the model whose default manager the dynamic manager subclasses:
        # the related model on the reverse side, the source model otherwise.
        if self.reverse:
            manager_model = self.rel.related_model
        else:
            manager_model = self.rel.model
        return create_forward_many_to_many_manager(
            manager_model._default_manager.__class__,
            self.rel,
            reverse=self.reverse,
        )
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
Create a manager for the either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super(ManyRelatedManager, self).__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.source_field_name))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
qs = super(ManyRelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
return qs._next_is_sticky().filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(ManyRelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = self.through._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
)
def add(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use add() on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use remove() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
clear.alters_data = True
def set(self, objs, **kwargs):
if not rel.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot set values on a ManyToManyField which specifies an "
"intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs)
set.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not self.through._meta.auto_created:
opts = self.through._meta
raise AttributeError(
"Cannot use create() on a ManyToManyField which specifies "
"an intermediary model. Use %s.%s's Manager instead." %
(opts.app_label, opts.object_name)
)
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
    """Insert join-table rows relating ``self.instance`` to ``*objs``.

    source_field_name: the PK fieldname in join table for the source object
    target_field_name: the PK fieldname in join table for the target object
    *objs: objects to add — either instances of ``self.model`` or raw
    primary-key values. If there aren't any objects, there is nothing to do.
    """
    from django.db.models import Model
    if objs:
        # Collect the target-side PK values, validating instances as we go.
        new_ids = set()
        for obj in objs:
            if isinstance(obj, self.model):
                if not router.allow_relation(obj, self.instance):
                    raise ValueError(
                        'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                        (obj, self.instance._state.db, obj._state.db)
                    )
                fk_val = self.through._meta.get_field(
                    target_field_name).get_foreign_related_value(obj)[0]
                if fk_val is None:
                    raise ValueError(
                        'Cannot add "%r": the value for field "%s" is None' %
                        (obj, target_field_name)
                    )
                new_ids.add(fk_val)
            elif isinstance(obj, Model):
                # A model instance of the wrong class — reject explicitly.
                raise TypeError(
                    "'%s' instance expected, got %r" %
                    (self.model._meta.object_name, obj)
                )
            else:
                # Assume a raw primary-key value.
                new_ids.add(obj)
        db = router.db_for_write(self.through, instance=self.instance)
        # Query the join table for rows that already exist so we only
        # insert the genuinely new relations.
        vals = (self.through._default_manager.using(db)
                .values_list(target_field_name, flat=True)
                .filter(**{
                    source_field_name: self.related_val[0],
                    '%s__in' % target_field_name: new_ids,
                }))
        new_ids = new_ids - set(vals)
        with transaction.atomic(using=db, savepoint=False):
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are inserting the
                # duplicate data row for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action='pre_add',
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=new_ids, using=db)
            # Add the ones that aren't there already, in a single bulk insert.
            self.through._default_manager.using(db).bulk_create([
                self.through(**{
                    '%s_id' % source_field_name: self.related_val[0],
                    '%s_id' % target_field_name: obj_id,
                })
                for obj_id in new_ids
            ])
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are inserting the
                # duplicate data row for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action='post_add',
                                         instance=self.instance, reverse=self.reverse,
                                         model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
    """Delete join-table rows relating ``self.instance`` to ``*objs``.

    source_field_name: the PK colname in join table for the source object
    target_field_name: the PK colname in join table for the target object
    *objs: objects to remove — model instances or raw primary-key values.
    """
    if not objs:
        return
    # Collect the related-object PK values; non-instances are assumed to
    # already be primary-key values and pass through unchanged.
    old_ids = set()
    for obj in objs:
        if isinstance(obj, self.model):
            fk_val = self.target_field.get_foreign_related_value(obj)[0]
            old_ids.add(fk_val)
        else:
            old_ids.add(obj)
    db = router.db_for_write(self.through, instance=self.instance)
    with transaction.atomic(using=db, savepoint=False):
        # Send a signal to the other end if need be.
        signals.m2m_changed.send(sender=self.through, action="pre_remove",
                                 instance=self.instance, reverse=self.reverse,
                                 model=self.model, pk_set=old_ids, using=db)
        target_model_qs = super(ManyRelatedManager, self).get_queryset()
        if target_model_qs._has_filters():
            # The related queryset is filtered: restrict removal to objects
            # actually visible through it before building the delete filter.
            old_vals = target_model_qs.using(db).filter(**{
                '%s__in' % self.target_field.target_field.attname: old_ids})
        else:
            old_vals = old_ids
        filters = self._build_remove_filters(old_vals)
        self.through._default_manager.using(db).filter(filters).delete()
        signals.m2m_changed.send(sender=self.through, action="post_remove",
                                 instance=self.instance, reverse=self.reverse,
                                 model=self.model, pk_set=old_ids, using=db)
return ManyRelatedManager
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import traceback
import math
import collections
import six
import unittest
import numpy as np
import gc
gc.set_debug(gc.DEBUG_COLLECTABLE)
import paddle.fluid as fluid
class TranspilerTest(unittest.TestCase):
    """Base fixture for DistributeTranspiler tests.

    Builds a small fc regression net (overridable via ``net_conf``) and
    provides helpers that transpile it into trainer / pserver programs for
    two trainers and two pservers. Subclasses implement
    ``transpiler_test_impl`` with the actual assertions.
    """

    def setUp(self):
        self.trainer_id = 0
        self.trainers = 2
        self.pservers = 2
        # NOTE: we do not actually bind this port
        self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175"
        self.pserver1_ep = "127.0.0.1:6174"
        self.pserver2_ep = "127.0.0.1:6175"
        self.sync_mode = True
        self.transpiler = None

    def net_conf(self):
        """Default network: one fc layer, square-error loss, plain SGD."""
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        sgd_optimizer.minimize(avg_cost)

    def get_main_program(self):
        """Build the net into a fresh program; keep a pristine clone in
        ``self.origin_prog`` for later comparison."""
        main = fluid.Program()
        main.random_seed = 1
        with fluid.program_guard(main):
            self.net_conf()
        self.origin_prog = main.clone()
        return main

    def get_trainer(self, config=None, sync_mode=True):
        """Return ``(trainer_main, trainer_startup)`` programs.

        BUG FIX: ``sync_mode`` was previously ignored — the transpiler was
        always constructed with ``sync_mode=True``. Pass it through so
        async callers actually get an async transpiler on first use.
        """
        src = fluid.default_startup_program().clone()

        t = self._transpiler_instance(config, sync_mode=sync_mode)

        trainer_main = t.get_trainer_program(wait_port=False)
        trainer_startup = fluid.default_startup_program()

        assert (src.num_blocks == 1)
        assert (trainer_startup.num_blocks == src.num_blocks)

        return trainer_main, trainer_startup

    def get_pserver(self, ep, config=None, sync_mode=True):
        """Return ``(pserver_program, pserver_startup)`` for endpoint ``ep``."""
        t = self._transpiler_instance(config, sync_mode)
        pserver = t.get_pserver_program(ep)
        startup = t.get_startup_program(ep, pserver)
        return pserver, startup

    def _transpiler_instance(self, config=None, sync_mode=True):
        # Lazily create and cache one transpiler per test case. NOTE:
        # config/sync_mode of subsequent calls are ignored once cached.
        if not self.transpiler:
            main = self.get_main_program()
            self.transpiler = fluid.DistributeTranspiler(config=config)
            self.transpiler.transpile(
                self.trainer_id,
                program=main,
                pservers=self.pserver_eps,
                trainers=self.trainers,
                sync_mode=sync_mode)

        return self.transpiler

    def transpiler_test_impl(self):
        """Overridden by subclasses with the actual assertions."""
        pass

    def test_transpiler(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                self.transpiler_test_impl()
        # NOTE: run gc.collect to eliminate pybind side objects to
        # prevent random double-deallocate when inherited in python.
        del self.transpiler
        del main
        del startup
        gc.collect()
class TestBasicModel(TranspilerTest):
    """Full transpile check of the default fc net in sync mode: trainer and
    pserver op sequences, parameter-block splitting, and that every
    parameter optimized on the trainer side ends up on the pservers."""

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        trainer, trainer_startup = self.get_trainer()

        # split var blocks should be in startup program
        self.assertTrue("fc_w.block0" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w.block1" in trainer_startup.global_block().vars)
        self.assertTrue("fc_w" in trainer_startup.global_block().vars)
        self.assertTrue("fc_b" in trainer_startup.global_block().vars)
        # gradients must not appear in the startup program
        self.assertTrue("fc_w@GRAD" not in trainer_startup.global_block().vars)
        self.assertTrue("fc_b@GRAD" not in trainer_startup.global_block().vars)

        src = [op.type for op in trainer_startup.global_block().ops]
        dst = ['fill_constant', 'fill_constant', 'uniform_random', 'recv', 'recv', \
            'fetch_barrier', 'concat']

        self.assertEqual(src, dst)

        self.assertEqual([op.type for op in trainer.global_block().ops], [
            'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean',
            'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad',
            'elementwise_add_grad', 'send', 'mul_grad', 'split_byref', 'send',
            'send_barrier', 'recv', 'recv', 'fetch_barrier', 'concat'
        ])

        self.assertEqual(len(pserver.blocks), 3)
        # block0: listen_and_serv
        self.assertEqual([op.type for op in pserver.blocks[0].ops],
                         ["listen_and_serv"])
        # block1~2: optimize pass
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "sgd"])
        # confirm startup program
        self.assertEqual([op.type for op in startup.global_block().ops],
                         ["fill_constant", "fill_constant", "uniform_random"])
        # the variable #fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            # strip the ".blockN" suffix of a split slice
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))
class TestBasicModelWithLargeBlockSize(TranspilerTest):
    """Same checks as TestBasicModel, but min_block_size is set so large
    that no parameter is sliced across pservers (no split/concat ops)."""

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.min_block_size = 1048576

        pserver, startup = self.get_pserver(self.pserver1_ep, config)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep, config)

        trainer, _ = self.get_trainer(config)

        self.assertEqual([op.type for op in trainer.global_block().ops], [
            'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean',
            'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad',
            'elementwise_add_grad', 'send', 'mul_grad', 'send', 'send_barrier',
            'recv', 'recv', 'fetch_barrier'
        ])

        self.assertEqual(len(pserver.blocks), 2)
        # block0: listen_and_serv
        self.assertEqual([op.type for op in pserver.blocks[0].ops],
                         ["listen_and_serv"])
        # block1~2: optimize pass
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "sgd"])
        # confirm startup program
        self.assertEqual([op.type for op in startup.global_block().ops],
                         ["fill_constant", "fill_constant"])
        # unlike TestBasicModel, fc_w is NOT split here: the whole
        # (1000, 1000) variable lives on one pserver
        fc_w_var = startup2.global_block().var("fc_w")
        self.assertEqual(fc_w_var.shape, (1000, 1000))
        # all parameters should be optimized on pserver

        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" in op.input_names:
                        param_name = op.input("Param")[0]
                        is_block_idx = param_name.find(".block")
                        if is_block_idx != -1:
                            # strip the ".blockN" suffix of a split slice
                            origin_param_name = param_name[:is_block_idx]
                        else:
                            origin_param_name = param_name
                        pserver_params.append(origin_param_name)
        trainer_params = []
        for op in self.origin_prog.global_block().ops:
            if "Param" in op.input_names:
                trainer_params.append(op.input("Param")[0])
        self.assertEqual(set(pserver_params), set(trainer_params))
class TestNoSliceVar(TranspilerTest):
    """With slice_var_up disabled, fc_w must live whole (1000x1000) on
    exactly one pserver's startup program."""

    def setUp(self):
        super(TestNoSliceVar, self).setUp()

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        _, startup = self.get_pserver(self.pserver1_ep, config)
        _, startup2 = self.get_pserver(self.pserver2_ep, config)

        if "fc_w" in startup.global_block().vars:
            fc_w_var = startup.global_block().vars["fc_w"]
        elif "fc_w" in startup2.global_block().vars:
            fc_w_var = startup2.global_block().vars["fc_w"]
        else:
            # BUG FIX: previously fc_w_var was left unbound here, turning a
            # missing variable into a confusing NameError instead of a
            # clear test failure.
            self.fail("fc_w not found in either pserver startup program")

        self.assertEqual(fc_w_var.shape, (1000, 1000))
class TestLRDecay(TranspilerTest):
    """With an exponential-decay learning rate, the pserver gains an extra
    block that computes the decayed LR besides the optimize blocks."""

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True))
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 4)
        # block 1 carries the staircase exponential-decay computation
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(lr_decay_ops, [
            "increment", "cast", "fill_constant", "elementwise_div", "floor",
            "fill_constant", "elementwise_pow", "fill_constant",
            "elementwise_mul"
        ])
class TestFakeInit(TranspilerTest):
    """Word2vec-style net with an NCE-like loss over sparse embeddings.

    Asserts the trainer startup program contains exactly three 'fake_init'
    ops — presumably one per sparse embedding table (emb, emb_w, emb_b);
    confirm against the transpiler's sparse-parameter handling.
    """

    def net_conf(self):
        dict_size, embedding_size, neg_num = 10000, 8, 5

        input_word = fluid.layers.data(
            name="input_word", shape=[1], dtype='int64', lod_level=1)
        true_word = fluid.layers.data(
            name='true_label', shape=[1], dtype='int64', lod_level=1)
        neg_word = fluid.layers.data(
            name="neg_label", shape=[1], dtype='int64', lod_level=1)
        inputs = [input_word, true_word, neg_word]

        init_width = 0.5 / embedding_size
        input_emb = fluid.layers.embedding(
            input=inputs[0],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb',
                initializer=fluid.initializer.Uniform(-init_width, init_width)))

        true_emb_w = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w',
                initializer=fluid.initializer.Constant(value=0.0)))

        true_emb_b = fluid.layers.embedding(
            input=inputs[1],
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b',
                initializer=fluid.initializer.Constant(value=0.0)))

        neg_word_reshape = fluid.layers.reshape(inputs[2], shape=[-1, 1])
        neg_word_reshape.stop_gradient = True

        # negative samples reuse the emb_w / emb_b tables by name
        neg_emb_w = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, embedding_size],
            param_attr=fluid.ParamAttr(
                name='emb_w', learning_rate=1.0))

        neg_emb_w_re = fluid.layers.reshape(
            neg_emb_w, shape=[-1, neg_num, embedding_size])

        neg_emb_b = fluid.layers.embedding(
            input=neg_word_reshape,
            is_sparse=True,
            size=[dict_size, 1],
            param_attr=fluid.ParamAttr(
                name='emb_b', learning_rate=1.0))

        neg_emb_b_vec = fluid.layers.reshape(neg_emb_b, shape=[-1, neg_num])

        true_logits = fluid.layers.elementwise_add(
            fluid.layers.reduce_sum(
                fluid.layers.elementwise_mul(input_emb, true_emb_w),
                dim=1,
                keep_dim=True),
            true_emb_b)

        input_emb_re = fluid.layers.reshape(
            input_emb, shape=[-1, 1, embedding_size])

        neg_matmul = fluid.layers.matmul(
            input_emb_re, neg_emb_w_re, transpose_y=True)
        neg_matmul_re = fluid.layers.reshape(neg_matmul, shape=[-1, neg_num])
        neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
        # nce loss
        label_ones = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, 1], value=1.0, dtype='float32')
        label_zeros = fluid.layers.fill_constant_batch_size_like(
            true_logits, shape=[-1, neg_num], value=0.0, dtype='float32')

        true_xent = fluid.layers.sigmoid_cross_entropy_with_logits(true_logits,
                                                                   label_ones)
        neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits(neg_logits,
                                                                  label_zeros)
        cost = fluid.layers.elementwise_add(
            fluid.layers.reduce_sum(
                true_xent, dim=1),
            fluid.layers.reduce_sum(
                neg_xent, dim=1))
        avg_cost = fluid.layers.reduce_mean(cost)

        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True))
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        trainer, startup = self.get_trainer()

        fake_init_ops = []
        for op in startup.global_block().ops:
            if op.type == "fake_init":
                fake_init_ops.append(op)

        self.assertEqual(len(fake_init_ops), 3)
class TestDecayedAdagrad(TranspilerTest):
    """Smoke test: transpiling a net trained with DecayedAdagrad must not raise."""

    def net_conf(self):
        features = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=features,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        labels = fluid.layers.data(name='y', shape=[1], dtype='float32')
        squared_err = fluid.layers.square_error_cost(
            input=prediction, label=labels)
        loss = fluid.layers.mean(squared_err)
        optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.1)
        optimizer.minimize(loss)

    def transpiler_test_impl(self):
        # No assertions: success means transpilation completed cleanly.
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()
class TestFtrl(TranspilerTest):
    """Smoke test: transpiling a net trained with Ftrl must not raise."""

    def net_conf(self):
        features = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=features,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        labels = fluid.layers.data(name='y', shape=[1], dtype='float32')
        squared_err = fluid.layers.square_error_cost(
            input=prediction, label=labels)
        loss = fluid.layers.mean(squared_err)
        optimizer = fluid.optimizer.Ftrl(learning_rate=0.1)
        optimizer.minimize(loss)

    def transpiler_test_impl(self):
        # No assertions: success means transpilation completed cleanly.
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()
class TestLRDecayConditional(TranspilerTest):
    """piecewise_decay emits conditional_block ops on the pserver; each
    condition sub-block (a block not listed in optimize_blocks) should
    contain a single 'assign' op."""

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.piecewise_decay([10000, 20000],
                                                       [1.0, 0.5, 1.0]))
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        serv_op = pserver.blocks[0].ops[0]
        sub_blocks = []
        optimize_blocks = []
        # Partition pserver blocks into optimize blocks (attached to the
        # listen_and_serv op) and condition sub-blocks.
        for b in serv_op.all_attrs()["optimize_blocks"]:
            optimize_blocks.append(b.idx)
        for b in pserver.blocks:
            if b.idx not in optimize_blocks:
                sub_blocks.append(b.idx)

        self.assertEqual(len(pserver.blocks), 7)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(lr_decay_ops, [
            "increment", "cast", "fill_constant", "fill_constant", "less_than",
            "logical_not", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "conditional_block"
        ])
        # test the condition blocks
        for b in sub_blocks:
            if b == 0:
                continue
            block = pserver.blocks[b]
            self.assertEqual([op.type for op in block.ops], ["assign"])
class TestL2Decay(TranspilerTest):
    """fc_w carries an L2 regularizer plus a value-based gradient clip
    restricted (via need_clip) to fc_w; verifies the resulting op sequences
    of both pserver optimize blocks."""

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(
            input=x,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(
                name='fc_w', regularizer=fluid.regularizer.L2Decay()),
            bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)

        # Renamed from `filter` to stop shadowing the builtin of that name.
        def _clip_fc_w_only(param):
            return param.name == "fc_w"

        clip = fluid.clip.GradientClipByValue(0.1, need_clip=_clip_fc_w_only)
        sgd_optimizer.minimize(avg_cost, grad_clip=clip)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 3)
        # blocks 1 and 2 are the optimize blocks; the one with the extra
        # scale/sum presumably carries the L2Decay term — confirm mapping.
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "clip", "sgd"])
        self.assertEqual(
            [op.type for op in pserver.blocks[2].ops],
            ["sum", "scale", "clip", "scale", "sum", "sgd"])

        # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer
class TestL2DecayWithPiecewise(TranspilerTest):
    """Momentum + L2 regularization + piecewise LR decay combined: block 1
    holds the piecewise-decay conditionals, blocks 7/8 the per-parameter
    momentum updates with the regularization scale/sum."""

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        base_lr = 1.0
        bd = [1, 10, 20, 30]
        # one LR value per decay interval (len(bd) + 1 of them)
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        sgd_optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr),
            momentum=0.9,
            regularization=fluid.regularizer.L2Decay(1e-4))
        sgd_optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 9)
        # block 1: the piecewise-decay conditional cascade
        self.assertEqual([op.type for op in pserver.blocks[1].ops], [
            "increment", "cast", "fill_constant", "fill_constant", "less_than",
            "logical_not", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "conditional_block"
        ])
        # blocks 7/8: momentum optimize with L2 regularization ops
        self.assertEqual([op.type for op in pserver.blocks[7].ops],
                         ["sum", "scale", "scale", "sum", "momentum"])
        self.assertEqual([op.type for op in pserver.blocks[8].ops],
                         ["sum", "scale", "scale", "sum", "momentum"])
class TestEmptyPserverOptimizeBlocks(TranspilerTest):
    """With a single unsliced parameter, one pserver ends up holding an
    optimize block that contains no ops at all."""

    def net_conf(self):
        # only one parameter
        features = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=features,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=False)
        labels = fluid.layers.data(name='y', shape=[1], dtype='float32')
        squared_err = fluid.layers.square_error_cost(
            input=prediction, label=labels)
        loss = fluid.layers.mean(squared_err)
        optimizer = fluid.optimizer.SGD(learning_rate=1.0)
        optimizer.minimize(loss)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        pserver, startup = self.get_pserver(ep=self.pserver2_ep, config=config)

        # listen_and_serv plus exactly one — empty — optimize block
        self.assertEqual(2, len(pserver.blocks))
        self.assertEqual(0, len(pserver.blocks[1].ops))
class TestDistLookupTableBase(TranspilerTest):
    """Base for lookup-table tests: three id inputs feed pooled embeddings
    (two share the 'shared_w' table, one uses 'profile_emb'), concatenated
    into an fc classifier trained with Adam."""

    def network_with_table(self, is_sparse, is_distributed):
        self.table_size = 1000
        self.emb_size = 64
        self.lookup_table_name = 'shared_w'

        def emb_pool(ids, table_name, is_distributed):
            # embedding lookup followed by average sequence pooling
            emb = fluid.layers.embedding(
                input=ids,
                size=[self.table_size, self.emb_size],
                dtype='float32',
                param_attr=table_name,
                is_sparse=is_sparse,
                is_distributed=is_distributed)
            pool = fluid.layers.sequence_pool(input=emb, pool_type='average')
            return pool

        title_ids = fluid.layers.data(
            name='title_ids', shape=[1], dtype='int64', lod_level=1)
        brand_ids = fluid.layers.data(
            name='brand_ids', shape=[1], dtype='int64', lod_level=1)
        # NOTE(review): this layer reuses the name 'brand_ids' — looks like
        # a copy-paste slip ('profile_ids' was probably intended); confirm
        # whether downstream assertions depend on it before changing.
        profile_ids = fluid.layers.data(
            name='brand_ids', shape=[1], dtype='int64', lod_level=1)
        title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed)
        brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed)
        profile_emb = emb_pool(profile_ids, "profile_emb", False)
        fc0 = fluid.layers.concat(
            input=[title_emb, brand_emb, profile_emb], axis=1)
        predict = fluid.layers.fc(input=fc0,
                                  size=2,
                                  act=None,
                                  param_attr=fluid.ParamAttr(name='fc_w'),
                                  bias_attr=fluid.ParamAttr(name='fc_b'))

        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        cost = fluid.layers.cross_entropy(input=predict, label=label)
        avg_cost = fluid.layers.mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)
class TestLocalLookupTable(TestDistLookupTableBase):
    """Sparse but non-distributed tables: lookups stay local on the trainer
    and each table gets its own adam optimize block on the pserver."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["sum", "scale", "adam", "scale", "scale"])

        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["sum", "scale", "adam", "scale", "scale"])

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        # expected trainer op sequence in sync mode (note the trailing
        # send_barrier / fetch_barrier, absent in the async variant)
        ops = [
            'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool',
            'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add',
            'cross_entropy2', 'mean', 'fill_constant', 'mean_grad',
            'cross_entropy_grad2', 'elementwise_add_grad', 'send', 'mul_grad',
            'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'split_selected_rows', 'send', 'sequence_pool_grad',
            'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'sum', 'split_selected_rows', 'send', 'send_barrier', 'recv',
            'recv', 'fetch_barrier'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
class TestDistLookupTable(TestDistLookupTableBase):
    """Distributed lookup table in sync mode: the shared table is served by
    the pservers (prefetch on the trainer, sgd + save blocks on the
    pserver)."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 2 optimize for the dense profile_emb table, adam
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 3 optimize for the distributed table, sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["sum", "sgd"])
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual([op.type for op in pserver1.blocks[4].ops],
                         ["lookup_sparse_table_read"])
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        # shared-table lookups become split_ids/prefetch/merge_ids
        ops = [
            'split_ids', 'prefetch', 'merge_ids', 'sequence_pool',
            'sequence_pool', 'lookup_table', 'sequence_pool', 'concat', 'mul',
            'elementwise_add', 'cross_entropy2', 'mean', 'fill_constant',
            'mean_grad', 'cross_entropy_grad2', 'elementwise_add_grad', 'send',
            'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'split_selected_rows', 'send',
            'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'sum', 'split_ids', 'send', 'send_barrier',
            'recv', 'recv', 'fetch_barrier'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        # the distributed table is fake_init-ed on the trainer (last op)
        startup_ops = [
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'uniform_random',
            'uniform_random', 'recv', 'recv', 'recv', 'fetch_barrier', 'concat',
            'fake_init'
        ]
        self.assertEqual([op.type for op in trainer_startup.blocks[0].ops],
                         startup_ops)
class TestAsyncLocalLookupTable(TestDistLookupTableBase):
    """Local lookup table in async mode: optimize blocks lose the sum/scale
    aggregation and the trainer drops the send/fetch barriers."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        # sync_mode=False selects the async transpile path
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["adam", "scale", "scale"])
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["adam", "scale", "scale"])
        # 3 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["adam", "scale", "scale"])

        trainer, _ = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        # no send_barrier / fetch_barrier ops in async mode
        ops = [
            'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool',
            'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add',
            'cross_entropy2', 'mean', 'fill_constant', 'mean_grad',
            'cross_entropy_grad2', 'elementwise_add_grad', 'send', 'mul_grad',
            'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'split_selected_rows', 'send', 'sequence_pool_grad',
            'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'sum', 'split_selected_rows', 'send', 'recv', 'recv'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
class TestAsyncDistLookupTable(TestDistLookupTableBase):
    """Distributed lookup table in async mode: pserver blocks mirror the
    sync variant minus the sum/scale aggregation; the trainer drops the
    send/fetch barriers."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        # sync_mode=False selects the async transpile path
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)

        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["adam", "scale", "scale"])
        # 2 optimize for table adam
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["adam", "scale", "scale"])
        # 3 optimize for table sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops], ["sgd"])
        # 4 prefetch -> lookup_sparse_table_read for data0
        self.assertEqual([op.type for op in pserver1.blocks[4].ops],
                         ["lookup_sparse_table_read"])
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])

        trainer, trainer_startup = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        # no send_barrier / fetch_barrier ops in async mode
        ops = [
            'split_ids', 'prefetch', 'merge_ids', 'sequence_pool',
            'sequence_pool', 'lookup_table', 'sequence_pool', 'concat', 'mul',
            'elementwise_add', 'cross_entropy2', 'mean', 'fill_constant',
            'mean_grad', 'cross_entropy_grad2', 'elementwise_add_grad', 'send',
            'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'split_selected_rows', 'send',
            'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'sum', 'split_ids', 'send', 'recv', 'recv'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        # the distributed table is fake_init-ed on the trainer (last op)
        startup_ops = [
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'uniform_random',
            'uniform_random', 'recv', 'recv', 'recv', 'fetch_barrier', 'concat',
            'fake_init'
        ]
        self.assertEqual([op.type for op in trainer_startup.blocks[0].ops],
                         startup_ops)
class TestDistLookupTableSliceSize(TestDistLookupTableBase):
    """The distributed table must be sliced evenly: each pserver holds
    ceil(table_size / pservers) rows."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        pserver1, _ = self.get_pserver(self.pserver1_ep, config)

        self.assertTrue(self.transpiler.has_distributed_lookup_table)
        table_var = pserver1.global_block().vars[self.transpiler.table_name]
        actual_rows = table_var.shape[0]
        expected_rows = int(math.ceil(self.table_size / self.pservers))
        self.assertEqual(actual_rows, expected_rows)
class TestDistArgsInProgram(TestDistLookupTableBase):
    """The transpiled trainer program should carry the distributed-run
    metadata attributes set by the transpiler."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        trainer_prog, _ = self.get_trainer()

        self.assertTrue(trainer_prog._is_distributed)
        self.assertTrue(trainer_prog._is_chief)
        self.assertEqual(self.lookup_table_name,
                         trainer_prog._distributed_lookup_table)
        self.assertEqual([self.pserver1_ep, self.pserver2_ep],
                         trainer_prog._endpoints)
class TestRMSPropOptimizer(TranspilerTest):
    """RMSProp: checks the optimize-block op sequence and that the moment
    accumulator is split across pservers exactly like its parameter."""

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)

        self.assertEqual(len(pserver.blocks), 3)
        # block1~2: optimize pass
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "rmsprop"])
        # the variable #fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        # the moment accumulator is split the same way as its parameter
        moment_var = startup.global_block().var("momentum_1")
        self.assertEqual(moment_var.shape, (500, 1000))
class TestLoadSliceVar(TranspilerTest):
    """For every variable distributed to pserver1, the slice numels on both
    pservers must add up to the numel of the original variable."""

    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, _ = self.get_pserver(self.pserver1_ep)
        pserver2, _ = self.get_pserver(self.pserver2_ep)

        vars_ps1 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver1_ep)
        vars_ps2 = pserver._parameters_on_pservers.get_distributed_vars_by_ep(
            self.pserver2_ep)

        self.assertTrue(vars_ps1)
        self.assertTrue(vars_ps2)

        for idx in six.moves.xrange(len(vars_ps1)):
            total_numel = 0
            ps1_numel, ps2_numel = 0, 0

            ps1_var = vars_ps1[idx]

            if not ps1_var.is_slice:
                # Unsliced: the whole variable lives on ps1, so its slice
                # must equal the origin (ps2_numel stays 0).
                total_numel = six.moves.reduce(lambda x, y: x * y,
                                               vars_ps1[idx].origin.shape)
                ps1_numel = six.moves.reduce(lambda x, y: x * y,
                                             vars_ps1[idx].slice.shape)
            else:
                # Sliced: find the matching slice on ps2 by origin name and
                # check the two slices cover the origin exactly.
                ps2_var = None
                for var in vars_ps2:
                    if var.origin.name == ps1_var.origin.name:
                        ps2_var = var
                        break

                total_numel = six.moves.reduce(lambda x, y: x * y,
                                               ps1_var.origin.shape)
                ps1_numel = six.moves.reduce(lambda x, y: x * y,
                                             ps1_var.slice.shape)
                ps2_numel = six.moves.reduce(lambda x, y: x * y,
                                             ps2_var.slice.shape)

            self.assertEqual(total_numel, ps1_numel + ps2_numel)
class TestNCCL2Transpile(TranspilerTest):
    def test_nccl2_transpile(self):
        """Check that nccl2-mode transpilation appends a gen_nccl_id op.

        Only meaningful when Paddle is compiled with CUDA, since nccl2 mode
        requires it; otherwise the test is a no-op.
        """
        # Guard clause replaces the original `if ... else: pass` (dead branch)
        # and flattens the nesting.
        if not fluid.core.is_compiled_with_cuda():
            return

        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            self.net_conf()

        config = fluid.DistributeTranspilerConfig()
        config.mode = "nccl2"
        config.wait_port = False
        t = fluid.DistributeTranspiler(config=config)
        t.transpile(
            0,
            trainers="127.0.0.1:6174,127.0.0.1:6175",
            current_endpoint="127.0.0.1:6174",
            startup_program=startup)

        # The last op of the startup program must generate the NCCL
        # communication id, and the NCCLID variable must exist.
        # (Removed a leftover debug print of the startup op list.)
        self.assertEqual(startup.global_block().ops[-1].type, "gen_nccl_id")
        self.assertIsNotNone(startup.global_block().vars.get("NCCLID"))
        # Collect eagerly so Program resources are released before the next
        # test case runs.
        gc.collect()
# test for remote prefetch
class TestRemoteLookupTable(TestDistLookupTableBase):
    # Remote-prefetch variant: sparse (but not distributed) lookup table.

    def net_conf(self):
        import os
        # Enable remote prefetch before the network is built.
        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)

        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will be scaled to
        # grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will be scaled to
        # grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["sum", "scale", "adam", "scale", "scale"])

        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        # Exact op sequence expected in the transpiled trainer program.
        ops = [
            'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool',
            'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add',
            'cross_entropy2', 'mean', 'fill_constant', 'mean_grad',
            'cross_entropy_grad2', 'elementwise_add_grad', 'send', 'mul_grad',
            'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'split_selected_rows', 'send', 'sequence_pool_grad',
            'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'sum', 'split_selected_rows', 'send', 'send_barrier', 'recv',
            'recv', 'fetch_barrier'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
# test for remote prefetch
class TestRemoteNce(TestDistLookupTableBase):
    # Remote-prefetch test for a sparse NCE (noise-contrastive estimation)
    # network: the sparse weight must be prefetched rather than received.

    def network_with_table(self, is_sparse, is_distributed):
        num_total_classes = 20
        sampler = "uniform"
        # Custom sampling distribution over the 20 classes.
        nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')

        input = fluid.layers.data(name="input", shape=[10], dtype="float32")
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")

        # Pre-create the nce weight/bias parameters so their names are fixed.
        w_param = fluid.default_main_program().global_block().create_parameter(
            shape=[num_total_classes, 10],
            dtype='float32',
            name='nce_w',
            initializer=fluid.initializer.ConstantInitializer())
        b_param = fluid.default_main_program().global_block().create_parameter(
            shape=[num_total_classes, 1],
            dtype='float32',
            name='nce_b',
            initializer=fluid.initializer.ConstantInitializer())

        cost = fluid.layers.nce(input=input,
                                label=label,
                                num_total_classes=num_total_classes,
                                sampler=sampler,
                                custom_dist=nid_freq_arr.tolist(),
                                sample_weight=None,
                                param_attr='nce_w',
                                bias_attr='nce_b',
                                seed=1,
                                num_neg_samples=5,
                                is_sparse=is_sparse)
        avg_cost = fluid.layers.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os
        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()

        # NOTE(review): with remote prefetch enabled, the sparse weight
        # 'nce_w' must NOT appear among recv outputs (it is fetched on
        # demand), while the dense bias 'nce_b' is still received up front.
        # The variable names out_vars/in_vars read inverted relative to that
        # intent — confirm naming against the transpiler's terminology.
        out_vars = ["nce_w"]
        in_vars = ["nce_b"]

        recv_var_names = []

        # Collect every variable produced by recv ops in the trainer program.
        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                for var in op.output("Out"):
                    recv_var_names.append(var)

        for out_var in out_vars:
            self.assertFalse(out_var in recv_var_names)

        for in_var in in_vars:
            self.assertTrue(in_var in recv_var_names)
# test for remote prefetch
class TestRemoteHsigmoid(TestDistLookupTableBase):
    """Remote-prefetch test for a sparse hierarchical sigmoid network."""

    def network_with_table(self, is_sparse, is_distributed):
        """Build a tiny custom-tree hsigmoid classifier (3 classes)."""
        num_total_classes = 3

        input = fluid.layers.data(name="input", shape=[1], dtype="float32")
        label = fluid.layers.data(name="label", shape=[1], dtype="int64")
        path_table = fluid.layers.data(
            name='path_table', shape=[3], dtype='int64')
        path_code = fluid.layers.data(
            name='path_code', shape=[3], dtype='int64')
        # Pre-created parameters (names fixed for later assertions).
        w_param = fluid.default_main_program().global_block().create_parameter(
            shape=[num_total_classes, 10],
            dtype='float32',
            name='hs_w',
            initializer=fluid.initializer.ConstantInitializer())
        b_param = fluid.default_main_program().global_block().create_parameter(
            shape=[3, 1],
            dtype='float32',
            name='hs_b',
            initializer=fluid.initializer.ConstantInitializer())

        emb = fluid.layers.embedding(
            input=input,
            is_sparse=is_sparse,
            size=[3, 3],
            param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(
                scale=1 / math.sqrt(num_total_classes))))

        cost = fluid.layers.hsigmoid(
            input=emb,
            label=label,
            num_classes=num_total_classes,
            path_table=path_table,
            path_code=path_code,
            is_custom=True,
            is_sparse=is_sparse)
        avg_cost = fluid.layers.mean(cost)
        # optimizer
        optimizer = fluid.optimizer.SGD(learning_rate=0.003)
        optimizer.minimize(avg_cost)

    def net_conf(self):
        import os
        os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1"
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        trainer, _ = self.get_trainer()
        params_to_check = list()
        for op in trainer.blocks[0].ops:
            if op.type == "hierarchical_sigmoid":
                params_to_check = [op.input("W")[0], op.input("Bias")[0]]
                # BUG FIX: the original list was ["epmap", "table_names",
                # "epmap"], which re-checked "epmap" and left the final else
                # branch (the == 3 check) unreachable. "num_classes" is the
                # hsigmoid attribute whose value equals num_total_classes.
                for name in ["epmap", "table_names", "num_classes"]:
                    assert op.has_attr(name)
                    if name == "epmap":
                        assert op.attr(name)[0] == u'127.0.0.1:6174'
                    elif name == "table_names":
                        assert op.attr(name)[0] == u'hierarchical_sigmoid_0.w_0'
                    else:
                        assert op.attr(name) == 3
            elif op.type == "lookup_table":
                params_to_check.append(op.input("W")[0])
            else:
                pass

        # Exactly one recv op should remain, fetching only the hsigmoid bias;
        # the sparse weight is remote-prefetched instead of received.
        op_count = 0
        for op in trainer.blocks[0].ops:
            if op.type == "recv":
                assert len(op.output("Out")) == 1
                assert op.output("Out")[0] == u'hierarchical_sigmoid_0.b_0'
                op_count += 1
        assert op_count == 1
if __name__ == "__main__":
    # Run every transpiler test case defined in this module.
    unittest.main()
| |
#!/usr/bin/env python
""" logparser.py - PyQt graphical log parser
This is an easy-to-use tool which combines the advantages of *less* and *grep* in a fashion which doesn't require the
user to understand regular expressions.
Usage:
python logparser.py [filename.txt]
To load a file either pass the name in as an argument on the command line or drag and drop any file within the
bounds of the file output display.
Below the file output display there is a spot to type strings to filter for. The enter key will apply the filter.
All existing filters are displayed to the far right.
Filters:
By default green filters include all lines that contain the filter string.
By default red filters will exclude any line that contains the filter string.
Multiple green filters are treated as ORs. Both filters do not need to exist on the same line.
Red filters take priority over green filters.
Toggle filters from red to green or vice versa by double-clicking with the mouse or by selecting with mouse and
pressing <Space>.
Delete filters by selecting with mouse and pressing <Delete>.
To create an AND filter, click on an existing filter and then type in a new filter. The ANDed filter should appear
indented under the filter that was clicked on.
Incomplete features:
There are currently two "invisible" buttons that don't have an icon, but do have mouse-over tooltips. They are both
located just below the "File" menu.
- Left button exits the application
- Right button toggles filtering out empty lines (e.g. lines with just a newline)
"""
import os
import sys
import threading
from PyQt4 import QtGui
from PyQt4 import QtCore
class LogParser_ApplyFilterThread(QtCore.QThread):
    """Worker thread that applies the current filter set to the loaded file.

    Runs off the GUI thread and repeatedly emits the accumulated result text
    via a custom signal, so the display stays responsive on large files.
    """

    def __init__(self, parent):
        """
        As filters are applied, update the text as results are required.
        This is basically a worker thread to help make the GUI appear more responsive.
        :param parent: LogParser - the LogParser instance (i.e. the GUI application)
        """
        QtCore.QThread.__init__(self, parent)
        self.parent = parent
        # Create Lock (serializes run() invocations)
        self._lock = threading.Lock()
        # Custom signal / slot for updating the results text as it's found
        self.signal = QtCore.SIGNAL('Update text')
        # Flag used to end the current filter search
        self.running = False

    def fileDisplayUI_ApplyFilters(self):
        """Scan the file line by line, applying parent and child filters.

        Parent (group[0]) filters are ORed; child filters refine their parent
        group. Omit filters take priority over include filters.
        """
        # Maximum number of lines to display on the file display widget
        # (written onto the parent so the scroll handler can read it)
        self.parent.maxMatches = 200
        # Current number of lines found to display
        numMatches = 0
        # String output that will be written to the file display widget
        display = ''
        # Current line in the fileData that we are comparing with our filter
        # currentLineInFile = ( self.pageNumber * self.maxMatches ) - 1
        currentLineInFile = -1
        # While the output buffer still has space
        while numMatches < self.parent.maxMatches:
            # If this filter job is being interrupted, then break out
            if not self.running:
                break
            # Always increment the current line of the file
            currentLineInFile += 1
            # Flag used to continue the while loop if any exclusion filter matches
            filterOmitTrigger = False
            # Flag used to continue the while loop if no inclusion filter matches
            filterIncludeTrigger = False
            # Flag to determine the existence of an include filter
            includeFilterExists = False
            # For each parent filter, see if the text color is green, if so,
            # an include filter exists
            for group in self.parent.filterGroups:
                if group[0].getState() == LogParser_Filter.STATE_INCLUDE:
                    includeFilterExists = True
            # If our current line index for the fileData doesn't exceed the
            # length of the file
            if len(self.parent.fileData) > currentLineInFile:
                # If the user wants to filter new lines (skip empty lines)
                if not self.parent.newLineMode:
                    if self.parent.fileData[currentLineInFile] == '':
                        continue
                # For each parent filter
                for group in self.parent.filterGroups:
                    # If the parent is in the current line
                    if str(group[0].text()) in self.parent.fileData[currentLineInFile]:
                        # If the parent is an inclusion filter
                        if group[0].getState() == LogParser_Filter.STATE_INCLUDE:
                            filterIncludeTrigger = True
                            continue
                        else:
                            filterOmitTrigger = True
                            break
                # If the line matches an exclusion filter, omit that line
                if filterOmitTrigger:
                    continue
                # If the line doesn't contain our inclusion criteria
                # Ensure that an include filter exists, otherwise everything
                # gets filtered out
                if includeFilterExists and not filterIncludeTrigger:
                    continue
                # For each child filter
                for group in self.parent.filterGroups:
                    # Re-initialize the flags (per group)
                    filterIncludeTrigger = False
                    filterOmitTrigger = False
                    includeFilterExists = False
                    # Determine if an include filter exists within this group
                    for item in group:
                        if group[0] != item:
                            if item.getState() == LogParser_Filter.STATE_INCLUDE:
                                includeFilterExists = True
                    # Apply filters for each child
                    for item in group:
                        if group[0] != item:
                            # Child labels are indented; strip the prefix
                            childText = str(item.text())[3:]
                            if childText in self.parent.fileData[currentLineInFile]:
                                # If the child is a green inclusion filter
                                if item.getState() == LogParser_Filter.STATE_INCLUDE:
                                    filterIncludeTrigger = True
                                    continue
                                else:
                                    filterOmitTrigger = True
                                    break
                    if filterOmitTrigger:
                        break
                    if includeFilterExists and not filterIncludeTrigger:
                        break
                # NOTE(review): the two checks below see only the flags left
                # over from the LAST group iterated above — earlier groups'
                # verdicts are overwritten each iteration. Confirm intended.
                if filterOmitTrigger:
                    continue
                # If the line doesn't contain our inclusion criteria
                # Ensure that an include filter exists, otherwise everything
                # gets filtered out
                if includeFilterExists and not filterIncludeTrigger:
                    continue
                # Add the current line of the fileData to our buffer
                display = display + self.parent.fileData[currentLineInFile] + '\n'
                numMatches += 1
                # Display our filtered output (incremental update)
                self.emit(self.signal, display)
            # If the currentLineInFile exceeds the file's length, break
            else:
                break
        # If there is no output to display, display 'No Results' to avoid
        # user confusion
        if display == '':
            display = 'No Results!'
            self.emit(self.signal, display)
        self.running = False

    def run(self):
        # QThread entry point: serialize filter passes behind the lock.
        with self._lock:
            self.running = True
            self.fileDisplayUI_ApplyFilters()
class LogParser_Filter(QtGui.QListWidgetItem):
    """
    Holds relevant information regarding a filter (e.g. filter state, colors).

    A filter is a list-widget item whose text is the substring to match and
    whose state decides whether matching lines are included or omitted.
    """

    # Possible states for a filter
    STATE_OMIT, STATE_INCLUDE = range(2)
    # Default color for omission filters: RED
    filterOmitColor = QtGui.QColor(255, 0, 0)
    # Default color for inclusive filters: GREEN
    filterIncludeColor = QtGui.QColor(0, 255, 0)
    # Default state for a filter (class-level default; setState shadows it
    # with an instance attribute)
    filterState = STATE_OMIT

    def __init__(self):
        # BUG FIX: the original called super(QtGui.QListWidgetItem, self),
        # which starts the MRO lookup *after* QListWidgetItem and therefore
        # skips QListWidgetItem.__init__ entirely. Naming our own class
        # gives the normal cooperative initialization chain.
        super(LogParser_Filter, self).__init__()

    def setState(self, state):
        """Set the filter state (STATE_OMIT/STATE_INCLUDE) and recolor the
        item text to match."""
        self.filterState = state
        if self.filterState == self.STATE_INCLUDE:
            self.setForeground(self.filterIncludeColor)
        else:
            self.setForeground(self.filterOmitColor)

    def clone(self):
        """Return a copy that preserves the subclass and filter state.

        If more attributes are added to this class, this method will need
        to be updated.
        """
        clone = super(LogParser_Filter, self).clone()
        clone.__class__ = self.__class__
        clone.filterState = self.filterState
        return clone

    def getState(self):
        """Return the current filter state."""
        return self.filterState
class LogParser(QtGui.QMainWindow):
    """Main application window: file display, filter input, and filter list."""

    def __init__(self, fname=None):
        """
        Main LogParser class.
        :param fname: str - filename to open at startup (optional)
        """
        super(LogParser, self).__init__()

        # Init all UI components
        self.initProgramVariables()
        self.initCentralWidgetUI()
        self.initGUIStructureUI()
        self.initComponentsUI()

        # Main window colors
        p = self.palette()
        p.setColor(QtGui.QPalette.Window, QtGui.QColor(65, 75, 65))
        p.setColor(QtGui.QPalette.Button, QtGui.QColor(220, 220, 220))
        p.setColor(QtGui.QPalette.ButtonText, QtGui.QColor(0, 0, 0))
        self.setPalette(p)

        # Main window attributes
        self.setGeometry(300, 300, 1300, 500)
        self.setWindowTitle('Log Parser')
        self.show()

        # If a filename was passed in, load and display it immediately
        if fname is not None:
            with open(fname, 'r') as f:
                self.fileData = f.read()
            self.fileData = self.fileData.split('\n')
            self.fileDisplayUI_ApplyFilters()

    def eventFilter(self, source, event):
        """Handle drag/drop onto the file display and Tab-to-input focus."""
        # Drag event for the file display UI
        if event.type() == QtCore.QEvent.DragEnter and source is self.fileDisplayUI:
            event.accept()
            return True
        # Drop event for file display UI: load the dropped file
        elif event.type() == QtCore.QEvent.Drop and source is self.fileDisplayUI:
            if event.mimeData().hasUrls:
                droppath = str(event.mimeData().urls().pop().toLocalFile())
                with open(droppath, 'r') as f:
                    self.fileData = f.read()
                self.fileData = self.fileData.split('\n')
                self.fileDisplayUI_ApplyFilters()
            return True
        # Tab moves focus to the filter input (unless the filter list has it)
        elif event.type() == QtCore.QEvent.KeyPress and event.key() == QtCore.Qt.Key_Tab:
            if not self.filterDisplayUI.hasFocus():
                self.filterInputUI.setFocus()
            return True
        else:
            return super(LogParser, self).eventFilter(source, event)

    def fileDisplayUI_BufferScroll(self):
        """Experimental paged scrolling of large files (currently disabled —
        the valueChanged connection in initComponentsUI is commented out)."""
        maximumValue = float(self.fileDisplayUI.verticalScrollBar().maximum())
        currentValue = self.fileDisplayUI.verticalScrollBar().value()
        percentScrolled = 0.0
        if maximumValue > 0:
            percentScrolled = currentValue / maximumValue
        #print(percentScrolled)
        print(currentValue)
        # If the user has scrolled above 75% of the available text
        if percentScrolled > .75:
            # If the pageNumber would go out of bounds, don't change it
            if self.pageNumber * self.maxMatches < len(self.fileData):
                print('add')
                self.pageNumber += 1
                self.fileDisplayUI_ApplyFilters()
                #self.fileDisplayUI.verticalScrollBar().setValue( (self.maxMatches / 2) * 15 )
        # If the user has scrolled below 25% of the available text
        # (the `and False` keeps this branch disabled for now)
        elif percentScrolled < .01 and False:
            # If the pageNumber would go out of bounds, don't change it
            if self.pageNumber > 0:
                print('sub')
                self.pageNumber -= 1
                self.fileDisplayUI_ApplyFilters()

    def filterDisplayUI_addNewFilter(self):
        """Create a filter from the input box; nest it under the selected
        filter (AND) or start a new group (OR)."""
        # Obtain the filter that was just typed from the UI
        filterInput = self.filterInputUI.text()
        # Set the filter input UI text back to being empty
        self.filterInputUI.setText('')
        # Disallow empty filters
        if str(filterInput).strip() == '':
            return
        # Get all currently enabled filters from the UI
        items = []
        for index in range(self.filterDisplayUI.count()):
            items.append(self.filterDisplayUI.item(index))
        labels = [i.text() for i in items]
        # Disallow repeated filters
        if filterInput in labels:
            return
        # Insert filters into sorted groups
        items = []
        group = []
        # Convert the filter string to a LogParser_Filter (QListWidgetItem),
        # and customize it (new filters start red = omit)
        filterInputItem = LogParser_Filter()
        filterInputItem.setForeground(QtGui.QColor(255, 0, 0))
        filterInputItem.setText(filterInput)
        # Check if a filter is selected within the filter output display
        for selectedItem in self.filterDisplayUI.selectedItems():
            items.append(selectedItem)
        # If a filter is selected, append the new filter as a child of the
        # selection
        if len(items) > 0:
            selectedItem = items[0]
            for group in self.filterGroups:
                if selectedItem in group:
                    # NOTE(review): child labels are stripped with [3:] in the
                    # filter thread, so this indent prefix is presumably three
                    # spaces — whitespace may have been mangled; confirm.
                    filterInputItem.setText(' ' + filterInput)
                    group.append(filterInputItem)
        # If none were selected, the new filter will become a group parent
        else:
            group.append(filterInputItem)
            self.filterGroups.append(group)
        # Since QListWidget.clear() deletes all pointers of its items, we
        # make a copy
        filterGroupsCopy = []
        for group in self.filterGroups:
            groupCopy = []
            for item in group:
                clone = item.clone()
                groupCopy.append(clone)
            filterGroupsCopy.append(groupCopy)
        # Erase all items from display to ensure order of parent and children
        self.filterDisplayUI.clear()
        # Set the filter groups equal to the copy since the original was
        # deleted
        self.filterGroups = filterGroupsCopy
        # Add all filters to the QListWidget
        for group in self.filterGroups:
            for item in group:
                self.filterDisplayUI.addItem(item)
        # Apply the new set of filters to the input file
        self.fileDisplayUI_ApplyFilters()

    def filterDisplayUI_toggleFilterMode(self):
        """Flip each selected filter between omit (red) and include (green)."""
        for selectedItem in self.filterDisplayUI.selectedItems():
            if selectedItem.getState() == LogParser_Filter.STATE_OMIT:
                selectedItem.setState(LogParser_Filter.STATE_INCLUDE)
            else:
                selectedItem.setState(LogParser_Filter.STATE_OMIT)
        self.filterDisplayUI.clearSelection()
        self.fileDisplayUI_ApplyFilters()

    def filterDisplayUI_mousePressedEvent(self, event):
        """
        Allow clicking to clear the filter selection
        """
        self.filterDisplayUI.clearSelection()
        super(QtGui.QListWidget, self.filterDisplayUI).mousePressEvent(event)

    def filterDisplayUI_mouseDoubleClickEvent(self, event):
        """
        Allow toggling of filters by double-clicking
        """
        self.filterDisplayUI_toggleFilterMode()
        self.filterInputUI.setFocus()

    def filterDisplayUI_keyPressEvent(self, event):
        """Handle Delete (remove filter/group) and Space (toggle mode)."""
        # Delete key
        if event.matches(QtGui.QKeySequence.Delete):
            # List to avoid concurrent modification
            itemsToDelete = []
            # If we're deleting a parent of a group, keep track of it
            groupToDelete = None
            selectedItem = None
            # For each selected item, remove it from the UI
            for selectedItem in self.filterDisplayUI.selectedItems():
                # For each group in our list of groups
                for group in self.filterGroups:
                    # If the item to delete is among this group
                    if selectedItem in group:
                        # Check if it's a parent
                        if group[0] == selectedItem:
                            # Remove all children of the parent
                            for item in group:
                                itemsToDelete.append(item)
                            # Delete the group
                            groupToDelete = group
                        # If the item to delete is a child
                        else:
                            itemsToDelete.append(selectedItem)
            # For each item that we have flagged to delete
            for item in itemsToDelete:
                # Delete it from the GUI
                itemToRemove = self.filterDisplayUI.takeItem(self.filterDisplayUI.row(item))
                # Delete it from our list
                for group in self.filterGroups:
                    if itemToRemove in group:
                        group.remove(itemToRemove)
            # If we are deleting a parent group, remove the group
            if groupToDelete is not None:
                self.filterGroups.remove(groupToDelete)
            if selectedItem is not None:
                # De-select the selected item
                self.filterDisplayUI.clearSelection()
                # Update the file display with the new filter options
                self.fileDisplayUI_ApplyFilters()
        # Space key toggles the selected filters' mode
        if event.key() == QtCore.Qt.Key_Space:
            self.filterDisplayUI_toggleFilterMode()
            self.filterInputUI.setFocus()

    def initComponentsUI(self):
        """Create the three main widgets: file display, filter input,
        and filter list."""
        # Wrapping options:
        # QtGui.QTextEdit.NoWrap, WidgetWidth, FixedPixelWidth, FixedColumnWidth
        # UI widget that will display the output of the input file
        self.fileDisplayUI = QtGui.QTextEdit('Drop a file here or use the command line')
        self.fileDisplayUI.setReadOnly(True)
        self.fileDisplayUI.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.fileDisplayUI.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        p = self.fileDisplayUI.palette()
        p.setColor(QtGui.QPalette.Base, QtGui.QColor(39, 40, 34))
        p.setColor(QtGui.QPalette.Text, QtGui.QColor(255, 255, 255))
        self.fileDisplayUI.setPalette(p)
        #self.fileDisplayUI.verticalScrollBar().valueChanged.connect(self.fileDisplayUI_BufferScroll)
        self.fileDisplayUI.installEventFilter(self)
        self.upperSplitter.addWidget(self.fileDisplayUI)

        # UI Widget that the user types filters into, the enter key adds the
        # filter
        self.filterInputUI = QtGui.QLineEdit()
        self.filterInputUI.setFixedHeight(25)
        self.filterInputUI.setPlaceholderText('Enter filters here')
        p = self.filterInputUI.palette()
        p.setColor(QtGui.QPalette.Base, QtGui.QColor(200, 200, 200))
        p.setColor(QtGui.QPalette.Text, QtGui.QColor(0, 0, 0))
        self.filterInputUI.setPalette(p)
        self.filterInputUI.returnPressed.connect(self.filterDisplayUI_addNewFilter)
        self.lowerSplitter.addWidget(self.filterInputUI)

        # UI Widget that displays currently enabled filters
        self.filterDisplayUI = QtGui.QListWidget()
        self.filterDisplayUI.mousePressEvent = self.filterDisplayUI_mousePressedEvent
        self.filterDisplayUI.mouseDoubleClickEvent = self.filterDisplayUI_mouseDoubleClickEvent
        self.filterDisplayUI.keyPressEvent = self.filterDisplayUI_keyPressEvent
        # self.filterDisplayUI.setFocusPolicy(QtCore.Qt.NoFocus)
        p = self.filterDisplayUI.palette()
        p.setColor(QtGui.QPalette.Base, QtGui.QColor(39, 40, 34))
        p.setColor(QtGui.QPalette.Text, QtGui.QColor(255, 255, 255))
        p.setColor(QtGui.QPalette.Highlight, QtGui.QColor(49, 50, 46))
        self.filterDisplayUI.setPalette(p)
        self.filterDisplayUI.installEventFilter(self)
        self.upperSplitter.addWidget(self.filterDisplayUI)
        self.upperSplitter.setSizes([1000, 100])

    def initGUIStructureUI(self):
        """Build the container/layout/splitter skeleton.

        Generally, to add widgets to the GUI, use the splitter.addWidget()
        method. Hierarchy (outermost first):

            componentContainer > componentLayout > componentSplitter (vertical)
                upperContainer > upperLayout > upperSplitter
                    (file display | filter list, added in initComponentsUI)
                lowerContainer > lowerLayout > lowerSplitter
                    (filter input, added in initComponentsUI)
        """
        componentContainer = QtGui.QWidget()
        componentLayout = QtGui.QVBoxLayout()
        componentSplitter = QtGui.QSplitter()
        componentSplitter.setOrientation(QtCore.Qt.Vertical)
        componentContainer.setLayout(componentLayout)
        componentLayout.addWidget(componentSplitter)

        upperContainer = QtGui.QWidget()
        upperLayout = QtGui.QVBoxLayout()
        self.upperSplitter = QtGui.QSplitter()
        upperContainer.setLayout(upperLayout)
        upperLayout.addWidget(self.upperSplitter)

        lowerContainer = QtGui.QWidget()
        lowerLayout = QtGui.QVBoxLayout()
        self.lowerSplitter = QtGui.QSplitter()
        lowerContainer.setLayout(lowerLayout)
        lowerLayout.addWidget(self.lowerSplitter)

        componentSplitter.addWidget(upperContainer)
        componentSplitter.addWidget(lowerContainer)
        componentSplitter.setSizes([1000, 100])

        self.centralVBox.addWidget(componentContainer)

    def toggleNewLineMode(self):
        """Toggle whether empty lines are shown, then re-filter."""
        self.newLineMode = not self.newLineMode
        self.fileDisplayUI_ApplyFilters()

    def initCentralWidgetUI(self):
        """Create status bar, menu/tool bars, and the central layout."""
        # Status bar that appears on the bottom of the window
        self.statusBar().showMessage('Ready')
        self.statusBar().setStyleSheet("color: rgb(180, 180, 180);")

        # This action describes exiting the application
        exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(QtGui.qApp.quit)

        # This action causes newlines to be omitted from the filter output
        newLineAction = QtGui.QAction(QtGui.QIcon('filter.png'), '&Toggle New Lines', self)
        newLineAction.setShortcut('Ctrl+Shift+N')
        newLineAction.setStatusTip('Toggle filtering new lines')
        newLineAction.triggered.connect(self.toggleNewLineMode)

        # Adds a File dropdown menu
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(exitAction)

        # A bar that is always in view with buttons
        toolbar = self.addToolBar('Exit')
        toolbar.addAction(exitAction)
        toolbar.addAction(newLineAction)

        # Vertical box layout
        self.centralVBox = QtGui.QVBoxLayout()

        # Main container for all widgets
        centralWidget = QtGui.QWidget()
        centralWidget.setLayout(self.centralVBox)
        self.setCentralWidget(centralWidget)

    def fileDisplayUI_UpdateDisplay(self, displayText):
        """
        Custom event handler for updating the GUI's display as filter results
        are found from the thread that filters the file.
        The filter thread emits the signal every time a new matching line is found.
        """
        self.fileDisplayUI.setText(displayText)

    def fileDisplayUI_ApplyFilters(self):
        """
        This method can be called multiple times because when the thread reaches
        the end of its run method, the isRunning flag becomes false automatically.
        """
        # NOTE(review): this is a busy-wait that asks any in-flight filter
        # pass to stop before starting a new one — confirm it cannot spin
        # indefinitely if the worker stalls.
        while self.applyFiltersThread.isRunning():
            self.applyFiltersThread.running = False
        self.fileDisplayUI.setText('Filtering/Loading file...')
        self.applyFiltersThread.start()

    def initProgramVariables(self):
        """Initialize state shared with the filter worker thread."""
        # True = keep empty lines in the output (don't filter them) by default
        self.newLineMode = True
        # Contains every line of the file we wish to filter
        self.fileData = []
        # TODO: Will be used later for smooth scrolling of the entire file
        self.pageNumber = 0
        # Used for grouping filters, necessary for ANDing filters
        self.filterGroups = []
        # Thread used to apply filters, prevents the GUI from locking up with
        # large files
        self.applyFiltersThread = LogParser_ApplyFilterThread(self)
        # Custom event handler for the filtering thread to update the GUI
        # text as results are found
        self.connect(self.applyFiltersThread, self.applyFiltersThread.signal, self.fileDisplayUI_UpdateDisplay)
def main():
    """Entry point: optionally take a log file path from argv and launch the GUI."""
    target = None
    if len(sys.argv) > 1:
        candidate = sys.argv[1]
        # Guard clause: reject anything that is not an existing regular file.
        if not os.path.isfile(candidate):
            print("ERROR: {} does not exist or is not a regular file!".format(candidate))
            sys.exit(1)
        target = candidate
    app = QtGui.QApplication(sys.argv)
    # Keep a reference to the window so it is not garbage collected.
    parser_window = LogParser(target)
    sys.exit(app.exec_())
if __name__ == '__main__':
    # Launch the log parser GUI when run as a script.
    main()
| |
import numpy as np
from ..utils.utils_def import FlopyBinaryData
class ObsFiles(FlopyBinaryData):
def __init__(self):
super(ObsFiles, self).__init__()
return
def get_times(self):
"""
Get a list of unique times in the file
Returns
----------
out : list of floats
List contains unique simulation times (totim) in binary file.
"""
return self.data['totim'].reshape(self.get_ntimes()).tolist()
def get_ntimes(self):
"""
Get the number of times in the file
Returns
----------
out : int
The number of simulation times (totim) in binary file.
"""
return self.data['totim'].shape[0]
def get_nobs(self):
"""
Get the number of observations in the file
Returns
----------
out : tuple of int
A tupe with the number of records and number of flow items
in the file. The number of flow items is non-zero only if
swrtype='flow'.
"""
return self.nobs
def get_obsnames(self):
"""
Get a list of observation names in the file
Returns
----------
out : list of strings
List of observation names in the binary file. totim is not
included in the list of observation names.
"""
return list(self.data.dtype.names[1:])
def get_data(self, idx=None, obsname=None, totim=None):
"""
Get data from the observation file.
Parameters
----------
idx : int
The zero-based record number. The first record is record 0.
If idx is None and totim are None, data for all simulation times
are returned. (default is None)
obsname : string
The name of the observation to return. If obsname is None, all
observation data are returned. (default is None)
totim : float
The simulation time to return. If idx is None and totim are None,
data for all simulation times are returned. (default is None)
Returns
----------
data : numpy record array
Array has size (ntimes, nitems). totim is always returned. nitems
is 2 if idx or obsname is not None or nobs+1.
See Also
--------
Notes
-----
If both idx and obsname are None, will return all of the observation
data.
Examples
--------
>>> hyd = HydmodObs("my_model.hyd")
>>> ts = hyd.get_data()
"""
i0 = 0
i1 = self.data.shape[0]
if totim is not None:
idx = np.where(self.data['totim'] == totim)[0][0]
i0 = idx
i1 = idx + 1
elif idx is not None:
if idx < i1:
i0 = idx
i1 = i0 + 1
r = None
if obsname is None:
obsname = self.get_obsnames()
else:
if obsname is not None:
if obsname not in self.data.dtype.names:
obsname = None
else:
if not isinstance(obsname, list):
obsname = [obsname]
if obsname is not None:
obsname.insert(0, 'totim')
r = get_selection(self.data, obsname)[i0:i1]
return r
    def get_dataframe(self, start_datetime='1-1-1970',
                      idx=None, obsname=None, totim=None, timeunit='D'):
        """
        Get a pandas dataframe of the observation data in the hydmod file.

        Parameters
        ----------
        start_datetime : str
            If start_datetime is passed as None, the rows are indexed on totim.
            Otherwise, a DatetimeIndex is set. (default is 1-1-1970).
        idx : int
            The zero-based record number.  The first record is record 0.
            If idx is None and totim are None, a dataframe with all simulation
            times is returned. (default is None)
        obsname : string
            The name of the observation to return. If obsname is None, all
            observation data are returned. (default is None)
        totim : float
            The simulation time to return. If idx is None and totim are None,
            a dataframe with all simulation times is returned.
            (default is None)
        timeunit : string
            time unit of the simulation time. Valid values are 'S'econds,
            'M'inutes, 'H'ours, 'D'ays, 'Y'ears. (default is 'D').

        Returns
        -------
        out : pandas dataframe
            Pandas dataframe of selected data, or None if obsname does not
            match a column in the data.

        Notes
        -----
        If both idx and obsname are None, will return all of the observation
        data as a dataframe.

        Examples
        --------
        >>> hyd = HydmodObs("my_model.hyd")
        >>> df = hyd.get_dataframe()
        """
        # pandas is an optional dependency, so import lazily and re-raise
        # with a message that identifies this method as the source.
        try:
            import pandas as pd
            from ..utils.utils_def import totim_to_datetime
        except Exception as e:
            raise Exception(
                "HydmodObs.get_dataframe() error import pandas: " + \
                str(e))
        # Default row window is the full record array; totim takes priority
        # over idx when both are given.
        i0 = 0
        i1 = self.data.shape[0]
        if totim is not None:
            # exact-match lookup; raises IndexError if totim is not present
            idx = np.where(self.data['totim'] == totim)[0][0]
            i0 = idx
            i1 = idx + 1
        elif idx is not None:
            # silently ignore an out-of-range idx (full window is kept)
            if idx < i1:
                i0 = idx
                i1 = i0 + 1
        if obsname is None:
            obsname = self.get_obsnames()
        else:
            if obsname is not None:
                # NOTE(review): when obsname is passed as a list, the `in`
                # test below compares the list itself against the column
                # names and always fails, returning None — confirm whether
                # list input is meant to be supported here.
                if obsname not in self.data.dtype.names:
                    obsname = None
                else:
                    if not isinstance(obsname, list):
                        obsname = [obsname]
        if obsname is None:
            # requested observation name not found
            return None
        # totim is always included as the first column
        obsname.insert(0, 'totim')
        dti = self.get_times()[i0:i1]
        if start_datetime is not None:
            # convert simulation times to a DatetimeIndex
            dti = totim_to_datetime(dti,
                                    start=pd.to_datetime(start_datetime),
                                    timeunit=timeunit)
        df = pd.DataFrame(self.data[i0:i1], index=dti, columns=obsname)
        return df
    def _read_data(self):
        # Read every record from the open file into self.data, one record
        # at a time, stacking them into a single recarray.  No-op if the
        # data have already been read.
        if self.data is not None:
            return
        while True:
            try:
                r = self.read_record(count=1)
                if self.data is None:
                    self.data = r.copy()
                elif r.size == 0:
                    # empty read signals end of file
                    break
                else:
                    # should be hstack based on (https://mail.scipy.org/pipermail/numpy-discussion/2010-June/051107.html)
                    self.data = np.hstack((self.data, r))
            except:
                # NOTE(review): bare except — presumably read_record raises
                # at end of file and this is the intended loop exit, but it
                # will also swallow unrelated errors; confirm.
                break
        return
def _build_dtype(self):
"""
Build the recordarray and iposarray, which maps the header information
to the position in the formatted file.
"""
raise Exception(
'Abstract method _build_dtype called in BinaryFiles. This method needs to be overridden.')
def _build_index(self):
"""
Build the recordarray and iposarray, which maps the header information
to the position in the formatted file.
"""
raise Exception(
'Abstract method _build_index called in BinaryFiles. This method needs to be overridden.')
class HydmodObs(ObsFiles):
    """
    HydmodObs Class - used to read binary MODFLOW HYDMOD package output

    Parameters
    ----------
    filename : str
        Name of the hydmod output file
    verbose : boolean
        If true, print additional information to to the screen during the
        extraction. (default is False)
    hydlbl_len : int
        Length of hydmod labels. (default is 20)

    Returns
    -------
    None

    """

    def __init__(self, filename, verbose=False, hydlbl_len=20):
        """
        Class constructor.
        """
        super(HydmodObs, self).__init__()
        # initialize class information
        self.verbose = verbose
        # --open binary head file
        self.file = open(filename, 'rb')
        # NHYDTOT,ITMUNI
        self.nobs = self.read_integer()
        precision = 'single'
        if self.nobs < 0:
            # a negative observation count flags double-precision data
            self.nobs = abs(self.nobs)
            precision = 'double'
        self.set_float(precision)
        # continue reading the file
        self.itmuni = self.read_integer()
        # BUGFIX: np.float was a deprecated alias of the builtin float and
        # was removed in numpy 1.24; np.float64 is the identical concrete
        # type, so behavior is unchanged on older numpy as well.
        self.v = np.empty(self.nobs, dtype=np.float64)
        self.v.fill(1.0E+32)
        # 4-character time-unit label; read to advance the file position
        ctime = self.read_text(nchar=4)
        self.hydlbl_len = int(hydlbl_len)
        # read HYDLBL
        hydlbl = []
        for idx in range(0, self.nobs):
            cid = self.read_text(self.hydlbl_len)
            hydlbl.append(cid)
        self.hydlbl = np.array(hydlbl)
        # build dtype
        self._build_dtype()
        # build index
        self._build_index()
        self.data = None
        self._read_data()

    def _build_dtype(self):
        # one float column per observation site, preceded by totim
        dtype = [('totim', self.floattype)]
        for site in self.hydlbl:
            # labels may come back as bytes from the binary reader
            if not isinstance(site, str):
                site_name = site.decode().strip()
            else:
                site_name = site.strip()
            dtype.append((site_name, self.floattype))
        self.dtype = np.dtype(dtype)
        return

    def _build_index(self):
        # nothing to index for HYDMOD output
        return
class SwrObs(ObsFiles):
    """
    Reader for binary SWR observation output written by the MODFLOW SWR
    Process.

    Parameters
    ----------
    filename : string
        Name of the cell budget file
    precision : string
        'single' or 'double'. Default is 'double'.
    verbose : bool
        Write information to the screen. Default is False.

    Examples
    --------
    >>> import flopy
    >>> so = flopy.utils.SwrObs('mymodel.swr.obs')
    """

    def __init__(self, filename, precision='double', verbose=False):
        """Open *filename* and read all observation records."""
        super(SwrObs, self).__init__()
        self.set_float(precision=precision)
        self.verbose = verbose
        # open the binary observation file
        self.file = open(filename, 'rb')
        # number of observations (NOBS)
        self.nobs = self.read_integer()
        # observation names follow the count; decode bytes if necessary
        names = []
        for _ in range(0, self.nobs):
            label = self.read_text()
            if isinstance(label, bytes):
                label = label.decode()
            names.append(label.strip())
        self.obs = names
        # assemble the record dtype, then the (no-op) index
        self._build_dtype()
        self._build_index()
        # pull every record into self.data
        self.data = None
        self._read_data()

    def _build_dtype(self):
        # totim first, then one float column per observation name
        fields = [('totim', self.floattype)]
        for name in self.obs:
            fields.append((str(name), self.floattype))
        self.dtype = np.dtype(fields)
        return

    def _build_index(self):
        # SWR observation files need no index
        return
def get_selection(data, names):
    """
    Return a column subset of a structured array as a zero-copy view.

    Parameters
    ----------
    data : numpy recarray
        recarray of data to make a selection from
    names : string or list of strings
        column names to return

    Returns
    -------
    out : numpy recarry
        recarray with selection
    """
    if not isinstance(names, list):
        names = [names]
    # report every unknown column before failing
    bad = 0
    for column in names:
        if column not in data.dtype.names:
            bad += 1
            print('Error: {} is not a valid column name'.format(column))
    if bad:
        raise Exception('Error: {} names did not match'.format(bad))
    # Build a dtype that maps the requested fields onto their original
    # offsets, then re-view the existing buffer — no data are copied.
    view_dtype = np.dtype({column: data.dtype.fields[column]
                           for column in names})
    return np.ndarray(data.shape, view_dtype, data, 0, data.strides)
| |
"""distutils.command.bdist_rpm
Implements the Distutils 'bdist_rpm' command (create RPM source and binary
distributions)."""
__revision__ = "$Id: bdist_rpm.py 86234 2010-11-06 02:10:32Z eric.araujo $"
import sys, os
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.util import get_platform
from distutils.file_util import write_file
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_rpm(Command):
    """Distutils command that builds source/binary RPMs for a distribution.

    The command writes a .spec file from the distribution metadata plus the
    options below (usually supplied via the [bdist_rpm] section of
    setup.cfg), then drives rpm/rpmbuild over it.
    """

    description = "create an RPM distribution"

    user_options = [
        ('bdist-base=', None,
         "base directory for creating built distributions"),
        ('rpm-base=', None,
         "base directory for creating RPMs (defaults to \"rpm\" under "
         "--bdist-base; must be specified for RPM 2)"),
        ('dist-dir=', 'd',
         "directory to put final RPM files in "
         "(and .spec files if --spec-only)"),
        ('python=', None,
         "path to Python interpreter to hard-code in the .spec file "
         "(default: \"python\")"),
        ('fix-python', None,
         "hard-code the exact path to the current Python interpreter in "
         "the .spec file"),
        ('spec-only', None,
         "only regenerate spec file"),
        ('source-only', None,
         "only generate source RPM"),
        ('binary-only', None,
         "only generate binary RPM"),
        ('use-bzip2', None,
         "use bzip2 instead of gzip to create source distribution"),

        # More meta-data: too RPM-specific to put in the setup script,
        # but needs to go in the .spec file -- so we make these options
        # to "bdist_rpm".  The idea is that packagers would put this
        # info in setup.cfg, although they are of course free to
        # supply it on the command line.
        ('distribution-name=', None,
         "name of the (Linux) distribution to which this "
         "RPM applies (*not* the name of the module distribution!)"),
        ('group=', None,
         "package classification [default: \"Development/Libraries\"]"),
        ('release=', None,
         "RPM release number"),
        ('serial=', None,
         "RPM serial number"),
        ('vendor=', None,
         "RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") "
         "[default: maintainer or author from setup script]"),
        ('packager=', None,
         "RPM packager (eg. \"Jane Doe <jane@example.net>\")"
         "[default: vendor]"),
        ('doc-files=', None,
         "list of documentation files (space or comma-separated)"),
        ('changelog=', None,
         "RPM changelog"),
        ('icon=', None,
         "name of icon file"),
        ('provides=', None,
         "capabilities provided by this package"),
        ('requires=', None,
         "capabilities required by this package"),
        ('conflicts=', None,
         "capabilities which conflict with this package"),
        ('build-requires=', None,
         "capabilities required to build this package"),
        ('obsoletes=', None,
         "capabilities made obsolete by this package"),
        ('no-autoreq', None,
         "do not automatically calculate dependencies"),

        # Actions to take when building RPM
        ('keep-temp', 'k',
         "don't clean up RPM build directory"),
        ('no-keep-temp', None,
         "clean up RPM build directory [default]"),
        ('use-rpm-opt-flags', None,
         "compile with RPM_OPT_FLAGS when building from source RPM"),
        ('no-rpm-opt-flags', None,
         "do not pass any RPM CFLAGS to compiler"),
        ('rpm3-mode', None,
         "RPM 3 compatibility mode (default)"),
        ('rpm2-mode', None,
         "RPM 2 compatibility mode"),

        # Add the hooks necessary for specifying custom scripts
        ('prep-script=', None,
         "Specify a script for the PREP phase of RPM building"),
        ('build-script=', None,
         "Specify a script for the BUILD phase of RPM building"),
        ('pre-install=', None,
         "Specify a script for the pre-INSTALL phase of RPM building"),
        ('install-script=', None,
         "Specify a script for the INSTALL phase of RPM building"),
        ('post-install=', None,
         "Specify a script for the post-INSTALL phase of RPM building"),
        ('pre-uninstall=', None,
         "Specify a script for the pre-UNINSTALL phase of RPM building"),
        ('post-uninstall=', None,
         "Specify a script for the post-UNINSTALL phase of RPM building"),
        ('clean-script=', None,
         "Specify a script for the CLEAN phase of RPM building"),
        ('verify-script=', None,
         "Specify a script for the VERIFY phase of the RPM build"),

        # Allow a packager to explicitly force an architecture
        ('force-arch=', None,
         "Force an architecture onto the RPM build process"),

        ('quiet', 'q',
         "Run the INSTALL phase of RPM building in quiet mode"),
        ]

    boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
                       'no-autoreq', 'quiet']

    negative_opt = {'no-keep-temp': 'keep-temp',
                    'no-rpm-opt-flags': 'use-rpm-opt-flags',
                    'rpm2-mode': 'rpm3-mode'}

    def initialize_options(self):
        """Set every command option to its pre-parse default."""
        self.bdist_base = None
        self.rpm_base = None
        self.dist_dir = None
        self.python = None
        self.fix_python = None
        self.spec_only = None
        self.binary_only = None
        self.source_only = None
        self.use_bzip2 = None

        self.distribution_name = None
        self.group = None
        self.release = None
        self.serial = None
        self.vendor = None
        self.packager = None
        self.doc_files = None
        self.changelog = None
        self.icon = None

        self.prep_script = None
        self.build_script = None
        self.install_script = None
        self.clean_script = None
        self.verify_script = None
        self.pre_install = None
        self.post_install = None
        self.pre_uninstall = None
        self.post_uninstall = None
        self.prep = None
        self.provides = None
        self.requires = None
        self.conflicts = None
        self.build_requires = None
        self.obsoletes = None

        self.keep_temp = 0
        self.use_rpm_opt_flags = 1
        self.rpm3_mode = 1
        self.no_autoreq = 0

        self.force_arch = None
        self.quiet = 0

    def finalize_options(self):
        """Validate option combinations and fill in derived defaults."""
        self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
        if self.rpm_base is None:
            # rpm3 mode places everything under <bdist_base>/rpm;
            # rpm2 mode has no sensible default, so require it explicitly
            if not self.rpm3_mode:
                raise DistutilsOptionError(
                      "you must specify --rpm-base in RPM 2 mode")
            self.rpm_base = os.path.join(self.bdist_base, "rpm")
        if self.python is None:
            if self.fix_python:
                self.python = sys.executable
            else:
                self.python = "python"
        elif self.fix_python:
            raise DistutilsOptionError(
                  "--python and --fix-python are mutually exclusive options")
        if os.name != 'posix':
            raise DistutilsPlatformError("don't know how to create RPM "
                   "distributions on platform %s" % os.name)
        if self.binary_only and self.source_only:
            raise DistutilsOptionError(
                  "cannot supply both '--source-only' and '--binary-only'")
        # don't pass CFLAGS to pure python distributions
        if not self.distribution.has_ext_modules():
            self.use_rpm_opt_flags = 0
        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
        self.finalize_package_data()

    def finalize_package_data(self):
        """Normalize the RPM metadata options (strings, lists, filenames)."""
        self.ensure_string('group', "Development/Libraries")
        # vendor defaults to the distribution's contact info
        self.ensure_string('vendor',
                           "%s <%s>" % (self.distribution.get_contact(),
                                        self.distribution.get_contact_email()))
        self.ensure_string('packager')
        self.ensure_string_list('doc_files')
        if isinstance(self.doc_files, list):
            # a README is always worth shipping if it exists
            for readme in ('README', 'README.txt'):
                if os.path.exists(readme) and readme not in self.doc_files:
                    self.doc_files.append(readme)

        self.ensure_string('release', "1")
        self.ensure_string('serial')   # should it be an int?

        self.ensure_string('distribution_name')

        self.ensure_string('changelog')
          # Format changelog correctly
        self.changelog = self._format_changelog(self.changelog)

        self.ensure_filename('icon')

        self.ensure_filename('prep_script')
        self.ensure_filename('build_script')
        self.ensure_filename('install_script')
        self.ensure_filename('clean_script')
        self.ensure_filename('verify_script')
        self.ensure_filename('pre_install')
        self.ensure_filename('post_install')
        self.ensure_filename('pre_uninstall')
        self.ensure_filename('post_uninstall')

        # XXX don't forget we punted on summaries and descriptions -- they
        # should be handled here eventually!

        # Now *this* is some meta-data that belongs in the setup script...
        self.ensure_string_list('provides')
        self.ensure_string_list('requires')
        self.ensure_string_list('conflicts')
        self.ensure_string_list('build_requires')
        self.ensure_string_list('obsoletes')

        self.ensure_string('force_arch')

    def run(self):
        """Write the spec file, build an sdist, and invoke rpm/rpmbuild."""
        if DEBUG:
            print("before _get_package_data():")
            print("vendor =", self.vendor)
            print("packager =", self.packager)
            print("doc_files =", self.doc_files)
            print("changelog =", self.changelog)

        # make directories
        if self.spec_only:
            spec_dir = self.dist_dir
            self.mkpath(spec_dir)
        else:
            rpm_dir = {}
            for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
                rpm_dir[d] = os.path.join(self.rpm_base, d)
                self.mkpath(rpm_dir[d])
            spec_dir = rpm_dir['SPECS']

        # Spec file goes into 'dist_dir' if '--spec-only specified',
        # build/rpm.<plat> otherwise.
        spec_path = os.path.join(spec_dir,
                                 "%s.spec" % self.distribution.get_name())
        self.execute(write_file,
                     (spec_path,
                      self._make_spec_file()),
                     "writing '%s'" % spec_path)

        if self.spec_only: # stop if requested
            return

        # Make a source distribution and copy to SOURCES directory with
        # optional icon.
        saved_dist_files = self.distribution.dist_files[:]
        sdist = self.reinitialize_command('sdist')
        if self.use_bzip2:
            sdist.formats = ['bztar']
        else:
            sdist.formats = ['gztar']
        self.run_command('sdist')
        # running sdist appends to dist_files; restore the original list
        self.distribution.dist_files = saved_dist_files

        source = sdist.get_archive_files()[0]
        source_dir = rpm_dir['SOURCES']
        self.copy_file(source, source_dir)

        if self.icon:
            if os.path.exists(self.icon):
                self.copy_file(self.icon, source_dir)
            else:
                raise DistutilsFileError(
                      "icon file '%s' does not exist" % self.icon)

        # build package
        log.info("building RPMs")
        rpm_cmd = ['rpm']
        # prefer the modern rpmbuild binary when present
        if os.path.exists('/usr/bin/rpmbuild') or \
           os.path.exists('/bin/rpmbuild'):
            rpm_cmd = ['rpmbuild']

        if self.source_only: # what kind of RPMs?
            rpm_cmd.append('-bs')
        elif self.binary_only:
            rpm_cmd.append('-bb')
        else:
            rpm_cmd.append('-ba')
        if self.rpm3_mode:
            rpm_cmd.extend(['--define',
                             '_topdir %s' % os.path.abspath(self.rpm_base)])
        if not self.keep_temp:
            rpm_cmd.append('--clean')

        if self.quiet:
            rpm_cmd.append('--quiet')

        rpm_cmd.append(spec_path)
        # Determine the binary rpm names that should be built out of this spec
        # file
        # Note that some of these may not be really built (if the file
        # list is empty)
        nvr_string = "%{name}-%{version}-%{release}"
        src_rpm = nvr_string + ".src.rpm"
        non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
        q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
            src_rpm, non_src_rpm, spec_path)

        out = os.popen(q_cmd)
        try:
            binary_rpms = []
            source_rpm = None
            while True:
                line = out.readline()
                if not line:
                    break
                l = line.strip().split()
                assert(len(l) == 2)
                binary_rpms.append(l[1])
                # The source rpm is named after the first entry in the spec file
                if source_rpm is None:
                    source_rpm = l[0]

            status = out.close()
            if status:
                raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))

        finally:
            # NOTE(review): out.close() was already called on the success
            # path above, so this closes the pipe a second time — harmless
            # for os.popen file objects, but worth confirming intent.
            out.close()

        self.spawn(rpm_cmd)

        if not self.dry_run:
            if not self.binary_only:
                srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
                assert(os.path.exists(srpm))
                self.move_file(srpm, self.dist_dir)

            if not self.source_only:
                for rpm in binary_rpms:
                    rpm = os.path.join(rpm_dir['RPMS'], rpm)
                    if os.path.exists(rpm):
                        self.move_file(rpm, self.dist_dir)

    def _dist_path(self, path):
        # join a bare filename onto the configured distribution directory
        return os.path.join(self.dist_dir, os.path.basename(path))

    def _make_spec_file(self):
        """Generate the text of an RPM spec file and return it as a
        list of strings (one per line).
        """
        # definitions and headers
        spec_file = [
            '%define name ' + self.distribution.get_name(),
            '%define version ' + self.distribution.get_version().replace('-','_'),
            '%define unmangled_version ' + self.distribution.get_version(),
            '%define release ' + self.release.replace('-','_'),
            '',
            'Summary: ' + self.distribution.get_description(),
            ]

        # put locale summaries into spec file
        # XXX not supported for now (hard to put a dictionary
        # in a config file -- arg!)
        #for locale in self.summaries.keys():
        #    spec_file.append('Summary(%s): %s' % (locale,
        #                                          self.summaries[locale]))

        spec_file.extend([
            'Name: %{name}',
            'Version: %{version}',
            'Release: %{release}',])

        # XXX yuck! this filename is available from the "sdist" command,
        # but only after it has run: and we create the spec file before
        # running "sdist", in case of --spec-only.
        if self.use_bzip2:
            spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
        else:
            spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')

        spec_file.extend([
            'License: ' + self.distribution.get_license(),
            'Group: ' + self.group,
            'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
            'Prefix: %{_prefix}', ])

        if not self.force_arch:
            # noarch if no extension modules
            if not self.distribution.has_ext_modules():
                spec_file.append('BuildArch: noarch')
        else:
            spec_file.append( 'BuildArch: %s' % self.force_arch )

        for field in ('Vendor',
                      'Packager',
                      'Provides',
                      'Requires',
                      'Conflicts',
                      'Obsoletes',
                      ):
            val = getattr(self, field.lower())
            if isinstance(val, list):
                spec_file.append('%s: %s' % (field, ' '.join(val)))
            elif val is not None:
                spec_file.append('%s: %s' % (field, val))

        if self.distribution.get_url() != 'UNKNOWN':
            spec_file.append('Url: ' + self.distribution.get_url())

        if self.distribution_name:
            spec_file.append('Distribution: ' + self.distribution_name)

        if self.build_requires:
            spec_file.append('BuildRequires: ' +
                             ' '.join(self.build_requires))

        if self.icon:
            spec_file.append('Icon: ' + os.path.basename(self.icon))

        if self.no_autoreq:
            spec_file.append('AutoReq: 0')

        spec_file.extend([
            '',
            '%description',
            self.distribution.get_long_description()
            ])

        # put locale descriptions into spec file
        # XXX again, suppressed because config file syntax doesn't
        # easily support this ;-(
        #for locale in self.descriptions.keys():
        #    spec_file.extend([
        #        '',
        #        '%description -l ' + locale,
        #        self.descriptions[locale],
        #        ])

        # rpm scripts
        # figure out default build script
        def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
        def_build = "%s build" % def_setup_call
        if self.use_rpm_opt_flags:
            def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build

        # insert contents of files

        # XXX this is kind of misleading: user-supplied options are files
        # that we open and interpolate into the spec file, but the defaults
        # are just text that we drop in as-is.  Hmmm.

        install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT '
                       '--record=INSTALLED_FILES') % def_setup_call

        script_options = [
            ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
            ('build', 'build_script', def_build),
            ('install', 'install_script', install_cmd),
            ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
            ('verifyscript', 'verify_script', None),
            ('pre', 'pre_install', None),
            ('post', 'post_install', None),
            ('preun', 'pre_uninstall', None),
            ('postun', 'post_uninstall', None),
        ]

        for (rpm_opt, attr, default) in script_options:
            # Insert contents of file referred to, if no file is referred to
            # use 'default' as contents of script
            val = getattr(self, attr)
            if val or default:
                spec_file.extend([
                    '',
                    '%' + rpm_opt,])
                if val:
                    # NOTE(review): the file handle is never closed here;
                    # CPython's refcounting hides this, but it is a leak on
                    # other implementations.
                    spec_file.extend(open(val, 'r').read().split('\n'))
                else:
                    spec_file.append(default)

        # files section
        spec_file.extend([
            '',
            '%files -f INSTALLED_FILES',
            '%defattr(-,root,root)',
            ])

        if self.doc_files:
            spec_file.append('%doc ' + ' '.join(self.doc_files))

        if self.changelog:
            spec_file.extend([
                '',
                '%changelog',])
            spec_file.extend(self.changelog)

        return spec_file

    def _format_changelog(self, changelog):
        """Format the changelog correctly and convert it to a list of strings
        """
        if not changelog:
            return changelog
        new_changelog = []
        # NOTE(review): a blank line in the middle of the changelog strips
        # to '' and line[0] below raises IndexError — confirm whether blank
        # interior lines are expected in practice.
        for line in changelog.strip().split('\n'):
            line = line.strip()
            if line[0] == '*':
                new_changelog.extend(['', line])
            elif line[0] == '-':
                new_changelog.append(line)
            else:
                new_changelog.append('  ' + line)

        # strip trailing newline inserted by first changelog entry
        if not new_changelog[0]:
            del new_changelog[0]

        return new_changelog
| |
#
# Copyright 2012, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
try:
import json
except:
import simplejson as json
import urllib
import httplib2
import socket
import time
import string
import logger
import client
from exception import ServerAlreadyJoinedException,\
ServerUnavailableException, InvalidArgumentException,\
BucketCreationException, ServerJoinException, BucketUnavailableException
# module-level logger shared by every class in this module
log = logger.logger("rest_client")
#helper library methods built on top of RestConnection interface
class RestHelper(object):
    """Polling/waiting conveniences layered over a RestConnection."""

    def __init__(self, rest_connection):
        self.rest = rest_connection

    def is_ns_server_running(self, timeout_in_seconds=360):
        """Poll until the cluster reports healthy or the timeout expires."""
        deadline = time.time() + timeout_in_seconds
        while time.time() <= deadline:
            try:
                if self.is_cluster_healthy():
                    return True
            except ServerUnavailableException:
                # node not answering yet; back off briefly and retry
                time.sleep(1)
        msg = 'unable to connect to the node %s even after waiting %s seconds'
        log.info(msg % (self.rest.ip, timeout_in_seconds))
        return False

    def is_cluster_healthy(self):
        # every node must report status == 'healthy'
        for node in self.rest.node_statuses():
            if node.status != 'healthy':
                return False
        return True

    def rebalance_reached(self, percentage=100):
        """Wait until rebalance progress reaches *percentage* percent."""
        started = time.time()
        progress = 0
        misses = 0
        while progress != -1 and progress <= percentage and misses < 20:
            #-1 is error , -100 means could not retrieve progress
            progress = self.rest._rebalance_progress()
            if progress == -100:
                log.error("unable to retrieve rebalanceProgress.try again in"
                          " 2 seconds")
                misses += 1
            else:
                misses = 0
            time.sleep(.1)
        if progress < 0:
            log.error("rebalance progress code : %s" % (progress))
            return False
        duration = time.time() - started
        log.info('rebalance reached >%s percent in %s seconds ' %
                 (progress, duration))
        return True

    def is_cluster_rebalanced(self):
        # delegate straight to the REST layer's rebalance status report
        return self.rest.rebalance_statuses()

    #this method will rebalance the cluster by passing the remote_node as
    #ejected node
    def remove_nodes(self, knownNodes, ejectedNodes):
        """Eject the given nodes via rebalance and wait for completion."""
        self.rest.rebalance(knownNodes, ejectedNodes)
        return self.rest.monitorRebalance()

    def vbucket_map_ready(self, bucket, timeout_in_seconds=360):
        """Wait until the bucket's vbucket map is populated."""
        deadline = time.time() + timeout_in_seconds
        while time.time() <= deadline:
            if self.rest.get_vbuckets(bucket):
                return True
            time.sleep(0.5)
        msg = 'vbucket map is not ready for bucket %s after waiting %s seconds'
        log.info(msg % (bucket, timeout_in_seconds))
        return False

    def bucket_exists(self, bucket):
        """Return True if *bucket* is among the buckets the node reports."""
        try:
            buckets = self.rest.get_buckets()
            existing = [item.name for item in buckets]
            log.info("existing buckets : %s" % (existing))
            for item in buckets:
                if item.name == bucket:
                    log.info("found bucket %s" % (bucket))
                    return True
            return False
        except Exception:
            # any REST failure is treated as "bucket not there"
            return False

    def wait_for_node_status(self, node, expected_status, timeout_in_seconds):
        """Poll node statuses until *node* reaches *expected_status*."""
        reached = False
        deadline = time.time() + timeout_in_seconds
        while time.time() <= deadline and not reached:
            for candidate in self.rest.node_statuses():
                if node.id == candidate.id:
                    log.info('node %s status : %s' % (node.id, candidate.status))
                    if candidate.status.lower() == expected_status.lower():
                        reached = True
                        break
            if not reached:
                log.info("sleep for 5 seconds before reading the node.status"
                         " again")
                time.sleep(5)
        log.info('node %s status_reached : %s' % (node.id, reached))
        return reached

    def wait_for_replication(self, timeout_in_seconds=120):
        """Wait until every node reports replication == 1.0."""
        polls = 0
        deadline = time.time() + timeout_in_seconds
        while time.time() <= deadline:
            if self.all_nodes_replicated():
                break
            polls += 1
            # every tenth poll, log the per-node state for debugging
            if polls == 10:
                log.info('replication state : %s' %
                         (self.all_nodes_replicated(debug=True)))
                polls = 0
            time.sleep(5)
        log.info('replication state : %s' % (self.all_nodes_replicated()))
        return self.all_nodes_replicated()

    def all_nodes_replicated(self, debug=False):
        """True if every node's replication factor equals 1.0."""
        fully_replicated = True
        for node in self.rest.node_statuses():
            if debug:
                log.info("node %s replication state : %s" %
                         (node.id, node.replication))
            if node.replication != 1.0:
                fully_replicated = False
        return fully_replicated
class RestConnection(object):
    def __init__(self, serverInfo):
        """Build base URLs from server info.

        serverInfo may be a dict (keys: ip/username/password/port and
        optionally couchApiBase) or an object with ip/rest_username/
        rest_password/port attributes.

        NOTE(review): when couchApiBase is not supplied, this constructor
        performs an HTTP request to pools/default to discover it — be aware
        that instantiation can block or raise on network failure.
        """
        #serverInfo can be a json object
        if isinstance(serverInfo, dict):
            self.ip = serverInfo["ip"]
            self.username = serverInfo["username"]
            self.password = serverInfo["password"]
            self.port = serverInfo["port"]
            self.couch_api_base = serverInfo.get("couchApiBase")
        else:
            self.ip = serverInfo.ip
            self.username = serverInfo.rest_username
            self.password = serverInfo.rest_password
            self.port = serverInfo.port
            self.couch_api_base = None
        self.baseUrl = "http://%s:%s/" % (self.ip, self.port)
        if self.couch_api_base is None:
            server_config_uri = "http://%s:%s/pools/default" % (self.ip,
                                                                self.port)
            config = client.ServerHelper.parse_server_config(server_config_uri,
                                                             self.username,
                                                             self.password)
            #couchApiBase is not in node config before Couchbase Server 2.0
            try:
                self.couch_api_base = config["nodes"][0].get("couchApiBase")
            except TypeError:
                # pre-2.0 config: fall back to the default CAPI port
                self.couch_api_base = "http://%s:8092/" % self.ip
def create_design_doc(self, bucket, design_doc, function):
api = self.couch_api_base + '%s/_design/%s' % (bucket, design_doc)
#check if this view exists and update the rev
headers = self._create_capi_headers()
status, content = self._http_request(api, 'PUT', function,
headers=headers)
json_parsed = json.loads(content)
if not status:
raise Exception("unable to create design doc")
return json_parsed
def view_results(self, bucket, design_doc, view, params, limit=100):
if view:
view_query = '%s/_design/%s/_view/%s' % (bucket, design_doc, view)
else:
view_query = '%s/%s' % (bucket, design_doc)
api = self.couch_api_base + view_query
num_params = 0
if limit != None:
num_params = 1
api += "?limit=%s" % (limit)
for param in params:
if num_params > 0:
api += "&"
else:
api += "?"
num_params += 1
if param in ["key", "start_key", "end_key",
"startkey_docid", "endkey_docid"] or \
params[param] is True or \
params[param] is False:
api += "%s=%s" % (param, json.dumps(params[param]))
else:
api += "%s=%s" % (param, params[param])
headers = self._create_capi_headers()
status, content = self._http_request(api, headers=headers)
json_parsed = json.loads(content)
if not status:
raise Exception("unable to obtain view results for " + api + "\n"
+ repr(status) + "\n" + content)
return json_parsed
def get_design_doc(self, bucket, design_doc):
api = self.couch_api_base + '%s/_design/%s' % (bucket, design_doc)
headers = self._create_capi_headers()
status, content = self._http_request(api, headers=headers)
json_parsed = json.loads(content)
if not status:
raise Exception("unable to get design doc")
return json_parsed
def get_view(self, bucket, design_doc, view):
api = self.couch_api_base + ('%s/_design/%s/_view/%s' %
(bucket, design_doc, view))
headers = self._create_capi_headers()
status, content = self._http_request(api, headers=headers)
json_parsed = json.loads(content)
if not status:
raise Exception("unable to get view")
return json_parsed
def delete_design_doc(self, bucket, design_doc):
api = self.couch_api_base + '%s/_design/%s' % (bucket, design_doc)
design_doc = self.get_design_doc(bucket, design_doc)
rev = design_doc["_rev"]
#pass in the rev
api = api + "?rev=%s" % (rev)
headers = self._create_capi_headers()
status, content = self._http_request(api, 'DELETE', headers=headers)
json_parsed = json.loads(content)
if not status:
raise Exception("unable to delete the design doc")
return json_parsed
    def _create_capi_headers(self):
        # Headers for CAPI (couchApiBase) requests: JSON content type plus
        # Basic auth, unless the bucket uses the passwordless "default" user.
        # NOTE(review): string.strip() and base64.encodestring() are
        # Python 2-only APIs — this module targets Python 2.
        headers = {'Content-Type': 'application/json', 'Accept': '*/*'}
        if self.username and self.username != "default":
            auth = 'Basic ' + \
                string.strip(base64.encodestring(self.username + ':' + (self.password or '')))
            headers['Authorization'] = auth
        return headers
    #authorization must be a base64 string of username:password
    def _create_headers(self):
        # Headers for management (ns_server) requests.  The "default" user
        # sends no Authorization header at all.
        # NOTE(review): base64.encodestring() keeps its trailing newline
        # here (unlike _create_capi_headers, which strips it) — confirm
        # whether that asymmetry is intentional.
        if self.username == "default":
            return {'Content-Type': 'application/json', 'Accept': '*/*'}
        else:
            authorization = base64.encodestring('%s:%s' % (self.username,
                                                self.password))
            return {'Content-Type': 'application/x-www-form-urlencoded',
                    'Authorization': 'Basic %s' % authorization,
                    'Accept': '*/*'}
    def _http_request(self, api, method='GET', params='', headers=None,
                      timeout=120):
        """Issue an HTTP request, retrying connection failures.

        Returns (status, content): status is True on 2xx, otherwise False
        or the server-supplied error reason string.  Retries once per
        second on socket/DNS errors until *timeout* seconds have elapsed,
        then raises ServerUnavailableException.

        NOTE: the `except socket.error, e` syntax below is Python 2 only.
        """
        if not headers:
            headers = self._create_headers()
        end_time = time.time() + timeout
        while True:
            try:
                response, content = httplib2.Http().request(api, method,
                                                            params, headers)
                if response['status'] in ['200', '201', '202']:
                    return True, content
                else:
                    # non-2xx: try to pull a reason out of the JSON body
                    try:
                        json_parsed = json.loads(content)
                    except:
                        json_parsed = {}
                    reason = "unknown"
                    status = False
                    if "error" in json_parsed:
                        reason = json_parsed["error"]
                        status = reason
                    elif "errors" in json_parsed:
                        errors = [error for _, error in
                                  json_parsed["errors"].iteritems()]
                        reason = ", ".join(errors)
                        status = reason
                    log.error('%s error %s reason: %s %s' %
                              (api, response['status'], reason, content))
                    # status may be a truthy reason string here — callers
                    # test it with `if not status`
                    return status, content
            except socket.error, e:
                log.error(e)
                if time.time() > end_time:
                    raise ServerUnavailableException(ip=self.ip)
            except httplib2.ServerNotFoundError, e:
                log.error(e)
                if time.time() > end_time:
                    raise ServerUnavailableException(ip=self.ip)
            # brief pause before the next connection attempt
            time.sleep(1)
def init_cluster(self, username='Administrator', password='password'):
api = self.baseUrl + 'settings/web'
params = urllib.urlencode({'port': str(self.port),
'username': username,
'password': password})
log.info('settings/web params : %s' % (params))
status, content = self._http_request(api, 'POST', params)
if not status:
return False
return status
def init_cluster_port(self, username='Administrator', password='password'):
api = self.baseUrl + 'settings/web'
params = urllib.urlencode({'port': '8091',
'username': username,
'password': password})
log.info('settings/web params : %s' % (params))
status, content = self._http_request(api, 'POST', params)
if not status:
return False
return status
def init_cluster_memoryQuota(self, username='Administrator',
password='password',
memoryQuota=256):
api = self.baseUrl + 'pools/default'
params = urllib.urlencode({'memoryQuota': memoryQuota,
'username': username,
'password': password})
log.info('pools/default params : %s' % (params))
status, content = self._http_request(api, 'POST', params)
if not status:
return False
return status
    #params serverIp : the server to add to this cluster
    #raises exceptions when
    #unauthorized user
    #server unreachable
    #can't add the node to itself ( TODO )
    #server already added
    #returns otpNode
    def add_node(self, user='', password='', remoteIp='', port='8091'):
        """Add remoteIp:port to this cluster via controller/addNode.

        Returns the new OtpNode on success; raises
        ServerAlreadyJoinedException or ServerJoinException on failure
        (classified by scraping the response body text).
        """
        otpNode = None
        log.info('adding remote node : %s to this cluster @ : %s'\
        % (remoteIp, self.ip))
        api = self.baseUrl + 'controller/addNode'
        params = urllib.urlencode({'hostname': "%s:%s" % (remoteIp, port),
                                   'user': user,
                                   'password': password})
        status, content = self._http_request(api, 'POST', params)
        if status:
            json_parsed = json.loads(content)
            otpNodeId = json_parsed['otpNode']
            otpNode = OtpNode(otpNodeId)
            # the server reports loopback for itself; substitute our ip
            if otpNode.ip == '127.0.0.1':
                otpNode.ip = self.ip
        else:
            # error classification relies on exact server message text
            if content.find('Prepare join failed. Node is already part of'
                            ' cluster') >= 0:
                raise ServerAlreadyJoinedException(nodeIp=self.ip,
                                                   remoteIp=remoteIp)
            elif content.find('Prepare join failed. Joining node to itself is'
                              ' not allowed') >= 0:
                raise ServerJoinException(nodeIp=self.ip,
                                          remoteIp=remoteIp)
            else:
                log.error('add_node error : %s' % (content))
                raise ServerJoinException(nodeIp=self.ip,
                                          remoteIp=remoteIp)
        return otpNode
def eject_node(self, user='', password='', otpNode=None):
    """Eject the given otpNode from the cluster.

    Returns False when otpNode is missing, True otherwise; raises
    ServerAlreadyJoinedException on the matching server error.
    """
    if not otpNode:
        log.error('otpNode parameter required')
        return False
    api = self.baseUrl + 'controller/ejectNode'
    params = urllib.urlencode({'otpNode': otpNode,
                               'user': user,
                               'password': password})
    status, content = self._http_request(api, 'POST', params)
    if status:
        log.info('ejectNode successful')
        return True
    if content.find('Prepare join failed. Node is already part of'
                    ' cluster') >= 0:
        raise ServerAlreadyJoinedException(nodeIp=self.ip,
                                           remoteIp=otpNode)
    # todo : raise an exception here
    log.error('eject_node error %s' % (content))
    return True
def fail_over(self, otpNode=None):
    """Fail over the given node; returns the request status or False."""
    if not otpNode:
        log.error('otpNode parameter required')
        return False
    api = self.baseUrl + 'controller/failOver'
    params = urllib.urlencode({'otpNode': otpNode})
    status, content = self._http_request(api, 'POST', params)
    if status:
        log.info('fail_over successful')
    else:
        log.error('fail_over error : %s' % (content))
    return status if status else False
def rebalance(self, otpNodes, ejectedNodes):
    """Start a rebalance over the given known/ejected node lists.

    otpNodes / ejectedNodes are iterables of otp node-name strings; they
    are sent as comma-separated lists. Raises InvalidArgumentException
    when the server rejects the request, otherwise returns the (truthy)
    request status.
    """
    # Idiom fix: the original built both comma-separated lists with a
    # manual index-tracking loop; str.join is equivalent and clearer.
    knownNodes = ','.join(otpNodes)
    ejectedNodesString = ','.join(ejectedNodes)
    params = urllib.urlencode({'knownNodes': knownNodes,
                               'ejectedNodes': ejectedNodesString,
                               'user': self.username,
                               'password': self.password})
    # (also fixes the 'rebalanace' typo in the log message)
    log.info('rebalance params : %s' % (params))
    api = self.baseUrl + "controller/rebalance"
    status, content = self._http_request(api, 'POST', params)
    if not status:
        log.error('rebalance operation failed')
        #extract the error
        raise InvalidArgumentException('controller/rebalance',
                                       parameters=params)
    log.info('rebalance operation started')
    return status
def monitorRebalance(self):
    """Poll rebalance progress until it completes, fails or stalls.

    Returns True on completion, False when progress goes negative
    (error, or 20 consecutive failed progress queries).
    """
    start = time.time()
    progress = 0
    retry = 0
    while progress not in (-1, 100) and retry < 20:
        #-1 is error , -100 means could not retrieve progress
        progress = self._rebalance_progress()
        if progress == -100:
            log.error("unable to retrieve rebalanceProgress.try again in"
                      "2 seconds")
            retry += 1
        else:
            retry = 0
        #sleep for 2 seconds
        time.sleep(2)
    if progress < 0:
        log.error("rebalance progress code : %s" % (progress))
        return False
    duration = time.time() - start
    log.info('rebalance progress took %s seconds ' % (duration))
    log.info("sleep for 10 seconds after rebalance...")
    time.sleep(10)
    return True
def _rebalance_progress(self):
    """Return current rebalance progress as a percentage.

    Returns -100 when the progress endpoint could not be queried, -1 when
    the cluster reports an errorMessage, 0..100 otherwise.
    """
    percentage = -1
    api = self.baseUrl + "pools/default/rebalanceProgress"
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    if status:
        if "status" in json_parsed:
            if "errorMessage" in json_parsed:
                log.error('%s - rebalance failed' % (json_parsed))
            elif json_parsed["status"] == "running":
                # Progress is reported per node under keys that look like
                # 'ns_1@host'; read the first such entry found.
                for key in json_parsed:
                    if key.find('@') >= 0:
                        ns_1_dictionary = json_parsed[key]
                        percentage = ns_1_dictionary['progress'] * 100
                        log.info('rebalance percentage : %s percent' %
                                 (percentage))
                        break
                if percentage == -1:
                    # Running but no per-node entry yet: report 0%.
                    percentage = 0
            else:
                # Any non-running status is treated as complete.
                percentage = 100
    else:
        percentage = -100
    return percentage
#if status is none, is there an errorMessage?
#convoluted logic which figures out if the rebalance failed or succeeded
def rebalance_statuses(self):
    """Return the cluster's 'balanced' flag, or None on request failure."""
    api = self.baseUrl + 'pools/rebalanceStatuses'
    status, content = self._http_request(api)
    parsed = json.loads(content)
    if not status:
        return None
    return parsed['balanced']
def log_client_error(self, post):
    """POST a client-side error report (pre-encoded body) to the server."""
    api = self.baseUrl + 'logClientError'
    status, _unused_content = self._http_request(api, 'POST', post)
    if not status:
        log.error('unable to logClientError')
#returns node data for this host
def get_nodes_self(self):
    """Return a Node describing this host (nodes/self), or None on failure."""
    api = self.baseUrl + 'nodes/self'
    status, content = self._http_request(api)
    if not status:
        return None
    json_parsed = json.loads(content)
    return RestParser().parse_get_nodes_response(json_parsed)
def node_statuses(self):
    """Return a list of OtpNode objects, one per cluster node."""
    api = self.baseUrl + 'nodeStatuses'
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    nodes = []
    if status:
        # Each key is 'host:port'; each value holds that node's info.
        for key, value in json_parsed.items():
            node = OtpNode(id=value['otpNode'],
                           status=value['status'])
            if node.ip == '127.0.0.1':
                node.ip = self.ip
            node.port = int(key[key.rfind(":") + 1:])
            node.replication = value['replication']
            nodes.append(node)
    return nodes
def cluster_status(self):
    """Return the parsed pools/default document, or {} on failure."""
    api = self.baseUrl + 'pools/default'
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    return json_parsed if status else {}
def get_pools_info(self):
    """Return the parsed /pools document, or {} on failure."""
    api = self.baseUrl + 'pools'
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    return json_parsed if status else {}
def get_pools(self):
    """Return a CouchbaseServerVersion for this server, or None on failure."""
    api = self.baseUrl + 'pools'
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    if not status:
        return None
    return CouchbaseServerVersion(json_parsed['implementationVersion'],
                                  json_parsed['componentsVersion'])
def get_buckets(self):
    """Return a list of Bucket objects for every bucket on the server.

    Returns [] when the request fails or the payload is not valid JSON.
    (Previously a decode failure was swallowed by a bare except and then
    crashed with NameError because json_parsed was left unbound.)
    """
    buckets = []
    api = '%s%s' % (self.baseUrl, 'pools/default/buckets/')
    status, content = self._http_request(api)
    try:
        json_parsed = json.loads(content)
    except ValueError:
        log.error("load json exception")
        return buckets
    if status:
        for item in json_parsed:
            bucketInfo = RestParser().parse_get_bucket_json(item)
            buckets.append(bucketInfo)
    return buckets
def get_bucket_stats_for_node(self, bucket='default', node_ip=None):
    """Return the first sample of each stat for *bucket* on *node_ip*.

    Returns None when node_ip is not supplied, {} when the request fails.
    """
    # BUG FIX: the guard used to read 'if not Node:' -- Node is a class
    # object and always truthy, so a missing node_ip was never caught.
    if not node_ip:
        log.error('node_ip not specified')
        return None
    stats = {}
    api = "%s%s%s%s%s%s" % (self.baseUrl, 'pools/default/buckets/',
                            bucket, "/nodes/", node_ip, ":8091/stats")
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    if status:
        op = json_parsed["op"]
        samples = op["samples"]
        for stat_name in samples:
            stats[stat_name] = samples[stat_name][0]
    return stats
def get_nodes(self):
    """Return Node objects for every member listed in pools/default."""
    nodes = []
    api = self.baseUrl + 'pools/default'
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    if status and "nodes" in json_parsed:
        for json_node in json_parsed["nodes"]:
            node = RestParser().parse_get_nodes_response(json_node)
            # Attach this connection's REST credentials and port.
            node.rest_username = self.username
            node.rest_password = self.password
            node.port = self.port
            if node.ip == "127.0.0.1":
                node.ip = self.ip
            nodes.append(node)
    return nodes
def get_bucket_stats(self, bucket='default'):
    """Return the most recent sample of each stat for *bucket*.

    Returns {} when the request fails.
    NOTE(review): stats whose sample list has exactly one entry are
    skipped because `last_sample` is 0 (falsy) -- confirm whether that
    is intentional before relying on single-sample stats.
    """
    stats = {}
    api = "".join([self.baseUrl, 'pools/default/buckets/', bucket,
                   "/stats"])
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    if status:
        op = json_parsed["op"]
        samples = op["samples"]
        for stat_name in samples:
            if samples[stat_name]:
                last_sample = len(samples[stat_name]) - 1
                if last_sample:
                    stats[stat_name] = samples[stat_name][last_sample]
    return stats
def get_bucket(self, bucket='default'):
    """Fetch and parse one bucket's JSON document.

    Raises BucketUnavailableException when the request fails.
    """
    api = '%s%s%s' % (self.baseUrl, 'pools/default/buckets/', bucket)
    status, content = self._http_request(api)
    if not status:
        raise BucketUnavailableException(ip=self.ip, bucket_name=bucket,
                                         error=status)
    return RestParser().parse_get_bucket_response(content)
def get_vbuckets(self, bucket='default'):
    """Return the vbucket map entries of the named bucket."""
    bucket_info = self.get_bucket(bucket)
    return bucket_info.vbuckets
def delete_bucket(self, bucket='default'):
    """DELETE the named bucket; returns the request status or False."""
    api = '%s%s%s' % (self.baseUrl, '/pools/default/buckets/', bucket)
    status, content = self._http_request(api, 'DELETE')
    return status if status else False
def is_ns_server_ready(self, timeout_in_seconds=360):
    """Poll nodes/self until the node reports 'healthy' or we time out.

    Returns (True, None) once healthy, else (False, error_message).
    """
    end_time = time.time() + timeout_in_seconds
    while time.time() <= end_time:
        try:
            status = self.get_nodes_self()
            if status is not None and status.status == 'healthy':
                return True, None
        except Exception:
            # Server may not be accepting connections yet; retry.
            pass
        time.sleep(1)
    # BUG FIX: this used to read self.rest.ip, but this object has no
    # 'rest' attribute, so the timeout path raised AttributeError
    # instead of returning the error message.
    err = "Unable to connect to the node %s even after waiting %d seconds" % \
          (self.ip, timeout_in_seconds)
    return False, err
# figure out the proxy port
def create_bucket(self, bucket='',
                  ramQuotaMB=1,
                  authType='none',
                  saslPassword='',
                  replicaNumber=1,
                  proxyPort=11211,
                  bucketType='membase'):
    """Create a bucket, waiting for ns_server to be ready first.

    Raises BucketCreationException when the server is unreachable or the
    create request fails; otherwise returns the request status.
    """
    status, err = self.is_ns_server_ready()
    if not status:
        raise BucketCreationException(ip=self.ip, bucket_name=bucket,
                                      error=err)
    api = '%s%s' % (self.baseUrl, '/pools/default/buckets')
    # Build the parameter set for the three supported auth cases.
    if bucket == 'default':
        #this only works for default bucket ?
        param_dict = {'name': bucket,
                      'authType': 'sasl',
                      'saslPassword': saslPassword,
                      'ramQuotaMB': ramQuotaMB,
                      'replicaNumber': replicaNumber,
                      'proxyPort': proxyPort,
                      'bucketType': bucketType}
    elif authType == 'none':
        param_dict = {'name': bucket,
                      'ramQuotaMB': ramQuotaMB,
                      'authType': authType,
                      'replicaNumber': replicaNumber,
                      'proxyPort': proxyPort,
                      'bucketType': bucketType}
    elif authType == 'sasl':
        # SASL buckets use the node's moxi port when available.
        node = self.get_nodes_self()
        if node:
            proxyPort = node.moxi
        param_dict = {'name': bucket,
                      'ramQuotaMB': ramQuotaMB,
                      'authType': authType,
                      'saslPassword': saslPassword,
                      'replicaNumber': replicaNumber,
                      'proxyPort': proxyPort,
                      'bucketType': bucketType}
    else:
        param_dict = {}
    params = urllib.urlencode(param_dict)
    log.info("%s with param: %s" % (api, params))
    status, content = self._http_request(api, 'POST', params)
    if not status:
        raise BucketCreationException(ip=self.ip, bucket_name=bucket,
                                      error=status)
    return status
#return AutoFailoverSettings
def get_autofailover_settings(self):
    """Fetch current auto-failover settings, or None on failure."""
    api = self.baseUrl + 'settings/autoFailover'
    status, content = self._http_request(api)
    json_parsed = json.loads(content)
    if not status:
        return None
    settings = AutoFailoverSettings()
    settings.enabled = json_parsed["enabled"]
    settings.count = json_parsed["count"]
    settings.timeout = json_parsed["timeout"]
    return settings
def update_autofailover_settings(self, enabled, timeout, max_nodes):
    """Enable or disable auto-failover with the given timeout and cap."""
    params = urllib.urlencode({'enabled': 'true' if enabled else 'false',
                               'timeout': timeout,
                               'maxNodes': max_nodes})
    api = self.baseUrl + 'settings/autoFailover'
    log.info('settings/autoFailover params : %s' % (params))
    status, content = self._http_request(api, 'POST', params)
    return status if status else False
def reset_autofailover(self):
    """Reset the auto-failover counter; returns status or False."""
    api = self.baseUrl + 'settings/autoFailover/resetCount'
    status, content = self._http_request(api, 'POST', '')
    return status if status else False
def enable_autofailover_alerts(self, recipients, sender, email_username,
                               email_password, email_host='localhost',
                               email_port=25, email_encrypt='false',
                               alerts=('auto_failover_node,'
                                       'auto_failover_maximum_reached')):
    """Enable email alerts for the given auto-failover events.

    Returns the request status, normalized to False on failure.
    """
    api = self.baseUrl + 'settings/alerts'
    # NOTE(review): 'emailPrt' looks like a typo for 'emailPort', but it
    # is the key actually sent on the wire -- confirm against the server
    # API before renaming it.
    params = urllib.urlencode({'enabled': 'true',
                               'recipients': recipients,
                               'sender': sender,
                               'emailUser': email_username,
                               'emailPass': email_password,
                               'emailHost': email_host,
                               'emailPrt': email_port,
                               'emailEncrypt': email_encrypt,
                               'alerts': alerts})
    log.info('settings/alerts params : %s' % (params))
    status, content = self._http_request(api, 'POST', params)
    if not status:
        return False
    return status
def disable_autofailover_alerts(self):
    """Turn off auto-failover email alerts; returns status or False."""
    api = self.baseUrl + 'settings/alerts'
    params = urllib.urlencode({'enabled': 'false'})
    log.info('settings/alerts params : %s' % (params))
    status, content = self._http_request(api, 'POST', params)
    return status if status else False
def stop_rebalance(self):
    """Ask the server to stop an in-flight rebalance."""
    api = self.baseUrl + '/controller/stopRebalance'
    status, content = self._http_request(api, 'POST')
    return status if status else False
def set_data_path(self, data_path):
    """Point this node's data directory at data_path."""
    api = self.baseUrl + '/nodes/self/controller/settings'
    params = urllib.urlencode({'path': data_path})
    log.info('/nodes/self/controller/settings params : %s' % (params))
    status, content = self._http_request(api, 'POST', params)
    return status if status else False
class CouchbaseServerVersion:
    """Version information reported by the /pools endpoint."""
    def __init__(self, implementationVersion='', componentsVersion=''):
        self.implementationVersion = implementationVersion
        self.componentsVersion = componentsVersion
#this class will also contain more node related info
class OtpNode(object):
    """A cluster node identified by an OTP name such as 'ns_1@10.20.30.40'."""
    def __init__(self, id='', status=''):
        self.id = id
        self.replication = ''
        self.port = 8091
        self.status = status
        # The IP is whatever follows the '@' in the OTP name, if present.
        at = id.find('@')
        self.ip = id[at + 1:] if at >= 0 else ''
class NodeInfo(object):
    """Placeholder for per-node info; populated externally."""
    def __init__(self):
        self.availableStorage = None  # list when populated
        self.memoryQuota = None
class NodeDataStorage(object):
    """One configured data directory on a node."""
    def __init__(self):
        self.type = ''     # hdd or ssd
        self.path = ''
        self.quotaMb = ''
        self.state = ''    # ok

    def __str__(self):
        summary = {'type': self.type,
                   'path': self.path,
                   'quotaMb': self.quotaMb,
                   'state': self.state}
        return '%s' % (summary)
class NodeDiskStorage(object):
    """Disk usage figures for one storage device on a node."""
    def __init__(self):
        self.type = 0
        self.path = ''
        self.sizeKBytes = 0
        self.usagePercent = 0
class Bucket(object):
    """Mutable record describing one bucket; filled in by RestParser."""
    def __init__(self):
        self.name = ''
        self.type = ''
        self.port = 11211
        self.authType = ""
        self.saslPassword = ""
        self.numReplicas = 0
        self.nodes = None   # list of Node objects once parsed
        self.stats = None   # BucketStats once parsed
        self.servers = []
        self.vbuckets = []
        self.forward_map = []
class Node(object):
    """Mutable record describing one cluster node; filled in by RestParser."""
    def __init__(self):
        # Resource figures reported by the server.
        self.uptime = 0
        self.memoryTotal = 0
        self.memoryFree = 0
        self.mcdMemoryReserved = 0
        self.mcdMemoryAllocated = 0
        self.memoryQuota = 0
        # Identity / version information.
        self.status = ""
        self.hostname = ""
        self.clusterCompatibility = ""
        self.version = ""
        self.os = ""
        self.id = ""
        self.ip = ""
        # Ports and storage layout.
        self.ports = []
        self.availableStorage = []
        self.storage = []
        self.moxi = 11211
        self.memcached = 11210
        # REST credentials used to talk to this node.
        self.rest_username = ""
        self.rest_password = ""
class AutoFailoverSettings(object):
    """Settings read back from settings/autoFailover."""
    def __init__(self):
        self.enabled = True
        self.timeout = 0
        self.count = 0
class NodePort(object):
    """The proxy (moxi) and direct (memcached) ports of a node."""
    def __init__(self):
        self.proxy = 0
        self.direct = 0
class BucketStats(object):
    """Basic per-bucket statistics parsed from 'basicStats'."""
    def __init__(self):
        self.quotaPercentUsed = 0
        self.opsPerSec = 0
        self.diskFetches = 0
        self.itemCount = 0
        self.diskUsed = 0
        self.memUsed = 0
        self.ram = 0
class vBucket(object):
    """One vbucket: its master server, replica servers and numeric id."""
    def __init__(self):
        self.master = ''   # server address of the active copy
        self.replica = []  # server addresses of replica copies
        self.id = -1       # index within the vbucket map
class RestParser(object):
    """Converts JSON documents from the REST API into Node/Bucket objects."""

    def parse_get_nodes_response(self, parsed):
        """Build a Node from a node JSON dictionary (e.g. nodes/self)."""
        node = Node()
        node.uptime = parsed['uptime']
        node.memoryFree = parsed['memoryFree']
        node.memoryTotal = parsed['memoryTotal']
        node.mcdMemoryAllocated = parsed['mcdMemoryAllocated']
        node.mcdMemoryReserved = parsed['mcdMemoryReserved']
        node.status = parsed['status']
        node.hostname = parsed['hostname']
        node.clusterCompatibility = parsed['clusterCompatibility']
        node.version = parsed['version']
        node.os = parsed['os']
        if "otpNode" in parsed:
            node.id = parsed["otpNode"]
            if parsed["otpNode"].find('@') >= 0:
                node.ip = node.id[node.id.index('@') + 1:]
        # memoryQuota
        if 'memoryQuota' in parsed:
            node.memoryQuota = parsed['memoryQuota']
        if 'availableStorage' in parsed:
            availableStorage = parsed['availableStorage']
            for key in availableStorage:
                #let's assume there is only one disk in each node
                # BUG FIX: use the per-key entry. The old code re-read the
                # whole 'availableStorage' dict here, so the 'path' check
                # below never matched and disk info was never collected.
                dict_parsed = availableStorage[key]
                if 'path' in dict_parsed and 'sizeKBytes' in dict_parsed and\
                   'usagePercent' in dict_parsed:
                    diskStorage = NodeDiskStorage()
                    diskStorage.path = dict_parsed['path']
                    diskStorage.sizeKBytes = dict_parsed['sizeKBytes']
                    diskStorage.type = key
                    diskStorage.usagePercent = dict_parsed['usagePercent']
                    node.availableStorage.append(diskStorage)
                    log.info(diskStorage)
        if 'storage' in parsed:
            storage = parsed['storage']
            for key in storage:
                disk_storage_list = storage[key]
                for dict_parsed in disk_storage_list:
                    if 'path' in dict_parsed and 'state' in dict_parsed and\
                       'quotaMb' in dict_parsed:
                        dataStorage = NodeDataStorage()
                        dataStorage.path = dict_parsed['path']
                        dataStorage.quotaMb = dict_parsed['quotaMb']
                        dataStorage.state = dict_parsed['state']
                        dataStorage.type = key
                        node.storage.append(dataStorage)
        # ports":{"proxy":11211,"direct":11210}
        if "ports" in parsed:
            ports = parsed["ports"]
            if "proxy" in ports:
                node.moxi = ports["proxy"]
            if "direct" in ports:
                node.memcached = ports["direct"]
        return node

    def parse_get_bucket_response(self, response):
        """Decode a raw JSON string and delegate to parse_get_bucket_json."""
        parsed = json.loads(response)
        return self.parse_get_bucket_json(parsed)

    def parse_get_bucket_json(self, parsed):
        """Build a Bucket (vbucket maps, stats and nodes) from JSON."""
        bucket = Bucket()
        bucket.name = parsed['name']
        bucket.type = parsed['bucketType']
        bucket.port = parsed['proxyPort']
        bucket.authType = parsed["authType"]
        bucket.saslPassword = parsed["saslPassword"]
        bucket.nodes = list()
        if 'vBucketServerMap' in parsed:
            vBucketServerMap = parsed['vBucketServerMap']
            serverList = vBucketServerMap['serverList']
            bucket.servers.extend(serverList)
            if "numReplicas" in vBucketServerMap:
                bucket.numReplicas = vBucketServerMap["numReplicas"]
            #vBucketMapForward
            if 'vBucketMapForward' in vBucketServerMap:
                #let's gather the forward map
                vBucketMapForward = vBucketServerMap['vBucketMapForward']
                for vbucket in vBucketMapForward:
                    #there will be n number of replicas
                    vbucketInfo = vBucket()
                    vbucketInfo.master = serverList[vbucket[0]]
                    if vbucket:
                        for i in range(1, len(vbucket)):
                            # -1 marks an unassigned replica slot.
                            if vbucket[i] != -1:
                                (vbucketInfo.replica
                                 .append(serverList[vbucket[i]]))
                    bucket.forward_map.append(vbucketInfo)
            vBucketMap = vBucketServerMap['vBucketMap']
            counter = 0
            for vbucket in vBucketMap:
                #there will be n number of replicas
                vbucketInfo = vBucket()
                vbucketInfo.master = serverList[vbucket[0]]
                if vbucket:
                    for i in range(1, len(vbucket)):
                        if vbucket[i] != -1:
                            vbucketInfo.replica.append(serverList[vbucket[i]])
                vbucketInfo.id = counter
                counter += 1
                bucket.vbuckets.append(vbucketInfo)
            #now go through each vbucket and populate the info
            #who is master , who is replica
            # get the 'storageTotals'
            log.debug('read %s vbuckets' % (len(bucket.vbuckets)))
        stats = parsed['basicStats']
        #vBucketServerMap
        bucketStats = BucketStats()
        log.debug('stats:%s' % (stats))
        bucketStats.quotaPercentUsed = stats['quotaPercentUsed']
        bucketStats.opsPerSec = stats['opsPerSec']
        if 'diskFetches' in stats:
            bucketStats.diskFetches = stats['diskFetches']
        bucketStats.itemCount = stats['itemCount']
        if bucket.type == "membase":
            bucketStats.diskUsed = stats['diskUsed']
        else:
            # Non-membase (memcached) buckets report no disk usage.
            bucketStats.diskUsed = 0
        bucketStats.memUsed = stats['memUsed']
        quota = parsed['quota']
        bucketStats.ram = quota['ram']
        bucket.stats = bucketStats
        nodes = parsed['nodes']
        for nodeDictionary in nodes:
            node = Node()
            node.uptime = nodeDictionary['uptime']
            node.memoryFree = nodeDictionary['memoryFree']
            node.memoryTotal = nodeDictionary['memoryTotal']
            node.mcdMemoryAllocated = nodeDictionary['mcdMemoryAllocated']
            node.mcdMemoryReserved = nodeDictionary['mcdMemoryReserved']
            node.status = nodeDictionary['status']
            node.hostname = nodeDictionary['hostname']
            cluster_compat = 'clusterCompatibility'
            if cluster_compat in nodeDictionary:
                node.clusterCompatibility = nodeDictionary[cluster_compat]
            node.version = nodeDictionary['version']
            node.os = nodeDictionary['os']
            if "ports" in nodeDictionary:
                ports = nodeDictionary["ports"]
                if "proxy" in ports:
                    node.moxi = ports["proxy"]
                if "direct" in ports:
                    node.memcached = ports["direct"]
            if "hostname" in nodeDictionary:
                # 'host:port' -> split into ip and port.
                value = str(nodeDictionary["hostname"])
                node.ip = value[:value.rfind(":")]
                node.port = int(value[value.rfind(":") + 1:])
            if "otpNode" in nodeDictionary:
                node.id = nodeDictionary["otpNode"]
            bucket.nodes.append(node)
        return bucket
| |
#!/usr/bin/env python
import time
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
from grr.lib.aff4_objects import cronjobs
class FakeCronJob(flow.GRRFlow):
  """A cron job flow that completes immediately without doing any work."""
  # Runs longer than this are eligible to be killed by KillOldFlows.
  lifetime = rdfvalue.Duration("1d")

  @flow.StateHandler(next_state="End")
  def Start(self):
    # Transition straight to the End state; the flow performs no work.
    self.CallState(next_state="End")
class FailingFakeCronJob(flow.GRRFlow):
  """A cron job flow that always raises immediately."""

  @flow.StateHandler(next_state="End")
  def Start(self):
    raise RuntimeError("Oh, no!")
class OccasionallyFailingFakeCronJob(flow.GRRFlow):
  """A cron job flow that fails once time() has advanced past 30 seconds."""

  @flow.StateHandler(next_state="End")
  def Start(self):
    # Under test_lib.FakeTime the wall clock is controlled by the test.
    if time.time() > 30:
      raise RuntimeError("Oh, no!")
class DummySystemCronJob(cronjobs.SystemCronFlow):
  """Dummy system cron job."""
  # Distinctive values so tests can verify they are honored.
  lifetime = rdfvalue.Duration("42h")
  frequency = rdfvalue.Duration("42d")

  @flow.StateHandler(next_state="End")
  def Start(self):
    self.CallState(next_state="End")
class DummyStatefulSystemCronJob(cronjobs.StatefulSystemCronFlow):
  """Dummy stateful system cron job."""

  # Records the counter value observed on each run, across instances.
  VALUES = []

  @flow.StateHandler()
  def Start(self):
    state = self.ReadCronState()
    value = state.get("value", default=0)
    DummyStatefulSystemCronJob.VALUES.append(value)
    # Persist an incremented counter for the next run to pick up.
    state.Register("value", value + 1)
    self.WriteCronState(state)
class CronTest(test_lib.GRRBaseTest):
"""Tests for cron functionality."""
def testCronJobPreservesFlowNameAndArguments(self):
  """A scheduled cron job stores its flow name and arguments unchanged."""
  pathspec = rdfvalue.PathSpec(path="/foo",
                               pathtype=rdfvalue.PathSpec.PathType.TSK)
  manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs(periodicity="1d",
                                             allow_overruns=False)
  cron_args.flow_runner_args.flow_name = "GetFile"
  cron_args.flow_args.pathspec = pathspec
  job_urn = manager.ScheduleFlow(cron_args=cron_args, token=self.token)

  # The job must be listed under the cron jobs root ...
  cron_root = aff4.FACTORY.Open(manager.CRON_JOBS_PATH, token=self.token)
  all_jobs = list(cron_root.ListChildren())
  self.assertEqual(len(all_jobs), 1)
  self.assertEqual(all_jobs[0], job_urn)

  # ... and its stored arguments must round-trip unchanged.
  cron_job = aff4.FACTORY.Open(all_jobs[0], token=self.token)
  stored_args = cron_job.Get(cron_job.Schema.CRON_ARGS)
  self.assertEqual(stored_args.flow_runner_args.flow_name, "GetFile")
  self.assertEqual(stored_args.flow_args.pathspec, pathspec)
  self.assertEqual(stored_args.periodicity, rdfvalue.Duration("1d"))
  self.assertEqual(stored_args.allow_overruns, False)
def testCronJobStartsFlowAndCreatesSymlinkOnRun(self):
  """RunOnce starts the flow and links it under the cron job object."""
  manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs()
  cron_args.flow_runner_args.flow_name = "FakeCronJob"
  job_urn = manager.ScheduleFlow(cron_args=cron_args, token=self.token)

  job = aff4.FACTORY.Open(job_urn, aff4_type="CronJob", token=self.token)
  self.assertFalse(job.IsRunning())
  # The job never ran, so DueToRun() should return true.
  self.assertTrue(job.DueToRun())

  manager.RunOnce(token=self.token)

  job = aff4.FACTORY.Open(job_urn, aff4_type="CronJob", token=self.token)
  self.assertTrue(job.IsRunning())

  # Exactly one flow symlink should hang off the job object ...
  child_flows = list(job.ListChildren())
  self.assertEqual(len(child_flows), 1)

  # ... and it must point at a FakeCronJob flow.
  linked_flow = aff4.FACTORY.Open(child_flows[0], token=self.token)
  self.assertEqual(linked_flow.state.context.args.flow_name, "FakeCronJob")
def testDisabledCronJobDoesNotScheduleFlows(self):
  """A disabled job is skipped by RunOnce while other jobs still run."""
  manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs()
  cron_args.flow_runner_args.flow_name = "FakeCronJob"
  urn1 = manager.ScheduleFlow(cron_args, token=self.token)
  urn2 = manager.ScheduleFlow(cron_args, token=self.token)

  # Disable only the first job.
  job1 = aff4.FACTORY.Open(urn1, aff4_type="CronJob", mode="rw",
                           token=self.token)
  job1.Set(job1.Schema.DISABLED(1))
  job1.Close()

  manager.RunOnce(token=self.token)

  job1 = aff4.FACTORY.Open(urn1, aff4_type="CronJob", token=self.token)
  job2 = aff4.FACTORY.Open(urn2, aff4_type="CronJob", token=self.token)
  # Disabled flow shouldn't be running, while not-disabled flow should run
  # as usual.
  self.assertFalse(job1.IsRunning())
  self.assertTrue(job2.IsRunning())
def testCronJobRunMonitorsRunningFlowState(self):
  """RunOnce tracks whether the job's current flow is still alive."""
  cron_manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs(allow_overruns=False,
                                             periodicity="1d")
  cron_args.flow_runner_args.flow_name = "FakeCronJob"
  cron_job_urn = cron_manager.ScheduleFlow(cron_args, token=self.token)

  # Run() wasn't called, so nothing is supposed to be running
  cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                               token=self.token)
  self.assertFalse(cron_job.IsRunning())

  cron_manager.RunOnce(token=self.token)

  # Run() was called and flow was started, so the job should be
  # considered running.
  cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                               token=self.token)
  self.assertTrue(cron_job.IsRunning())

  # Find the flow that is currently running for the job and terminate it.
  cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                               token=self.token)
  self.assertTrue(cron_job.IsRunning())
  cron_job_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
  self.assertTrue(cron_job_flow_urn is not None)
  flow.GRRFlow.TerminateFlow(cron_job_flow_urn, token=self.token)

  # Check we're dead
  cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                               token=self.token)
  self.assertFalse(cron_job.IsRunning())

  # This will understand that current flow has terminated. New flow won't be
  # started, because iterations are supposed to be started once per day
  # (frequency=1d).
  cron_manager.RunOnce(token=self.token)

  # Still dead
  cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                               token=self.token)
  self.assertFalse(cron_job.IsRunning())
def testCronJobRunDoesNothingIfCurrentFlowIsRunning(self):
  """A second RunOnce must not start another flow while one is running."""
  manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs(allow_overruns=False,
                                             periodicity="1d")
  cron_args.flow_runner_args.flow_name = "FakeCronJob"
  job_urn = manager.ScheduleFlow(cron_args=cron_args, token=self.token)

  job = aff4.FACTORY.Open(job_urn, aff4_type="CronJob", token=self.token)

  manager.RunOnce(token=self.token)
  job = aff4.FACTORY.Open(job_urn, aff4_type="CronJob", token=self.token)
  self.assertEqual(len(list(job.ListChildren())), 1)

  # The previous flow is still running, so nothing new may be scheduled.
  manager.RunOnce(token=self.token)
  job = aff4.FACTORY.Open(job_urn, aff4_type="CronJob", token=self.token)
  self.assertEqual(len(list(job.ListChildren())), 1)
def testCronJobRunDoesNothingIfDueTimeHasNotComeYet(self):
  """RunOnce must not start a new iteration before the period elapses."""
  with test_lib.FakeTime(0):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=False, periodicity="1h")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token)

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)

    # Let 59 minutes pass. Frequency is 1 hour, so new flow is not
    # supposed to start.
    # NOTE(review): this rebinds time.time directly; presumably FakeTime
    # restores the original on __exit__ -- confirm in test_lib.
    time.time = lambda: 59 * 60

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)
def testCronJobRunPreventsOverrunsWhenAllowOverrunsIsFalse(self):
  """With allow_overruns=False no new flow starts while one is running."""
  with test_lib.FakeTime(0):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=False, periodicity="1h")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token)

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)

    # Let an hour pass. Frequency is 1h (i.e. cron job iterations are
    # supposed to be started every hour), so the new flow should be started
    # by RunOnce(). However, as allow_overruns is False, and previous
    # iteration flow hasn't finished yet, no flow will be started.
    time.time = lambda: 60*60 + 1

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)
def testCronJobRunAllowsOverrunsWhenAllowOverrunsIsTrue(self):
  """With allow_overruns=True a new flow may start alongside a running one."""
  with test_lib.FakeTime(0):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=True, periodicity="1h")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token)

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 1)

    # Let an hour pass. Frequency is 1h (i.e. cron job iterations are
    # supposed to be started every hour), so the new flow should be started
    # by RunOnce(). Previous iteration flow hasn't finished yet, but
    # allow_overruns is True, so it's ok to start new iteration.
    time.time = lambda: 60*60 + 1

    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    cron_job_flows = list(cron_job.ListChildren())
    self.assertEqual(len(cron_job_flows), 2)
def testCronManagerListJobsDoesNotListDeletedJobs(self):
  """Deleted jobs disappear from ListJobs output."""
  manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs(
      allow_overruns=True, periodicity="1d")
  cron_args.flow_runner_args.flow_name = "FakeCronJob"
  job_urn = manager.ScheduleFlow(cron_args=cron_args, token=self.token)

  self.assertEqual(len(list(manager.ListJobs(token=self.token))), 1)

  manager.DeleteJob(job_urn, token=self.token)

  self.assertEqual(len(list(manager.ListJobs(token=self.token))), 0)
def testKillOldFlows(self):
  """Flows exceeding their lifetime are killed and metrics are updated."""
  with test_lib.FakeTime(0):
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs()
    cron_args.flow_runner_args.flow_name = "FakeCronJob"
    cron_args.periodicity = "1w"
    cron_args.lifetime = FakeCronJob.lifetime
    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)
    cron_manager.RunOnce(token=self.token)

    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertTrue(cron_job.IsRunning())
    # Within the lifetime nothing should be killed yet.
    self.assertFalse(cron_job.KillOldFlows())

    # Snapshot the metrics so we can assert on the deltas below.
    prev_timeout_value = stats.STATS.GetMetricValue(
        "cron_job_timeout", fields=[cron_job_urn.Basename()])
    prev_latency_value = stats.STATS.GetMetricValue(
        "cron_job_latency", fields=[cron_job_urn.Basename()])

  # Fast foward one day
  with test_lib.FakeTime(24*60*60 + 1):
    flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
    cron_manager.RunOnce(token=self.token)
    cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                 token=self.token)
    self.assertFalse(cron_job.IsRunning())

    # Check the termination log
    current_flow = aff4.FACTORY.Open(urn=flow_urn,
                                     token=self.token, mode="r")
    log = current_flow.Get(current_flow.Schema.LOG)
    self.assertTrue("lifetime exceeded" in str(log))

    # Check that timeout counter got updated.
    current_timeout_value = stats.STATS.GetMetricValue(
        "cron_job_timeout", fields=[cron_job_urn.Basename()])
    self.assertEqual(current_timeout_value - prev_timeout_value, 1)

    # Check that latency stat got updated.
    current_latency_value = stats.STATS.GetMetricValue(
        "cron_job_latency", fields=[cron_job_urn.Basename()])
    self.assertEqual(current_latency_value.count - prev_latency_value.count,
                     1)
    self.assertEqual(current_latency_value.sum - prev_latency_value.sum,
                     24*60*60 + 1)
def testFailedFlowUpdatesStats(self):
  """A failing cron flow increments the cron_job_failure counter."""
  manager = cronjobs.CronManager()
  cron_args = rdfvalue.CreateCronJobFlowArgs(allow_overruns=False,
                                             periodicity="1d")
  cron_args.flow_runner_args.flow_name = "FailingFakeCronJob"
  job_urn = manager.ScheduleFlow(cron_args=cron_args, token=self.token)

  before = stats.STATS.GetMetricValue(
      "cron_job_failure", fields=[job_urn.Basename()])

  manager.RunOnce(token=self.token)
  job = aff4.FACTORY.Open(job_urn, token=self.token)
  flow_urn = job.Get(job.Schema.CURRENT_FLOW_URN)
  # Drive the (failing) flow to completion.
  for _ in test_lib.TestFlowHelper(flow_urn,
                                   check_flow_errors=False,
                                   token=self.token):
    pass

  # This RunOnce call should determine that the flow has failed
  manager.RunOnce(token=self.token)

  # Check that stats got updated
  after = stats.STATS.GetMetricValue(
      "cron_job_failure", fields=[job_urn.Basename()])
  self.assertEqual(after - before, 1)
def testLatencyStatsAreCorrectlyRecorded(self):
    """Latency distribution should record a run's start-to-finish wall time."""
    with test_lib.FakeTime(0):
        cron_manager = cronjobs.CronManager()
        cron_args = rdfvalue.CreateCronJobFlowArgs()
        cron_args.flow_runner_args.flow_name = "FakeCronJob"
        cron_args.periodicity = "1w"
        cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                                 token=self.token)
        # Starts the cron flow at fake time 0.
        cron_manager.RunOnce(token=self.token)
    # Snapshot the latency distribution so only the delta is asserted.
    prev_metric_value = stats.STATS.GetMetricValue(
        "cron_job_latency", fields=[cron_job_urn.Basename()])
    # Fast forward one minute
    with test_lib.FakeTime(60):
        cron_manager.RunOnce(token=self.token)
        cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                     token=self.token)
        cron_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
        # Drive the flow to completion at fake time 60.
        for _ in test_lib.TestFlowHelper(cron_flow_urn,
                                         check_flow_errors=False,
                                         token=self.token):
            pass
        # This RunOnce call should determine that the flow has finished
        cron_manager.RunOnce(token=self.token)
        # Check that stats got updated: exactly one new latency sample whose
        # value is the 60 seconds between start (t=0) and finish (t=60).
        current_metric_value = stats.STATS.GetMetricValue(
            "cron_job_latency", fields=[cron_job_urn.Basename()])
        self.assertEqual(current_metric_value.count - prev_metric_value.count, 1)
        self.assertEqual(current_metric_value.sum - prev_metric_value.sum, 60)
def testSchedulingJobWithFixedNamePreservesTheName(self):
    """A job scheduled under an explicit job_name must keep that exact name."""
    manager = cronjobs.CronManager()
    job_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=True, periodicity="1d")
    job_args.flow_runner_args.flow_name = "FakeCronJob"
    # Pass an explicit job_name and verify the URN's basename matches it.
    job_urn = manager.ScheduleFlow(
        cron_args=job_args, token=self.token, job_name="TheJob")
    self.assertEqual("TheJob", job_urn.Basename())
def testReschedulingJobWithFixedNameDoesNotCreateNewObjectVersion(self):
    """Re-scheduling a fixed-name job must not add a new TYPE version.

    Scheduling a cron job under an explicit name twice should be idempotent
    with respect to the underlying AFF4 object: the second ScheduleFlow()
    call has to reuse the existing object instead of writing a fresh
    version of its "TYPE" attribute.
    """
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs(
        allow_overruns=True, periodicity="1d")
    cron_args.flow_runner_args.flow_name = "FakeCronJob"
    # Schedule cron job with a fixed name. Check that we have 1 version
    # of "TYPE" attribute.
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token, job_name="TheJob")
    cron_job = aff4.FACTORY.Open(cron_job_urn, age=aff4.ALL_TIMES,
                                 token=self.token)
    attr_values = list(cron_job.GetValuesForAttribute(cron_job.Schema.TYPE))
    # assertEqual (rather than assertTrue on a comparison) so a failure
    # message reports the actual number of versions found.
    self.assertEqual(len(attr_values), 1)
    # Reschedule the job. Check that we still have only one "TYPE" version.
    cron_job_urn = cron_manager.ScheduleFlow(
        cron_args=cron_args, token=self.token, job_name="TheJob")
    cron_job = aff4.FACTORY.Open(cron_job_urn, age=aff4.ALL_TIMES,
                                 token=self.token)
    attr_values = list(cron_job.GetValuesForAttribute(cron_job.Schema.TYPE))
    self.assertEqual(len(attr_values), 1)
def testLastRunStatusGetsUpdatedOnEveryRun(self):
    """Every completed run should append a timestamped LAST_RUN_STATUS."""
    cron_manager = cronjobs.CronManager()
    cron_args = rdfvalue.CreateCronJobFlowArgs()
    # This test flow alternates between success and failure across runs.
    cron_args.flow_runner_args.flow_name = "OccasionallyFailingFakeCronJob"
    cron_args.periodicity = "30s"
    cron_job_urn = cron_manager.ScheduleFlow(cron_args=cron_args,
                                             token=self.token)
    # Two runs, 60 fake seconds apart (periodicity is 30s, so both fire).
    for fake_time in [0, 60]:
        with test_lib.FakeTime(fake_time):
            # This call should start a new cron job flow
            cron_manager.RunOnce(token=self.token)
            cron_job = aff4.FACTORY.Open(cron_job_urn, aff4_type="CronJob",
                                         token=self.token)
            cron_flow_urn = cron_job.Get(cron_job.Schema.CURRENT_FLOW_URN)
            # Drive the flow to completion; failures are expected on some
            # runs, hence check_flow_errors=False.
            for _ in test_lib.TestFlowHelper(cron_flow_urn,
                                             check_flow_errors=False,
                                             token=self.token):
                pass
            # This RunOnce call should determine that the flow has finished
            cron_manager.RunOnce(token=self.token)
    # Read back all versions of LAST_RUN_STATUS, oldest first.
    cron_job = aff4.FACTORY.Open(cron_job_urn, age=aff4.ALL_TIMES,
                                 token=self.token)
    statuses = list(cron_job.GetValuesForAttribute(
        cron_job.Schema.LAST_RUN_STATUS))
    statuses = sorted(statuses, key=lambda x: x.age)
    self.assertEqual(len(statuses), 2)
    # Each status carries the (fake) timestamp of the run that produced it.
    self.assertEqual(statuses[0].age,
                     rdfvalue.RDFDatetime().FromSecondsFromEpoch(0))
    self.assertEqual(statuses[1].age,
                     rdfvalue.RDFDatetime().FromSecondsFromEpoch(60))
    # First run succeeds, second fails (per the flow's alternating
    # behavior), so the statuses are OK followed by ERROR.
    self.assertEqual(statuses[0].status, rdfvalue.CronJobRunStatus.Status.OK)
    self.assertEqual(statuses[1].status, rdfvalue.CronJobRunStatus.Status.ERROR)
def testSystemCronFlowsGetScheduledAutomatically(self):
    """ScheduleSystemCronFlows() registers every enabled system cron job."""
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", ["DummySystemCronJob"])
    cronjobs.ScheduleSystemCronFlows(token=self.token)
    all_jobs = cronjobs.CRON_MANAGER.ListJobs(token=self.token)
    matching = [urn for urn in all_jobs
                if urn.Basename() == "DummySystemCronJob"]
    self.assertTrue(matching)
    # System cron job should be enabled by default.
    job = aff4.FACTORY.Open(matching[0], aff4_type="CronJob",
                            token=self.token)
    self.assertFalse(job.Get(job.Schema.DISABLED))
def testSystemCronFlowsMayBeDisabledViaConfig(self):
    """Jobs dropped from Cron.enabled_system_jobs get disabled on reschedule."""
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", ["DummySystemCronJob"])
    cronjobs.ScheduleSystemCronFlows(token=self.token)
    jobs = cronjobs.CRON_MANAGER.ListJobs(token=self.token)
    dummy_jobs = [j for j in jobs
                  if j.Basename() == "DummySystemCronJob"]
    self.assertTrue(dummy_jobs)
    # System cron job should be enabled.
    job = aff4.FACTORY.Open(dummy_jobs[0], aff4_type="CronJob",
                            token=self.token)
    self.assertFalse(job.Get(job.Schema.DISABLED))
    # Now remove the cron job from the list and check that it gets disabled
    # after next ScheduleSystemCronFlows() call.
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", [])
    cronjobs.ScheduleSystemCronFlows(token=self.token)
    # This cron job should now be disabled: it is no longer present in the
    # Cron.enabled_system_jobs config variable.
    job = aff4.FACTORY.Open(dummy_jobs[0], aff4_type="CronJob",
                            token=self.token)
    self.assertTrue(job.Get(job.Schema.DISABLED))
def testScheduleSystemCronFlowsRaisesWhenFlowCanNotBeFound(self):
    """An unknown flow name in the enabled-jobs list raises KeyError."""
    config_lib.CONFIG.Set("Cron.enabled_system_jobs", ["NonExistent"])
    with self.assertRaises(KeyError):
        cronjobs.ScheduleSystemCronFlows(token=self.token)
def testStatefulSystemCronFlowRaisesWhenRunningWithoutCronJob(self):
    """Starting a stateful system cron flow outside its cron job must fail."""
    with self.assertRaises(cronjobs.StateReadError):
        flow.GRRFlow.StartFlow(flow_name="DummyStatefulSystemCronJob",
                               token=self.token)
def testStatefulSystemCronFlowMaintainsState(self):
    """State persisted by the cron flow must survive across runs."""
    DummyStatefulSystemCronJob.VALUES = []
    config_lib.CONFIG.Set("Cron.enabled_system_jobs",
                          ["DummyStatefulSystemCronJob"])
    cronjobs.ScheduleSystemCronFlows(token=self.token)
    # Run the flow three times; each run appends the counter it read back
    # from the persisted state, so the recorded values must increment.
    for _ in range(3):
        flow.GRRFlow.StartFlow(flow_name="DummyStatefulSystemCronJob",
                               token=self.token)
    self.assertListEqual(DummyStatefulSystemCronJob.VALUES, [0, 1, 2])
def main(argv):
    """Entry point: runs this module's tests via the GRR test runner."""
    test_lib.GrrTestProgram(argv=argv)

if __name__ == "__main__":
    flags.StartMain(main)
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'views/new_project.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_create_new_project(object):
    """pyuic5-generated UI for the "Create New Project" wizard.

    NOTE: auto-generated from views/new_project.ui (see file header) —
    do not hand-edit behavior; regenerate from the .ui file instead.
    The wizard has five pages: project name, server address, video file,
    aerial image, and a final confirmation/creation page.
    """

    def setupUi(self, create_new_project):
        """Build all widgets and pages of the wizard `create_new_project`."""
        create_new_project.setObjectName("create_new_project")
        create_new_project.setWindowModality(QtCore.Qt.ApplicationModal)
        create_new_project.setMinimumSize(QtCore.QSize(550, 500))
        create_new_project.setAutoFillBackground(False)
        create_new_project.setModal(True)
        create_new_project.setWizardStyle(QtWidgets.QWizard.ClassicStyle)
        create_new_project.setOptions(QtWidgets.QWizard.NoBackButtonOnStartPage)
        # --- Page 1: project name ---
        self.newp_p1 = QtWidgets.QWizardPage()
        self.newp_p1.setObjectName("newp_p1")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.newp_p1)
        self.verticalLayout.setObjectName("verticalLayout")
        self.newp_p1_title = QtWidgets.QLabel(self.newp_p1)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.newp_p1_title.setFont(font)
        self.newp_p1_title.setObjectName("newp_p1_title")
        self.verticalLayout.addWidget(self.newp_p1_title)
        self.newp_p1_layout = QtWidgets.QFormLayout()
        self.newp_p1_layout.setContentsMargins(-1, 8, -1, -1)
        self.newp_p1_layout.setObjectName("newp_p1_layout")
        self.newp_projectname_label = QtWidgets.QLabel(self.newp_p1)
        self.newp_projectname_label.setObjectName("newp_projectname_label")
        self.newp_p1_layout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.newp_projectname_label)
        self.newp_projectname_input = QtWidgets.QLineEdit(self.newp_p1)
        self.newp_projectname_input.setObjectName("newp_projectname_input")
        self.newp_p1_layout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.newp_projectname_input)
        self.verticalLayout.addLayout(self.newp_p1_layout)
        # Label below the name input; text is filled in at runtime
        # (empty in the .ui file).
        self.newp_ldir_label = QtWidgets.QLabel(self.newp_p1)
        font = QtGui.QFont()
        font.setBold(True)
        font.setItalic(True)
        font.setWeight(75)
        self.newp_ldir_label.setFont(font)
        self.newp_ldir_label.setText("")
        self.newp_ldir_label.setObjectName("newp_ldir_label")
        self.verticalLayout.addWidget(self.newp_ldir_label)
        create_new_project.addPage(self.newp_p1)
        # --- Page 2: server address and notification email ---
        self.newp_p1_5 = QtWidgets.QWizardPage()
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.newp_p1_5.sizePolicy().hasHeightForWidth())
        self.newp_p1_5.setSizePolicy(sizePolicy)
        self.newp_p1_5.setObjectName("newp_p1_5")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.newp_p1_5)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.newp_add_server_title = QtWidgets.QLabel(self.newp_p1_5)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.newp_add_server_title.setFont(font)
        self.newp_add_server_title.setObjectName("newp_add_server_title")
        self.verticalLayout_2.addWidget(self.newp_add_server_title)
        self.newp_p2_add_server_description = QtWidgets.QLabel(self.newp_p1_5)
        self.newp_p2_add_server_description.setWordWrap(True)
        self.newp_p2_add_server_description.setObjectName("newp_p2_add_server_description")
        self.verticalLayout_2.addWidget(self.newp_p2_add_server_description)
        self.newp_p2_video_layout = QtWidgets.QFormLayout()
        self.newp_p2_video_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.newp_p2_video_layout.setContentsMargins(-1, 8, -1, -1)
        self.newp_p2_video_layout.setObjectName("newp_p2_video_layout")
        self.newp_video_server_label = QtWidgets.QLabel(self.newp_p1_5)
        self.newp_video_server_label.setObjectName("newp_video_server_label")
        self.newp_p2_video_layout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.newp_video_server_label)
        self.newp_video_server_input = QtWidgets.QLineEdit(self.newp_p1_5)
        self.newp_video_server_input.setMinimumSize(QtCore.QSize(0, 21))
        self.newp_video_server_input.setObjectName("newp_video_server_input")
        self.newp_p2_video_layout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.newp_video_server_input)
        self.newp_video_email_label = QtWidgets.QLabel(self.newp_p1_5)
        self.newp_video_email_label.setMinimumSize(QtCore.QSize(0, 21))
        self.newp_video_email_label.setObjectName("newp_video_email_label")
        self.newp_p2_video_layout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.newp_video_email_label)
        self.newp_video_email_input = QtWidgets.QLineEdit(self.newp_p1_5)
        self.newp_video_email_input.setMinimumSize(QtCore.QSize(0, 21))
        self.newp_video_email_input.setObjectName("newp_video_email_input")
        self.newp_p2_video_layout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.newp_video_email_input)
        self.verticalLayout_2.addLayout(self.newp_p2_video_layout)
        create_new_project.addPage(self.newp_p1_5)
        # --- Page 3: video file selection and recording start time ---
        self.newp_p2 = QtWidgets.QWizardPage()
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.newp_p2.sizePolicy().hasHeightForWidth())
        self.newp_p2.setSizePolicy(sizePolicy)
        self.newp_p2.setObjectName("newp_p2")
        self.verticalLayout_21 = QtWidgets.QVBoxLayout(self.newp_p2)
        self.verticalLayout_21.setObjectName("verticalLayout_21")
        self.newp_p2_add_video_title = QtWidgets.QLabel(self.newp_p2)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.newp_p2_add_video_title.setFont(font)
        self.newp_p2_add_video_title.setObjectName("newp_p2_add_video_title")
        self.verticalLayout_21.addWidget(self.newp_p2_add_video_title)
        self.newp_p2_add_vido_description = QtWidgets.QLabel(self.newp_p2)
        self.newp_p2_add_vido_description.setWordWrap(True)
        self.newp_p2_add_vido_description.setObjectName("newp_p2_add_vido_description")
        self.verticalLayout_21.addWidget(self.newp_p2_add_vido_description)
        self.newp_p2_video_layout1 = QtWidgets.QFormLayout()
        self.newp_p2_video_layout1.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
        self.newp_p2_video_layout1.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.newp_p2_video_layout1.setObjectName("newp_p2_video_layout1")
        self.newp_video_label = QtWidgets.QLabel(self.newp_p2)
        self.newp_video_label.setMinimumSize(QtCore.QSize(0, 28))
        self.newp_video_label.setObjectName("newp_video_label")
        self.newp_p2_video_layout1.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.newp_video_label)
        # Line edit plus "Browse..." button share one horizontal row.
        self.newp_p2_video_browse_layout = QtWidgets.QHBoxLayout()
        self.newp_p2_video_browse_layout.setObjectName("newp_p2_video_browse_layout")
        self.newp_video_input = QtWidgets.QLineEdit(self.newp_p2)
        self.newp_video_input.setMinimumSize(QtCore.QSize(0, 21))
        self.newp_video_input.setObjectName("newp_video_input")
        self.newp_p2_video_browse_layout.addWidget(self.newp_video_input)
        self.newp_video_browse = QtWidgets.QPushButton(self.newp_p2)
        self.newp_video_browse.setMinimumSize(QtCore.QSize(0, 32))
        self.newp_video_browse.setObjectName("newp_video_browse")
        self.newp_p2_video_browse_layout.addWidget(self.newp_video_browse)
        self.newp_p2_video_layout1.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.newp_p2_video_browse_layout)
        self.newp_video_start_time_label = QtWidgets.QLabel(self.newp_p2)
        self.newp_video_start_time_label.setMinimumSize(QtCore.QSize(0, 24))
        self.newp_video_start_time_label.setObjectName("newp_video_start_time_label")
        self.newp_p2_video_layout1.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.newp_video_start_time_label)
        self.newp_video_start_time_input = QtWidgets.QDateTimeEdit(self.newp_p2)
        self.newp_video_start_time_input.setMinimumSize(QtCore.QSize(0, 24))
        self.newp_video_start_time_input.setObjectName("newp_video_start_time_input")
        self.newp_p2_video_layout1.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.newp_video_start_time_input)
        self.verticalLayout_21.addLayout(self.newp_p2_video_layout1)
        create_new_project.addPage(self.newp_p2)
        # --- Page 4: aerial image selection ---
        self.newp_p2_5 = QtWidgets.QWizardPage()
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.newp_p2_5.sizePolicy().hasHeightForWidth())
        self.newp_p2_5.setSizePolicy(sizePolicy)
        self.newp_p2_5.setObjectName("newp_p2_5")
        self.verticalLayout_22 = QtWidgets.QVBoxLayout(self.newp_p2_5)
        self.verticalLayout_22.setObjectName("verticalLayout_22")
        self.newp_add_aerial_image_title = QtWidgets.QLabel(self.newp_p2_5)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.newp_add_aerial_image_title.setFont(font)
        self.newp_add_aerial_image_title.setObjectName("newp_add_aerial_image_title")
        self.verticalLayout_22.addWidget(self.newp_add_aerial_image_title)
        self.newp_p2_add_aerial_image_description = QtWidgets.QLabel(self.newp_p2_5)
        self.newp_p2_add_aerial_image_description.setWordWrap(True)
        self.newp_p2_add_aerial_image_description.setObjectName("newp_p2_add_aerial_image_description")
        self.verticalLayout_22.addWidget(self.newp_p2_add_aerial_image_description)
        self.newp_p2_aerial_layout = QtWidgets.QFormLayout()
        self.newp_p2_aerial_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.AllNonFixedFieldsGrow)
        self.newp_p2_aerial_layout.setContentsMargins(-1, 8, -1, -1)
        self.newp_p2_aerial_layout.setObjectName("newp_p2_aerial_layout")
        self.newp_aerial_image_label = QtWidgets.QLabel(self.newp_p2_5)
        self.newp_aerial_image_label.setObjectName("newp_aerial_image_label")
        self.newp_p2_aerial_layout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.newp_aerial_image_label)
        self.newp_p2_image_browse_layout = QtWidgets.QHBoxLayout()
        self.newp_p2_image_browse_layout.setObjectName("newp_p2_image_browse_layout")
        self.newp_aerial_image_input = QtWidgets.QLineEdit(self.newp_p2_5)
        self.newp_aerial_image_input.setObjectName("newp_aerial_image_input")
        self.newp_p2_image_browse_layout.addWidget(self.newp_aerial_image_input)
        self.newp_aerial_image_browse = QtWidgets.QPushButton(self.newp_p2_5)
        self.newp_aerial_image_browse.setObjectName("newp_aerial_image_browse")
        self.newp_p2_image_browse_layout.addWidget(self.newp_aerial_image_browse)
        self.newp_p2_aerial_layout.setLayout(0, QtWidgets.QFormLayout.FieldRole, self.newp_p2_image_browse_layout)
        self.verticalLayout_22.addLayout(self.newp_p2_aerial_layout)
        create_new_project.addPage(self.newp_p2_5)
        # --- Page 5: confirmation, progress bar and creation trigger ---
        self.newp_p3 = QtWidgets.QWizardPage()
        self.newp_p3.setObjectName("newp_p3")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.newp_p3)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.newp_p2_add_video_title_2 = QtWidgets.QLabel(self.newp_p3)
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.newp_p2_add_video_title_2.setFont(font)
        self.newp_p2_add_video_title_2.setObjectName("newp_p2_add_video_title_2")
        self.verticalLayout_3.addWidget(self.newp_p2_add_video_title_2)
        self.line = QtWidgets.QFrame(self.newp_p3)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.verticalLayout_3.addWidget(self.line)
        self.newp_p3_add_server_description = QtWidgets.QLabel(self.newp_p3)
        self.newp_p3_add_server_description.setWordWrap(True)
        self.newp_p3_add_server_description.setObjectName("newp_p3_add_server_description")
        self.verticalLayout_3.addWidget(self.newp_p3_add_server_description)
        # Progress bar and status label start hidden; application code is
        # expected to show them once creation begins.
        self.newp_creation_progress = QtWidgets.QProgressBar(self.newp_p3)
        self.newp_creation_progress.setProperty("value", 0)
        self.newp_creation_progress.setVisible(False)
        self.newp_creation_progress.setObjectName("newp_creation_progress")
        self.verticalLayout_3.addWidget(self.newp_creation_progress)
        self.newp_start_creation = QtWidgets.QPushButton(self.newp_p3)
        self.newp_start_creation.setObjectName("newp_start_creation")
        self.verticalLayout_3.addWidget(self.newp_start_creation)
        self.newp_creation_status = QtWidgets.QLabel(self.newp_p3)
        self.newp_creation_status.setVisible(False)
        self.newp_creation_status.setObjectName("newp_creation_status")
        self.verticalLayout_3.addWidget(self.newp_creation_status)
        create_new_project.addPage(self.newp_p3)
        self.retranslateUi(create_new_project)
        QtCore.QMetaObject.connectSlotsByName(create_new_project)

    def retranslateUi(self, create_new_project):
        """Set all user-visible strings (translatable via Qt's tr system)."""
        _translate = QtCore.QCoreApplication.translate
        create_new_project.setWindowTitle(_translate("create_new_project", "Create New Project"))
        self.newp_p1_title.setText(_translate("create_new_project", "New Safety Project"))
        self.newp_projectname_label.setText(_translate("create_new_project", "Project Name"))
        self.newp_add_server_title.setText(_translate("create_new_project", "Add server address"))
        self.newp_p2_add_server_description.setText(_translate("create_new_project", "Browse and select a server address to run processing.\n"
"(Optional) Add an email address for status messages."))
        self.newp_video_server_label.setText(_translate("create_new_project", "Server IP or URL"))
        self.newp_video_email_label.setText(_translate("create_new_project", "Email"))
        self.newp_p2_add_video_title.setText(_translate("create_new_project", "Add project video"))
        self.newp_p2_add_vido_description.setText(_translate("create_new_project", "Browse and select a video file to analyze.\n"
"Input the time when the video recording occurred."))
        self.newp_video_label.setText(_translate("create_new_project", "Selected video"))
        self.newp_video_browse.setText(_translate("create_new_project", "Browse..."))
        self.newp_video_start_time_label.setText(_translate("create_new_project", "Recording start time"))
        self.newp_add_aerial_image_title.setText(_translate("create_new_project", "Add aerial image"))
        self.newp_p2_add_aerial_image_description.setText(_translate("create_new_project", "Browse and select an aerial image of the video\'s target. "))
        self.newp_aerial_image_label.setText(_translate("create_new_project", "Aerial image"))
        self.newp_aerial_image_browse.setText(_translate("create_new_project", "Browse..."))
        self.newp_p2_add_video_title_2.setText(_translate("create_new_project", "Confirm project settings"))
        self.newp_p3_add_server_description.setText(_translate("create_new_project", "If complete, create your project. Otherwise, go back to make any necessary changes."))
        self.newp_start_creation.setText(_translate("create_new_project", "Click to send project to server"))
        self.newp_creation_status.setText(_translate("create_new_project", "Beginning project creation..."))
| |
import sys,os,re,time,cPickle
import numpy as np
from networkx import bidirectional_dijkstra,shortest_path_length
import networkx as nx
from scipy.cluster.vq import kmeans2
import scipy.stats as stats
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist,cdist,squareform
#from SpectralMix import SilValueGenerator
#from mpl_toolkits.mplot3d import Axes3D
EPS = np.finfo(float).eps
## the base cluster class for other spectral clustering methods
class ClusterBase:
    ## constructor
    # Base class for spectral-clustering methods (Python 2 code: uses
    # print statements, cPickle and `raise Exc, msg` syntax).
    # this class takes as input a raw matrix consisting of observations and features
    # the observations occupy the rows and the features the columns
    # the class also takes as input a similarity matrix or a networkx graph
    # @param mat is a raw matrix (numpy.array((n,d))) or a networkx graph
    # @param k is the number of components in the mixture
    # @param dataHeader is a list or numpy.array() of length n consisting of labels for the data
    # @param labels are an optional vector corresponding to dataHeader that is used for evaluation purposes
    # @param dtype is the data type that may be 'raw', 'similarity' or 'graph'
    # @param weighted defines whether the input graph is of type weighted or not (True or False)
    # @param verbose generally used for debugging mode
    # @param refine used to specify the method for noise refinement 'kmeans'
    # @param classify step used to carry out the clustering of the normalized stacked and ranked eigenvectors
    # Note on distance matrics:
    # \li chebyshev - the Chebyshev distance.
    # \li cityblock - the Manhattan distance.
    # \li correlation - the Correlation distance.
    # \li cosine - the Cosine distance.
    # \li euclidean - the Euclidean distance.
    # \li hamming - the Hamming distance (boolean).
    # \li mahalanobis - the Mahalanobis distance.
    # \li minkowski - the Minkowski distance.
    # \li seuclidean - the normalized Euclidean distance.
    # \li sqeuclidean - the squared Euclidean distance.
    def __init__(self,mat,k=None,dataHeader=None,labels=None,dtype='raw',weighted=False,verbose=False,classifyStep='kmeans',dmatPath=None,projID='generic'):
        ## error check input
        # NOTE(review): in Python 2, the third expression of a raise
        # statement is treated as a *traceback* object, not as extra
        # message data -- passing `dtype` here would itself raise a
        # TypeError at runtime. Should be ValueError("%s..." % dtype).
        if dtype not in ['raw','graph','distance']:
            raise ValueError, "matrix input type not valid", dtype
        ## class-wide variables
        self.k = k
        self.dtype = dtype
        self.weighted = weighted
        self.verbose = verbose
        # sentinel label used to mark noise points in plots/refinement
        self.noiseValue = 999
        self.projID = projID
        self.dmatPath = dmatPath
        self.unusedGenes = None
        self.unusedIndices = None
        # NOTE(review): local `usedIndices` is assigned but never used here;
        # self.usedIndices is set later in _error_check_input_data().
        usedIndices = None
        if dtype == 'graph':
            # graph input: keep the networkx graph, n = number of nodes
            self.G = mat
            self.n = len(self.G.nodes())
        else:
            # matrix input: n observations (rows) x d features (columns)
            self.mat = mat
            self.n ,self.d = np.shape(mat)
        ## handle header and labels (copies are kept so the originals
        ## survive the filtering done in _error_check_input_data)
        if dataHeader != None:
            self.dataHeader = [dat for dat in dataHeader]
            self.origDataHeader = [odat for odat in dataHeader]
        else:
            self.dataHeader = None
            self.origDataHeader = None
        if labels != None:
            self.origLabels = np.array([float(l) for l in labels])
            self.labels = np.array([float(l) for l in labels])
        else:
            self.labels = None
            self.origLabels = None
    #################
    ###  methods  ###
    #################
    # Build a pairwise node-distance matrix from a networkx graph.
    # With reweighting=True distances are shortest-path lengths
    # (Dijkstra when weighted, hop count otherwise); with
    # reweighting=False the raw edge weights are used (0 for non-edges).
    def graph_to_distance_mat(self,G,dataHeader,weighted=False,reweighting=True,verbose=False):
        nodeList = dataHeader
        n = len(nodeList)
        dMat = np.zeros((n,n))
        if verbose == True:
            print "\tINFO: making graph from distance matrix... reweighting is %s"%reweighting
        ### get all pairwise shortest paths and add distance to matrix
        total = (n * (n-1)) / 2.0
        count = 0
        for i in range(n):
            nodeI = nodeList[i]
            for j in range(n):
                nodeJ = nodeList[j]
                # only compute the lower triangle; matrix is symmetric
                if j >= i:
                    continue
                if reweighting == True:
                    if weighted == True:
                        # NOTE(review): modern networkx raises
                        # NetworkXNoPath instead of returning False when
                        # no path exists -- the `== False` check only
                        # works on old versions. TODO confirm nx version.
                        bdResults = bidirectional_dijkstra(G,nodeI,nodeJ)
                        if bdResults == False:
                            distance = 1e08
                        else:
                            distance, dijkPath = bdResults
                    else:
                        distance = shortest_path_length(G,nodeI,nodeJ)
                    dMat[i,j] = distance
                    dMat[j,i] = distance
                else:
                    if G.has_edge(nodeI,nodeJ) == True or G.has_edge(nodeJ,nodeI) == True:
                        weight = G[nodeI][nodeJ]['weight']
                        dMat[i,j] = weight
                        dMat[j,i] = weight
                count+=1
        #if verbose == True:
        #    if count%100.0 == 0.0:
        #        print "\t\tpercent complete",round(float(count) / float(total) * 100.0,2), '%'
        #print "\t\tpercent complete 100", '%'
        return dMat
    # mat is a matrix of type numpy.array(n,d) where n are the observations and d are features
    # returns the n x n squared-Euclidean distance matrix
    def raw_to_distance_mat(self,mat):
        values = pdist(mat,'sqeuclidean') # sqeuclidean, euclidean
        dMat = squareform(values)
        return dMat
    # dMmat is a symmetric positive distance matrix of type numpy.array(n,n) where n are the observations
    # sigma is the bandwidth parameter that controls how quickly the affinity drops off
    # the 1.0 or -1.0 in the numerator is used to control the direction of the drop.
    def distance_to_affinity_mat(self,dMat,sigma,reshape=True):
        # NOTE(review): `dMat == None` on a numpy array is an elementwise
        # comparison, not a None test -- use `dMat is None` instead.
        if dMat == None:
            print "ERROR: distance matrix is None cannot find affinity"
            return None
        # NOTE(review): operator precedence makes this
        # exp(-d^2 / 2 * sigma^2) == exp(-d^2 * sigma^2 / 2); the Gaussian
        # kernel is exp(-d^2 / (2 * sigma^2)) -- the denominator needs
        # parentheses. Confirm intent before fixing (changes all results).
        aMat = np.exp(-1.0 * (dMat**2.0) / 2.0 * (sigma**2.0))
        if reshape == True:
            aMat = self._reshape_affinity_matrix_to_original_header(aMat)
        return aMat
    # param sigma is the bandwidth parameter that controls how quickly the affinity drops off
    # Top-level driver: validates input, builds (or loads a pickled)
    # distance matrix according to self.dtype, then converts it to an
    # affinity matrix. Returns None on failure.
    def get_affinity_matrix(self,sigma,reshape=True,reweighting=True,verbose=False):
        self._error_check_input_data()
        # NOTE(review): dmatPickle is assigned but never used.
        dmatPickle = 'NotAFile'
        if self.dtype == 'raw':
            self.dMat = self.raw_to_distance_mat(self.mat)
        elif self.dtype == 'graph':
            print 'dtype is ', self.dtype
            # cache the (expensive) graph distance matrix via cPickle
            # when a dmatPath was supplied
            if self.dmatPath != None and os.path.isfile(self.dmatPath) == False:
                if verbose == True:
                    print '\t...............creating new dMat to be pickled...'
                self.dMat = self.graph_to_distance_mat(self.G,self.dataHeader,weighted=self.weighted,reweighting=reweighting,verbose=verbose)
                # NOTE(review): file handles from open() are never closed;
                # a with-statement would be safer.
                cPickle.dump(self.dMat,open(self.dmatPath,'w'))
            elif self.dmatPath != None and os.path.isfile(self.dmatPath) == True:
                if verbose== True:
                    print '\t...............using pickled dmat'
                self.dMat = cPickle.load(open(self.dmatPath,'r'))
            else:
                self.dMat = self.graph_to_distance_mat(self.G,self.dataHeader,weighted=self.weighted,reweighting=reweighting,verbose=verbose)
        elif self.dtype == 'distance':
            self.dMat = self.mat
        # NOTE(review): elementwise `== None` on an array again; use `is None`.
        if self.dMat == None:
            print "ERROR: did not find dMat"
            return None
        aMat = self.distance_to_affinity_mat(self.dMat,sigma,reshape=reshape)
        if aMat == None:
            print "ERROR: could not find aMat"
            return None
        return aMat
    # D^{-1/2} of the graph Laplacian normalization: diagonal matrix of
    # inverse square roots of the affinity row sums.
    def affinity_to_diagonal_mat(self,aMat):
        diaMat = np.diag(aMat.sum(axis=1)**-0.5)
        return diaMat
    # Convert an affinity matrix back into a weighted networkx graph;
    # returns (graph, list of edge weights) or None on size mismatch.
    def affinity_to_nx(self,aMat,header):
        G = nx.Graph()
        distances = []
        n,m = np.shape(aMat)
        if n != m or n != np.size(header):
            print "INPUT ERROR: for affinity to nx - sizes must be the same"
            return None
        for i in range(n):
            nodeI = header[i]
            for j in range(n):
                nodeJ = header[j]
                # lower triangle only; undirected graph
                if j >= i:
                    continue
                G.add_edge(nodeI, nodeJ, weight=aMat[i,j])
                distances.append(aMat[i,j])
        return G, distances
    # Silhouette values for a clustering; clusters with kmeans2 first
    # when no labels are given.
    def get_silhouette_values(self,rawMat,dMat=None,labels=None):
        if labels == None:
            centroids, labels = kmeans2(rawMat,self.k,iter=25,minit='points')
        # NOTE(review): SilValueGenerator's import is commented out at the
        # top of this module -- this call raises NameError as the file
        # stands. Re-enable `from SpectralMix import SilValueGenerator`.
        svg= SilValueGenerator(rawMat,labels)
        return svg.silValues
    # NOTE(review): methods below use `self.plt`, which is never assigned
    # in this class (the module imports matplotlib.pyplot as a module-level
    # `plt`) -- presumably a subclass sets self.plt; confirm.
    def _generate_heatmap(self,mat):
        cMap = self.plt.cm.spectral # jet, hot, gist_stern
        self.plt.imshow(mat,aspect='auto',interpolation='nearest',cmap=cMap)
        #self.plt.colorbar()
    # 2D scatter plot of `mat` (first two columns), colored per label;
    # noise points (label == self.noiseValue) are drawn gray.
    def _plot_scatter_data(self,mat,color='blue',labels=None,buffer=0.2,use3D=False):
        colors = ['blue','orange','red','green','yellow','magenta','cyan','black']
        ## error checking
        if type(labels) == type([]):
            labels = np.array(labels)
        if use3D == False:
            # NOTE(review): `labels == None` is elementwise when labels is
            # an array; use `labels is None`.
            if labels == None:
                print 'labels are none'
                self.plt.plot([mat[:,0]],[mat[:,1]], marker='o',color=color,markersize=8.0)
            else:
                numLabels = len(list(set(labels)))
                # NOTE(review): iterates once per *point*, not per unique
                # label, so each cluster is re-plotted many times.
                for l in labels:
                    x = mat[:,0][np.where(labels==l)]
                    y = mat[:,1][np.where(labels==l)]
                    if l == self.noiseValue:
                        self.plt.plot([x],[y],marker='o',markersize=10.0,color='gray')
                    else:
                        self.plt.plot([x],[y],marker='o',markersize=10.0,color=colors[l])
            self.plt.xlim([mat[:,0].min()-buffer,mat[:,0].max()+buffer])
            self.plt.ylim([mat[:,1].min()-buffer,mat[:,1].max()+buffer])
    # K-means distortion J: total within-cluster sum of squared distances
    # to the cluster mean, over clustResults['yMat']/['labels'].
    def calculate_distortion_measure(self,clustResults):
        clusteredData = {}
        totalJ = 0
        errorCk = 0
        for k in range(self.k):
            clusteredData[k] = clustResults['yMat'][np.where(clustResults['labels']==k)[0],:]
        for k in range(self.k):
            sumOfSquares = (clusteredData[k] - clusteredData[k].mean(axis=0))**2.0
            totalJ = totalJ + sumOfSquares.sum()
            errorCk = errorCk + len(sumOfSquares)
        # sanity check: every observation must be counted exactly once
        if errorCk != len(clustResults['labels']):
            print "ERROR: Did not pass error check in distortion measure calc"
        return totalJ
    # For graph input: drop header entries that have no node in G,
    # remember which indices survived (self.usedIndices), and validate
    # label/observation counts.
    def _error_check_input_data(self):
        ## check gene list for genes not in G
        # NOTE(review): newLabels is assigned but never used.
        newLabels = []
        self.unusedGenes = []
        if self.dtype == 'graph':
            if type(self.dataHeader)==type([]):
                self.dataHeader = np.array(self.dataHeader)
            # collect header genes that do not appear as graph nodes
            for g1 in range(len(self.dataHeader)):
                gene = self.dataHeader[g1]
                geneIndex = np.where(np.array(self.G.nodes())==gene)
                if len(geneIndex[0]) == 0:
                    self.unusedGenes.append(gene)
            ## save original labels and orig data header
            self.unusedGenes = np.array(self.unusedGenes)
            if self.labels != None:
                self.origLabels = self.labels.copy()
            self.origDataHeader = self.dataHeader.copy()
            self.unusedIndices = np.array([np.where(self.origDataHeader==gene)[0][0] for gene in self.unusedGenes])
            usedIndices = []
            for ind in range(len(self.origDataHeader)): #origLabels
                if self.unusedIndices.__contains__(ind) == False:
                    usedIndices.append(ind)
            self.usedIndices = np.array(usedIndices)
            # restrict header (and labels, if given) to genes in the graph
            self.dataHeader = self.origDataHeader[self.usedIndices]
            if self.labels != None:
                self.labels = self.origLabels[self.usedIndices]
            ## error check for genes in G that are not in header
            for g2 in range(len(self.G.nodes())):
                node = self.G.nodes()[g2]
                nodeIndex = np.where(self.dataHeader==node)
                if len(nodeIndex[0]) == 0:
                    print "WARNING: a gene was found in the graph that was not listed in the data header", node
                    continue
            self.n = len(self.dataHeader)
            if self.verbose == True:
                print "\tINFO: out of %s genes possible genes only %s appear in the graph"%(len(self.origDataHeader),len(self.dataHeader))
        ## error checking input
        if self.dtype not in ['raw','distance','affinity','graph']:
            raise ValueError, "matrix input type not valid"
        # NOTE(review): `!= None` is elementwise on arrays; `is not None`
        # is the safe test.
        if self.labels != None:
            if len(self.labels) != self.n:
                raise ValueError, "labels length not matching number observations"
    # Expand a filtered affinity matrix back to the original header size,
    # padding rows/columns of dropped genes with EPS.
    def _reshape_affinity_matrix_to_original_header(self,aMat):
        origLength = len(self.origDataHeader)
        newAMat = np.zeros((origLength,origLength),)
        newAMat = newAMat + EPS
        for i in range(origLength):
            obj = self.origDataHeader[i]
            if i in self.usedIndices:
                newRow = np.zeros((origLength),) + EPS
                aMatInd = np.where(self.dataHeader==obj)[0][0]
                newRow[self.usedIndices] = aMat[aMatInd,:]
                newAMat[i,:] = newRow
        return newAMat
| |
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
import struct
import math
from time import sleep
from pynq import MMIO
from pynq.iop import request_iop
from pynq.iop import iop_const
from pynq.iop import PMODA
from pynq.iop import PMODB
from pynq.iop import ARDUINO
from pynq.iop import PMOD_GROVE_G3
from pynq.iop import PMOD_GROVE_G4
from pynq.iop import ARDUINO_GROVE_I2C
# Names of the IOP binaries loaded onto the Pmod / Arduino I/O processor
# to drive the Grove IMU over IIC.
PMOD_GROVE_IMU_PROGRAM = "pmod_grove_imu.bin"
ARDUINO_GROVE_IMU_PROGRAM = "arduino_grove_imu.bin"
class Grove_IMU(object):
    """This class controls the Grove IIC IMU.

    Grove IMU 10DOF is a combination of grove IMU 9DOF (MPU9250) and grove
    barometer sensor (BMP180). MPU-9250 is a 9-axis motion tracking device
    that combines a 3-axis gyroscope, 3-axis accelerometer, 3-axis
    magnetometer and a Digital Motion Processor (DMP). BMP180 is a high
    precision, low power digital pressure sensor. Hardware version: v1.1.

    Attributes
    ----------
    iop : _IOP
        I/O processor instance used by Grove_IMU.
    mmio : MMIO
        Memory-mapped I/O instance to read and write instructions and data.
    """
    def __init__(self, if_id, gr_pin):
        """Return a new instance of an Grove IMU object.

        Parameters
        ----------
        if_id : int
            IOP ID (1, 2, 3) corresponding to (PMODA, PMODB, ARDUINO).
        gr_pin: list
            A group of pins on stickit connector or arduino shield.

        Raises
        ------
        ValueError
            If the interface ID or the pin group is not supported.
        """
        if if_id in [PMODA, PMODB]:
            if gr_pin not in [PMOD_GROVE_G3,
                              PMOD_GROVE_G4]:
                raise ValueError("IMU group number can only be G3 - G4.")
            GROVE_IMU_PROGRAM = PMOD_GROVE_IMU_PROGRAM
        elif if_id in [ARDUINO]:
            if gr_pin not in [ARDUINO_GROVE_I2C]:
                raise ValueError("IMU group number can only be I2C.")
            GROVE_IMU_PROGRAM = ARDUINO_GROVE_IMU_PROGRAM
        else:
            raise ValueError("No such IOP for grove device.")
        self.iop = request_iop(if_id, GROVE_IMU_PROGRAM)
        self.mmio = self.iop.mmio
        self.iop.start()
        if if_id in [PMODA, PMODB]:
            # Write SCL and SDA pin config
            self.mmio.write(iop_const.MAILBOX_OFFSET, gr_pin[0])
            self.mmio.write(iop_const.MAILBOX_OFFSET+4, gr_pin[1])
        # Write configuration and wait for ACK
        self._issue_command(1)
        self.reset()

    def _issue_command(self, cmd):
        """Write a command word to the mailbox and wait for the ACK.

        The IOP acknowledges a command by clearing the command word; polling
        until the readback differs from `cmd` implements the handshake used
        by every public method of this class.

        Parameters
        ----------
        cmd : int
            Command word to send to the I/O processor.

        Returns
        -------
        None
        """
        self.mmio.write(iop_const.MAILBOX_OFFSET +
                        iop_const.MAILBOX_PY2IOP_CMD_OFFSET, cmd)
        while (self.mmio.read(iop_const.MAILBOX_OFFSET +
                              iop_const.MAILBOX_PY2IOP_CMD_OFFSET) == cmd):
            pass

    def _read_axes(self):
        """Read the three 32-bit axis words at the start of the mailbox.

        Returns
        -------
        list
            Three signed integers for the X, Y, and Z axes.
        """
        return [self._reg2int(self.mmio.read(iop_const.MAILBOX_OFFSET + off))
                for off in (0, 4, 8)]

    def reset(self):
        """Reset all the sensors on the grove IMU.

        Returns
        -------
        None
        """
        self._issue_command(0xF)

    def get_accl(self):
        """Get the data from the accelerometer.

        Returns
        -------
        list
            A list of the acceleration data along X-axis, Y-axis, and Z-axis.
        """
        self._issue_command(0x3)
        [ax, ay, az] = self._read_axes()
        # Raw counts are scaled by 1/16384 per the original driver.
        return [float("{0:.2f}".format(ax / 16384)),
                float("{0:.2f}".format(ay / 16384)),
                float("{0:.2f}".format(az / 16384))]

    def get_gyro(self):
        """Get the data from the gyroscope.

        Returns
        -------
        list
            A list of the gyro data along X-axis, Y-axis, and Z-axis.
        """
        self._issue_command(0x5)
        [gx, gy, gz] = self._read_axes()
        # Raw counts are scaled by 250/32768 per the original driver.
        return [float("{0:.2f}".format(gx * 250 / 32768)),
                float("{0:.2f}".format(gy * 250 / 32768)),
                float("{0:.2f}".format(gz * 250 / 32768))]

    def get_compass(self):
        """Get the data from the magnetometer.

        Returns
        -------
        list
            A list of the compass data along X-axis, Y-axis, and Z-axis.
        """
        self._issue_command(0x7)
        [mx, my, mz] = self._read_axes()
        # Raw counts are scaled by 1200/4096 per the original driver.
        return [float("{0:.2f}".format(mx * 1200 / 4096)),
                float("{0:.2f}".format(my * 1200 / 4096)),
                float("{0:.2f}".format(mz * 1200 / 4096))]

    def get_heading(self):
        """Get the value of the heading.

        Returns
        -------
        float
            The angle deviated from the X-axis, toward the positive Y-axis.
        """
        [mx, my, _] = self.get_compass()
        heading = 180 * math.atan2(my, mx) / math.pi
        # Normalize atan2's (-180, 180] result into [0, 360).
        if heading < 0:
            heading += 360
        return float("{0:.2f}".format(heading))

    def get_tilt_heading(self):
        """Get the value of the tilt heading.

        Returns
        -------
        float
            The tilt heading value.

        Raises
        ------
        RuntimeError
            If the accelerometer reading is out of range or the device is
            not connected.
        """
        [ax, ay, _] = self.get_accl()
        [mx, my, mz] = self.get_compass()
        try:
            pitch = math.asin(-ax)
            roll = math.asin(ay / math.cos(pitch))
        except (ValueError, ZeroDivisionError):
            # BUG FIX: math.asin raises ValueError (not ZeroDivisionError)
            # when |ax| > 1, which is exactly the "value out of range" case
            # the message below describes; cos(pitch) == 0 divides by zero.
            raise RuntimeError("Value out of range or device not connected.")
        # Tilt-compensate the magnetometer reading using pitch and roll.
        xh = mx * math.cos(pitch) + mz * math.sin(pitch)
        yh = mx * math.sin(roll) * math.sin(pitch) + \
            my * math.cos(roll) - mz * math.sin(roll) * math.cos(pitch)
        _ = -mx * math.cos(roll) * math.sin(pitch) + \
            my * math.sin(roll) + mz * math.cos(roll) * math.cos(pitch)
        tilt_heading = 180 * math.atan2(yh, xh) / math.pi
        if yh < 0:
            tilt_heading += 360
        return float("{0:.2f}".format(tilt_heading))

    def get_temperature(self):
        """Get the current temperature in degree C.

        Returns
        -------
        float
            The temperature value.
        """
        self._issue_command(0xB)
        value = self.mmio.read(iop_const.MAILBOX_OFFSET)
        return self._reg2float(value)

    def get_pressure(self):
        """Get the current pressure in Pa.

        Returns
        -------
        float
            The pressure value.
        """
        self._issue_command(0xD)
        value = self.mmio.read(iop_const.MAILBOX_OFFSET)
        return self._reg2float(value)

    def get_atm(self):
        """Get the current pressure in relative atmosphere.

        Returns
        -------
        float
            The related atmosphere.
        """
        # 101325 Pa is one standard atmosphere.
        return float("{0:.2f}".format(self.get_pressure() / 101325))

    def get_altitude(self):
        """Get the current altitude.

        Returns
        -------
        float
            The altitude value.
        """
        # International barometric formula with a sea-level reference
        # pressure of 101325 Pa.
        pressure = self.get_pressure()
        a = pressure / 101325
        b = 1 / 5.255
        c = 1 - pow(a, b)
        altitude = 44300 * c
        return float("{0:.2f}".format(altitude))

    def _reg2float(self, reg):
        """Converts 32-bit register value to floats in Python.

        Parameters
        ----------
        reg: int
            A 32-bit register value read from the mailbox.

        Returns
        -------
        float
            A float number translated from the register value.
        """
        # BUG FIX: reinterpret the raw word as an IEEE 754 single-precision
        # float via struct. The previous hand-rolled decoder treated a
        # *biased* exponent of 0 as denormal, dropping the implicit leading
        # 1 for every value in [1.0, 2.0) (e.g. 0x3F800000 decoded to 0.0
        # instead of 1.0) and mishandled true denormals. struct gets sign,
        # bias, and denormals right.
        result = struct.unpack('<f', struct.pack('<I', reg & 0xFFFFFFFF))[0]
        return float("{0:.2f}".format(result))

    def _reg2int(self, reg):
        """Converts 32-bit register value to signed integer in Python.

        Parameters
        ----------
        reg: int
            A 32-bit register value read from the mailbox.

        Returns
        -------
        int
            A signed integer translated from the register value.
        """
        # Reinterpret the unsigned 32-bit word as two's-complement; this is
        # equivalent to the original bit-by-bit loop, but done in C.
        return struct.unpack('<i', struct.pack('<I', reg & 0xFFFFFFFF))[0]
| |
from collections import Counter
from hadafuna.core.cardset import MountainCardSet, PileCardSet
from hadafuna.core.log import logger
class Round(object):
    """Abstract base for one round of a hanafuda game.

    Holds the shared table state: the face-down pile, the face-up
    mountain, and the participating players.
    """
    def __init__(self, cards, players):
        self.players = players
        # The pile starts with the full deck; the mountain starts empty.
        self.pile_cards = PileCardSet(cards)
        self.mountain_cards = MountainCardSet([])

    def start(self):
        """Play the round; concrete game variants must override this."""
        raise NotImplementedError
class UnknownOyaError(Exception):
    """Raised when a designated oya is not one of the round's players."""
class KoikoiRound(Round):
    """A single round of koi-koi between exactly two players."""
    def __init__(self, cards, players, month, oya=None):
        """Set up the round state.

        Parameters
        ----------
        cards : iterable
            The full deck used to build the pile.
        players : list
            Exactly two players.
        month :
            The current month, forwarded to each player's taken cards.
        oya :
            The dealer for this round. When None (the default, newly
            backward-compatible), the oya is decided by drawing cards
            once the round starts.

        Raises
        ------
        UnknownOyaError
            If `oya` is neither None nor one of the two players.
        """
        super(KoikoiRound, self).__init__(cards, players)
        logger.info('Cards: %s, Players: %s', cards, players)
        for player in self.players:
            player.taken_cards.set_current_month(month)
        self.current_player = None
        self.end_player = None
        self.oya = oya
        if oya is None:
            self.ko = None
        elif oya == players[0]:
            self.ko = players[1]
        elif oya == players[1]:
            self.ko = players[0]
        else:
            raise UnknownOyaError

    def start(self):
        """Play the round until a player wins or it must be discarded."""
        if self.oya is None:
            self.decide_oya()
        self.distribute_cards()
        # A round is void when one month's four cards all sit in the
        # mountain, or when a player is dealt a complete yaku.
        if self.has_one_month_in_mountain():
            self.debug()
            self.end(has_one_month_in_mountain=True)
            return
        if self.has_yaku_in_hand():
            self.debug()
            self.end(has_yaku_in_hand=True)
            return
        while True:
            self.current_player = self.oya
            self.process_player_action()
            self.debug()
            if self.should_end():
                break
            self.current_player = self.ko
            self.process_player_action()
            self.debug()
            if self.should_end():
                break
        self.end()

    def decide_oya(self):
        """Decide the oya (dealer) by having both players draw a card.

        The earlier month wins; on a month tie the higher card type wins;
        on a full tie both players redraw.
        """
        card_one = self.pile_cards.choice()
        card_two = self.pile_cards.choice()
        logger.info('First Drawing for Player %s: %s', self.players[0], card_one)
        logger.info('First Drawing for Player %s: %s', self.players[1], card_two)
        if card_one.month < card_two.month:
            self.set_player_one_oya()
        elif card_one.month == card_two.month:
            if card_one.type_ > card_two.type_:
                self.set_player_one_oya()
            elif card_one.type_ == card_two.type_:
                logger.info('Two Identical Cards, Will Redraw')
                self.decide_oya()
            else:
                self.set_player_two_oya()
        else:
            self.set_player_two_oya()

    def set_player_one_oya(self):
        """Make player one the oya and player two the ko."""
        logger.info('Player: %s is the Oya', self.players[0])
        self.oya, self.ko = self.players[0], self.players[1]

    def set_player_two_oya(self):
        """Make player two the oya and player one the ko."""
        logger.info('Player: %s is the Oya', self.players[1])
        # BUG FIX: this previously assigned players[0] as oya (a copy of
        # set_player_one_oya), so player two could never become the oya.
        self.oya, self.ko = self.players[1], self.players[0]

    def distribute_cards(self):
        """Shuffle the pile, then deal eight cards each to oya, ko, mountain."""
        self.pile_cards.shuffle()
        for _ in range(8):
            self.oya.hand_cards.add(self.pile_cards.pop())
        for _ in range(8):
            self.ko.hand_cards.add(self.pile_cards.pop())
        for _ in range(8):
            self.mountain_cards.add(self.pile_cards.pop())
        self.debug()

    def has_one_month_in_mountain(self):
        """Return True when all four cards of some month are in the mountain."""
        month_cards = self.mountain_cards.split_by_month()
        return Counter(map(len, month_cards.values()))[4] != 0

    def debug(self):
        """Log the full table state at debug level."""
        logger.debug('[DEBUG] Mountain Cards: %s', self.mountain_cards)
        logger.debug('[DEBUG] Oya: %s, in Hand: %s, Taken: %s', self.oya, self.oya.hand_cards, self.oya.taken_cards)
        logger.debug('[DEBUG] Ko: %s, in Hand: %s, Taken: %s', self.ko, self.ko.hand_cards, self.ko.taken_cards)

    def end(self, has_yaku_in_hand=False, has_one_month_in_mountain=False):
        """Settle the round: award points, swapping oya/ko when the ko wins."""
        while True:
            if has_one_month_in_mountain:
                logger.info('This round will be discarded because all four cards of one month in the mountain cards')
                break
            if has_yaku_in_hand:
                logger.info('This round will be discarded because all four cards of one month in the player hand cards')
                break
            if self.end_player is None:
                # No one has enough yaku, oya get 6 points
                logger.info('No one gets yaku')
                self.oya.score += 6
                break
            if self.end_player == self.oya:
                logger.info('Oya %s wins with %s', self.oya, self.oya.taken_cards.score)
                self.oya.score += self.get_points(self.oya)
            else:
                logger.info('Ko %s wins with %s', self.ko, self.ko.taken_cards.score)
                self.ko.score += self.get_points(self.ko)
                # The winning ko becomes the oya of the next round.
                self.oya, self.ko = self.ko, self.oya
            break
        self.clear()

    def get_points(self, player):
        """Return the total points of the player's completed yaku."""
        return sum(yaku.point for yaku in player.taken_cards.score)

    def clear(self):
        """Empty both players' hand and taken cards."""
        self.oya.hand_cards.clear()
        self.oya.taken_cards.clear()
        self.ko.hand_cards.clear()
        self.ko.taken_cards.clear()
        self.debug()

    def has_yaku_in_hand(self):
        """Return True when either player was dealt a complete yaku."""
        return (
            self.oya.hand_cards.score != [] or
            self.ko.hand_cards.score != []
        )

    def process_player_action(self):
        """Let the current player play a card, then draw one from the pile."""
        played_card = self.current_player.play()
        logger.info('Player %s plays card: %s', self.current_player, played_card)
        if played_card is not None:
            self.pair_card(played_card)
        next_card = self.pile_cards.pop()
        logger.info('Next card from pile: %s', next_card)
        if next_card is not None:
            self.pair_card(next_card)
        # A player completing a yaku may stop (winning the round) or call
        # koi-koi and keep playing for a bigger hand.
        if self.current_player.taken_cards.score != []:
            is_koikoi = self.current_player.get_koikoi()
            if not is_koikoi:
                self.end_player = self.current_player

    def pair_card(self, card):
        """Match `card` against the mountain and move cards accordingly."""
        paired_cards = self.mountain_cards.get_paired_cards(card)
        if len(paired_cards) == 3 or len(paired_cards) == 1:
            # One match takes the pair; three matches take the whole month.
            self.current_player.taken_cards.add(card)
            self.current_player.taken_cards.add_multi(paired_cards)
        elif len(paired_cards) == 2:
            # The player chooses which of the two matching cards to take.
            selected_card, unselected_card = self.current_player.get_selected_card(paired_cards)
            self.current_player.taken_cards.add(card)
            self.current_player.taken_cards.add(selected_card)
            self.mountain_cards.add(unselected_card)
        else:
            # No match: the played/drawn card joins the mountain.
            self.mountain_cards.add(card)

    def should_end(self):
        """Return True when the round cannot or should not continue."""
        return (
            self.end_player is not None or
            self.is_hand_card_empty() or
            len(self.pile_cards) == 0
        )

    def is_hand_card_empty(self):
        """Return True when either player has run out of hand cards."""
        return (
            len(self.oya.hand_cards) == 0 or
            len(self.ko.hand_cards) == 0
        )
if __name__ == '__main__':
    # Smoke-test: pit two computer players against each other for one round.
    from hadafuna.core.card import ALL_CARDS
    from hadafuna.core.player import ComputerPlayer
    player_one = ComputerPlayer('Deep Blue')
    player_two = ComputerPlayer('Deep Red')
    # BUG FIX: KoikoiRound.__init__ takes (cards, players, month, oya); the
    # oya argument was missing here, so this demo raised a TypeError before
    # the round began. Passing None lets the round draw cards for the oya.
    test_round = KoikoiRound(ALL_CARDS, [player_one, player_two], None, None)
    test_round.start()
| |
"""The tests for the Google Wifi platform."""
import unittest
from unittest.mock import patch, Mock
from datetime import datetime, timedelta
import requests_mock
from homeassistant import core as ha
from homeassistant.setup import setup_component
import homeassistant.components.sensor.google_wifi as google_wifi
from homeassistant.const import STATE_UNKNOWN
from homeassistant.util import dt as dt_util
from tests.common import get_test_home_assistant, assert_setup_component
# Sensor name used throughout the tests below.
NAME = 'foo'
# Fake Google Wifi status payload for the initial state: online, with an
# uptime of exactly one day (86400 s).
MOCK_DATA = ('{"software": {"softwareVersion":"initial",'
             '"updateNewVersion":"initial"},'
             '"system": {"uptime":86400},'
             '"wan": {"localIpAddress":"initial", "online":true,'
             '"ipAddress":true}}')
# Payload for a later poll: new version installed ("0.0.0.0" marks
# up-to-date), two days of uptime, and the router offline.
MOCK_DATA_NEXT = ('{"software": {"softwareVersion":"next",'
                  '"updateNewVersion":"0.0.0.0"},'
                  '"system": {"uptime":172800},'
                  '"wan": {"localIpAddress":"next", "online":false,'
                  '"ipAddress":false}}')
# Payload with every expected key missing, to exercise error handling.
MOCK_DATA_MISSING = ('{"software": {},'
                     '"system": {},'
                     '"wan": {}}')
class TestGoogleWifiSetup(unittest.TestCase):
    """Tests for setting up the Google Wifi sensor platform."""

    def setUp(self):
        """Create a fresh test Home Assistant instance per test."""
        self.hass = get_test_home_assistant()

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    @requests_mock.Mocker()
    def test_setup_minimum(self, mock_req):
        """Test setup with minimum configuration."""
        resource = 'http://' + google_wifi.DEFAULT_HOST + google_wifi.ENDPOINT
        mock_req.get(resource, status_code=200)
        config = {
            'sensor': {
                'platform': 'google_wifi',
                'monitored_conditions': ['uptime']
            }
        }
        assert setup_component(self.hass, 'sensor', config)
        assert_setup_component(1, 'sensor')

    @requests_mock.Mocker()
    def test_setup_get(self, mock_req):
        """Test setup with full configuration."""
        resource = 'http://' + 'localhost' + google_wifi.ENDPOINT
        mock_req.get(resource, status_code=200)
        config = {
            'sensor': {
                'platform': 'google_wifi',
                'host': 'localhost',
                'name': 'Test Wifi',
                'monitored_conditions': ['current_version',
                                         'new_version',
                                         'uptime',
                                         'last_restart',
                                         'local_ip',
                                         'status']
            }
        }
        assert setup_component(self.hass, 'sensor', config)
        assert_setup_component(6, 'sensor')
class TestGoogleWifiSensor(unittest.TestCase):
    """Tests for Google Wifi sensor platform."""

    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Build the API and one sensor per condition against the initial
        # mocked payload.
        with requests_mock.Mocker() as mock_req:
            self.setup_api(MOCK_DATA, mock_req)

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    def setup_api(self, data, mock_req):
        """Set up API with fake data.

        Creates self.api plus a sensor/name/units/icon entry per monitored
        condition in self.sensor_dict. `now` is pinned so the last-restart
        timestamp derived from the mocked uptime is deterministic.
        """
        resource = '{}{}{}'.format(
            'http://', 'localhost', google_wifi.ENDPOINT)
        now = datetime(1970, month=1, day=1)
        with patch('homeassistant.util.dt.now', return_value=now):
            mock_req.get(resource, text=data, status_code=200)
            conditions = google_wifi.MONITORED_CONDITIONS.keys()
            self.api = google_wifi.GoogleWifiAPI("localhost", conditions)
        self.name = NAME
        self.sensor_dict = dict()
        for condition, cond_list in google_wifi.MONITORED_CONDITIONS.items():
            sensor = google_wifi.GoogleWifiSensor(
                self.api, self.name, condition)
            name = '{}_{}'.format(self.name, condition)
            units = cond_list[1]
            icon = cond_list[2]
            self.sensor_dict[condition] = {
                'sensor': sensor,
                'name': name,
                'units': units,
                'icon': icon
            }

    def fake_delay(self, ha_delay):
        """Fake delay to prevent update throttle.

        Fires a time-changed event `ha_delay` seconds in the future so the
        platform's throttle decorator allows the next update.
        """
        hass_now = dt_util.utcnow()
        shifted_time = hass_now + timedelta(seconds=ha_delay)
        self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time})

    def test_name(self):
        """Test the name."""
        for name in self.sensor_dict:
            sensor = self.sensor_dict[name]['sensor']
            test_name = self.sensor_dict[name]['name']
            assert test_name == sensor.name

    def test_unit_of_measurement(self):
        """Test the unit of measurement."""
        for name in self.sensor_dict:
            sensor = self.sensor_dict[name]['sensor']
            assert \
                self.sensor_dict[name]['units'] == sensor.unit_of_measurement

    def test_icon(self):
        """Test the icon."""
        for name in self.sensor_dict:
            sensor = self.sensor_dict[name]['sensor']
            assert self.sensor_dict[name]['icon'] == sensor.icon

    @requests_mock.Mocker()
    def test_state(self, mock_req):
        """Test the initial state."""
        self.setup_api(MOCK_DATA, mock_req)
        now = datetime(1970, month=1, day=1)
        with patch('homeassistant.util.dt.now', return_value=now):
            for name in self.sensor_dict:
                sensor = self.sensor_dict[name]['sensor']
                # Bypass the throttle before each per-sensor update.
                self.fake_delay(2)
                sensor.update()
                if name == google_wifi.ATTR_LAST_RESTART:
                    # 86400 s of uptime before the pinned "now" (1970-01-01).
                    assert '1969-12-31 00:00:00' == sensor.state
                elif name == google_wifi.ATTR_UPTIME:
                    assert 1 == sensor.state
                elif name == google_wifi.ATTR_STATUS:
                    assert 'Online' == sensor.state
                else:
                    assert 'initial' == sensor.state

    @requests_mock.Mocker()
    def test_update_when_value_is_none(self, mock_req):
        """Test state gets updated to unknown when sensor returns no data."""
        self.setup_api(None, mock_req)
        for name in self.sensor_dict:
            sensor = self.sensor_dict[name]['sensor']
            self.fake_delay(2)
            sensor.update()
            assert STATE_UNKNOWN == sensor.state

    @requests_mock.Mocker()
    def test_update_when_value_changed(self, mock_req):
        """Test state gets updated when sensor returns a new status."""
        self.setup_api(MOCK_DATA_NEXT, mock_req)
        now = datetime(1970, month=1, day=1)
        with patch('homeassistant.util.dt.now', return_value=now):
            for name in self.sensor_dict:
                sensor = self.sensor_dict[name]['sensor']
                self.fake_delay(2)
                sensor.update()
                if name == google_wifi.ATTR_LAST_RESTART:
                    # 172800 s (two days) of uptime before the pinned "now".
                    assert '1969-12-30 00:00:00' == sensor.state
                elif name == google_wifi.ATTR_UPTIME:
                    assert 2 == sensor.state
                elif name == google_wifi.ATTR_STATUS:
                    assert 'Offline' == sensor.state
                elif name == google_wifi.ATTR_NEW_VERSION:
                    # "0.0.0.0" in the payload is reported as 'Latest'.
                    assert 'Latest' == sensor.state
                elif name == google_wifi.ATTR_LOCAL_IP:
                    # No IP is exposed while the router is offline.
                    assert STATE_UNKNOWN == sensor.state
                else:
                    assert 'next' == sensor.state

    @requests_mock.Mocker()
    def test_when_api_data_missing(self, mock_req):
        """Test state logs an error when data is missing."""
        self.setup_api(MOCK_DATA_MISSING, mock_req)
        now = datetime(1970, month=1, day=1)
        with patch('homeassistant.util.dt.now', return_value=now):
            for name in self.sensor_dict:
                sensor = self.sensor_dict[name]['sensor']
                self.fake_delay(2)
                sensor.update()
                assert STATE_UNKNOWN == sensor.state

    def test_update_when_unavailable(self):
        """Test state updates when Google Wifi unavailable."""
        # NOTE(review): update_side_effect() is *called* here, so its body
        # (clearing api.data / api.available) runs immediately and
        # side_effect receives None. The assertion still holds, but the
        # intent was probably side_effect=self.update_side_effect — confirm.
        self.api.update = Mock('google_wifi.GoogleWifiAPI.update',
                               side_effect=self.update_side_effect())
        for name in self.sensor_dict:
            sensor = self.sensor_dict[name]['sensor']
            sensor.update()
            assert STATE_UNKNOWN == sensor.state

    def update_side_effect(self):
        """Mock representation of update function."""
        self.api.data = None
        self.api.available = False
| |
"""Tests for the WiLight integration."""
import pytest
import pywilight
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.components.wilight import (
HOST,
UPNP_MAC_ADDRESS,
UPNP_MODEL_NAME_COLOR,
UPNP_MODEL_NAME_DIMMER,
UPNP_MODEL_NAME_LIGHT_FAN,
UPNP_MODEL_NAME_P_B,
UPNP_MODEL_NUMBER,
UPNP_SERIAL,
WILIGHT_ID,
setup_integration,
)
@pytest.fixture(name="dummy_get_components_from_model_light")
def mock_dummy_get_components_from_model_light():
    """Patch pywilight so the device model maps to a light-only component list."""
    components = ["light"]
    with patch("pywilight.get_components_from_model", return_value=components):
        yield components
@pytest.fixture(name="dummy_device_from_host_light_fan")
def mock_dummy_device_from_host_light_fan():
    """Mock a valid api device for the light + fan model."""
    xml_url = f"http://{HOST}:45995/wilight.xml"
    device = pywilight.wilight_from_discovery(
        xml_url,
        UPNP_MAC_ADDRESS,
        UPNP_MODEL_NAME_LIGHT_FAN,
        UPNP_SERIAL,
        UPNP_MODEL_NUMBER,
    )
    # Dummy mode: no real network traffic is generated by the device.
    device.set_dummy(True)
    with patch("pywilight.device_from_host", return_value=device):
        yield device
@pytest.fixture(name="dummy_device_from_host_pb")
def mock_dummy_device_from_host_pb():
    """Mock a valid api device for the push-button model."""
    xml_url = f"http://{HOST}:45995/wilight.xml"
    device = pywilight.wilight_from_discovery(
        xml_url,
        UPNP_MAC_ADDRESS,
        UPNP_MODEL_NAME_P_B,
        UPNP_SERIAL,
        UPNP_MODEL_NUMBER,
    )
    # Dummy mode: no real network traffic is generated by the device.
    device.set_dummy(True)
    with patch("pywilight.device_from_host", return_value=device):
        yield device
@pytest.fixture(name="dummy_device_from_host_dimmer")
def mock_dummy_device_from_host_dimmer():
    """Mock a valid api device for the dimmer model."""
    xml_url = f"http://{HOST}:45995/wilight.xml"
    device = pywilight.wilight_from_discovery(
        xml_url,
        UPNP_MAC_ADDRESS,
        UPNP_MODEL_NAME_DIMMER,
        UPNP_SERIAL,
        UPNP_MODEL_NUMBER,
    )
    # Dummy mode: no real network traffic is generated by the device.
    device.set_dummy(True)
    with patch("pywilight.device_from_host", return_value=device):
        yield device
@pytest.fixture(name="dummy_device_from_host_color")
def mock_dummy_device_from_host_color():
    """Mock a valid api device for the color-light model."""
    xml_url = f"http://{HOST}:45995/wilight.xml"
    device = pywilight.wilight_from_discovery(
        xml_url,
        UPNP_MAC_ADDRESS,
        UPNP_MODEL_NAME_COLOR,
        UPNP_SERIAL,
        UPNP_MODEL_NUMBER,
    )
    # Dummy mode: no real network traffic is generated by the device.
    device.set_dummy(True)
    with patch("pywilight.device_from_host", return_value=device):
        yield device
async def test_loading_light(
    hass: HomeAssistantType,
    dummy_device_from_host_light_fan,
    dummy_get_components_from_model_light,
) -> None:
    """Test the WiLight configuration entry loading."""
    # Using light_fan and removing fan from get_components_from_model
    # to test light.py line 28
    entry = await setup_integration(hass)
    assert entry
    assert entry.unique_id == WILIGHT_ID
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    # First segment of the strip
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_OFF
    # Registry unique IDs are the serial plus a zero-based segment index.
    entry = entity_registry.async_get("light.wl000000000099_1")
    assert entry
    assert entry.unique_id == "WL000000000099_0"
async def test_on_off_light_state(
    hass: HomeAssistantType, dummy_device_from_host_pb
) -> None:
    """Test the change of state of the light switches."""
    await setup_integration(hass)
    entity_id = "light.wl000000000099_1"

    async def call_service_and_get_state(service):
        # Invoke the light service on the entity and return its new state.
        await hass.services.async_call(
            LIGHT_DOMAIN,
            service,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()
        return hass.states.get(entity_id)

    # Turn on
    state = await call_service_and_get_state(SERVICE_TURN_ON)
    assert state
    assert state.state == STATE_ON

    # Turn off
    state = await call_service_and_get_state(SERVICE_TURN_OFF)
    assert state
    assert state.state == STATE_OFF
async def test_dimmer_light_state(
    hass: HomeAssistantType, dummy_device_from_host_dimmer
) -> None:
    """Test the change of state of the light switches."""
    await setup_integration(hass)
    # Turning on with an explicit brightness sets both state and level.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_BRIGHTNESS: 42, ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 42
    # Brightness 0 behaves as an implicit turn-off.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_BRIGHTNESS: 0, ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_OFF
    # A non-zero brightness turns the dimmer back on at that level.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_BRIGHTNESS: 100, ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 100
    # Explicit turn-off service.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_OFF
    # Turn on
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
async def test_color_light_state(
    hass: HomeAssistantType, dummy_device_from_host_color
) -> None:
    """Test the change of state of the light switches."""
    await setup_integration(hass)
    # Turning on with brightness and hue/saturation sets all attributes.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {
            ATTR_BRIGHTNESS: 42,
            ATTR_HS_COLOR: [0, 100],
            ATTR_ENTITY_ID: "light.wl000000000099_1",
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 42
    # The reported hue/saturation are floats; round before comparing.
    state_color = [
        round(state.attributes.get(ATTR_HS_COLOR)[0]),
        round(state.attributes.get(ATTR_HS_COLOR)[1]),
    ]
    assert state_color == [0, 100]
    # Brightness 0 behaves as an implicit turn-off.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_BRIGHTNESS: 0, ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_OFF
    # Turning back on with new brightness and color applies both.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {
            ATTR_BRIGHTNESS: 100,
            ATTR_HS_COLOR: [270, 50],
            ATTR_ENTITY_ID: "light.wl000000000099_1",
        },
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 100
    state_color = [
        round(state.attributes.get(ATTR_HS_COLOR)[0]),
        round(state.attributes.get(ATTR_HS_COLOR)[1]),
    ]
    assert state_color == [270, 50]
    # Explicit turn-off service.
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_OFF
    # Turn on
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    # Hue = 0, Saturation = 100
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_HS_COLOR: [0, 100], ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    state_color = [
        round(state.attributes.get(ATTR_HS_COLOR)[0]),
        round(state.attributes.get(ATTR_HS_COLOR)[1]),
    ]
    assert state_color == [0, 100]
    # Brightness = 60
    await hass.services.async_call(
        LIGHT_DOMAIN,
        SERVICE_TURN_ON,
        {ATTR_BRIGHTNESS: 60, ATTR_ENTITY_ID: "light.wl000000000099_1"},
        blocking=True,
    )
    await hass.async_block_till_done()
    state = hass.states.get("light.wl000000000099_1")
    assert state
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 60
| |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import token
import tokenize
from io import BytesIO
from keyword import iskeyword
from pathlib import Path
from typing import (
Any, AnyStr, Callable, IO, Iterable, Optional, Sequence, Tuple, Union,
overload,
)
from .. import Language, SourceSummary
from ...lexical_analysis import Lexeme, Location, Position, Token
from ...vocabulary import Vocabulary
here = Path(__file__).parent
class Python(Language):
"""
Defines the Python 3.6 language.
"""
extensions = {'.py'}
vocabulary = Vocabulary.from_json_file(Path(__file__).parent /
'vocabulary.json')
def tokenize(self, source: Union[str, bytes, IO[bytes]]) -> Sequence[Token]:
"""
Tokenizes Python sources.
NOTE:
This may include extra, unwanted tokens, including
COMMENT, ENCODING, ENDMARKER, ERRORTOKEN
NOTE:
There are TWO newline tokens:
NEWLINE and NL
NEWLINES are actually used in the grammar;
whereas NL are "filler" newlines, for formatting.
BUGS: does not accept the same language as check_syntax()!
To use CPython's internal tokenizer, one must create a brand new
Python-accessible interface for it.
"""
def open_as_file() -> IO[bytes]:
if isinstance(source, str):
# TODO: technically incorrect -- have to check coding line,
# but I ain't doing that...
return BytesIO(source.encode('UTF-8'))
elif isinstance(source, bytes):
return BytesIO(source)
else:
return source
with open_as_file() as source_file:
token_stream = tokenize.tokenize(source_file.readline)
return [Token(name=token.tok_name[tok.type],
value=tok.string,
start=Position(line=tok.start[0], column=tok.start[1]),
end=Position(line=tok.end[0], column=tok.end[1]))
for tok in token_stream]
def check_syntax(self, source: Union[str, bytes]) -> bool:
r"""
Given a source file, returns True if the file compiles.
>>> python.check_syntax('print("Hello, World!")')
True
>>> python.check_syntax('import java.util.*;')
False
>>> python.check_syntax('\x89PNG\x0D\x0A\x1A\x0A\x00\x00\x00\x0D')
False
>>> python.check_syntax(r"AWESOME_CHAR_ESCAPE = '\x0G'")
False
"""
# I have the sneaking suspicion that compile() puts stuff in a cache that
# is NOT garbage collected! Since I consider this a pretty serious memory
# leak, I implemented this batshit crazy technique. Basically, let the
# operating system be our garbage collector.
pid = os.fork()
if pid == 0:
# Child process. Let it crash!!!
try:
compile(source, '<unknown>', 'exec')
except Exception:
# Use _exit so it doesn't raise a SystemExit exception.
os._exit(-1)
else:
os._exit(0)
else:
# Parent process.
child_pid, status = os.waitpid(pid, 0)
return status == 0
def summarize_tokens(self, source: Iterable[Token]) -> SourceSummary:
r"""
Calculates the word count of a Python source.
>>> python.summarize('import sys\n\nsys.stdout.write("hello")\n')
SourceSummary(sloc=2, n_tokens=12)
"""
tokens = list(source)
if any(tok.name == 'ERRORTOKEN' for tok in tokens):
raise SyntaxError('ERRORTOKEN')
tokens = [token for token in tokens if is_physical_token(token)]
INTANGIBLE_TOKENS = {'DEDENT', 'NEWLINE'}
# Special case DEDENT and NEWLINE tokens:
# They're do not count towards the line count (they are often on empty
# lines).
unique_lines = set(lineno for token in tokens
for lineno in token.lines
if token.name not in INTANGIBLE_TOKENS)
return SourceSummary(sloc=len(unique_lines), n_tokens=len(tokens))
    def vocabularize_tokens(self, source: Iterable[Token]) -> Iterable[Tuple[Location, str]]:
        """
        Yield ``(location, vocabulary_entry)`` pairs for each token in the
        stream, skipping tokens that carry no vocabulary information.
        """
        EXTRANEOUS_TOKENS = {
            # Always occurs as the first token: internally indicates the file
            # encoding, but is irrelevant once the stream is already tokenized
            'ENCODING',
            # Always occurs as the last token.
            'ENDMARKER',
            # Insignificant newline; not to be confused with NEWLINE
            'NL',
            # Discard comments
            'COMMENT',
            # Represents a tokenization error. This should never appear for
            # syntactically correct files.
            'ERRORTOKEN',
        }
        for token in source:
            vocab_entry = open_closed_tokens(token)
            # Skip the extraneous tokens
            if vocab_entry in EXTRANEOUS_TOKENS:
                continue
            yield token.location, vocab_entry
def is_physical_token(token: Lexeme) -> bool:
    """
    Return True when the token is something that should be counted towards
    sloc (source lines of code); False for synthetic bookkeeping tokens.
    """
    # These token classes never correspond to code the author wrote.
    fake_tokens = {'ENDMARKER', 'ENCODING', 'COMMENT', 'NL', 'ERRORTOKEN'}
    return token.name not in fake_tokens
def open_closed_tokens(token: Lexeme) -> str:
    """
    'Flattens' Python into tokens based on whether the token is open or
    closed.
    """
    # Token classes whose literal source text is used verbatim as the entry.
    VERBATIM_CLASSES = {
        "AMPER", "AMPEREQUAL", "ASYNC", "AT", "ATEQUAL", "AWAIT", "CIRCUMFLEX",
        "CIRCUMFLEXEQUAL", "COLON", "COMMA", "DOT", "DOUBLESLASH",
        "DOUBLESLASHEQUAL", "DOUBLESTAR", "DOUBLESTAREQUAL", "ELLIPSIS",
        "EQEQUAL", "EQUAL", "GREATER", "GREATEREQUAL", "LBRACE", "LEFTSHIFT",
        "LEFTSHIFTEQUAL", "LESS", "LESSEQUAL", "LPAR", "LSQB", "MINEQUAL",
        "MINUS", "NOTEQUAL", "OP", "PERCENT", "PERCENTEQUAL", "PLUS", "PLUSEQUAL",
        "RARROW", "RBRACE", "RIGHTSHIFT", "RIGHTSHIFTEQUAL", "RPAR", "RSQB",
        "SEMI", "SLASH", "SLASHEQUAL", "STAR", "STAREQUAL", "TILDE", "VBAR",
        "VBAREQUAL"
    }
    name = token.name
    if name == 'NAME':
        # NAME covers both identifiers and keywords; keywords keep their text,
        # identifiers are abstracted to a single class.
        return token.value if iskeyword(token.value) else '<IDENTIFIER>'
    if name in VERBATIM_CLASSES:
        # Operators and punctuation map verbatim to their source text.
        assert ' ' not in token.value
        return token.value
    if name in {'NUMBER', 'STRING'}:
        # Literals are abstracted to their class, in <ANGLE-BRACKET> notation.
        return f'<{name.upper()}>'
    # Everything else is a structural token; its class name is the entry.
    assert name in {
        'NEWLINE', 'INDENT', 'DEDENT',
        'ENDMARKER', 'ENCODING', 'COMMENT', 'NL', 'ERRORTOKEN'
    }
    return name
# Module-level singleton used by the doctests and callers of this module.
python: Language = Python()
| |
from datetime import datetime, timedelta
import calendar
from dateutil.relativedelta import relativedelta
from django.db import models
from timezone_field import TimeZoneField
import fleming
import pytz
# Valid values for LocalizedRecurrence.interval: (stored value, label).
INTERVAL_CHOICES = (
    ('DAY', 'Day'),
    ('WEEK', 'Week'),
    ('MONTH', 'Month'),
    ('QUARTER', 'Quarter'),
    ('YEAR', 'Year'),
)
class LocalizedRecurrenceQuerySet(models.query.QuerySet):
    """QuerySet whose recurrences can be re-scheduled in bulk."""

    def update_schedule(self, time=None):
        """
        Advance the schedule of every recurrence in this queryset.

        :type time: :py:class:`datetime.datetime`
        :param time: The time the schedule was checked. If ``None``,
            defaults to ``datetime.utcnow()``.

        Typical usage: filter for past-due recurrences, act on them, then
        call this method so each one is re-scheduled for its next
        recurrence.

        .. code-block:: python

            >>> past_due = LocalizedRecurrence.objects.filter(
            ...     next_scheduled__lte=datetime.utcnow()
            ... )
            >>> # Do something with past_due recurrences
            >>> past_due.update_schedule()

        Side effect: the ``next_scheduled`` attribute of every recurrence
        in the queryset is updated to the new time in utc.
        """
        _update_schedule(recurrences=self, time=time)
class LocalizedRecurrenceManager(models.Manager):
    """Default manager for LocalizedRecurrence with bulk schedule updates."""

    def get_queryset(self):
        # Pass ``using=self._db`` so the custom queryset honours
        # ``.using()``/database-router selection; omitting it silently pins
        # the queryset to the default database.
        return LocalizedRecurrenceQuerySet(self.model, using=self._db)

    def update_schedule(self, time=None):
        """
        Update the schedule times for all recurrences.

        :type time: :py:class:`datetime.datetime`
        :param time: The time the schedule was checked. If ``None``,
            defaults to ``datetime.utcnow()``.

        Functions exactly the same as the method on the querysets. The
        following two calls are equivalent:

        .. code-block:: python

            >>> LocalizedRecurrence.objects.all().update_schedule()
            >>> LocalizedRecurrence.objects.update_schedule()

        Side effect: the ``next_scheduled`` attribute of every recurrence
        is updated to the new time in utc.
        """
        self.get_queryset().update_schedule(time=time)
class LocalizedRecurrence(models.Model):
    """The information necessary to act on events in users local
    times. Can be instantiated with ``LocalizedRecurrence.objects.create``

    :type interval: str
    :param interval: The interval at which the event recurs.
        One of ``'DAY'``, ``'WEEK'``, ``'MONTH'``, ``'QUARTER'``, ``'YEAR'``.

    :type offset: :py:class:`datetime.timedelta`
    :param offset: The amount of time into the interval that the event
        occurs at.

        If the interval is monthly, quarterly, or yearly, the number
        of days in the interval are variable. In the case of offsets
        with more days than the number of days in the interval,
        updating the schedule will not raise an error, but will update
        to the last day in the interval if necessary.

    :type timezone: pytz.timezone
    :param timezone: The local timezone for the user.

    Localized recurrences are simply objects in the database. They can
    be created with standard django ORM tools:

    .. code-block:: python

        >>> from datetime import datetime, timedelta
        >>> my_lr = LocalizedRecurrence.objects.create(
        ...     interval='DAY',
        ...     offset=timedelta(hours=15),
        ...     timezone=pytz.timezone('US/Eastern'),
        ... )

    Once instantiated it is simple to check if a localized recurrence
    is due to be acted upon.

    .. code-block:: python

        >>> my_lr.next_scheduled < datetime.utcnow()
        True

    After a recurrence has been acted upon, its schedule can be
    simply reset to occur at the prescribed time in the next interval.

    .. code-block:: python

        >>> my_lr.update_schedule()
        >>> my_lr.next_scheduled < datetime.utcnow()
        False
    """
    interval = models.CharField(max_length=18, default='DAY', choices=INTERVAL_CHOICES)
    offset = models.DurationField(default=timedelta(0))
    timezone = TimeZoneField(default='UTC')
    previous_scheduled = models.DateTimeField(default=datetime(1970, 1, 1))
    next_scheduled = models.DateTimeField(default=datetime(1970, 1, 1))

    objects = LocalizedRecurrenceManager()

    def __str__(self):
        return 'ID: {0}, Interval: {1}, Next Scheduled: {2}'.format(self.id, self.interval, self.next_scheduled)

    def update(self, **updates):
        """Updates fields in the localized recurrence."""
        for field, value in updates.items():
            setattr(self, field, value)
        return self.save()

    def update_schedule(self, time=None):
        """
        Update the schedule for this recurrence or an object it tracks.

        :type time: :py:class:`datetime.datetime`
        :param time: The time the schedule was checked. If ``None``,
            defaults to ``datetime.utcnow()``.

        Side effect: the ``next_scheduled`` attribute will be updated to
        the new time in utc.
        """
        _update_schedule([self], time)

    def utc_of_next_schedule(self, current_time):
        """
        Generates the next recurrence time in utc after the current time.

        :type current_time: :py:class:`datetime.datetime`
        :param current_time: The current time in utc.

        Usually this function does not need to be called directly, but
        will be used by ``update_schedule``. If however, you need to
        check when the next recurrence of an instance would happen,
        without persisting an update to the schedule, this function
        can be called without side-effect.
        """
        # Make a naive copy of the next scheduled datetime (drops tzinfo and
        # microseconds).
        next_scheduled_utc = datetime(
            self.next_scheduled.year, self.next_scheduled.month, self.next_scheduled.day,
            self.next_scheduled.hour, self.next_scheduled.minute, self.next_scheduled.second
        )
        additional_time = {
            'DAY': timedelta(days=1),
            'WEEK': timedelta(weeks=1),
            'MONTH': relativedelta(months=1),
            'QUARTER': relativedelta(months=3),
            'YEAR': relativedelta(years=1),
        }
        # Keep updating next scheduled to the next recurrence until it is greater than current time
        while next_scheduled_utc <= current_time:
            # Convert to local time
            next_scheduled_local = fleming.convert_to_tz(next_scheduled_utc, self.timezone)
            # Replace with the offset data
            replaced_with_offset = _replace_with_offset(next_scheduled_local, self.offset, self.interval)
            # Normalize to handle dst
            local_scheduled_time = fleming.fleming.dst_normalize(replaced_with_offset)
            # Add the time delta
            next_local_scheduled_time = fleming.add_timedelta(
                local_scheduled_time,
                additional_time[self.interval],
                within_tz=self.timezone
            )
            # Month intervals with offsets of 28+ days are treated as
            # "end of month": clamp to the last day of the target month
            # instead of re-applying the day offset.
            is_last_day = self.interval == 'MONTH' and self.offset.days >= 28
            if is_last_day:
                _, last_day_of_next_month = calendar.monthrange(
                    next_local_scheduled_time.year,
                    next_local_scheduled_time.month
                )
                # Replace day with last day of month
                next_local_scheduled_time = next_local_scheduled_time.replace(day=last_day_of_next_month)
            else:
                # Apply the offset info for all cases that are not end of month
                next_local_scheduled_time = _replace_with_offset(next_local_scheduled_time, self.offset, self.interval)
            # Convert back to utc
            next_scheduled_utc = fleming.convert_to_tz(next_local_scheduled_time, pytz.utc, return_naive=True)
        return next_scheduled_utc
def _update_schedule(recurrences, time=None):
    """
    Update the schedule times for all the provided recurrences.

    Each recurrence's ``next_scheduled`` is advanced past ``time``
    (defaulting to the current utc time), ``previous_scheduled`` records
    when the update happened, and the recurrence is saved.
    """
    time = time or datetime.utcnow()
    for recurrence in recurrences:
        recurrence.previous_scheduled = time
        recurrence.next_scheduled = recurrence.utc_of_next_schedule(time)
        recurrence.save()
def _replace_with_offset(dt, offset, interval):
    """
    Replace components of a datetime with those of a timedelta.

    This replacement is done within the given interval: the final result
    is a datetime at the desired offset into the interval containing
    ``dt``. ``offset.days`` selects the day within the interval and the
    seconds component of ``offset`` selects the time of day.

    :raises ValueError: if ``interval`` is not one of DAY, WEEK, MONTH,
        QUARTER, YEAR (case-insensitive).
    """
    # Split the sub-day part of the offset into clock components.
    hours, minutes, seconds = offset.seconds // 3600, (offset.seconds // 60) % 60, offset.seconds % 60
    interval = interval.lower()
    if interval == 'day':
        dt_out = dt.replace(hour=hours, minute=minutes, second=seconds)
    elif interval == 'week':
        # offset.days acts as a weekday index, shifted relative to dt's weekday.
        dt_out = dt + timedelta(days=offset.days - dt.weekday())
        dt_out = dt_out.replace(hour=hours, minute=minutes, second=seconds)
    elif interval == 'month':
        # Clamp the requested day to the last day of dt's month.
        _, last_day = calendar.monthrange(dt.year, dt.month)
        day = (offset.days + 1) if (offset.days + 1) <= last_day else last_day
        dt_out = dt.replace(day=day, hour=hours, minute=minutes, second=seconds)
    elif interval == 'quarter':
        # Clamp the day offset to the length of dt's quarter, then add it to
        # the start of the quarter (fleming.floor with month=3 floors to a
        # quarter boundary).
        month_range = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]][int((dt.month - 1) / 3)]
        quarter_days = sum(calendar.monthrange(dt.year, month)[1] for month in month_range)
        days = offset.days if offset.days <= (quarter_days - 1) else (quarter_days - 1)
        dt_out = fleming.floor(dt, month=3).replace(hour=hours, minute=minutes, second=seconds)
        dt_out += timedelta(days)
    elif interval == 'year':
        # Clamp the day offset to the length of dt's year (365 or 366 days).
        leap_year_extra_days = 1 if calendar.isleap(dt.year) else 0
        days = offset.days if offset.days <= 364 + leap_year_extra_days else 364 + leap_year_extra_days
        dt_out = fleming.floor(dt, year=1).replace(hour=hours, minute=minutes, second=seconds)
        dt_out += timedelta(days)
    else:
        raise ValueError('{i} is not a proper interval value'.format(i=interval))
    return dt_out
| |
import json
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from wagtail.core.models import Page
from molo.core.models import (
Main, SectionPage, ArticlePage, PageTranslation, Tag,
BannerPage, Languages, SiteLanguageRelation)
from molo.core.utils import generate_slug
from molo.forms.models import (
MoloFormPage, MoloFormField, ArticlePageForms)
class GemTestCaseMixin(object):
    """
    Test-case mixin that builds a molo/wagtail page tree for tests.

    Provides helpers to create the wagtail root page, a ``Main`` homepage
    with an English ``SiteLanguageRelation``, published content pages
    (sections, articles, tags, banners, form pages) and their translations.
    """

    def login(self):
        """Create a superuser and log the test client in as that user."""
        # Create a user
        user = get_user_model().objects.create_superuser(
            username='superuser', email='superuser@email.com', password='pass')
        # Login
        self.client.login(username='superuser', password='pass')
        return user

    def mk_root(self):
        """Get or create the wagtail root page and store it on ``self.root``."""
        page_content_type, created = ContentType.objects.get_or_create(
            model='page',
            app_label='wagtailcore'
        )
        self.root, _ = Page.objects.get_or_create(
            title="Root",
            slug='root',
            content_type=page_content_type,
            path='0001',
            depth=1,
            numchild=1,
            url_path='/',
        )

    def mk_main(self, title, slug, path, url_path):
        """Create and publish a ``Main`` homepage under the root page.

        Also creates the site's ``Languages`` settings with English as the
        active locale.
        """
        self.mk_root()
        main_content_type, created = ContentType.objects.get_or_create(
            model='main', app_label='core')
        # Create a new homepage
        main = Main.objects.create(
            title=title,
            slug=slug,
            content_type=main_content_type,
            path=path,
            depth=2,
            numchild=0,
            url_path=url_path,
        )
        main.save_revision().publish()
        main.save()
        language_setting = Languages.objects.create(
            site_id=main.get_site().pk)
        SiteLanguageRelation.objects.create(
            language_setting=language_setting,
            locale='en',
            is_active=True)
        return main

    def mk_tag(self, parent, slug=None, **kwargs):
        """Create and publish a single Tag under ``parent``."""
        data = {}
        data.update({
            'title': 'Test Tag',
        })
        data.update(kwargs)
        if slug:
            data.update({'slug': slug})
        else:
            data.update({'slug': generate_slug(data['title'])})
        tag = Tag(**data)
        parent.add_child(instance=tag)
        tag.save_revision().publish()
        return tag

    def mk_tags(self, parent, count=2, **kwargs):
        """Create and publish ``count`` Tags under ``parent``."""
        tags = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test Tag {}'.format(i),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title'])
            })
            tag = Tag(**data)
            parent.add_child(instance=tag)
            tag.save_revision().publish()
            tags.append(tag)
        return tags

    def mk_reaction_question(self, parent, article, **kwargs):
        """Create a published MoloFormPage (with one field) linked to
        ``article`` via ArticlePageForms.

        NOTE(review): ``data`` only sets 'introduction', but the slug is
        built from ``data['title']`` — this raises KeyError unless callers
        pass ``title`` in kwargs. Confirm whether 'title' should be set
        here, as the sibling mk_* helpers do.
        """
        data = {}
        data.update({
            'introduction': 'Test Question',
        })
        data.update(kwargs)
        data.update({
            'slug': generate_slug(data['title'])
        })
        form = MoloFormPage(**data)
        parent.add_child(instance=form)
        form.save_revision().publish()
        field = MoloFormField(
            choices='yes,maybe,no', success_message='well done')
        form.add_child(instance=field)
        field.save_revision().publish()
        ArticlePageForms.objects.create(
            reaction_question=form, page=article)
        return form

    def mk_sections(self, parent, count=2, **kwargs):
        """Create and publish ``count`` SectionPages under ``parent``."""
        sections = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test Section %s' % (i, ),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title']),
            })
            section = SectionPage(**data)
            parent.add_child(instance=section)
            section.save_revision().publish()
            sections.append(section)
        return sections

    def mk_articles(self, parent, count=2, **kwargs):
        """Create and publish ``count`` ArticlePages under ``parent``."""
        articles = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test page %s' % (i, ),
                'subtitle': 'Sample page description for %s' % (i, ),
                'body': json.dumps([{
                    'type': 'paragraph',
                    'value': 'Sample page content for %s' % (i, )}]),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title'])
            })
            article = ArticlePage(**data)
            parent.add_child(instance=article)
            article.save_revision().publish()
            articles.append(article)
        return articles

    def mk_banners(self, parent, count=2, **kwargs):
        """Create and publish ``count`` BannerPages under ``parent``."""
        banners = []
        for i in range(count):
            data = {}
            data.update({
                'title': 'Test Banner {}'.format(i),
            })
            data.update(kwargs)
            data.update({
                'slug': generate_slug(data['title'])
            })
            banner = BannerPage(**data)
            parent.add_child(instance=banner)
            banner.save_revision().publish()
            banners.append(banner)
        return banners

    def mk_section(self, parent, **kwargs):
        """Create and publish a single SectionPage under ``parent``."""
        return self.mk_sections(parent, count=1, **kwargs)[0]

    def mk_article(self, parent, **kwargs):
        """Create and publish a single ArticlePage under ``parent``."""
        return self.mk_articles(parent, count=1, **kwargs)[0]

    def mk_banner(self, parent, **kwargs):
        """Create and publish a single BannerPage under ``parent``."""
        return self.mk_banners(parent, count=1, **kwargs)[0]

    def mk_translation(self, source, language, translation):
        """Mark ``translation`` as the translation of ``source`` in
        ``language``, publish it, and cross-link all translated pages of
        ``source`` with each other.
        """
        language_relation = translation.languages.first()
        language_relation.language = language
        language_relation.save()
        translation.language = language
        translation.save_revision().publish()
        source.specific.translated_pages.add(translation)
        source.save()
        PageTranslation.objects.get_or_create(
            page=source, translated_page=translation)
        # Link every translated page to every other translated page of the
        # same source (excluding pages that share the same language).
        for translated_page in \
                source.specific.translated_pages.all():
            translations = source.specific.translated_pages.all().\
                exclude(language__pk=translated_page.language.pk)
            for t in translations:
                translated_page.translated_pages.add(t)
                translated_page.save()
        return translation

    def mk_section_translation(self, source, language, **kwargs):
        """Create a section next to ``source`` and register it as its translation."""
        instance = self.mk_section(source.get_parent(), **kwargs)
        return self.mk_translation(source, language, instance)

    def mk_article_translation(self, source, language, **kwargs):
        """Create an article next to ``source`` and register it as its translation."""
        instance = self.mk_article(source.get_parent(), **kwargs)
        return self.mk_translation(source, language, instance)

    def mk_tag_translation(self, source, language, **kwargs):
        """Create a tag next to ``source`` and register it as its translation."""
        instance = self.mk_tag(source.get_parent(), **kwargs)
        return self.mk_translation(source, language, instance)

    def mk_reaction_translation(self, source, article, language, **kwargs):
        """Create a reaction question next to ``source`` and register it as
        its translation."""
        instance = self.mk_reaction_question(
            source.get_parent(), article, **kwargs)
        return self.mk_translation(source, language, instance)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE.TXT file)
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from django.contrib.auth.decorators import login_required
from apps.common.shortcuts import render_response
from apps.account.models import Account
from apps.team.models import Team
from apps.team.models import JoinRequest
from apps.team.models import RemoveRequest
from apps.team import forms
from apps.team.utils import render_team_response as rtr
from apps.team.utils import assert_member
from apps.team import control
from apps.link.models import Link
@login_required
@require_http_methods(["GET", "POST"])
def link_delete(request, team_link, link_id):
    """Confirm and delete one of a team's links."""
    account = get_object_or_404(Account, user=request.user)
    team = control.get_or_404(team_link)
    link = get_object_or_404(Link, id=link_id)
    form_kwargs = {"team": team, "link": link, "account": account}
    if request.method == "POST":
        form = forms.LinkDelete(request.POST, **form_kwargs)
        if form.is_valid():
            control.link_delete(account, team, link)
            return HttpResponseRedirect("/%s" % team.link)
    else:
        form = forms.LinkDelete(**form_kwargs)
    args = {
        "form": form,
        "form_title": _("LINK_DELETE?"),
        "form_subtitle": link.get_label(),
        "cancel_url": "/%s" % team.link,
    }
    return rtr(team, "", request, "site/form.html", args)
@login_required
@require_http_methods(["GET", "POST"])
def link_create(request, team_link):
    """Render and process the form for adding a link to a team."""
    account = get_object_or_404(Account, user=request.user)
    team = control.get_or_404(team_link)
    if request.method == "POST":
        form = forms.LinkCreate(request.POST, team=team, account=account)
        if form.is_valid():
            site = form.cleaned_data["site"]
            profile = form.cleaned_data["profile"].strip()
            control.link_create(account, team, site, profile)
            return HttpResponseRedirect("/%s" % team.link)
    else:
        form = forms.LinkCreate(team=team, account=account)
    args = {
        "form": form,
        "cancel_url": "/%s" % team.link,
        "form_title": account,
        "form_subtitle": _("ADD_LINK_SUBTITLE"),
    }
    return rtr(team, "", request, "site/form.html", args)
@login_required
@require_http_methods(["GET", "POST"])
def replace_logo(request, team_link):
    """Let a team member upload a replacement team logo."""
    account = get_object_or_404(Account, user=request.user)
    team = control.get_or_404(team_link)
    assert_member(account, team)
    if request.method == "POST":
        form = forms.ReplaceLogo(request.POST, request.FILES)
        if form.is_valid():
            control.replace_logo(account, team, form.cleaned_data["logo"])
            return HttpResponseRedirect("/%s" % team.link)
    else:
        form = forms.ReplaceLogo()
    args = {
        "form": form,
        "form_title": _("REPLACE_LOGO"),
        "multipart_form": True,
        "cancel_url": "/%s" % team.link,
    }
    return render_response(request, "site/form.html", args)
@login_required
@require_http_methods(["GET", "POST"])
def create(request):
    """Render and process the team creation form."""
    account = get_object_or_404(Account, user=request.user)
    if request.method == "POST":
        form = forms.CreateTeam(request.POST, request.FILES)
        if form.is_valid():
            cleaned = form.cleaned_data
            team = control.create(
                account,
                cleaned["name"].strip(),
                cleaned["country"],
                cleaned["logo"],
                cleaned["application"],
            )
            return HttpResponseRedirect("/%s/created" % team.link)
    else:
        form = forms.CreateTeam()
    args = {
        "form": form,
        "form_title": _("CREATE_TEAM"),
        "multipart_form": True,
        "cancel_url": "/",
    }
    return render_response(request, "site/form.html", args)
@login_required
@require_http_methods(["GET"])
def created(request, team_link):
    """Show the confirmation page after a team has been created.

    The Account lookup result is unused, but the lookup is kept so the
    view 404s for authenticated users without an Account, matching the
    other team views.
    """
    account = get_object_or_404(Account, user=request.user)
    # Use control.get_or_404 for the team lookup, consistent with every
    # other view in this module.
    team = control.get_or_404(team_link)
    return render_response(request, "team/created.html", { "team" : team })
@login_required
@require_http_methods(["GET"])
def members(request, team_link):
    """List the members of a team."""
    team = control.get_or_404(team_link)
    args = {"members": team.members.all(), "page_title": _("MEMBERS")}
    return rtr(team, "members", request, "team/members.html", args)
#################
# JOIN REQUESTS #
#################
@login_required
@require_http_methods(["GET"])
def join_request_list(request, team_link):
    """List pending join requests for a team; members only."""
    team = control.get_or_404(team_link)
    account = get_object_or_404(Account, user=request.user)
    assert_member(account, team)
    args = {"join_requests": JoinRequest.objects.filter(team=team)}
    return rtr(team, "join_request/list", request,
               "team/join_request/list.html", args)
@login_required
@require_http_methods(["GET", "POST"])
def join_request_create(request, team_link):
    """Render and process the form for requesting to join a team."""
    team = control.get_or_404(team_link)
    account = get_object_or_404(Account, user=request.user)
    if request.method == "POST":
        form = forms.CreateJoinRequest(request.POST)
        if form.is_valid():
            application = form.cleaned_data["application"]
            # The created JoinRequest object is not needed here, so the
            # previously unused local binding was removed.
            control.create_join_request(account, team, application)
            return HttpResponseRedirect("/%s/join_request/created" % team_link)
    else:
        form = forms.CreateJoinRequest()
    args = {
        "form" : form, "form_title" : _("JOIN_REQUEST"),
        "cancel_url" : "/%s" % team.link
    }
    return rtr(team, "join_request/list", request, "site/form.html", args)
@login_required
@require_http_methods(["GET", "POST"])
def join_request_process(request, team_link, join_request_id):
    """Accept or reject a pending join request."""
    team = control.get_or_404(team_link)
    account = get_object_or_404(Account, user=request.user)
    jr = get_object_or_404(JoinRequest, id=join_request_id)
    if request.method == "POST":
        form = forms.ProcessJoinRequest(request.POST)
        if form.is_valid():
            response = form.cleaned_data["response"]
            status = form.cleaned_data["status"]
            control.process_join_request(account, jr, response, status)
            return HttpResponseRedirect("/%s/join_request/list" % team_link)
    else:
        form = forms.ProcessJoinRequest()
    args = {
        # Wrap the title in ugettext so it is translated, consistent with
        # every other form title in this module.
        "form" : form, "form_title" : _("PROCESS_JOIN_REQUEST"),
        "cancel_url" : "/%s/join_request/list" % team.link
    }
    return rtr(team, "join_request/list", request, "site/form.html", args)
@login_required
@require_http_methods(["GET"])
def join_request_created(request, team_link):
    """Confirmation page shown after a join request is submitted."""
    team = control.get_or_404(team_link)
    return rtr(team, "join_request/list", request,
               "team/join_request/created.html", {})
###################
# REMOVE REQUESTS #
###################
@login_required
@require_http_methods(["GET", "POST"])
def remove_request_create(request, team_link, concerned_id):
    """Render and process the form to request removal of a team member."""
    team = control.get_or_404(team_link)
    account = get_object_or_404(Account, user=request.user)
    concerned = get_object_or_404(Account, id=concerned_id)
    if request.method == "POST":
        form = forms.CreateRemoveRequest(request.POST)
        if form.is_valid():
            control.create_remove_request(
                account, concerned, team, form.cleaned_data["reason"])
            return HttpResponseRedirect("/%s/remove_request/created" % team_link)
    else:
        form = forms.CreateRemoveRequest()
    args = {
        "form": form,
        "form_title": _("REMOVE_REQUEST_CREATE"),
        "form_subtitle": concerned,
        "cancel_url": "/%s/members" % team.link,
    }
    return rtr(team, "remove_request/list", request, "site/form.html", args)
@login_required
@require_http_methods(["GET"])
def remove_request_created(request, team_link):
    """Confirmation page shown after a remove request is submitted."""
    team = control.get_or_404(team_link)
    return rtr(team, "remove_request/list", request,
               "team/remove_request/created.html", {})
@login_required
@require_http_methods(["GET"])
def remove_request_list(request, team_link):
    """List pending remove requests for a team; members only."""
    team = control.get_or_404(team_link)
    account = get_object_or_404(Account, user=request.user)
    assert_member(account, team)
    args = {"remove_requests": RemoveRequest.objects.filter(team=team)}
    return rtr(team, "remove_request/list", request,
               "team/remove_request/list.html", args)
@login_required
@require_http_methods(["GET", "POST"])
def remove_request_process(request, team_link, remove_request_id):
    """Accept or reject a pending remove request."""
    team = control.get_or_404(team_link)
    account = get_object_or_404(Account, user=request.user)
    remove_request = get_object_or_404(RemoveRequest, id=remove_request_id)
    if request.method == "POST":
        form = forms.ProcessRemoveRequest(request.POST)
        if form.is_valid():
            response = form.cleaned_data["response"]
            status = form.cleaned_data["status"]
            control.process_remove_request(account, remove_request,
                                           response, status)
            return HttpResponseRedirect("/%s/remove_request/list" % team_link)
    else:
        form = forms.ProcessRemoveRequest()
    args = {
        # Wrap the title in ugettext so it is translated, consistent with
        # every other form title in this module.
        "form" : form, "form_title" : _("PROCESS_REMOVE_REQUEST"),
        "cancel_url" : "/%s/remove_request/list" % team.link
    }
    return rtr(team, "remove_request/list", request, "site/form.html", args)
| |
import numpy as np
import statsmodels.api as sm
import pandas as pd
import datetime
# For Debugging
import os
import sys
import pickle
# Hard-coded developer-machine path used only by the commented-out debug
# logging/pickling code in updatePixels below.
debug_logs_directory = r'C:\PROJECTS\gbrunner-raster-functions\pickles\daymet'
class SeasonalARIMA():
    """
    ArcGIS raster function that fits a seasonal ARIMA model per pixel of a
    raster time series and outputs the predicted change in pixel values
    between the current year and a chosen prediction year/month.
    """

    def __init__(self):
        self.name = "Seasonal ARIMA"
        self.description = "This function performs a Seasonal AutoRegressive Integrated Moving Average (ARIMA) on " \
                           "a time-series of rasters. The function takes in the rasters as a mosaic dataset, trains " \
                           "a seasonal ARIMA model on the input mosaic dataset, and predicts the change in the " \
                           "observed variable (pixel values). This currently only supports single band time-series " \
                           "rasters that generally contain scientific data."
        # Per-raster 'time' key metadata, captured in updateRasterInfo.
        self.times = []
        # Scalar inputs, parsed in updateRasterInfo.
        self.data_start_year = None
        self.predict_month = None
        self.predict_year = None
        self.train_start_year = None
        self.train_end_year = None
        # Seasonal order (P, D, Q, s) parsed from the 'seasonal_order' input.
        self.p = None
        self.d = None
        self.q = None
        self.s = None

    def getParameterInfo(self):
        """Describe the raster-function inputs shown in the UI."""
        return [
            {
                'name': 'rasters',
                'dataType': 'rasters',
                'value': None,
                'required': True,
                'displayName': "Rasters",
                'description': "The collection of temporal reasters for which we want to predict seasonal change.",
            },
            {
                'name': 'data_start_year',
                'dataType': 'numeric',
                'value': 1980,
                'required': True,
                'displayName': 'Data Start Year',
                'description': 'The first year in the dataset'
            },
            {
                'name': 'train_start_year',
                'dataType': 'numeric',
                'value': 1980,
                'required': True,
                'displayName': 'Training Start Year',
                'description': 'The year on which to start training model'
            },
            {
                'name': 'train_end_year',
                'dataType': 'numeric',
                'value': 2010,
                'required': True,
                'displayName': 'Training End Year',
                'description': 'The year on which to end training model'
            },
            {
                'name': 'predict_year',
                'dataType': 'numeric',
                'value': 2050,
                'required': True,
                'displayName': 'Prediction Year',
                'description': 'The year for which we want to predict our seasonal variable'
            },
            {
                'name': 'predict_month',
                'dataType': 'string',
                'value': 'Jun',
                'required': True,
                'domain': ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
                'displayName': 'Month to Predict',
                'description': 'The month for which we want to predict the change in the observed variable.'
            },
            {
                'name': 'seasonal_order',
                'dataType': 'string',
                'value': '0,1,1,12',
                'required': True,
                'displayName': 'Seasonal Order (P, D, Q, s)',
                'description': 'The (P,D,Q,s) order of the seasonal component of the model for the AR parameters, ' \
                               'differences, MA parameters, and periodicity. d must be an integer indicating the ' \
                               'integration order of the process, while p and q may either be an integers indicating ' \
                               'the AR and MA orders (so that all lags up to those orders are included) or else ' \
                               'iterables giving specific AR and / or MA lags to include. s is an integer giving ' \
                               'the periodicity (number of periods in season), often it is 4 for quarterly data ' \
                               'or 12 for monthly data. Default is no seasonal effect.'
            }
        ]

    def getConfiguration(self, **scalars):
        """Raster-function configuration flags."""
        return {
            'compositeRasters': False,
            'inheritProperties': 1 | 2 | 4 | 8,  # inherit all from the raster
            'invalidateProperties': 2 | 4 | 8,   # reset stats, histogram, key properties
            'inputMask': False,
            'keyMetadata': ['time']  # ['StdTime']#
        }

    def updateRasterInfo(self, **kwargs):
        """Configure the single-band float output and parse scalar inputs."""
        #outStats = {'minimum': 0,'maximum': 25}
        outBandCount = 1
        kwargs['output_info']['pixelType'] = 'f4'   # output pixels are floating-point values
        kwargs['output_info']['histogram'] = ()     # no statistics/histogram for output raster specified
        kwargs['output_info']['statistics'] = ()
        kwargs['output_info']['bandCount'] = outBandCount  # number of output bands.
        # Map month abbreviations to 1-based month numbers.
        month_dict = {'Jan': 1,
                      'Feb': 2,
                      'Mar': 3,
                      'Apr': 4,
                      'May': 5,
                      'Jun': 6,
                      'Jul': 7,
                      'Aug': 8,
                      'Sep': 9,
                      'Oct': 10,
                      'Nov': 11,
                      'Dec': 12}
        self.data_start_year = int(kwargs['data_start_year'])
        self.predict_month = int(month_dict[kwargs['predict_month']])
        self.predict_year = int(kwargs['predict_year'])
        self.train_start_year = int(kwargs['train_start_year'])
        self.train_end_year = int(kwargs['train_end_year'])
        seasonal_order = kwargs['seasonal_order'].split(',')
        self.p = int(seasonal_order[0])
        self.d = int(seasonal_order[1])
        self.q = int(seasonal_order[2])
        self.s = int(seasonal_order[3])
        self.times = kwargs['rasters_keyMetadata']
        return kwargs

    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        """Fit a SARIMAX model per pixel and write the predicted change.

        pixelBlocks['rasters_pixels'] is a tuple of 3-d arrays, one pixel
        block per input raster; rasters are time-sorted via the 'time' key
        metadata before fitting. Pixels whose fit/prediction fails are set
        to the sentinel value -999.
        """
        pix_blocks = pixelBlocks['rasters_pixels']
        pix_array = np.asarray(pix_blocks)
        # Sort order of the rasters by their 'time' metadata.
        pix_time = [j['time'] for j in self.times]
        sorted_t = np.sort(pix_time)
        sorted_t_idx = np.argsort(pix_time)
        pix_array_dim = pix_array.shape
        num_squares_x = pix_array_dim[2]
        num_squares_y = pix_array_dim[3]
        new_stack = np.zeros((1, num_squares_x, num_squares_y))
        my_order = (1, 0, 0)
        my_seasonal_order = (self.p, self.d, self.q, self.s)
        now = datetime.datetime.now()
        current_year = int(now.year)
        data_start_year = self.data_start_year
        train_start_year = self.train_start_year
        predict_year = self.predict_year
        train_end_year = self.train_end_year
        predict_month = self.predict_month
        # Indices into the monthly time series (12 samples per year assumed).
        train_data_end_index = (train_end_year - data_start_year) * 12
        train_data_start_index = (train_start_year - data_start_year) * 12
        predict_data_end_index = (predict_year - train_end_year) * 12
        current_year_index = (current_year - train_end_year) * 12
        for num_x in range(0, int(num_squares_x)):
            for num_y in range(0, int(num_squares_y)):
                data = pix_array[:, 0, num_x, num_y]
                sorted_data = data[sorted_t_idx]
                try:
                    # define model
                    model = sm.tsa.statespace.SARIMAX(sorted_data[train_data_start_index:train_data_end_index],
                                                      order=my_order,
                                                      seasonal_order=my_seasonal_order, trend='c',
                                                      enforce_invertibility=False, enforce_stationarity=False)
                    model_fit = model.fit()
                    yhat = model_fit.predict(start=train_data_end_index,
                                             end=train_data_end_index + predict_data_end_index)
                    final_year_prediction = yhat[predict_data_end_index - (12 - predict_month)]
                    current_year_prediction = yhat[current_year_index - (12 - predict_month)]
                    delta = final_year_prediction - current_year_prediction
                    new_stack[0, num_x, num_y] = delta
                except Exception:
                    # Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. On any model failure
                    # fall back to the nodata sentinel.
                    delta = -999
                    new_stack[0, num_x, num_y] = delta
        pixelBlocks['output_pixels'] = new_stack.astype(props['pixelType'], copy=False)
        return pixelBlocks

    def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
        """Pass key metadata through unchanged."""
        return keyMetadata
| |
#!/usr/bin/env python
""" Test functions for the sparse.linalg.isolve module
"""
import numpy as np
from numpy.testing import TestCase, assert_equal, assert_array_equal, \
assert_, assert_allclose, assert_raises
from numpy import zeros, ones, arange, array, abs, max
from numpy.linalg import cond
from scipy.linalg import norm
from scipy.sparse import spdiags, csr_matrix
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres
#TODO check that method preserve shape and type
#TODO test both preconditioner methods
class Case(object):
    """A named linear system used as a solver test fixture.

    Attributes:
        name: human-readable identifier for the system.
        A: the matrix (or operator) defining the system.
        skip: solvers known not to handle this system.
    """

    def __init__(self, name, A, skip=None):
        self.name = name
        self.A = A
        self.skip = [] if skip is None else skip

    def __repr__(self):
        return "<%s>" % self.name
class IterativeParams(object):
    def __init__(self):
        """Build the shared lists of solvers and test matrices.

        Each Case records (via ``skip``) the solvers that cannot handle it:
        positive-definite-only, symmetric-only, or real-only methods are
        skipped on systems violating those assumptions.
        """
        # list of tuples (solver, symmetric, positive_definite )
        solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres]
        sym_solvers = [minres, cg]
        posdef_solvers = [cg]
        real_solvers = [minres]
        self.solvers = solvers
        # list of tuples (A, symmetric, positive_definite )
        self.cases = []
        # Symmetric and Positive Definite
        # Standard 1-D Poisson stencil [-1, 2, -1] on N points.
        N = 40
        data = ones((3,N))
        data[0,:] = 2
        data[1,:] = -1
        data[2,:] = -1
        Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr')
        self.Poisson1D = Case("poisson1d", Poisson1D)
        self.cases.append(self.Poisson1D)
        # Symmetric and Negative Definite
        self.cases.append(Case("neg-poisson1d", -Poisson1D,
                               skip=posdef_solvers))
        # Symmetric and Indefinite
        data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d')
        RandDiag = spdiags( data, [0], 10, 10, format='csr' )
        self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
        # Random real-valued
        # seed is reset before every random draw so cases are reproducible
        # independently of construction order.
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        self.cases.append(Case("rand", data, skip=posdef_solvers+sym_solvers))
        # Random symmetric real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        data = data + data.T
        self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
        # Random pos-def symmetric real
        np.random.seed(1234)
        data = np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-sym-pd", data))
        # Random complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
        self.cases.append(Case("rand-cmplx", data,
                               skip=posdef_solvers+sym_solvers+real_solvers))
        # Random hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)
        data = data + data.T.conj()
        self.cases.append(Case("rand-cmplx-herm", data,
                               skip=posdef_solvers+real_solvers))
        # Random pos-def hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
        # Non-symmetric and Positive Definite
        #
        # cgs, qmr, and bicg fail to converge on this one
        # -- algorithmic limitation apparently
        data = ones((2,10))
        data[0,:] = 2
        data[1,:] = -1
        A = spdiags( data, [0,-1], 10, 10, format='csr')
        self.cases.append(Case("nonsymposdef", A,
                               skip=sym_solvers+[cgs, qmr, bicg]))
def setup_module():
    """Build the shared solver/case fixtures once for the whole module."""
    global params
    params = IterativeParams()
def check_maxiter(solver, case):
    """Check that *solver* stops after exactly maxiter=3 iterations."""
    A = case.A
    tol = 1e-12
    b = arange(A.shape[0], dtype=float)
    x0 = 0*b
    residuals = []

    def callback(x):
        residuals.append(norm(b - case.A*x))

    x, info = solver(A, b, x0=x0, tol=tol, maxiter=3, callback=callback)
    # One callback per iteration, and info reports the iteration count.
    assert_equal(len(residuals), 3)
    assert_equal(info, 3)
def test_maxiter():
    """Yield a maxiter check for every solver applicable to Poisson1D."""
    case = params.Poisson1D
    applicable = (s for s in params.solvers if s not in case.skip)
    for solver in applicable:
        yield check_maxiter, solver, case
def assert_normclose(a, b, tol=1e-8):
    """Assert that ||a - b|| is below tol * ||b||."""
    err = norm(a - b)
    allowed = tol*norm(b)
    assert_(err < allowed,
            msg="residual (%g) not smaller than tolerance %g" % (err, allowed))
def check_convergence(solver, case):
    """Solve A x = b with *solver* and verify success and residual size."""
    tol = 1e-8
    A = case.A
    b = arange(A.shape[0], dtype=float)
    x0 = 0*b

    x, info = solver(A, b, x0=x0, tol=tol)

    # The starting guess must not have been modified in place.
    assert_array_equal(x0, 0*b)
    assert_equal(info, 0)
    assert_normclose(A.dot(x), b, tol=tol)
def test_convergence():
    """Yield a convergence check for every applicable (solver, case) pair."""
    for solver in params.solvers:
        applicable = (c for c in params.cases if solver not in c.skip)
        for case in applicable:
            yield check_convergence, solver, case
def check_precond_dummy(solver, case):
    """Verify that *solver* accepts (and is unaffected by) an identity preconditioner.

    The preconditioner is supplied twice: explicitly as a LinearOperator
    (``M``, or ``M1``/``M2`` for qmr), and implicitly via the
    ``psolve``/``rpsolve`` attributes of an aslinearoperator-wrapped matrix.
    """
    tol = 1e-8

    def identity(b, which=None):
        """trivial preconditioner"""
        return b

    A = case.A
    b = arange(A.shape[0], dtype=float)
    x0 = 0*b
    # (The original also built an unused diagonal preconditioner D here;
    # it was dead code and has been removed.)
    precond = LinearOperator(A.shape, identity, rmatvec=identity)
    # qmr takes separate left/right preconditioners; the others take one M.
    if solver is qmr:
        x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)
    else:
        x, info = solver(A, b, M=precond, x0=x0, tol=tol)
    assert_equal(info, 0)
    assert_normclose(A.dot(x), b, tol)

    # Preconditioning may also be supplied through psolve/rpsolve attributes.
    A = aslinearoperator(A)
    A.psolve = identity
    A.rpsolve = identity
    x, info = solver(A, b, x0=x0, tol=tol)
    assert_equal(info, 0)
    assert_normclose(A*x, b, tol=tol)
def test_precond_dummy():
    """Yield a dummy-preconditioner check for every solver on Poisson1D."""
    case = params.Poisson1D
    applicable = (s for s in params.solvers if s not in case.skip)
    for solver in applicable:
        yield check_precond_dummy, solver, case
def test_gmres_basic():
    """Check the first restarted-GMRES iterate on a Vandermonde system
    against a known reference value for its first component.

    (The exact solution via np.linalg.solve was previously computed here
    but never used; that dead code has been removed.)
    """
    A = np.vander(np.arange(10) + 1)[:, ::-1]
    b = np.zeros(10)
    b[0] = 1
    x_gm, err = gmres(A, b, restart=5, maxiter=1)
    assert_allclose(x_gm[0], 0.359, rtol=1e-2)
def test_reentrancy():
    """Yield a reentrancy check per solver, flagging which support nesting."""
    non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]
    reentrant = [lgmres, minres]
    for solver in reentrant + non_reentrant:
        is_reentrant = solver in reentrant
        yield _check_reentrancy, solver, is_reentrant
def _check_reentrancy(solver, is_reentrant):
def matvec(x):
A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])
y, info = solver(A, x)
assert_equal(info, 0)
return y
b = np.array([1, 1./2, 1./3])
op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,
dtype=b.dtype)
if not is_reentrant:
assert_raises(RuntimeError, solver, op, b)
else:
y, info = solver(op, b)
assert_equal(info, 0)
assert_allclose(y, [1, 1, 1])
#------------------------------------------------------------------------------
class TestQMR(TestCase):
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""
        from scipy.sparse.linalg.dsolve import splu
        from scipy.sparse.linalg.interface import LinearOperator

        n = 100
        dat = ones(n)
        # Tridiagonal test matrix and a rough lower/upper split of it.
        A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1] ,n,n)
        b = arange(n,dtype='d')
        L = spdiags([-dat/2, dat], [-1,0], n, n)
        U = spdiags([4*dat, -dat], [ 0,1], n, n)
        L_solver = splu(L)
        U_solver = splu(U)
        # Wrap the triangular solves (and their transposes) as operators.
        M1 = LinearOperator((n,n),
                            matvec=lambda v: L_solver.solve(v),
                            rmatvec=lambda v: L_solver.solve(v,'T'))
        M2 = LinearOperator((n,n),
                            matvec=lambda v: U_solver.solve(v),
                            rmatvec=lambda v: U_solver.solve(v,'T'))
        x, info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)
        assert_equal(info, 0)
        assert_normclose(A*x, b, tol=1e-8)
class TestGMRES(TestCase):
    def test_callback(self):
        """Check that the GMRES callback receives residuals it can record."""
        def store_residual(r, rvec):
            rvec[rvec.nonzero()[0].max()+1] = r

        # Tridiagonal -2/1 system with an all-ones right-hand side.
        A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]]))
        b = ones((A.shape[0],))
        maxiter = 1
        rvec = zeros(maxiter+1)
        rvec[0] = 1.0
        x, flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16,
                        maxiter=maxiter,
                        callback=lambda r: store_residual(r, rvec))
        expected = array([1.0, 0.81649658092772603])
        diff = max(abs(rvec - expected))
        assert_(diff < 1e-5)
if __name__ == "__main__":
    # NOTE(review): this module uses nose-style `yield` test generators and
    # the legacy nose runner; nose is unmaintained -- confirm availability
    # before running standalone.
    import nose
    nose.run(argv=['', __file__])
| |
# Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from .anyjson import simplejson
from oauth2client.client import Storage as BaseStorage
from oauth2client.client import Credentials
from oauth2client import util
from .locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
# Shared cache so every caller maps a given file to a single _MultiStore.
_multistores = {}
# Guards _multistores; per-file locking lives inside each _MultiStore.
_multistores_lock = threading.Lock()
class Error(Exception):
  """Base error for this module."""
class NewerCredentialStoreError(Error):
  """The credential store is a newer version than is supported."""
  pass
@util.positional(4)
def get_credential_storage(filename, client_id, user_agent, scope,
                           warn_on_readonly=True):
  """Get a Storage instance for a credential.

  Args:
    filename: The JSON file storing a set of credentials
    client_id: The client_id for the credential
    user_agent: The user agent for the credential
    scope: string or list of strings, Scope(s) being requested
    warn_on_readonly: if True, log a warning if the store is readonly

  Returns:
    An object derived from client.Storage for getting/setting the
    credential.
  """
  filename = os.path.realpath(os.path.expanduser(filename))
  with _multistores_lock:
    # Don't use dict.setdefault here: its default argument is evaluated
    # eagerly, which would construct (and open/create the backing file of)
    # a throwaway _MultiStore even when one is already cached.
    multistore = _multistores.get(filename)
    if multistore is None:
      multistore = _MultiStore(filename, warn_on_readonly=warn_on_readonly)
      _multistores[filename] = multistore
  if isinstance(scope, list):
    scope = ' '.join(scope)
  return multistore._get_storage(client_id, user_agent, scope)
class _MultiStore(object):
  """A file backed store for multiple credentials."""
  @util.positional(2)
  def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.
    This will create the file if necessary.
    """
    # 'r+b' presumably opens read/write and 'rb' is the read-only fallback
    # mode -- TODO confirm against LockedFile's signature.
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly
    self._create_file_if_needed()
    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # (client_id, user_agent, scope) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None
  class _Storage(BaseStorage):
    """A Storage object that knows how to read/write a single credential."""
    def __init__(self, multistore, client_id, user_agent, scope):
      self._multistore = multistore
      self._client_id = client_id
      self._user_agent = user_agent
      self._scope = scope
    def acquire_lock(self):
      """Acquires any lock necessary to access this Storage.
      This lock is not reentrant.
      """
      self._multistore._lock()
    def release_lock(self):
      """Release the Storage lock.
      Trying to release a lock that isn't held will result in a
      RuntimeError.
      """
      self._multistore._unlock()
    def locked_get(self):
      """Retrieve credential.
      The Storage lock must be held when this is called.
      Returns:
        oauth2client.client.Credentials
      """
      credential = self._multistore._get_credential(
          self._client_id, self._user_agent, self._scope)
      if credential:
        # Let the credential write itself back through this view on refresh.
        credential.set_store(self)
      return credential
    def locked_put(self, credentials):
      """Write a credential.
      The Storage lock must be held when this is called.
      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._update_credential(credentials, self._scope)
    def locked_delete(self):
      """Delete a credential.
      The Storage lock must be held when this is called.
      Args:
        credentials: Credentials, the credentials to store.
      """
      self._multistore._delete_credential(self._client_id, self._user_agent,
          self._scope)
  def _create_file_if_needed(self):
    """Create an empty file if necessary.
    This method will not initialize the file. Instead it implements a
    simple version of "touch" to ensure the file has been created.
    """
    if not os.path.exists(self._file.filename()):
      # Restrict the new file to owner read/write (umask 0o177 masks
      # group/other and execute bits) since it holds secrets.
      old_umask = os.umask(0o177)
      try:
        open(self._file.filename(), 'a+b').close()
      finally:
        os.umask(old_umask)
  def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
      # Could not get the file lock: fall back to read-only mode so we at
      # least don't clobber another writer's data.
      self._read_only = True
      if self._warn_on_readonly:
        logger.warn('The credentials file (%s) is not writable. Opening in '
                    'read-only mode. Any refreshed credentials will only be '
                    'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
      logger.debug('Initializing empty multistore file')
      # The multistore is empty so write out an empty file.
      self._data = {}
      self._write()
    elif not self._read_only or self._data is None:
      # Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume is isn't
      # changing out from under us and that we only have to read it
      # once. This prevents us from whacking any new access keys that
      # we have cached in memory but were unable to write out.
      self._refresh_data_cache()
  def _unlock(self):
    """Release the lock on the multistore."""
    self._file.unlock_and_close()
    self._thread_lock.release()
  def _locked_json_read(self):
    """Get the raw content of the multistore file.
    The multistore must be locked when this is called.
    Returns:
      The contents of the multistore decoded as JSON.
    """
    assert self._thread_lock.locked()
    self._file.file_handle().seek(0)
    return simplejson.load(self._file.file_handle())
  def _locked_json_write(self, data):
    """Write a JSON serializable data structure to the multistore.
    The multistore must be locked when this is called.
    Args:
      data: The data to be serialized and written.
    """
    assert self._thread_lock.locked()
    if self._read_only:
      return
    self._file.file_handle().seek(0)
    simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
    # Truncate in case the new serialization is shorter than the old file.
    self._file.file_handle().truncate()
  def _refresh_data_cache(self):
    """Refresh the contents of the multistore.
    The multistore must be locked when this is called.
    Raises:
      NewerCredentialStoreError: Raised when a newer client has written the
        store.
    """
    self._data = {}
    try:
      raw_data = self._locked_json_read()
    except Exception:
      logger.warn('Credential data store could not be loaded. '
                  'Will ignore and overwrite.')
      return
    version = 0
    try:
      version = raw_data['file_version']
    except Exception:
      logger.warn('Missing version for credential data store. It may be '
                  'corrupt or an old version. Overwriting.')
    if version > 1:
      raise NewerCredentialStoreError(
          'Credential file has file_version of %d. '
          'Only file_version of 1 is supported.' % version)
    credentials = []
    try:
      credentials = raw_data['data']
    except (TypeError, KeyError):
      pass
    for cred_entry in credentials:
      try:
        (key, credential) = self._decode_credential_from_json(cred_entry)
        self._data[key] = credential
      except:
        # If something goes wrong loading a credential, just ignore it
        logger.info('Error decoding credential, skipping', exc_info=True)
  def _decode_credential_from_json(self, cred_entry):
    """Load a credential from our JSON serialization.
    Args:
      cred_entry: A dict entry from the data member of our format
    Returns:
      (key, cred) where the key is the key tuple and the cred is the
        OAuth2Credential object.
    """
    raw_key = cred_entry['key']
    client_id = raw_key['clientId']
    user_agent = raw_key['userAgent']
    scope = raw_key['scope']
    key = (client_id, user_agent, scope)
    credential = None
    # Round-trip through a JSON string because new_from_json expects text,
    # not an already-parsed dict.
    credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
    return (key, credential)
  def _write(self):
    """Write the cached data back out.
    The multistore must be locked.
    """
    raw_data = {'file_version': 1}
    raw_creds = []
    raw_data['data'] = raw_creds
    for (cred_key, cred) in list(self._data.items()):
      raw_key = {
          'clientId': cred_key[0],
          'userAgent': cred_key[1],
          'scope': cred_key[2]
      }
      raw_cred = simplejson.loads(cred.to_json())
      raw_creds.append({'key': raw_key, 'credential': raw_cred})
    self._locked_json_write(raw_data)
  def _get_credential(self, client_id, user_agent, scope):
    """Get a credential from the multistore.
    The multistore must be locked.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested
    Returns:
      The credential specified or None if not present
    """
    key = (client_id, user_agent, scope)
    return self._data.get(key, None)
  def _update_credential(self, cred, scope):
    """Update a credential and write the multistore.
    This must be called when the multistore is locked.
    Args:
      cred: The OAuth2Credential to update/set
      scope: The scope(s) that this credential covers
    """
    key = (cred.client_id, cred.user_agent, scope)
    self._data[key] = cred
    self._write()
  def _delete_credential(self, client_id, user_agent, scope):
    """Delete a credential and write the multistore.
    This must be called when the multistore is locked.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: The scope(s) that this credential covers
    """
    key = (client_id, user_agent, scope)
    try:
      del self._data[key]
    except KeyError:
      pass
    self._write()
  def _get_storage(self, client_id, user_agent, scope):
    """Get a Storage object to get/set a credential.
    This Storage is a 'view' into the multistore.
    Args:
      client_id: The client_id for the credential
      user_agent: The user agent for the credential
      scope: A string for the scope(s) being requested
    Returns:
      A Storage object that can be used to get/set this cred
    """
    return self._Storage(self, client_id, user_agent, scope)
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo is a Python module to show the cpuinfo of a processor
# It uses a MIT style license
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# FIXME: Figure out how /proc/cpuinfo simulates cpuinfo on non x86 cpus
# FIXME: See if running this in a multiprocessing process will stop it from segfaulting when it breaks
# FIXME: Check how this compares to numpy. How does numpy get MHz and sse3 detection when the registry
# does not have this info, and there is no /proc/cpuinfo ? Does it use win32 __cpuinfo ?
# Assembly code can be assembled and disassembled like this:
'''
; cpuid.asm
; clear && nasm -o out -f bin cpuid.asm && ndisasm out
BITS 32
section .data
section .text
global main
main:
mov ax, 1
cpuid
mov ax, bx
ret
'''
import os
import re
import time
import platform
import multiprocessing
import ctypes
import subprocess
# Cached interpreter/platform facts used throughout the module.
bits = platform.architecture()[0]
is_windows = (platform.system().lower() == 'windows')
def chomp(s):
	"""Strip one trailing line terminator ('\\r\\n', '\\n', or '\\r') from *s*.

	Accepts both str and bytes: captured process output stays bytes when it
	cannot be decoded as UTF-8 (see ProcessRunner.wait), and the original
	str-only version raised TypeError in that case.
	"""
	# Order matters: the two-character Windows terminator must be tried first.
	if isinstance(s, bytes):
		seps = [b'\r\n', b'\n', b'\r']
	else:
		seps = ['\r\n', '\n', '\r']
	for sep in seps:
		if s.endswith(sep):
			return s[:-len(sep)]
	return s
class ProcessRunner(object):
	# Runs *command* through the shell and captures stdout/stderr.
	#
	# NOTE(review): wait() only decodes/chomps output that was already
	# collected by get_is_done(); calling run() followed directly by wait()
	# leaves stdout/stderr empty. The caller in this module
	# (run_and_get_stdout) polls is_done once before wait() -- confirm
	# before reusing this class elsewhere.
	def __init__(self, command):
		self._command = command
		self._process = None
		self._return_code = None
		self._stdout = None
		self._stderr = None
	def run(self):
		self._stdout = b''
		self._stderr = b''
		# Start the process and save the output
		self._process = subprocess.Popen(
			self._command,
			stderr = subprocess.PIPE,
			stdout = subprocess.PIPE,
			shell = True
		)
	def wait(self):
		# Wait for the process to actually exit
		self._process.wait()
		# Get the return code
		rc = self._process.returncode
		# NOTE(review): Popen.returncode is already a decoded exit status;
		# re-applying WIFEXITED/WEXITSTATUS treats it as a raw wait status.
		# For exit code 0 this is a no-op; for other codes WIFEXITED(rc) is
		# typically False and rc is left unchanged -- confirm intent.
		if hasattr(os, 'WIFEXITED') and os.WIFEXITED(rc):
			rc = os.WEXITSTATUS(rc)
		self._return_code = rc
		# Get the standard out and error in the correct format
		# (output stays bytes if it is not valid UTF-8)
		try:
			self._stderr = str(self._stderr, 'UTF-8')
		except Exception:
			pass
		try:
			self._stdout = str(self._stdout, 'UTF-8')
		except Exception:
			pass
		# Chomp the terminating newline off the ends of output
		self._stdout = chomp(self._stdout)
		self._stderr = chomp(self._stderr)
	def get_is_done(self):
		# You have to poll a process to update the retval. Even if it has stopped already
		if self._process.returncode == None:
			self._process.poll()
		# Read the output from the buffer
		# NOTE(review): communicate() waits for process exit and closes the
		# pipes; calling this property twice would fail -- confirm callers
		# only poll once.
		sout, serr = self._process.communicate()
		self._stdout += sout
		self._stderr += serr
		# Return true if there is a return code
		return self._process.returncode != None
	is_done = property(get_is_done)
	def get_stderr(self):
		self._require_wait()
		return self._stderr
	stderr = property(get_stderr)
	def get_stdout(self):
		self._require_wait()
		return self._stdout
	stdout = property(get_stdout)
	def get_stdall(self):
		self._require_wait()
		return self._stdout + '\n' + self._stderr
	stdall = property(get_stdall)
	def get_is_success(self):
		self._require_wait()
		return self._return_code == 0
	is_success = property(get_is_success)
	def _require_wait(self):
		if self._return_code == None:
			raise Exception("Wait needs to be called before any info on the process can be gotten.")
def run_and_get_stdout(command):
	"""Run *command* through the shell and return its stdout text, or None
	when the command fails."""
	runner = ProcessRunner(command)
	runner.run()
	# Poll once so the output buffers are collected before waiting.
	runner.is_done
	runner.wait()
	if not runner.is_success:
		return None
	return runner.stdout
def program_paths(program_name):
	"""Return every location on PATH where *program_name* exists as an
	executable regular file.

	Honors the PATHEXT environment variable (Windows executable
	extensions) when it is set.
	"""
	paths = []
	# Materialize the extension list: the previous filter(...) generator was
	# exhausted after the first PATH entry on Python 3.
	exts = [e for e in os.environ.get('PATHEXT', '').split(os.pathsep) if e]
	for p in os.environ['PATH'].split(os.pathsep):
		p = os.path.join(p, program_name)
		# Require a regular file: a directory can also pass os.access(X_OK).
		if os.path.isfile(p) and os.access(p, os.X_OK):
			paths.append(p)
		for e in exts:
			pext = p + e
			if os.path.isfile(pext) and os.access(pext, os.X_OK):
				paths.append(pext)
	return paths
def to_friendly_hz(ticks):
	"""Format a frequency in Hz as a human readable string,
	e.g. 2500000000 -> '2.5000 GHz'.

	Values below 1 Hz (including 0) are formatted in plain Hz instead of
	silently returning None as the previous fall-through did.
	"""
	ticks = float(ticks)
	# Largest unit first, so the first match gives the natural scale.
	hz_map = [
		('GHz', 1000000000.0),
		('MHz', 1000000.0),
		('KHz', 1000.0),
		('Hz', 1.0),
	]
	for symbol, place in hz_map:
		if ticks >= place:
			return '{0:.4f} {1}'.format(ticks / place, symbol)
	# Sub-1 Hz: fall back to plain Hz rather than returning None.
	return '{0:.4f} Hz'.format(ticks)
def parse_arch(raw_arch_string):
	"""Map a raw machine/architecture string to a (family, bits) pair.

	Returns (None, None) when the string is not recognized.
	"""
	# Ordered (pattern, family, bits) table; the first match wins, so the
	# specific 'armv8-a' entry must stay ahead of the generic armv8 one.
	known = (
		# X86
		('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', 'X86_32', '32'),
		('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', 'X86_64', '64'),
		# ARM
		('^armv8-a$', 'ARM_8', '64'),
		('^armv7$|^armv7[a-z]$|^armv7-[a-z]$', 'ARM_7', '32'),
		('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', 'ARM_8', '32'),
		# PPC
		('^ppc32$|^prep$|^pmac$|^powermac$', 'PPC_32', '32'),
		('^powerpc$|^ppc64$', 'PPC_64', '64'),
		# SPARC
		('^sparc32$|^sparc$', 'SPARC_32', '32'),
		('^sparc64$|^sun4u$|^sun4v$', 'SPARC_64', '64'),
	)
	lowered = raw_arch_string.lower()
	for pattern, family, bit_width in known:
		if re.match(pattern, lowered):
			return (family, bit_width)
	return (None, None)
def is_bit_set(reg, bit):
	"""Return True when bit number *bit* of the integer *reg* is set."""
	return bool(reg & (1 << bit))
class CPUID(object):
def __init__(self):
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = False
# Just return if the SE Linux Status Tool is not installed
if not program_paths('sestatus'):
return
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = os.popen("sestatus -b | grep -i \"allow_execheap\"").read().strip().lower().endswith('on')
can_selinux_exec_memory = os.popen("sestatus -b | grep -i \"allow_execmem\"").read().strip().lower().endswith('on')
self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory)
	def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
		"""Turn raw machine code into a ctypes-callable function.

		Args:
			restype: ctypes return type of the generated function.
			argtypes: tuple of ctypes argument types.
			byte_code: iterable of bytes fragments joined into the code body.
				NOTE(review): mutable default argument -- harmless here only
				because it is never mutated, merely re-joined.
		Returns:
			(fun, address): the callable plus the raw buffer address, which
			the caller must free (_run_asm does).
		"""
		global is_windows
		byte_code = bytes.join(b'', byte_code)
		address = None
		if is_windows:
			# Allocate a memory segment the size of the byte code, and make it executable
			size = len(byte_code)
			MEM_COMMIT = ctypes.c_ulong(0x1000)
			PAGE_EXECUTE_READWRITE = ctypes.c_ulong(0x40)
			address = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0), ctypes.c_size_t(size), MEM_COMMIT, PAGE_EXECUTE_READWRITE)
			if not address:
				raise Exception("Failed to VirtualAlloc")
			# Copy the byte code into the memory segment
			memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
			if memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
		else:
			# Allocate a memory segment the size of the byte code
			size = len(byte_code)
			address = ctypes.pythonapi.valloc(size)
			if not address:
				raise Exception("Failed to valloc")
			# Mark the memory segment as safe for code execution
			if not self.is_selinux_enforcing:
				READ_WRITE_EXECUTE = 0x1 | 0x2 | 0x4
				if ctypes.pythonapi.mprotect(address, size, READ_WRITE_EXECUTE) < 0:
					raise Exception("Failed to mprotect")
			# Copy the byte code into the memory segment
			if ctypes.pythonapi.memmove(address, byte_code, size) < 0:
				raise Exception("Failed to memmove")
		# Cast the memory segment into a function
		functype = ctypes.CFUNCTYPE(restype, *argtypes)
		fun = functype(address)
		return fun, address
	def _run_asm(self, *byte_code):
		"""Join *byte_code* fragments into a no-argument function, execute
		it, free the buffer, and return the integer result.

		The return type is sized to the interpreter bitness (module-level
		``bits``).
		"""
		global is_windows
		global bits
		# Convert the byte code into a function that returns an int
		restype = None
		if bits == '64bit':
			restype = ctypes.c_uint64
		else:
			restype = ctypes.c_uint32
		argtypes = ()
		func, address = self._asm_func(restype, argtypes, byte_code)
		# Call the byte code like a function
		retval = func()
		# Free the function memory segment
		# FIXME: This should set the memory as non executable before freeing
		if is_windows:
			size = ctypes.c_size_t(len(byte_code))
			MEM_RELEASE = ctypes.c_ulong(0x8000)
			ctypes.windll.kernel32.VirtualFree(address, size, MEM_RELEASE)
		else:
			ctypes.pythonapi.free(address)
		return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
global bits
if bits == '64bit':
return (
b"\x66\xB8\x00\x00" # mov eax,0x0"
)
else:
return (
b"\x31\xC0" # xor ax,ax
)
def _one_eax(self):
global bits
if bits == '64bit':
return (
b"\x66\xB8\x01\x00" # mov eax,0x1"
)
else:
return (
b"\x31\xC0" # xor ax,ax
b"\x40" # inc ax
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each 4bits is a ascii letter in the name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = str.join('', vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : is_bit_set(edx, 0),
'vme' : is_bit_set(edx, 1),
'de' : is_bit_set(edx, 2),
'pse' : is_bit_set(edx, 3),
'tsc' : is_bit_set(edx, 4),
'msr' : is_bit_set(edx, 5),
'pae' : is_bit_set(edx, 6),
'mce' : is_bit_set(edx, 7),
'cx8' : is_bit_set(edx, 8),
'apic' : is_bit_set(edx, 9),
#'reserved1' : is_bit_set(edx, 10),
'sep' : is_bit_set(edx, 11),
'mtrr' : is_bit_set(edx, 12),
'pge' : is_bit_set(edx, 13),
'mca' : is_bit_set(edx, 14),
'cmov' : is_bit_set(edx, 15),
'pat' : is_bit_set(edx, 16),
'pse36' : is_bit_set(edx, 17),
'pn' : is_bit_set(edx, 18),
'clflush' : is_bit_set(edx, 19),
#'reserved2' : is_bit_set(edx, 20),
'dts' : is_bit_set(edx, 21),
'acpi' : is_bit_set(edx, 22),
'mmx' : is_bit_set(edx, 23),
'fxsr' : is_bit_set(edx, 24),
'sse' : is_bit_set(edx, 25),
'sse2' : is_bit_set(edx, 26),
'ss' : is_bit_set(edx, 27),
'ht' : is_bit_set(edx, 28),
'tm' : is_bit_set(edx, 29),
'ia64' : is_bit_set(edx, 30),
'pbe' : is_bit_set(edx, 31),
'pni' : is_bit_set(ecx, 0),
'pclmulqdq' : is_bit_set(ecx, 1),
'dtes64' : is_bit_set(ecx, 2),
'monitor' : is_bit_set(ecx, 3),
'ds_cpl' : is_bit_set(ecx, 4),
'vmx' : is_bit_set(ecx, 5),
'smx' : is_bit_set(ecx, 6),
'est' : is_bit_set(ecx, 7),
'tm2' : is_bit_set(ecx, 8),
'ssse3' : is_bit_set(ecx, 9),
'cid' : is_bit_set(ecx, 10),
#'reserved3' : is_bit_set(ecx, 11),
'fma' : is_bit_set(ecx, 12),
'cx16' : is_bit_set(ecx, 13),
'xtpr' : is_bit_set(ecx, 14),
'pdcm' : is_bit_set(ecx, 15),
#'reserved4' : is_bit_set(ecx, 16),
'pcid' : is_bit_set(ecx, 17),
'dca' : is_bit_set(ecx, 18),
'sse4_1' : is_bit_set(ecx, 19),
'sse4_2' : is_bit_set(ecx, 20),
'x2apic' : is_bit_set(ecx, 21),
'movbe' : is_bit_set(ecx, 22),
'popcnt' : is_bit_set(ecx, 23),
'tscdeadline' : is_bit_set(ecx, 24),
'aes' : is_bit_set(ecx, 25),
'xsave' : is_bit_set(ecx, 26),
'osxsave' : is_bit_set(ecx, 27),
'avx' : is_bit_set(ecx, 28),
'f16c' : is_bit_set(ecx, 29),
'rdrnd' : is_bit_set(ecx, 30),
'hypervisor' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# Get the Extended CPU flags
extended_flags = {}
if max_extension_support >= 0x80000001:
# EDX
edx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : is_bit_set(edx, 0),
'vme' : is_bit_set(edx, 1),
'de' : is_bit_set(edx, 2),
'pse' : is_bit_set(edx, 3),
'tsc' : is_bit_set(edx, 4),
'msr' : is_bit_set(edx, 5),
'pae' : is_bit_set(edx, 6),
'mce' : is_bit_set(edx, 7),
'cx8' : is_bit_set(edx, 8),
'apic' : is_bit_set(edx, 9),
#'reserved' : is_bit_set(edx, 10),
'syscall' : is_bit_set(edx, 11),
'mtrr' : is_bit_set(edx, 12),
'pge' : is_bit_set(edx, 13),
'mca' : is_bit_set(edx, 14),
'cmov' : is_bit_set(edx, 15),
'pat' : is_bit_set(edx, 16),
'pse36' : is_bit_set(edx, 17),
#'reserved' : is_bit_set(edx, 18),
'mp' : is_bit_set(edx, 19),
'nx' : is_bit_set(edx, 20),
#'reserved' : is_bit_set(edx, 21),
'mmxext' : is_bit_set(edx, 22),
'mmx' : is_bit_set(edx, 23),
'fxsr' : is_bit_set(edx, 24),
'fxsr_opt' : is_bit_set(edx, 25),
'pdpe1gp' : is_bit_set(edx, 26),
'rdtscp' : is_bit_set(edx, 27),
#'reserved' : is_bit_set(edx, 28),
'lm' : is_bit_set(edx, 29),
'3dnowext' : is_bit_set(edx, 30),
'3dnow' : is_bit_set(edx, 31),
'lahf_lm' : is_bit_set(ecx, 0),
'cmp_legacy' : is_bit_set(ecx, 1),
'svm' : is_bit_set(ecx, 2),
'extapic' : is_bit_set(ecx, 3),
'cr8_legacy' : is_bit_set(ecx, 4),
'abm' : is_bit_set(ecx, 5),
'sse4a' : is_bit_set(ecx, 6),
'misalignsse' : is_bit_set(ecx, 7),
'3dnowprefetch' : is_bit_set(ecx, 8),
'osvw' : is_bit_set(ecx, 9),
'ibs' : is_bit_set(ecx, 10),
'xop' : is_bit_set(ecx, 11),
'skinit' : is_bit_set(ecx, 12),
'wdt' : is_bit_set(ecx, 13),
#'reserved' : is_bit_set(ecx, 14),
'lwp' : is_bit_set(ecx, 15),
'fma4' : is_bit_set(ecx, 16),
'tce' : is_bit_set(ecx, 17),
#'reserved' : is_bit_set(ecx, 18),
'nodeid_msr' : is_bit_set(ecx, 19),
#'reserved' : is_bit_set(ecx, 20),
'tbm' : is_bit_set(ecx, 21),
'topoext' : is_bit_set(ecx, 22),
'perfctr_core' : is_bit_set(ecx, 23),
'perfctr_nb' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
#'reserved' : is_bit_set(ecx, 26),
#'reserved' : is_bit_set(ecx, 27),
#'reserved' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
#'reserved' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
def get_processor_brand(self, max_extension_support):
    """Read the 48 character processor brand string via CPUID.

    Uses extended leaves 0x80000002..0x80000004; each leaf fills
    EAX, EBX, ECX and EDX with 16 ASCII bytes. Returns "" when the
    leaves are unsupported.
    """
    brand = ""
    if max_extension_support >= 0x80000004:
        leaves = [
            b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
            b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
            b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
        ]
        # One mov per register, in the order the bytes appear in the brand
        movs = [
            b"\x89\xC0", # mov ax,ax (EAX)
            b"\x89\xD8", # mov ax,bx (EBX)
            b"\x89\xC8", # mov ax,cx (ECX)
            b"\x89\xD0", # mov ax,dx (EDX)
        ]
        for leaf in leaves:
            for mov in movs:
                reg = self._run_asm(
                    leaf,                        # mov ax,0x8000000?
                    b"\x0f\xa2" + mov + b"\xC3"  # cpuid; copy register; ret
                )
                # Unpack the register's 4 little-endian bytes into chars
                for shift in (0, 8, 16, 24):
                    brand += chr((reg >> shift) & 0xFF)
    # Drop the final byte (presumably a NUL terminator — matches original)
    return brand[:-1]
def get_cache(self, max_extension_support):
    """Return L2 cache info from CPUID leaf 0x80000006.

    Returns an empty dict when the leaf is unsupported.
    NOTE(review): the bit layout below looks swapped relative to the
    documented leaf (size is normally ECX[31:16], line size ECX[7:0]);
    preserved as-is.
    """
    # Leaf unsupported on this CPU: nothing to report
    if max_extension_support < 0x80000006:
        return {}
    # ECX of leaf 0x80000006 carries the cache description
    ecx = self._run_asm(
        b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
        b"\x0f\xa2"             # cpuid
        b"\x89\xC8"             # mov ax,cx
        b"\xC3"                 # ret
    )
    return {
        'size_kb' : ecx & 0xFF,
        'line_size_b' : (ecx >> 12) & 0xF,
        'associativity' : (ecx >> 16) & 0xFFFF
    }
def get_ticks(self):
    """Read the CPU's 64 bit time stamp counter via RDTSC.

    Returns the tick count as an int, or None when the module level
    `bits` value is neither '32bit' nor '64bit'.
    """
    global bits
    retval = None
    if bits == '32bit':
        # Works on x86_32.
        # RDTSC leaves the counter split across EDX:EAX, so the stub stores
        # the two halves through the two uint pointers the caller passes in.
        restype = None
        argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
        get_ticks_x86_32, address = self._asm_func(restype, argtypes,
            [
            b"\x55",         # push bp
            b"\x89\xE5",     # mov bp,sp
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid (serializes before rdtsc)
            b"\x0F\x31",     # rdtsc
            b"\x8B\x5D\x08", # mov bx,[di+0x8]
            b"\x8B\x4D\x0C", # mov cx,[di+0xc]
            b"\x89\x13",     # mov [bp+di],dx
            b"\x89\x01",     # mov [bx+di],ax
            b"\x5D",         # pop bp
            b"\xC3"          # ret
            ]
        )
        high = ctypes.c_uint32(0)
        low = ctypes.c_uint32(0)
        get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
        # Join the two 32 bit halves into one 64 bit value
        retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
    elif bits == '64bit':
        # Works on x86_64: the stub shifts EDX up and ORs it into RAX,
        # returning the full counter as a uint64.
        restype = ctypes.c_uint64
        argtypes = ()
        get_ticks_x86_64, address = self._asm_func(restype, argtypes,
            [
            b"\x48",         # REX.W prefix (disassembles as "dec ax" in 16 bit mnemonics)
            b"\x31\xC0",     # xor ax,ax
            b"\x0F\xA2",     # cpuid
            b"\x0F\x31",     # rdtsc
            b"\x48",         # REX.W prefix
            b"\xC1\xE2\x20", # shl dx,byte 0x20
            b"\x48",         # REX.W prefix
            b"\x09\xD0",     # or ax,dx
            b"\xC3",         # ret
            ]
        )
        retval = get_ticks_x86_64()
    return retval
def get_hz(self):
    """Estimate the CPU frequency by counting TSC ticks over one second."""
    tick_start = self.get_ticks()
    time.sleep(1)
    tick_end = self.get_ticks()
    return to_friendly_hz(tick_end - tick_start)
def get_cpu_info_from_cpuid():
    '''
    Returns the CPU info gathered by querying the X86 cpuid register.
    Returns None for non X86 cpus.
    Returns None if SELinux is in enforcing mode.
    '''
    # Get the CPU arch and bits
    raw_arch_string = platform.machine()
    arch, bits = parse_arch(raw_arch_string)
    # Return None if this is not an X86 CPU
    if arch not in ['X86_32', 'X86_64']:
        return None
    # Return None if SE Linux is in enforcing mode (it blocks the
    # executable-memory tricks CPUID() relies on)
    cpuid = CPUID()
    if cpuid.is_selinux_enforcing:
        return None
    # Get the cpu info from the CPUID register
    max_extension_support = cpuid.get_max_extension_support()
    cache_info = cpuid.get_cache(max_extension_support)
    info = cpuid.get_info()
    return {
        'vendor_id' : cpuid.get_vendor_id(),
        'brand' : cpuid.get_processor_brand(max_extension_support),
        'hz' : cpuid.get_hz(),
        'arch' : arch,
        'bits' : bits,
        'count' : multiprocessing.cpu_count(),
        'raw_arch_string' : raw_arch_string,
        # BUG FIX: get_cache() returns {} when leaf 0x80000006 is unsupported,
        # so direct indexing raised KeyError; report zeros instead.
        # NOTE: the stray ':' in 'l2_cache_size:' is kept — other sources and
        # the __main__ printer use the same key.
        'l2_cache_size:' : cache_info.get('size_kb', 0),
        'l2_cache_line_size' : cache_info.get('line_size_b', 0),
        'l2_cache_associativity' : hex(cache_info.get('associativity', 0)),
        'stepping' : info['stepping'],
        'model' : info['model'],
        'family' : info['family'],
        'processor_type' : info['processor_type'],
        'extended_model' : info['extended_model'],
        'extended_family' : info['extended_family'],
        'flags' : cpuid.get_flags(max_extension_support)
    }
def _get_field(raw_string, *field_names):
for field_name in field_names:
if field_name in raw_string:
raw_field = raw_string.split(field_name)[1] # Everything after the field name
raw_field = raw_field.split(':')[1] # Everything after the :
raw_field = raw_field.split('\n')[0] # Everything before the \n
raw_field = raw_field.strip() # Strip any extra white space
return raw_field
return None
def get_cpu_info_from_proc_cpuinfo():
    '''
    Returns the CPU info gathered from /proc/cpuinfo. Will return None if
    /proc/cpuinfo is not found.
    '''
    # Just return None if there is no cpuinfo
    if not os.path.exists('/proc/cpuinfo'):
        return None
    # BUG FIX: read the file directly instead of shelling out via
    # os.popen('cat ...')
    with open('/proc/cpuinfo') as cpuinfo_file:
        output = cpuinfo_file.read()
    # FIXME: See for how lscpu parses /proc/cpuinfo
    # http://git.kernel.org/cgit/utils/util-linux/util-linux.git/tree/sys-utils/lscpu.c
    # Various fields
    vendor_id = _get_field(output, 'vendor_id', 'vendor id', 'vendor')
    processor_brand = _get_field(output, 'model name','cpu')
    cache_size = _get_field(output, 'cache size')
    stepping = _get_field(output, 'stepping')
    model = _get_field(output, 'model')
    family = _get_field(output, 'cpu family')
    # Flags
    # BUG FIX: _get_field returns None when neither 'flags' nor 'Features'
    # is present (non-x86/ARM layouts); avoid AttributeError on .split()
    flags_field = _get_field(output, 'flags', 'Features')
    flags = flags_field.split() if flags_field else []
    flags.sort()
    # Convert from MHz string to Hz
    processor_hz = _get_field(output, 'cpu MHz', 'cpu speed', 'clock')
    processor_hz = processor_hz.lower().rstrip('mhz').strip()
    processor_hz = float(processor_hz) * 1000000.0
    processor_hz = to_friendly_hz(processor_hz)
    # Get the CPU arch and bits
    raw_arch_string = platform.machine()
    arch, bits = parse_arch(raw_arch_string)
    return {
        'vendor_id' : vendor_id,
        'brand' : processor_brand,
        'hz' : processor_hz,
        'arch' : arch,
        'bits' : bits,
        'count' : multiprocessing.cpu_count(),
        'raw_arch_string' : raw_arch_string,
        # NOTE: the ':' in this key is kept for compatibility with the
        # other sources and the __main__ printer
        'l2_cache_size:' : cache_size,
        'l2_cache_line_size' : 0,
        'l2_cache_associativity' : 0,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'processor_type' : 0,
        'extended_model' : 0,
        'extended_family' : 0,
        'flags' : flags
    }
def get_cpu_info_from_sysctl():
    '''
    Returns the CPU info gathered from sysctl. Will return None if
    sysctl is not found.
    '''
    # Bail out when the sysctl binary is not available
    if not program_paths('sysctl'):
        return None
    # Bail out when running sysctl fails
    output = run_and_get_stdout('sysctl machdep.cpu')
    if output is None:
        return None
    # Various fields
    vendor_id = _get_field(output, 'machdep.cpu.vendor')
    processor_brand = _get_field(output, 'machdep.cpu.brand_string')
    cache_size = _get_field(output, 'machdep.cpu.cache.size')
    stepping = _get_field(output, 'machdep.cpu.stepping')
    model = _get_field(output, 'machdep.cpu.model')
    family = _get_field(output, 'machdep.cpu.family')
    # Flags
    flags = _get_field(output, 'machdep.cpu.features').lower().split()
    flags.sort()
    # The brand string ends with the speed, e.g. "@ 2.60GHz";
    # convert that suffix to Hz
    lowered_brand = processor_brand.lower()
    if lowered_brand.endswith('ghz'):
        scale = 1000000000.0
    elif lowered_brand.endswith('mhz'):
        scale = 1000000.0
    else:
        scale = 1
    speed = lowered_brand.split('@')[1]
    speed = speed.rstrip('mhz').rstrip('ghz').strip()
    processor_hz = to_friendly_hz(float(speed) * scale)
    # Get the CPU arch and bits
    raw_arch_string = platform.machine()
    arch, bits = parse_arch(raw_arch_string)
    return {
        'vendor_id' : vendor_id,
        'brand' : processor_brand,
        'hz' : processor_hz,
        'arch' : arch,
        'bits' : bits,
        'count' : multiprocessing.cpu_count(),
        'raw_arch_string' : raw_arch_string,
        'l2_cache_size:' : cache_size,
        'l2_cache_line_size' : 0,
        'l2_cache_associativity' : 0,
        'stepping' : stepping,
        'model' : model,
        'family' : family,
        'processor_type' : 0,
        'extended_model' : 0,
        'extended_family' : 0,
        'flags' : flags
    }
def get_cpu_info_from_registry():
    '''
    FIXME: Is missing many of the newer CPU flags like sse3
    Returns the CPU info gathered from the Windows Registry. Will return None if
    not on Windows.
    '''
    global is_windows
    # Just return None if not on Windows
    if not is_windows:
        return None
    # Python 2 names the module _winreg; Python 3 renamed it to winreg
    try:
        import _winreg as winreg
    except ImportError:
        import winreg
    # Get the CPU arch and bits
    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
    raw_arch_string = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
    winreg.CloseKey(key)
    arch, bits = parse_arch(raw_arch_string)
    # Get the CPU MHz (registry value names are case insensitive, so
    # "~Mhz" matches the conventional "~MHz" value)
    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
    processor_hz = winreg.QueryValueEx(key, "~Mhz")[0]
    winreg.CloseKey(key)
    processor_hz = float(processor_hz) * 1000000.0
    processor_hz = to_friendly_hz(processor_hz)
    # Get the CPU name
    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
    processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
    winreg.CloseKey(key)
    # Get the CPU vendor id
    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
    vendor_id = winreg.QueryValueEx(key, "VendorIdentifier")[0]
    winreg.CloseKey(key)
    # Get the CPU features bitmask
    key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
    feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
    winreg.CloseKey(key)
    def is_set(bit):
        # Bit 0 is the most significant bit of the 32 bit FeatureSet mask
        mask = 0x80000000 >> bit
        retval = mask & feature_bits > 0
        return retval
    # http://en.wikipedia.org/wiki/CPUID
    # http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
    # http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
    flags = {
        'fpu' : is_set(0), # Floating Point Unit
        'vme' : is_set(1), # V86 Mode Extensions
        'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
        'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
        'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
        'msr' : is_set(5), # Model Specific Registers
        'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
        'mce' : is_set(7), # Machine Check Exception supported
        'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
        'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
        'sepamd' : is_set(10), # Fast system calls (AMD only)
        'sep' : is_set(11), # Fast system calls
        'mtrr' : is_set(12), # Memory Type Range Registers
        'pge' : is_set(13), # Page Global Enable
        'mca' : is_set(14), # Machine Check Architecture
        'cmov' : is_set(15), # Conditional MOVe instructions
        'pat' : is_set(16), # Page Attribute Table
        'pse36' : is_set(17), # 36 bit Page Size Extensions
        'serial' : is_set(18), # Processor Serial Number
        'clflush' : is_set(19), # Cache Flush
        #'reserved1' : is_set(20), # reserved
        'dts' : is_set(21), # Debug Trace Store
        'acpi' : is_set(22), # ACPI support
        'mmx' : is_set(23), # MultiMedia Extensions
        'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
        'sse' : is_set(25), # SSE instructions
        'sse2' : is_set(26), # SSE2 (WNI) instructions
        'ss' : is_set(27), # self snoop
        #'reserved2' : is_set(28), # reserved
        'tm' : is_set(29), # Automatic clock control
        'ia64' : is_set(30), # IA64 instructions
        '3dnow' : is_set(31) # 3DNow! instructions available
    }
    # Get a list of only the flags that are true
    flags = [k for k, v in flags.items() if v]
    flags.sort()
    return {
        'vendor_id' : vendor_id,
        'brand' : processor_brand,
        'hz' : processor_hz,
        'arch' : arch,
        'bits' : bits,
        'count' : multiprocessing.cpu_count(),
        'raw_arch_string' : raw_arch_string,
        # Cache/stepping details are not available from the registry
        'l2_cache_size:' : 0,
        'l2_cache_line_size' : 0,
        'l2_cache_associativity' : 0,
        'stepping' : 0,
        'model' : 0,
        'family' : 0,
        'processor_type' : 0,
        'extended_model' : 0,
        'extended_family' : 0,
        'flags' : flags
    }
def get_cpu_info():
    """Try each CPU info source in turn and return the first hit, else None.

    Order: Windows registry, /proc/cpuinfo, sysctl, raw CPUID.
    """
    sources = (
        get_cpu_info_from_registry,
        get_cpu_info_from_proc_cpuinfo,
        get_cpu_info_from_sysctl,
        get_cpu_info_from_cpuid,
    )
    for source in sources:
        info = source()
        if info:
            return info
    return None
if __name__ == '__main__':
    # Gather CPU info from the best available source and print every field
    info = get_cpu_info()
    fields = [
        ('Vendor ID', 'vendor_id'),
        ('Brand', 'brand'),
        ('Hz', 'hz'),
        ('Arch', 'arch'),
        ('Bits', 'bits'),
        ('Count', 'count'),
        ('Raw Arch String', 'raw_arch_string'),
        ('L2 Cache Size', 'l2_cache_size:'),  # trailing ':' is part of the key
        ('L2 Cache Line Size', 'l2_cache_line_size'),
        ('L2 Cache Associativity', 'l2_cache_associativity'),
        ('Stepping', 'stepping'),
        ('Model', 'model'),
        ('Family', 'family'),
        ('Processor Type', 'processor_type'),
        ('Extended Model', 'extended_model'),
        ('Extended Family', 'extended_family'),
        ('Flags:', 'flags'),
    ]
    for label, key in fields:
        print(label, info[key])
| |
from github3 import login
from github3.exceptions import GitHubError
import socket
import json, datetime, os
import time
import dateutil.parser
from tabulate import tabulate
import humanize
VERSION = "0.1.0"
GLOBAL_PARAMS = {"per_page": 100}
ACCEPTED_WAIT = 0.25
def date_handler(obj):
    """json.dump `default=` hook: serialize datetimes and timedeltas.

    datetimes become ISO 8601 strings, timedeltas become float seconds;
    anything else raises TypeError.
    """
    if isinstance(obj, datetime.datetime):
        return obj.isoformat()
    if isinstance(obj, datetime.timedelta):
        return obj.total_seconds()
    raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
def logo_block(name=None):
    """Render the ASCII-art banner, optionally tagged with a report name."""
    banner = [
        "# __ ",
        "# /\ \ ",
        "# __\ \ \___ _____ ___ ",
        "# /'_ `\ \ _ `\/\ __`\ /' _ `\ ",
        "# /\ \_\ \ \ \ \ \ \ \/\ \/\ \/\ \ ",
        "# \ \____ \ \_\ \_\ \ __/\ \_\ \_\\",
        "# \/____\ \/_/\/_/\ \ \/ \/_/\/_/",
        "# /\____/ \ \_\ ",
        "# \_/__/ \/_/ ",
        "#",
        "# github parents night v%s" % (VERSION),
    ]
    if name:
        banner.extend(["#", "# REPORT FOR: %s" % (name)])
    return "\n".join(banner)
def section_header_block(header):
    """Frame *header* in a '#' box; the result ends with a newline."""
    border = "#" * (len(header) + 4)
    return "{0}\n# {1} #\n{0}\n".format(border, header)
def flatten_event_list(events):
    """Bucket events into [label, count] pairs for charting.

    Events spanning a single day are grouped by hour ("%H"); otherwise by
    day ("%d-%m-%Y"), with empty days filled in and the result sorted
    chronologically. Returns None for an empty event list.
    """
    if not events:
        return None
    timestamps = [e.created_at for e in events]
    start = min(timestamps)
    end = max(timestamps)
    # hacky: very active users can have all their events inside one day,
    # in which case we bucket by hour instead of by day
    daily = (end - start).days > 0
    fmt = "%d-%m-%Y" if daily else "%H"
    buckets = {}
    for event in events:
        buckets.setdefault(event.created_at.strftime(fmt), []).append(event)
    if daily:
        # Fill in days with no activity so the chart has no gaps
        for offset in range((end - start).days):
            day_key = (start + datetime.timedelta(days=offset)).strftime(fmt)
            buckets.setdefault(day_key, [])
    counts = [[key, len(group)] for key, group in buckets.items()]
    if daily:
        counts.sort(key=lambda pair: dateutil.parser.parse(pair[0], dayfirst=True))
    return counts
def reduce_events(events, cutoff=25):
    """Split raw GitHub events by type and flatten each group for charting.

    Returns (pushes, creates, forks, issues); each element is either the
    flatten_event_list() result or None when that category has fewer than
    *cutoff* events (not enough to chart).
    """
    pushes = []
    creates = []
    forks = []
    issues = []
    for e in events:
        if e.type == "PushEvent":
            pushes.append(e)
        elif e.type == "CreateEvent":
            creates.append(e)
        elif e.type == "ForkEvent":
            forks.append(e)
        elif e.type == "IssueCommentEvent" or e.type == "IssuesEvent":
            issues.append(e)
    if len(pushes) < cutoff:
        pushes = None
    else:
        pushes = flatten_event_list(pushes)
    if len(creates) < cutoff:
        creates = None
    else:
        creates = flatten_event_list(creates)
    # BUG FIX: the original tested `if len(forks):` / `if len(issues):`,
    # which inverted the cutoff logic — any non-empty list was discarded and
    # empty lists were flattened. Apply the same cutoff rule as above.
    if len(forks) < cutoff:
        forks = None
    else:
        forks = flatten_event_list(forks)
    if len(issues) < cutoff:
        issues = None
    else:
        issues = flatten_event_list(issues)
    return pushes, creates, forks, issues
class GHRepo(object):
    """Plain value object describing one repository's statistics."""
    def __init__(
        self,
        name=None,
        is_forkd=None,
        total_commits=None,
        last_month_commits=None,
        stars=None,
        watchers=None,
        forks=None,
        language=None,
        languages=None,
        size=None,
        open_issues=None,
        last_updated=None,
        created_at=None,
        last_commit=None
    ):
        # Store every constructor argument as a same-named attribute
        self.__dict__.update(
            name=name,
            is_forkd=is_forkd,
            total_commits=total_commits,
            last_month_commits=last_month_commits,
            stars=stars,
            watchers=watchers,
            forks=forks,
            language=language,
            languages=languages,
            size=size,
            open_issues=open_issues,
            last_updated=last_updated,
            created_at=created_at,
            last_commit=last_commit
        )
class GHProfile(object):
    """A GitHub user's account details plus one GHRepo per repository.

    Built either live from the GitHub API (from_github) or from a JSON
    dump on disk (from_file / to_file). The get_* methods derive the
    display metrics used by GHProfileStats.
    """
    def __init__(
        self,
        username=None,
        name=None,
        user_since=None,
        last_active=None,
        followers=None,
        following=None,
        repos=None,
        location=None,
        push_activity=None,
        fork_activity=None,
        create_activity=None,
        issue_activity=None,
        company=None,
        hireable=None,
        num_gists=None,
        email=None
    ):
        # repos: list of GHRepo. The *_activity fields hold the
        # [label, count] lists produced by flatten_event_list (or None).
        self.username = username
        self.name = name
        self.user_since = user_since
        self.last_active = last_active
        self.followers = followers
        self.following = following
        self.repos = repos
        self.location = location
        self.push_activity = push_activity
        self.fork_activity = fork_activity
        self.create_activity = create_activity
        self.issue_activity = issue_activity
        self.company = company
        self.hireable = hireable
        self.num_gists = num_gists
        self.email = email
    @staticmethod
    def from_github(username, json_errors=False, github=None):
        """Build a GHProfile by querying the GitHub API for *username*.

        When json_errors is True an error dict is returned for an unknown
        user instead of raising.
        """
        # this is where ALL the requests come from (at least they should)
        if not github:
            github = GitHub()
        ro = github.user(username)
        if not ro:
            if json_errors:
                return {"error": "cannot find %s" % username, "error_status_code": 404}
            else:
                # raise something?
                pass
        repos_iter = github.repositories_by(username)
        repos_iter.params = GLOBAL_PARAMS
        def get_user_commits():
            # NOTE(review): this closure reads `r` from the repo loop below,
            # so it is only valid when called from inside that loop.
            total = 0
            contrib_iter = r.contributor_statistics()
            contrib_iter.params = GLOBAL_PARAMS
            while True:
                try:
                    for contributor in contrib_iter:
                        # HTTP 202: GitHub is still computing the stats;
                        # wait briefly and retry the whole iteration
                        if contrib_iter.last_status == 202:
                            time.sleep(ACCEPTED_WAIT)
                            break
                        else:
                            if contributor.author.login == username:
                                total += contributor.total
                    if not contrib_iter.last_status == 202:
                        break
                except AttributeError:
                    break
            return total
        ro_repos = []
        for r in repos_iter:
            # Only fetch the newest commit (per_page=1), for its sha
            latest_commit_iter = r.commits()
            latest_commit_iter.params = {"per_page": 1}
            last_commit = ""
            try:
                for lc in latest_commit_iter:
                    last_commit = lc.sha[:8]
                    break
            except GitHubError:
                # presumably a 403 marks an empty/inaccessible repo — skip it
                if latest_commit_iter.last_status == 403:
                    continue
                last_commit = ""
            lang_iter = r.languages()
            lang_iter.params = GLOBAL_PARAMS
            repo = GHRepo(
                name=r.name,
                is_forkd=r.fork,
                total_commits=get_user_commits(), # guhhhh
                last_month_commits=None,
                stars=r.stargazers_count,
                watchers=r.watchers, # WRONG
                forks=r.forks_count,
                language=r.language,
                languages=[l for l in lang_iter], # (language, bytes) pairs
                size=r.size*1000, # kb i think
                open_issues=r.open_issues_count,
                last_updated=r.updated_at.replace(tzinfo=None),
                created_at=r.created_at.replace(tzinfo=None),
                last_commit=last_commit
            )
            ro_repos.append(repo)
        event_iter = ro.events()
        event_iter.params = GLOBAL_PARAMS
        pushes, creates, forks, issues = reduce_events([e for e in event_iter])
        user_last_active = datetime.datetime.strptime(ro._json_data["updated_at"], "%Y-%m-%dT%H:%M:%SZ")
        return GHProfile(
            username=username,
            name=ro.name,
            user_since=ro.created_at.replace(tzinfo=None),
            last_active=user_last_active,
            followers=ro.followers_count,
            following=ro.following_count,
            repos=ro_repos,
            location=ro.location,
            push_activity=pushes,
            fork_activity=forks,
            create_activity=creates,
            issue_activity=issues,
            company=ro.company,
            hireable=ro.hireable,
            num_gists=ro.public_gists,
            email=ro.email
        )
    @staticmethod
    def from_file(filename):
        """Rebuild a GHProfile from a JSON dump written by to_file."""
        with open(filename, "r") as f:
            # should handle datetime re-serialization...? (last_updated, created_at)
            profile = json.load(f)
            repos = [GHRepo(
                name=r["name"],
                is_forkd=r["is_forkd"],
                total_commits=r["total_commits"],
                last_month_commits=r["last_month_commits"],
                stars=r["stars"],
                watchers=r["watchers"],
                forks=r["forks"],
                language=r["language"],
                languages=r["languages"],
                size=r["size"],
                open_issues=r["open_issues"],
                last_updated=dateutil.parser.parse(r["last_updated"], dayfirst=True),
                created_at=dateutil.parser.parse(r["created_at"], dayfirst=True),
                last_commit=r["last_commit"]
            ) for r in profile["repos"]]
            return GHProfile(
                username=profile["username"],
                user_since=dateutil.parser.parse(profile["user_since"], dayfirst=True),
                last_active=dateutil.parser.parse(profile["last_active"], dayfirst=True),
                followers=profile["followers"],
                following=profile["following"],
                name=profile["name"],
                repos=repos,
                location=profile["location"],
                push_activity=profile["push_activity"],
                fork_activity=profile["fork_activity"],
                create_activity=profile["create_activity"],
                issue_activity=profile["issue_activity"],
                company=profile["company"],
                hireable=profile["hireable"],
                num_gists=profile["num_gists"],
                email=profile["email"]
            )
    def to_file(self, filename):
        """Serialize this profile to *filename* as JSON.

        NOTE(review): this mutates self.repos in place (GHRepo objects are
        replaced by plain dicts), degrading the live instance.
        """
        with open(filename, "w") as f:
            profile = self.__dict__
            profile["repos"] = [r.__dict__ for r in profile["repos"]]
            json.dump(profile, f, default=date_handler)
    def get_lang_stats(self):
        """Aggregate language usage over all repos.

        Returns (language, repos using it, total bytes, percent of uses)
        tuples sorted by usage count, descending.
        """
        langs = {}
        for r in self.repos:
            # r.languages is a list of (language, bytes) pairs
            for k, v in r.languages:
                if not langs.get(k): langs[k] = {"bytes": 0, "used": 0}
                langs[k]["bytes"] += v
                langs[k]["used"] += 1
        lang_count = sum(langs[lang]["used"] for lang in langs)
        langs = [(lang, langs[lang]["used"], langs[lang]["bytes"], (langs[lang]["used"]/lang_count)*100) for lang in langs]
        langs.sort(key=lambda x: x[1], reverse=True)
        return langs
    def get_repos_footprint(self):
        """Return (total size of all repos, size excluding forks or None)."""
        return sum(r.size for r in self.repos), sum(r.size for r in self.repos if not r.is_forkd) or None
    def get_avg_repo_age(self):
        """Mean repo age as a timedelta; None when there are no repos."""
        if len(self.repos) > 0:
            ages = [datetime.datetime.utcnow()-r.created_at for r in self.repos]
            return sum(ages, datetime.timedelta(0))/len(ages)
    def get_repo_age_range(self):
        """Return ((oldest name, age), (newest name, age)) or (None, None)."""
        if len(self.repos) > 0:
            oldest = max([(r.name, datetime.datetime.utcnow()-r.created_at) for r in self.repos], key=lambda x: x[1])
            newest = min([(r.name, datetime.datetime.utcnow()-r.created_at) for r in self.repos], key=lambda x: x[1])
            return (oldest[0], oldest[1]), (newest[0], newest[1])
        else:
            return None, None
    def get_stars(self):
        """Return (total stars, number of repos with at least one star)."""
        stars = [r.stars for r in self.repos if r.stars > 0]
        return sum(stars), len(stars)
    def get_forkers(self):
        """Return (total forks, number of repos that have been forked)."""
        forkers = [f.forks for f in self.repos if f.forks > 0]
        return sum(forkers), len(forkers)
    def get_active_repos(self):
        """Table rows for recently updated repos, newest first.

        NOTE(review): named "two months" but weeks=3*2 is six weeks.
        """
        two_months_ago = datetime.datetime.now()-datetime.timedelta(weeks=3*2)
        repos = [[r.name, r.language, r.last_commit[:8], r.total_commits, r.last_updated, r.created_at] for r in self.repos if r.last_updated > two_months_ago]
        repos.sort(key=lambda x: x[4], reverse=True)
        # Humanize the datetime columns after sorting on the raw values
        for r in repos:
            r[4] = humanize.naturaltime(r[4])
            r[5] = humanize.naturaltime(r[5])
        return repos
    def get_inactive_repos(self):
        """Table rows for stale repos, stalest first.

        NOTE(review): named "six months" but weeks=3*6 is eighteen weeks.
        """
        six_months_ago = datetime.datetime.now()-datetime.timedelta(weeks=3*6)
        repos = [[r.name, r.language, r.last_commit[:8], r.total_commits, r.last_updated, r.created_at] for r in self.repos if r.last_updated < six_months_ago]
        repos.sort(key=lambda x: x[4])
        # Humanize the datetime columns after sorting on the raw values
        for r in repos:
            r[4] = humanize.naturaltime(r[4])
            r[5] = humanize.naturaltime(r[5])
        return repos
    def get_popular_repos(self, fork_sort=False):
        """Starred/forked repos sorted by stars (or by forks if fork_sort)."""
        repos = [(r.name, r.stars, r.forks, r.total_commits, humanize.naturaltime(r.last_updated)) for r in self.repos if r.stars > 0 or r.forks > 0]
        if not fork_sort:
            sorter=1
        else:
            sorter=2
        repos.sort(key=lambda x: x[sorter], reverse=True)
        return repos
    def get_total_commits(self):
        """Total of this user's commits across non-fork repos."""
        return sum(r.total_commits for r in self.repos if not r.is_forkd)
class GHProfileStats(object):
def __init__(
self,
token=None,
username=None,
name=None,
location=None,
user_since=None,
last_active=None,
repo_num=None,
forked_repo_num=None,
langs=None,
footprint=None,
footprint_minus_forks=None,
stars=None,
forkers=None,
followers=None,
following=None,
oldest_repo=None,
newest_repo=None,
avg_repo_age=None,
popular_repos=None,
active_repos=None,
inactive_repos=None,
num_inactive_repos=None,
total_commits=None,
push_activity=None,
fork_activity=None,
create_activity=None,
issue_activity=None,
company=None,
hireable=None,
num_gists=None,
email=None
):
if token:
self.gh = login(token=token)
else:
self.gh = None
self.username = username
self.name = name
self.location = location
self.user_since = user_since
self.last_active = last_active
self.repo_num = repo_num
self.forked_repo_num = forked_repo_num
self.langs = langs
self.footprint = footprint
self.footprint_minus_forks = footprint_minus_forks
self.stars = stars
self.forkers = forkers
self.followers = followers
self.following = following
self.oldest_repo = oldest_repo
self.newest_repo = newest_repo
self.avg_repo_age = avg_repo_age
self.popular_repos = popular_repos
self.active_repos = active_repos
self.inactive_repos = inactive_repos
self.num_inactive_repos = num_inactive_repos
self.total_commits = total_commits
self.push_activity = push_activity
self.fork_activity = fork_activity
self.create_activity = create_activity
self.issue_activity = issue_activity
self.company = company
self.hireable = hireable
self.num_gists = num_gists
self.email = email
@staticmethod
def get(username, json_errors=False):
stats = GHProfile.from_github(username, json_errors=json_errors, github=self.gh)
if json_errors and not isinstance(stats, GHProfile):
print(stats["error"])
print(username)
print(stats)
return stats
else:
return GHProfileStats.from_ghprofile(stats)
@staticmethod
def from_ghprofile(profile, repo_limit=10):
footprint, footprint_minus_forks = profile.get_repos_footprint()
oldest, newest = profile.get_repo_age_range()
return GHProfileStats(
username=profile.username,
name=profile.name,
location=profile.location,
user_since=profile.user_since,
last_active=profile.last_active,
repo_num=len(profile.repos),
forked_repo_num=len([r for r in profile.repos if r.is_forkd]),
langs=profile.get_lang_stats(),
footprint=footprint,
footprint_minus_forks=footprint_minus_forks,
stars=profile.get_stars(),
forkers=profile.get_forkers(),
followers=profile.followers,
following=profile.following,
oldest_repo=oldest,
newest_repo=newest,
avg_repo_age=profile.get_avg_repo_age(),
popular_repos=profile.get_popular_repos()[:repo_limit],
active_repos=profile.get_active_repos()[:repo_limit],
inactive_repos=profile.get_inactive_repos()[:repo_limit],
num_inactive_repos=len(profile.get_inactive_repos()),
total_commits=profile.get_total_commits(),
push_activity=profile.push_activity,
fork_activity=profile.fork_activity,
create_activity=profile.create_activity,
issue_activity=profile.issue_activity,
company=profile.company,
hireable=profile.hireable,
num_gists=profile.num_gists,
email=profile.email
)
@staticmethod
def from_json(stats_json):
stats_json = json.loads(stats_json)
oldest, newest = stats_json["oldest_repo"], stats_json["newest_repo"]
if oldest and newest:
oldest[1] = datetime.timedelta(seconds=oldest[1])
newest[1] = datetime.timedelta(seconds=newest[1])
if stats_json["avg_repo_age"]:
avg_repo_age = datetime.timedelta(seconds=stats_json["avg_repo_age"])
else:
avg_repo_age = None
return GHProfileStats(
username=stats_json["username"],
name=stats_json["name"],
location=stats_json["location"],
user_since=dateutil.parser.parse(stats_json["user_since"], dayfirst=True),
last_active=dateutil.parser.parse(stats_json["last_active"], dayfirst=True),
repo_num=stats_json["repo_num"],
forked_repo_num=stats_json["forked_repo_num"],
langs=stats_json["langs"],
footprint=stats_json["footprint"],
footprint_minus_forks=stats_json["footprint_minus_forks"],
stars=stats_json["stars"],
forkers=stats_json["forkers"],
followers=stats_json["followers"],
following=stats_json["following"],
oldest_repo=oldest,
newest_repo=newest,
avg_repo_age=avg_repo_age,
popular_repos=stats_json["popular_repos"],
active_repos=stats_json["active_repos"],
inactive_repos=stats_json["inactive_repos"],
num_inactive_repos=stats_json["num_inactive_repos"],
total_commits=stats_json["total_commits"],
push_activity=stats_json["push_activity"],
fork_activity=stats_json["fork_activity"],
create_activity=stats_json["create_activity"],
issue_activity=stats_json["issue_activity"],
company=stats_json["company"],
hireable=stats_json["hireable"],
num_gists=stats_json["num_gists"],
email=stats_json["email"]
)
def to_json(self):
return json.dumps(self.__dict__, default=date_handler)
def user_block(self):
output = []
output.append(section_header_block("User info"))
output.append(" Github username: %s" % (self.username))
if self.name:
output.append(" Name: %s" % (self.name))
if self.company:
output.append(" Company: %s" % (self.company))
if self.location:
output.append(" Location: %s" % (self.location))
if self.email:
output.append(" Email: %s" % (self.email))
if self.hireable:
output.append(" Hireable: [TRUE]")
output.append(" User since: %s [%s]" % (self.user_since.strftime("%d-%m-%Y"), humanize.naturaltime(datetime.datetime.now()-self.user_since)))
output.append(" Last active: %s" % (humanize.naturaltime(self.last_active)))
output.append(" Followers: %d [following %d]" % (self.followers, self.following))
if self.num_gists > 0:
output.append(" Gists: %d" % (self.num_gists))
if self.footprint > 0:
if self.footprint_minus_forks:
extra = " [%s minus forks]" % humanize.naturalsize(self.footprint_minus_forks, gnu=True)
else:
extra = ""
output.append(" Github footprint: %s%s" % (humanize.naturalsize(self.footprint, gnu=True), extra))
return "\n".join(output)
def repo_block(self):
output = []
output.append(section_header_block("Repositories"))
if self.forked_repo_num > 0 or self.num_inactive_repos > 0:
extra = []
if self.forked_repo_num > 0:
extra.append("%d forks" % (self.forked_repo_num))
elif self.num_inactive_repos > 0:
extra.append("%d inactive for >6 months" % (self.num_inactive_repos))
extra_repo = " [%s]" % (", ".join(extra))
else:
extra_repo = ""
output.append(" Repos: %s%s" % (self.repo_num, extra_repo))
output.append(" Total commits: %d" % (self.total_commits))
output.append(" Stars: %d [over %d repos]" % (self.stars[0], self.stars[1]))
output.append(" Forkers: %d [over %d repos]" % (self.forkers[0], self.forkers[1]))
if self.repo_num > 0:
output.append(" Oldest repo: %s [%s]" % (self.oldest_repo[0], humanize.naturaltime(self.oldest_repo[1]).replace("ago", "old")))
output.append(" Newest repo: %s [%s]" % (self.newest_repo[0], humanize.naturaltime(self.newest_repo[1]).replace("ago", "old")))
if self.avg_repo_age:
output.append(" Average repo age: %s" % (humanize.naturaltime(self.avg_repo_age).replace("ago", "old")))
if len(self.langs) > 0:
output.append(" Languages used: %s" % (len(self.langs)))
return "\n".join(output)
def lang_breakdown_block(self):
output = []
if len(self.langs) > 0:
output.append(section_header_block("Language breakdown"))
table = [(l[0], humanize.naturalsize(l[2], gnu=True), l[1], int(l[3]/4)*"|", "{:3.2f}%".format(l[3])) for l in self.langs]
for t in tabulate(table, tablefmt="simple", headers=["", "footprint", "times used", " "*25, ""], stralign="right").split("\n"):
output.append(" %s" % (t))
return "\n".join(output)
def active_repos_block(self):
output = []
if len(self.active_repos) > 0:
output.append(section_header_block("Recently active repositories"))
for t in tabulate(self.active_repos, tablefmt="simple", headers=["", "language", "last commit", "total commits", "last updated", "created"]).split("\n"):
output.append(" %s" % (t))
return "\n".join(output)
def inactive_repos_block(self):
output = []
if len(self.inactive_repos) > 0:
output.append(section_header_block("Inactive repositories"))
for t in tabulate(self.inactive_repos, tablefmt="simple", headers=["", "language", "last commit", "total commits", "last updated", "created"]).split("\n"):
output.append(" %s" % (t))
return "\n".join(output)
def popular_repos_block(self):
output = []
if len(self.popular_repos) > 0:
output.append(section_header_block("Popular repositories"))
for t in tabulate(self.popular_repos, tablefmt="simple", headers=["", "stars", "forkers", "total commits", "last updated"]).split("\n"):
output.append(" %s" % (t))
return "\n".join(output)
@staticmethod
def construct_event_graph_block(header, event_tuples, height=15):
if event_tuples:
output = []
line_length = len(event_tuples)
# if line_length < 10:
# modi = 15
# elif line_length > 75:
# modi = 1
# else:
# modi = 2
modi = 2
e_max = max(event_tuples, key=lambda x: x[1])[1] or 10
def trans(value, event_max=e_max, graph_max=height):
return ((graph_max)*(value)/(event_max))
output.append(section_header_block(header))
table = ""
for row in range(height, 0, -1):
table += " "
for col in range(line_length):
if trans(event_tuples[col][1]) >= row:
table += "#"*modi
else:
table += " "*modi
table += "| "
if row == height:
table += "~%d\n" % (e_max)
elif row == 1:
table += "~1\n"
else:
table += "\n"
output.append(table+" "+"-"*(modi*line_length)+"/")
if len(str(event_tuples[0][0]))/2 <= 4:
padding = 4
else:
padding = 0
padding += 4
padding = " "*padding
output.append(str("{}{: <%d}{: >%d}" % ((modi*line_length/2)+(len(str(event_tuples[0][0]))/2), (modi*line_length/2)+((len(str(event_tuples[0][0]))/2)))).format(padding, event_tuples[0][0], event_tuples[-1][0]))
return "\n".join(output)
def get_all_blocks(self):
    """Assemble every report section in display order, dropping empty ones."""
    display_name = self.name or self.username
    sections = (
        logo_block(name=display_name),
        self.user_block(),
        self.repo_block(),
        self.lang_breakdown_block(),
        self.popular_repos_block(),
        self.construct_event_graph_block("Push chart", self.push_activity),
        self.active_repos_block(),
        self.inactive_repos_block(),
        self.construct_event_graph_block("New repository chart", self.create_activity),
        self.construct_event_graph_block("Issue comments chart", self.issue_activity),
        self.construct_event_graph_block("Fork chart", self.fork_activity),
    )
    return [section for section in sections if section]
def _debug_remaining_requests(self):
return self.gh.rate_limit()
def _debug_request_counter(self):
return self.gh.__dict__["session"].__dict__["request_counter"]
def sample_gh_users(pages=1):
    """Return login names of recently active GitHub users.

    Fetches `pages` pages (100 events each) of the public events feed and
    collects the actor login of every event.
    """
    import requests
    # GitHub's `page` query parameter is 1-based; the previous `range(pages)`
    # started at page 0, so pages=2 fetched page 0 and page 1 (duplicating
    # the first page) instead of pages 1 and 2.
    pages_json = [
        requests.get(
            "https://api.github.com/events?page=%d&per_page=100" % page
        ).json()
        for page in range(1, pages + 1)
    ]
    return [event["actor"]["login"] for page in pages_json for event in page]
def testing(test_users):
    """Profile each user in `test_users`, printing their stats report plus
    per-user and aggregate timing / request-count debug info.

    NOTE(review): relies on module-level `gh`, `time`, `GHProfile`,
    `GHProfileStats` and `section_header_block` -- presumably defined earlier
    in this file; confirm before moving this function.
    """
    debugs = []  # one DEBUG_INFO dict per profiled user
    for tu in test_users:
        print(tu+"\n")
        start_t = time.time()
        # Remaining rate limit before the fetch; the delta below counts
        # how many API requests the profile fetch consumed.
        start_l = gh.rate_limiting[0]
        roland = GHProfile.from_github(tu)
        DEBUG_INFO = {
            "requests_took": time.time()-start_t,
            "num_requests_made": start_l-gh.rate_limiting[0],
        }
        debugs.append(DEBUG_INFO)
        stats = GHProfileStats.from_ghprofile(roland)
        print("\n\n".join(stats.get_all_blocks()))
        print(section_header_block("DEBUG"))
        print("  requests took: %.2fs" % (DEBUG_INFO["requests_took"]))
        print("  num requests made: %d" % (DEBUG_INFO["num_requests_made"]))
        # time-per-request; NOTE(review): divides by zero if no requests were made.
        print("  tpr: %.3f" % (DEBUG_INFO["requests_took"]/DEBUG_INFO["num_requests_made"]))
    def avg(l):
        # Arithmetic mean; assumes l is non-empty.
        return sum(l) / len(l)
    print(section_header_block("RUN STATS"))
    print("  Average collection period: %.2fs" % (avg([d["requests_took"] for d in debugs])))
    print("  Average requests: %.2d" % (avg([d["num_requests_made"] for d in debugs])))
def run():
    """CLI entry point: profile the GitHub user named in argv[1], print the
    report, then print timing and request-count debug information.

    NOTE(review): relies on module-level `gh`, `time`, `GHProfile`,
    `GHProfileStats` and `section_header_block` defined earlier in the file.
    """
    import sys
    # Request-counter snapshots bracket the fetch so we can count API calls.
    rl_start = gh.__dict__["session"].__dict__["request_counter"]
    r_start_t = time.time()
    roland = GHProfile.from_github(sys.argv[1])
    r_end_t = time.time()
    rl_end = gh.__dict__["session"].__dict__["request_counter"]
    # Time the (purely local) stats generation separately from the fetch.
    s_start_t = time.time()
    stats = GHProfileStats.from_ghprofile(roland)
    s_end_t = time.time()
    print("\n\n".join(stats.get_all_blocks()))
    print("\n")
    print(section_header_block("DEBUG"))
    print("  requests took: %.2fs" % (r_end_t-r_start_t))
    print("  stats gen took: %.6fs" % (s_end_t-s_start_t))
    print("  num requests made: %d" % (rl_end-rl_start))
    print("  rps: %.3f" % ((rl_end-rl_start)/(r_end_t-r_start_t)))
if __name__ == "__main__":
    # Profile the user named on the command line and print the report.
    run()
    # testing(sample_gh_users())
| |
import os
import stat
import tempfile
from mock import Mock, patch
from nose import SkipTest
from nose.tools import eq_
import waffle
from django.conf import settings
import amo
import amo.tests
from amo.tests.test_helpers import get_image_path
from devhub.models import UserLog
from lib.video import get_library
from lib.video import ffmpeg, totem
from lib.video.tasks import resize_video
from users.models import UserProfile
files = {
'good': os.path.join(os.path.dirname(__file__),
'fixtures/disco-truncated.webm'),
'bad': get_image_path('mozilla.png'),
}
older_output = """
Input #0, matroska,webm, from 'lib/video/fixtures/disco-truncated.webm':
Duration: 00:00:10.00, start: 0.000000, bitrate: 298 kb/s
Stream #0:0(eng): Video: vp8, yuv420p, 640x360, SAR 1:1 DAR 16:9,
Stream #0:1(eng): Audio: vorbis, 44100 Hz, stereo, s16 (default)
"""
other_output = """
Input #0, matroska, from 'disco-truncated.webm':
Metadata:
doctype : webm
"""
totem_indexer_good = """
TOTEM_INFO_DURATION=10
TOTEM_INFO_HAS_VIDEO=True
TOTEM_INFO_VIDEO_WIDTH=640
TOTEM_INFO_VIDEO_HEIGHT=360
TOTEM_INFO_VIDEO_CODEC=VP8 video
TOTEM_INFO_FPS=25
TOTEM_INFO_HAS_AUDIO=True
TOTEM_INFO_AUDIO_BITRATE=128
TOTEM_INFO_AUDIO_CODEC=Vorbis
TOTEM_INFO_AUDIO_SAMPLE_RATE=44100
TOTEM_INFO_AUDIO_CHANNELS=Stereo
"""
totem_indexer_bad = """
TOTEM_INFO_HAS_VIDEO=False
TOTEM_INFO_HAS_AUDIO=False
"""
class TestFFmpegVideo(amo.tests.TestCase):
    """Exercise ffmpeg metadata extraction against the known-good fixture."""
    def setUp(self):
        self.video = ffmpeg.Video(files['good'])
        # Skip the whole class when the ffmpeg binary is unavailable.
        if not ffmpeg.Video.library_available():
            raise SkipTest
        # Stub out the subprocess call; tests feed canned ffmpeg output instead.
        self.video._call = Mock()
        self.video._call.return_value = older_output
    def test_meta(self):
        # Parsed metadata from the canned older-style ffmpeg output.
        self.video.get_meta()
        eq_(self.video.meta['formats'], ['matroska', 'webm'])
        eq_(self.video.meta['duration'], 10.0)
        eq_(self.video.meta['dimensions'], (640, 360))
    def test_valid(self):
        self.video.get_meta()
        assert self.video.is_valid()
    def test_dev_valid(self):
        # Newer/alternate ffmpeg output: format comes from the Metadata block.
        self.video._call.return_value = other_output
        self.video.get_meta()
        eq_(self.video.meta['formats'], ['webm'])
    # These tests can be a little bit slow, to say the least so they are
    # skipped. Un-skip them if you want.
    def test_screenshot(self):
        raise SkipTest
        self.video.get_meta()
        try:
            screenshot = self.video.get_screenshot(amo.ADDON_PREVIEW_SIZES[0])
            assert os.stat(screenshot)[stat.ST_SIZE]
        finally:
            os.remove(screenshot)
    def test_encoded(self):
        raise SkipTest
        self.video.get_meta()
        try:
            video = self.video.get_encoded(amo.ADDON_PREVIEW_SIZES[0])
            assert os.stat(video)[stat.ST_SIZE]
        finally:
            os.remove(video)
class TestBadFFmpegVideo(amo.tests.TestCase):
    """Feed ffmpeg a PNG and check it is consistently rejected as a video."""
    def setUp(self):
        # Unlike TestFFmpegVideo, this runs the real ffmpeg on a non-video file.
        self.video = ffmpeg.Video(files['bad'])
        if not self.video.library_available():
            raise SkipTest
        self.video.get_meta()
    def test_meta(self):
        # ffmpeg identifies a PNG as the 'image2' format.
        eq_(self.video.meta['formats'], ['image2'])
        assert not self.video.is_valid()
    def test_valid(self):
        assert not self.video.is_valid()
    def test_screenshot(self):
        # Operations on an invalid video must refuse to run.
        self.assertRaises(AssertionError, self.video.get_screenshot,
                          amo.ADDON_PREVIEW_SIZES[0])
    def test_encoded(self):
        self.assertRaises(AssertionError, self.video.get_encoded,
                          amo.ADDON_PREVIEW_SIZES[0])
class TestTotemVideo(amo.tests.TestCase):
    """Exercise totem indexer parsing using canned indexer output."""
    def setUp(self):
        self.video = totem.Video(files['good'])
        # Stub the external indexer call; each test supplies its own output.
        self.video._call_indexer = Mock()
    def test_meta(self):
        self.video._call_indexer.return_value = totem_indexer_good
        self.video.get_meta()
        eq_(self.video.meta['formats'], 'VP8')
        eq_(self.video.meta['duration'], '10')
    def test_valid(self):
        # NOTE(review): re-mocking _call_indexer here is redundant -- setUp
        # already mocked it.
        self.video._call_indexer = Mock()
        self.video._call_indexer.return_value = totem_indexer_good
        self.video.get_meta()
        assert self.video.is_valid()
    def test_not_valid(self):
        self.video._call_indexer.return_value = totem_indexer_bad
        self.video.get_meta()
        assert not self.video.is_valid()
    # These tests can be a little bit slow, to say the least so they are
    # skipped. Un-skip them if you want.
    def test_screenshot(self):
        raise SkipTest
        self.video.get_meta()
        try:
            screenshot = self.video.get_screenshot(amo.ADDON_PREVIEW_SIZES[0])
            assert os.stat(screenshot)[stat.ST_SIZE]
        finally:
            os.remove(screenshot)
    def test_encoded(self):
        raise SkipTest
        self.video.get_meta()
        try:
            video = self.video.get_encoded(amo.ADDON_PREVIEW_SIZES[0])
            assert os.stat(video)[stat.ST_SIZE]
        finally:
            os.remove(video)
@patch('lib.video.totem.Video.library_available')
@patch('lib.video.ffmpeg.Video.library_available')
@patch.object(settings, 'VIDEO_LIBRARIES',
              ['lib.video.totem', 'lib.video.ffmpeg'])
def test_choose(ffmpeg_, totem_):
    """get_library() should return the first *available* library, in the
    order listed in settings.VIDEO_LIBRARIES."""
    # Mock args bind bottom-up: the settings patch injects no arg (explicit
    # value), then the ffmpeg patch -> ffmpeg_, then the totem patch -> totem_.
    ffmpeg_.return_value = True
    totem_.return_value = True
    eq_(get_library(), totem.Video)
    # totem unavailable -> fall back to ffmpeg.
    totem_.return_value = False
    eq_(get_library(), ffmpeg.Video)
    # nothing available -> None.
    ffmpeg_.return_value = False
    eq_(get_library(), None)
class TestTask(amo.tests.TestCase):
    """Tests for the resize_video celery task."""
    # TODO(andym): make these more sparkly and cope with totem and not blow
    # up all the time.
    def setUp(self):
        # Enable the video-encode waffle switch for all tests.
        waffle.models.Switch.objects.create(name='video-encode', active=True)
        # A stand-in for the Preview model object the task mutates.
        self.mock = Mock()
        self.mock.thumbnail_path = tempfile.mkstemp()[1]
        self.mock.image_path = tempfile.mkstemp()[1]
        self.mock.pk = 1
    @patch('lib.video.tasks._resize_video')
    def test_resize_error(self, _resize_video):
        # An exception in the worker should delete the preview, log a
        # VIDEO_ERROR activity for the user, and re-raise.
        user = UserProfile.objects.create(email='a@a.com')
        _resize_video.side_effect = ValueError
        with self.assertRaises(ValueError):
            resize_video(files['good'], self.mock, user=user)
        assert self.mock.delete.called
        assert UserLog.objects.filter(user=user,
                    activity_log__action=amo.LOG.VIDEO_ERROR.id).exists()
    @patch('lib.video.tasks._resize_video')
    def test_resize_failed(self, _resize_video):
        # A None result (no exception) should still delete the preview.
        user = UserProfile.objects.create(email='a@a.com')
        _resize_video.return_value = None
        resize_video(files['good'], self.mock, user=user)
        assert self.mock.delete.called
    @patch('lib.video.ffmpeg.Video.get_encoded')
    def test_resize_video_no_encode(self, get_encoded):
        raise SkipTest
        # NOTE(review): objects.update(name=..., active=False) updates *both*
        # fields on every Switch row; .filter(name=...).update(active=False)
        # looks intended -- confirm before un-skipping.
        waffle.models.Switch.objects.update(name='video-encode', active=False)
        resize_video(files['good'], self.mock)
        assert not get_encoded.called
        assert isinstance(self.mock.sizes, dict)
        assert self.mock.save.called
    def test_resize_video(self):
        raise SkipTest
        resize_video(files['good'], self.mock)
        assert isinstance(self.mock.sizes, dict)
        assert self.mock.save.called
    def test_resize_image(self):
        raise SkipTest
        # Non-video input: no sizes computed, nothing saved.
        resize_video(files['bad'], self.mock)
        assert not isinstance(self.mock.sizes, dict)
        assert not self.mock.save.called
| |
# -*- coding: utf-8 -*-
"""
Module for validating HAR data. Uses official HAR JSON Schema.
"""
from __future__ import absolute_import
def validate(instance):
    """ Validate HAR data; raise an exception if it is invalid """
    har_validator = get_validator()
    # Sanity-check our own schema before using it on the instance.
    har_validator.check_schema(SCHEMA)
    har_validator.validate(instance)
def get_validator():
    """ Return jsonschema validator to validate SCHEMA """
    import jsonschema
    # Only the date-time format needs active checking for HAR timestamps.
    checker = jsonschema.FormatChecker(formats=['date-time'])
    return jsonschema.Draft4Validator(SCHEMA, format_checker=checker)
# JSON Schema (Draft 4) describing the HAR (HTTP Archive) format.
# NOTE(review): shared definitions live under a custom "defs" container
# rather than the conventional "definitions"; the "#/defs/..." $ref paths
# resolve either way, so this is cosmetic.
SCHEMA = {
    "type": "object",
    "properties": {
        "log": {
            "type": "object",
            "properties": {
                "version": {"type": "string"},
                "creator": {"$ref": "#/defs/creatorType"},
                "browser": {"$ref": "#/defs/browserType"},
                "pages": {"type": "array", "items": {"$ref": "#/defs/pageType"}},
                "entries": {"type": "array", "items": {"$ref": "#/defs/entryType"}},
                "comment": {"type": "string"}
            },
            "required": ["version", "creator", "browser", "entries"],
        }
    },
    "required": ["log"],
    "defs": {
        "creatorType": {
            "id": "creatorType",
            "description": "Name and version info of the log creator app.",
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "version": {"type": "string"},
                "comment": {"type": "string"},
            },
            "required": ["name", "version"]
        },
        "browserType": {
            "id": "browserType",
            "description": "Name and version info of used browser.",
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "version": {"type": "string"},
                "comment": {"type": "string"},
            },
            "required": ["name", "version"]
        },
        "pageType": {
            "description": "Exported web page",
            "type": "object",
            "properties": {
                "startedDateTime": {"type": "string", "format": "date-time"},
                # NOTE(review): "unique" is not a Draft 4 keyword; validators
                # ignore it, so it is documentation only.
                "id": {"type": "string", "unique": True},
                "title": {"type": "string"},
                "pageTimings": {"$ref": "#/defs/pageTimingsType"},
                "comment": {"type": "string"},
            },
            "required": ["startedDateTime", "id", "title", "pageTimings"]
        },
        "entryType": {
            "type": "object",
            "properties": {
                "pageref": {"type": "string"},
                "startedDateTime": {"type": "string", "format": "date-time"},
                "time": {"type": "number", "minimum": 0},
                "request" : {"$ref": "#/defs/requestType"},
                "response" : {"$ref": "#/defs/responseType"},
                "cache" : {"$ref": "#/defs/cacheType"},
                "timings" : {"$ref": "#/defs/timingsType"},
                "serverIPAddress" : {"type": "string"},
                "connection" : {"type": "string"},
                "comment": {"type": "string"},
            },
            "required": ["startedDateTime", "time", "request", "response", "cache", "timings"]
        },
        # -1 is the HAR convention for "timing not available".
        "pageTimingsType": {
            "type": "object",
            "properties": {
                "onContentLoad": {"type": "number", "minimum": -1},
                "onLoad": {"type": "number", "minimum": -1},
                "comment": {"type": "string"},
            },
        },
        "requestType": {
            "type": "object",
            "properties": {
                "method": {"type": "string"},
                "url": {"type": "string"},
                "httpVersion": {"type" : "string"},
                "cookies" : {"type": "array", "items": {"$ref": "#/defs/cookieType"}},
                "headers" : {"type": "array", "items": {"$ref": "#/defs/recordType"}},
                "queryString" : {"type": "array", "items": {"$ref": "#/defs/recordType"}},
                "postData" : {"$ref": "#/defs/postDataType"},
                "headersSize" : {"type": "integer"},
                "bodySize" : {"type": "integer"},
                "comment": {"type": "string"},
            },
            "required": ["method", "url", "httpVersion", "cookies", "headers", "queryString", "headersSize", "bodySize"]
        },
        "responseType": {
            "type": "object",
            "properties": {
                "status": {"type": "integer"},
                "statusText": {"type": "string"},
                "httpVersion": {"type": "string"},
                "cookies" : {"type": "array", "items": {"$ref": "#/defs/cookieType"}},
                "headers" : {"type": "array", "items": {"$ref": "#/defs/recordType"}},
                "content" : {"$ref": "#/defs/contentType"},
                "redirectURL" : {"type": "string"},
                "headersSize" : {"type": "integer"},
                "bodySize" : {"type": "integer"},
                "comment": {"type": "string"}
            },
            "required": ["status", "statusText", "httpVersion",
                         "cookies", "headers", "content", "redirectURL",
                         "headersSize", "bodySize"]
        },
        "cacheType": {
            "type": "object",
            "properties": {
                "beforeRequest": {"$ref": "#/defs/cacheEntryType"},
                "afterRequest": {"$ref": "#/defs/cacheEntryType"},
                "comment": {"type": "string"}
            }
        },
        "timingsType": {
            "type": "object",
            "properties": {
                "dns": {"type": "number", "minimum": -1},
                "connect": {"type": "number", "minimum": -1},
                "blocked": {"type": "number", "minimum": -1},
                "send": {"type": "number", "minimum": -1},
                "wait": {"type": "number", "minimum": -1},
                "receive": {"type": "number", "minimum": -1},
                "ssl": {"type": "number", "minimum": -1},
                "comment": {"type": "string"}
            },
            "required": ["send", "wait", "receive"]
        },
        "cookieType": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "value": {"type": "string"},
                "path": {"type": "string"},
                "domain" : {"type": "string"},
                "expires" : {"type": "string"},
                "httpOnly" : {"type": "boolean"},
                "secure" : {"type": "boolean"},
                "comment": {"type": "string"},
            },
            "required": ["name", "value"]
        },
        # Generic name/value pair (headers, query-string params).
        "recordType": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "value": {"type": "string"},
                "comment": {"type": "string"},
            },
            "required": ["name", "value"],
        },
        "postDataType": {
            "type": "object",
            "properties": {
                "mimeType": {"type": "string"},
                "text": {"type": "string"},
                # NOTE(review): "properties"/"required" directly under an
                # array type are ignored by Draft 4 -- an "items" schema
                # looks intended here; confirm before changing behavior.
                "params": {
                    "type": "array",
                    "properties": {
                        "name": {"type": "string"},
                        "value": {"type": "string"},
                        "fileName": {"type": "string"},
                        "contentType": {"type": "string"},
                        "comment": {"type": "string"},
                    },
                    "required": ["name"]
                },
                "comment": {"type": "string"}
            },
            "required": ["mimeType"]
        },
        "contentType": {
            "type": "object",
            "properties": {
                "size": {"type": "integer"},
                "compression": {"type": "integer"},
                "mimeType": {"type": "string"},
                "text": {"type": "string"},
                "encoding": {"type": "string"},
                "comment": {"type": "string"}
            },
            "required": ["size", "mimeType"]
        },
        "cacheEntryType": {
            "type": "object",
            "properties": {
                "expires": {"type": "string"},
                "lastAccess": {"type": "string"},
                "eTag": {"type": "string"},
                "hitCount": {"type": "integer"},
                "comment": {"type": "string"}
            },
            "required": ["lastAccess", "eTag", "hitCount"]
        }
    },
}
| |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from errno import ENOENT
from glob import glob
from itertools import chain
from logging import getLogger
import os
from os.path import abspath, basename, dirname, expanduser, expandvars, isdir, join
import re
import sys
from textwrap import dedent
# Since we have to have configuration context here, anything imported by
# conda.base.context is fair game, but nothing more.
from . import CONDA_PACKAGE_ROOT, CondaError
from ._vendor.toolz import concatv, drop
from ._vendor.auxlib.compat import Utf8NamedTemporaryFile
from .base.context import ROOT_ENV_NAME, context, locate_prefix_by_name
from .common.compat import FILESYSTEM_ENCODING, PY2, iteritems, on_win, string_types, text_type
from .common.path import paths_equal
# Module-level logger, named after this module.
log = getLogger(__name__)
class _Activator(object):
# Activate and deactivate have three tasks
# 1. Set and unset environment variables
# 2. Execute/source activate.d/deactivate.d scripts
# 3. Update the command prompt
#
# Shells should also use 'reactivate' following conda's install, update, and
# remove/uninstall commands.
#
# All core logic is in build_activate() or build_deactivate(), and is independent of
# shell type. Each returns a map containing the keys:
# export_vars
# unset_var
# activate_scripts
# deactivate_scripts
#
# The value of the CONDA_PROMPT_MODIFIER environment variable holds conda's contribution
# to the command prompt.
#
# To implement support for a new shell, ideally one would only need to add shell-specific
# information to the __init__ method of this class.
# The following instance variables must be defined by each implementation.
pathsep_join = None        # joins PATH entries for the target shell
sep = None                 # path component separator for the target shell
path_conversion = None     # converts paths into the shell's native form
script_extension = None    # extension of activate.d/deactivate.d scripts
tempfile_extension = None  # None means write instructions to stdout rather than a temp file
command_join = None        # string used to join emitted shell commands
unset_var_tmpl = None      # %-template for unsetting an env var (one arg)
export_var_tmpl = None     # %-template for exporting an env var (name, value)
set_var_tmpl = None        # %-template for setting a non-exported var (name, value)
run_script_tmpl = None     # %-template for sourcing/running a script (one arg)
hook_source_path = None    # path to the shell-specific hook source file
def __init__(self, arguments=None):
    # Raw CLI arguments; parsed later by _parse_and_set_args().
    self._raw_arguments = arguments
    if PY2:
        # NOTE(review): ensure_fs_path_encoding is not among the names
        # imported at the top of this chunk -- confirm it is imported
        # elsewhere in the file, otherwise this branch raises NameError.
        self.environ = {ensure_fs_path_encoding(k): ensure_fs_path_encoding(v)
                        for k, v in iteritems(os.environ)}
    else:
        # Snapshot the environment so later queries are stable.
        self.environ = os.environ.copy()
# Once Python2 dies odargs can become kwargs again since dicts are ordered since 3.6.
def get_export_unset_vars(self, odargs):
    """
    :param odargs: OrderedDict of environment variables to export. The
                   `conda_exe_vars` meta variables are also exported by
                   default. If you do not want this to happen then pass:
                       conda_exe_vars=None
                   .. if you pass and set any other variable to None, then it
                   emits it to the dict with a value of None.
    :return: An OrderedDict of env vars to export ordered the same way as
             odargs, and a list of env vars to unset.
    """
    kwargs = odargs
    # An explicit conda_exe_vars=None requests that *all* conda-exe vars
    # be unset instead of exported.  (Replaces the previous convoluted
    # `False if ... else True` expression.)
    unset_conda_exe_vars = ('conda_exe_vars' in kwargs
                            and kwargs['conda_exe_vars'] is None)
    conda_exe_unset_vars = []
    unset_vars = []
    # OrderedDict comes from the module-level import; the previous
    # function-local `from collections import OrderedDict` (twice) was
    # redundant shadowing.
    conda_exe_vars_export = OrderedDict()
    for k, v in context.conda_exe_vars_dict.items():
        if v is None or unset_conda_exe_vars:
            conda_exe_unset_vars.append(k)
        else:
            # Path-like values need shell-specific conversion.
            conda_exe_vars_export[k] = self.path_conversion(v) if v else v
    export_vars = OrderedDict()
    for k, v in kwargs.items():
        if k == 'conda_exe_vars':
            continue
        elif v is None:
            # None means "unset this variable".
            unset_vars.append(k.upper())
        else:
            export_vars[k.upper()] = v
    # Just to make writing tests a bit more ergonomic we add these to the end.
    unset_vars += conda_exe_unset_vars
    export_vars = OrderedDict(chain(export_vars.items(), conda_exe_vars_export.items()))
    return export_vars, unset_vars
# Used in tests only.
def add_export_unset_vars(self, export_vars, unset_vars, **kwargs):
    """Merge kwargs-derived export/unset vars into the given collections.

    `unset_vars` is extended in place; `export_vars` is replaced by a
    merged copy. Either may be None to skip merging.
    """
    extra_exports, extra_unsets = self.get_export_unset_vars(odargs=OrderedDict(kwargs))
    if export_vars is not None:
        merged = OrderedDict(export_vars)
        merged.update(extra_exports)
        export_vars = merged
    if unset_vars is not None:
        unset_vars += extra_unsets
    return export_vars, unset_vars
# Used in tests only.
def get_scripts_export_unset_vars(self, **kwargs):
    """Render export and unset vars as joined shell-script fragments."""
    export_vars, unset_vars = self.get_export_unset_vars(odargs=OrderedDict(kwargs))
    export_script = ''
    unset_script = ''
    if export_vars:
        export_script = self.command_join.join(
            self.export_var_tmpl % (name, value)
            for name, value in export_vars.items())
    if unset_vars:
        unset_script = self.command_join.join(
            self.unset_var_tmpl % (name) for name in unset_vars)
    return export_script, unset_script
def _finalize(self, commands, ext):
    """Join commands for stdout (ext is None) or write them to a temp script
    file whose name is returned (truthy ext); otherwise unsupported."""
    commands = concatv(commands, ('',))  # add terminating newline
    if ext is None:
        # No tempfile requested: return the joined commands for stdout.
        return self.command_join.join(commands)
    if not ext:
        raise NotImplementedError()
    with Utf8NamedTemporaryFile('w+', suffix=ext, delete=False) as script:
        # the default mode is 'w+b', and universal new lines don't work in that mode
        # command_join should account for that
        script.write(self.command_join.join(commands))
        return script.name
def activate(self):
    """Build and finalize the activation commands for the requested env."""
    builder = self.build_stack if self.stack else self.build_activate
    build_result = builder(self.env_name_or_prefix)
    return self._finalize(self._yield_commands(build_result), self.tempfile_extension)
def deactivate(self):
    """Build and finalize the deactivation commands."""
    commands = self._yield_commands(self.build_deactivate())
    return self._finalize(commands, self.tempfile_extension)
def reactivate(self):
    """Build and finalize the reactivation (refresh-in-place) commands."""
    commands = self._yield_commands(self.build_reactivate())
    return self._finalize(commands, self.tempfile_extension)
def hook(self, auto_activate_base=None):
    """Return the shell hook script, optionally auto-activating base.

    An explicit `auto_activate_base` argument wins; otherwise the context
    setting decides.
    """
    parts = [self._hook_preamble()]
    with open(self.hook_source_path) as hook_source:
        parts.append(hook_source.read())
    if auto_activate_base is None and context.auto_activate_base or auto_activate_base:
        parts.append("conda activate base\n")
    postamble = self._hook_postamble()
    if postamble is not None:
        parts.append(postamble)
    return "\n".join(parts)
def execute(self):
    """Parse the raw arguments, then dispatch to the requested command method."""
    # return value meant to be written to stdout
    self._parse_and_set_args(self._raw_arguments)
    command_method = getattr(self, self.command)
    return command_method()
def commands(self):
    """
    Returns a list of possible subcommands that are valid
    immediately following `conda` at the command line.
    This method is generally only used by tab-completion.
    """
    # Import locally to reduce impact on initialization time.
    from .cli.find_commands import find_commands
    from .cli.conda_argparse import generate_parser, find_builtin_commands
    # return value meant to be written to stdout
    # Hidden commands to provide metadata to shells.
    all_names = find_builtin_commands(generate_parser()) + tuple(find_commands(True))
    return "\n".join(sorted(all_names))
def _hook_preamble(self):
# must be implemented in subclass
raise NotImplementedError()
def _hook_postamble(self):
return None
def _parse_and_set_args(self, arguments):
    """Parse the raw activate/deactivate/reactivate/hook/commands arguments,
    setting self.command (and, for activate, self.stack and
    self.env_name_or_prefix; context.dev for dev-mode commands).

    Raises ArgumentError for missing/unknown commands and the Help
    exceptions when a help flag is present.
    """
    # the first index of arguments MUST be either activate, deactivate, or reactivate
    if arguments is None:
        from .exceptions import ArgumentError
        raise ArgumentError("'activate', 'deactivate', or 'reactivate' command must be given")
    command = arguments[0]
    arguments = tuple(drop(1, arguments))
    help_flags = ('-h', '--help', '/?')
    non_help_args = tuple(arg for arg in arguments if arg not in help_flags)
    help_requested = len(arguments) != len(non_help_args)
    # NOTE(review): this also drops empty args and any arg equal to the
    # command name itself (e.g. an env literally named "activate") --
    # confirm that is intended.
    remainder_args = list(arg for arg in non_help_args if arg and arg != command)
    if not command:
        from .exceptions import ArgumentError
        raise ArgumentError("'activate', 'deactivate', 'hook', "
                            "'commands', or 'reactivate' "
                            "command must be given")
    elif help_requested:
        from .exceptions import ActivateHelp, DeactivateHelp, GenericHelp
        # Help "exceptions" double as the help-text carriers.
        help_classes = {
            'activate': ActivateHelp(),
            'deactivate': DeactivateHelp(),
            'hook': GenericHelp('hook'),
            'commands': GenericHelp('commands'),
            'reactivate': GenericHelp('reactivate'),
        }
        raise help_classes[command]
    elif command not in ('activate', 'deactivate', 'reactivate', 'hook', 'commands'):
        from .exceptions import ArgumentError
        raise ArgumentError("invalid command '%s'" % command)
    # --dev is accepted by activate/deactivate/reactivate/hook.
    if command.endswith('activate') or command == 'hook':
        try:
            dev_idx = remainder_args.index('--dev')
        except ValueError:
            context.dev = False
        else:
            del remainder_args[dev_idx]
            context.dev = True
    if command == 'activate':
        # --stack activates on top of the current env instead of replacing it.
        try:
            stack_idx = remainder_args.index('--stack')
        except ValueError:
            self.stack = False
        else:
            del remainder_args[stack_idx]
            self.stack = True
        if len(remainder_args) > 1:
            from .exceptions import ArgumentError
            raise ArgumentError(command + ' does not accept more than one argument:\n'
                                + str(remainder_args) + '\n')
        self.env_name_or_prefix = remainder_args and remainder_args[0] or 'base'
    else:
        # All other commands take no positional arguments.
        if remainder_args:
            from .exceptions import ArgumentError
            raise ArgumentError('%s does not accept arguments\nremainder_args: %s\n'
                                % (command, remainder_args))
    self.command = command
def _yield_commands(self, cmds_dict):
    """Yield shell command strings in the required order: PATH exports,
    deactivate scripts, unsets, sets, exports, activate scripts."""
    path_exports = cmds_dict.get('export_path', {})
    for name, value in sorted(iteritems(path_exports)):
        yield self.export_var_tmpl % (name, value)
    for script in cmds_dict.get('deactivate_scripts', ()):
        yield self.run_script_tmpl % script
    for name in cmds_dict.get('unset_vars', ()):
        yield self.unset_var_tmpl % name
    for name, value in iteritems(cmds_dict.get('set_vars', {})):
        yield self.set_var_tmpl % (name, value)
    for name, value in iteritems(cmds_dict.get('export_vars', {})):
        yield self.export_var_tmpl % (name, value)
    for script in cmds_dict.get('activate_scripts', ()):
        yield self.run_script_tmpl % script
def build_activate(self, env_name_or_prefix):
    """Build commands that activate the given environment (non-stacked)."""
    return self._build_activate_stack(env_name_or_prefix, False)
def build_stack(self, env_name_or_prefix):
    """Build commands that activate the given environment stacked on top of
    the current one."""
    return self._build_activate_stack(env_name_or_prefix, True)
def _build_activate_stack(self, env_name_or_prefix, stack):
    """Build the command map (unset/set/export vars plus scripts) for
    activating `env_name_or_prefix`, either replacing the current env or
    stacking on top of it when `stack` is True."""
    # Resolve the argument to a prefix: a path (contains a separator),
    # the root env name, or a named environment.
    if re.search(r'\\|/', env_name_or_prefix):
        prefix = expand(env_name_or_prefix)
        if not isdir(join(prefix, 'conda-meta')):
            from .exceptions import EnvironmentLocationNotFound
            raise EnvironmentLocationNotFound(prefix)
    elif env_name_or_prefix in (ROOT_ENV_NAME, 'root'):
        prefix = context.root_prefix
    else:
        prefix = locate_prefix_by_name(env_name_or_prefix)
    # query environment
    old_conda_shlvl = int(self.environ.get('CONDA_SHLVL', '').strip() or 0)
    new_conda_shlvl = old_conda_shlvl + 1
    old_conda_prefix = self.environ.get('CONDA_PREFIX')
    # Re-activating the already-active env is just a reactivate.
    if old_conda_prefix == prefix and old_conda_shlvl > 0:
        return self.build_reactivate()
    activate_scripts = self._get_activate_scripts(prefix)
    conda_default_env = self._default_env(prefix)
    conda_prompt_modifier = self._prompt_modifier(prefix, conda_default_env)
    unset_vars = []
    if old_conda_shlvl == 0:
        # First activation in this shell: nothing to deactivate.
        new_path = self.pathsep_join(self._add_prefix_to_path(prefix))
        export_vars, unset_vars = self.get_export_unset_vars(
            odargs=OrderedDict((
                ('path', new_path),
                ('conda_prefix', prefix),
                ('conda_shlvl', new_conda_shlvl),
                ('conda_default_env', conda_default_env),
                ('conda_prompt_modifier', conda_prompt_modifier))))
        deactivate_scripts = ()
    else:
        if self.environ.get('CONDA_PREFIX_%s' % (old_conda_shlvl - 1)) == prefix:
            # in this case, user is attempting to activate the previous environment,
            # i.e. step back down
            return self.build_deactivate()
        if stack:
            # Stacked: prepend the new prefix, keep the old one on PATH.
            new_path = self.pathsep_join(self._add_prefix_to_path(prefix))
            deactivate_scripts = ()
            export_vars, unset_vars = self.get_export_unset_vars(odargs=OrderedDict((
                ('path', new_path),
                ('conda_prefix', prefix),
                ('conda_shlvl', new_conda_shlvl),
                ('conda_default_env', conda_default_env),
                ('conda_prompt_modifier', conda_prompt_modifier))))
            # Remember the previous prefix and that this level is stacked.
            export_vars['CONDA_PREFIX_%d' % old_conda_shlvl] = old_conda_prefix
            export_vars['CONDA_STACKED_%d' % new_conda_shlvl] = 'true'
        else:
            # Replace: swap the old prefix's PATH entries for the new one's
            # and run the old env's deactivate scripts.
            new_path = self.pathsep_join(
                self._replace_prefix_in_path(old_conda_prefix, prefix))
            deactivate_scripts = self._get_deactivate_scripts(old_conda_prefix)
            export_vars, unset_vars = self.get_export_unset_vars(odargs=OrderedDict((
                ('path', new_path),
                ('conda_prefix', prefix),
                ('conda_shlvl', new_conda_shlvl),
                ('conda_default_env', conda_default_env),
                ('conda_prompt_modifier', conda_prompt_modifier))))
            export_vars['CONDA_PREFIX_%d' % old_conda_shlvl] = old_conda_prefix
    set_vars = {}
    if context.changeps1:
        self._update_prompt(set_vars, conda_prompt_modifier)
    # Allow shell subclasses to add their own export vars.
    self._build_activate_shell_custom(export_vars)
    return {
        'unset_vars': unset_vars,
        'set_vars': set_vars,
        'export_vars': export_vars,
        'deactivate_scripts': deactivate_scripts,
        'activate_scripts': activate_scripts,
    }
def build_deactivate(self):
    """Build the command map that deactivates the current environment,
    restoring the next one down the stack when the shell level is > 1."""
    self._deactivate = True
    # query environment
    old_conda_prefix = self.environ.get('CONDA_PREFIX')
    old_conda_shlvl = int(self.environ.get('CONDA_SHLVL', '').strip() or 0)
    if not old_conda_prefix or old_conda_shlvl < 1:
        # no active environment, so cannot deactivate; do nothing
        return {
            'unset_vars': (),
            'set_vars': OrderedDict(),
            'export_vars': OrderedDict(),
            'deactivate_scripts': (),
            'activate_scripts': (),
        }
    deactivate_scripts = self._get_deactivate_scripts(old_conda_prefix)
    new_conda_shlvl = old_conda_shlvl - 1
    set_vars = {}
    if old_conda_shlvl == 1:
        # Dropping to level 0: strip the prefix from PATH, unset conda vars.
        new_path = self.pathsep_join(self._remove_prefix_from_path(old_conda_prefix))
        # You might think that you can remove the CONDA_EXE vars by passing conda_exe_vars=None
        # here so that "deactivate means deactivate" but you cannot since the conda shell
        # scripts still refer to them and they only set them once at the top. We could change
        # that though, the conda() shell function could set them instead of doing it at the
        # top. This would be *much* cleaner. I personally cannot abide that I have
        # deactivated conda and anything at all in my env still references it (apart from the
        # shell script, we need something I suppose!)
        export_vars, unset_vars = self.get_export_unset_vars(odargs=OrderedDict((
            ('conda_prefix', None),
            ('conda_shlvl', new_conda_shlvl),
            ('conda_default_env', None),
            ('conda_prompt_modifier', None))))
        conda_prompt_modifier = ''
        activate_scripts = ()
        export_path = {'PATH': new_path, }
    else:
        assert old_conda_shlvl > 1
        # Restore the environment one level down the stack.
        new_prefix = self.environ.get('CONDA_PREFIX_%d' % new_conda_shlvl)
        conda_default_env = self._default_env(new_prefix)
        conda_prompt_modifier = self._prompt_modifier(new_prefix, conda_default_env)
        old_prefix_stacked = 'CONDA_STACKED_%d' % old_conda_shlvl in self.environ
        new_path = ''
        unset_vars = ['CONDA_PREFIX_%d' % new_conda_shlvl]
        if old_prefix_stacked:
            # Stacked: the old prefix was merely prepended, so just remove it.
            new_path = self.pathsep_join(self._remove_prefix_from_path(old_conda_prefix))
            unset_vars.append('CONDA_STACKED_%d' % old_conda_shlvl)
        else:
            # Not stacked: swap the old prefix for the restored one in PATH.
            new_path = self.pathsep_join(
                self._replace_prefix_in_path(old_conda_prefix, new_prefix)
            )
        export_vars, unset_vars2 = self.get_export_unset_vars(odargs=OrderedDict((
            ('conda_prefix', new_prefix),
            ('conda_shlvl', new_conda_shlvl),
            ('conda_default_env', conda_default_env),
            ('conda_prompt_modifier', conda_prompt_modifier))))
        unset_vars += unset_vars2
        export_path = {'PATH': new_path, }
        activate_scripts = self._get_activate_scripts(new_prefix)
    if context.changeps1:
        self._update_prompt(set_vars, conda_prompt_modifier)
    return {
        'unset_vars': unset_vars,
        'set_vars': set_vars,
        'export_vars': export_vars,
        'export_path': export_path,
        'deactivate_scripts': deactivate_scripts,
        'activate_scripts': activate_scripts,
    }
def build_reactivate(self):
    """Build the command map that refreshes the currently active environment
    in place (same prefix, same shell level); no-op map when none is active."""
    self._reactivate = True
    conda_prefix = self.environ.get('CONDA_PREFIX')
    conda_shlvl = int(self.environ.get('CONDA_SHLVL', '').strip() or 0)
    if not conda_prefix or conda_shlvl < 1:
        # no active environment, so cannot reactivate; do nothing
        return {
            'unset_vars': (),
            'set_vars': OrderedDict(),
            'export_vars': OrderedDict(),
            'deactivate_scripts': (),
            'activate_scripts': (),
        }
    conda_default_env = self.environ.get('CONDA_DEFAULT_ENV', self._default_env(conda_prefix))
    # Replacing the prefix with itself refreshes its PATH entries.
    new_path = self.pathsep_join(self._replace_prefix_in_path(conda_prefix, conda_prefix))
    set_vars = {}
    conda_prompt_modifier = self._prompt_modifier(conda_prefix, conda_default_env)
    if context.changeps1:
        self._update_prompt(set_vars, conda_prompt_modifier)
    # environment variables are set only to aid transition from conda 4.3 to conda 4.4
    return {
        'unset_vars': (),
        'set_vars': set_vars,
        'export_vars': OrderedDict([('PATH', new_path),
                                    ('CONDA_SHLVL', conda_shlvl),
                                    # Reuse the value computed above instead of
                                    # calling _prompt_modifier a second time.
                                    ('CONDA_PROMPT_MODIFIER', conda_prompt_modifier)]),
        'deactivate_scripts': self._get_deactivate_scripts(conda_prefix),
        'activate_scripts': self._get_activate_scripts(conda_prefix),
    }
def _get_starting_path_list(self):
    """Return the current PATH entries as a list, with anything conda's own
    shell bootstrap prepended (condabin entries, Library\\bin) stripped off."""
    # For isolation, running the conda test suite *without* env. var. inheritance
    # every so often is a good idea. We should probably make this a pytest fixture
    # along with one that tests both hardlink-only and copy-only, but before that
    # conda's testsuite needs to be a lot faster!
    clean_paths = {'darwin': '/usr/bin:/bin:/usr/sbin:/sbin',
                   # You may think 'let us do something more clever here and interpolate
                   # `%windir%`' but the point here is the the whole env. is cleaned out
                   'win32': 'C:\\Windows\\system32;'
                            'C:\\Windows;'
                            'C:\\Windows\\System32\\Wbem;'
                            'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\'
                   }
    path = self.environ.get('PATH',
                            clean_paths[sys.platform] if sys.platform in clean_paths else
                            '/usr/bin')
    path_split = path.split(os.pathsep)
    # We used to prepend sys.prefix\Library\bin to PATH on startup but not anymore.
    # Instead, in conda 4.6 we add the full suite of entries. This is performed in
    # condabin\conda.bat and condabin\_conda_activate.bat. However, we
    # need to ignore the stuff we add there, and only consider actual PATH entries.
    prefix_dirs = tuple(self._get_path_dirs(sys.prefix))
    start_index = 0
    while (start_index < len(prefix_dirs) and
           start_index < len(path_split) and
           paths_equal(path_split[start_index], prefix_dirs[start_index])):
        start_index += 1
    path_split = path_split[start_index:]
    library_bin_dir = self.path_conversion(
        self.sep.join((sys.prefix, 'Library', 'bin')))
    # Guard against an empty remainder: the previous unconditional
    # path_split[0] raised IndexError when PATH consisted solely of the
    # prefix dirs stripped above.
    if path_split and paths_equal(path_split[0], library_bin_dir):
        path_split = path_split[1:]
    return path_split
def _get_path_dirs(self, prefix, extra_library_bin=False):
if on_win: # pragma: unix no cover
yield prefix.rstrip("\\")
yield self.sep.join((prefix, 'Library', 'mingw-w64', 'bin'))
yield self.sep.join((prefix, 'Library', 'usr', 'bin'))
yield self.sep.join((prefix, 'Library', 'bin'))
yield self.sep.join((prefix, 'Scripts'))
yield self.sep.join((prefix, 'bin'))
else:
yield self.sep.join((prefix, 'bin'))
def _add_prefix_to_path(self, prefix, starting_path_dirs=None):
prefix = self.path_conversion(prefix)
if starting_path_dirs is None:
path_list = list(self.path_conversion(self._get_starting_path_list()))
else:
path_list = list(self.path_conversion(starting_path_dirs))
# If this is the first time we're activating an environment, we need to ensure that
# the condabin directory is included in the path list.
# Under normal conditions, if the shell hook is working correctly, this should
# never trigger.
old_conda_shlvl = int(self.environ.get('CONDA_SHLVL', '').strip() or 0)
if not old_conda_shlvl and not any(p.endswith("condabin") for p in path_list):
condabin_dir = self.path_conversion(join(context.conda_prefix, "condabin"))
path_list.insert(0, condabin_dir)
path_list[0:0] = list(self.path_conversion(self._get_path_dirs(prefix)))
return tuple(path_list)
def _remove_prefix_from_path(self, prefix, starting_path_dirs=None):
return self._replace_prefix_in_path(prefix, None, starting_path_dirs)
def _replace_prefix_in_path(self, old_prefix, new_prefix, starting_path_dirs=None):
old_prefix = self.path_conversion(old_prefix)
new_prefix = self.path_conversion(new_prefix)
if starting_path_dirs is None:
path_list = list(self.path_conversion(self._get_starting_path_list()))
else:
path_list = list(self.path_conversion(starting_path_dirs))
def index_of_path(paths, test_path):
for q, path in enumerate(paths):
if paths_equal(path, test_path):
return q
return None
if old_prefix is not None:
prefix_dirs = tuple(self._get_path_dirs(old_prefix))
first_idx = index_of_path(path_list, prefix_dirs[0])
if first_idx is None:
first_idx = 0
else:
prefix_dirs_idx = len(prefix_dirs) - 1
last_idx = None
while last_idx is None and prefix_dirs_idx > -1:
last_idx = index_of_path(path_list, prefix_dirs[prefix_dirs_idx])
if last_idx is None:
log.info("Did not find path entry {}".format(prefix_dirs[prefix_dirs_idx]))
prefix_dirs_idx = prefix_dirs_idx - 1
# this compensates for an extra Library/bin dir entry from the interpreter on
# windows. If that entry isn't being added, it should have no effect.
library_bin_dir = self.path_conversion(
self.sep.join((sys.prefix, 'Library', 'bin')))
if path_list[last_idx + 1] == library_bin_dir:
last_idx += 1
del path_list[first_idx:last_idx + 1]
else:
first_idx = 0
if new_prefix is not None:
path_list[first_idx:first_idx] = list(self._get_path_dirs(new_prefix))
return tuple(path_list)
def _build_activate_shell_custom(self, export_vars):
# A method that can be overriden by shell-specific implementations.
# The signature of this method may change in the future.
pass
def _update_prompt(self, set_vars, conda_prompt_modifier):
pass
def _default_env(self, prefix):
if paths_equal(prefix, context.root_prefix):
return 'base'
return basename(prefix) if basename(dirname(prefix)) == 'envs' else prefix
def _prompt_modifier(self, prefix, conda_default_env):
if context.changeps1:
# Get current environment and prompt stack
env_stack = []
prompt_stack = []
old_shlvl = int(self.environ.get('CONDA_SHLVL', '0').rstrip())
for i in range(1, old_shlvl+1):
if i == old_shlvl:
env_i = self._default_env(self.environ.get('CONDA_PREFIX', ''))
else:
env_i = self._default_env(
self.environ.get('CONDA_PREFIX_{}'.format(i), '').rstrip())
stacked_i = bool(self.environ.get('CONDA_STACKED_{}'.format(i), '').rstrip())
env_stack.append(env_i)
if not stacked_i:
prompt_stack = prompt_stack[0:-1]
prompt_stack.append(env_i)
# Modify prompt stack according to pending operation
deactivate = getattr(self, '_deactivate', False)
reactivate = getattr(self, '_reactivate', False)
if deactivate:
prompt_stack = prompt_stack[0:-1]
env_stack = env_stack[0:-1]
stacked = bool(self.environ.get('CONDA_STACKED_{}'.format(old_shlvl), '').rstrip())
if not stacked and env_stack:
prompt_stack.append(env_stack[-1])
elif reactivate:
pass
else:
stack = getattr(self, 'stack', False)
if not stack:
prompt_stack = prompt_stack[0:-1]
prompt_stack.append(conda_default_env)
conda_stacked_env = ','.join(prompt_stack[::-1])
return context.env_prompt.format(
default_env=conda_default_env,
stacked_env=conda_stacked_env,
prefix=prefix,
name=basename(prefix),
)
else:
return ""
def _get_activate_scripts(self, prefix):
return self.path_conversion(sorted(glob(join(
prefix, 'etc', 'conda', 'activate.d', '*' + self.script_extension
))))
def _get_deactivate_scripts(self, prefix):
return self.path_conversion(sorted(glob(join(
prefix, 'etc', 'conda', 'deactivate.d', '*' + self.script_extension
)), reverse=True))
def expand(path):
return abspath(expanduser(expandvars(path)))
def ensure_binary(value):
try:
return value.encode('utf-8')
except AttributeError: # pragma: no cover
# AttributeError: '<>' object has no attribute 'encode'
# In this case assume already binary type and do nothing
return value
def ensure_fs_path_encoding(value):
try:
return value.decode(FILESYSTEM_ENCODING)
except AttributeError:
return value
def native_path_to_unix(paths): # pragma: unix no cover
# on windows, uses cygpath to convert windows native paths to posix paths
if not on_win:
return path_identity(paths)
if paths is None:
return None
from subprocess import CalledProcessError, PIPE, Popen
from conda._vendor.auxlib.compat import shlex_split_unicode
# It is very easy to end up with a bash in one place and a cygpath in another due to e.g.
# using upstream MSYS2 bash, but with a conda env that does not have bash but does have
# cygpath. When this happens, we have two different virtual POSIX machines, rooted at
# different points in the Windows filesystem. We do our path conversions with one and
# expect the results to work with the other. It does not.
from .common.path import which
bash = which('bash')
command = os.path.join(dirname(bash), 'cygpath') if bash else 'cygpath'
command += ' --path -f -'
single_path = isinstance(paths, string_types)
joined = paths if single_path else ("%s" % os.pathsep).join(paths)
if hasattr(joined, 'encode'):
joined = joined.encode('utf-8')
try:
p = Popen(shlex_split_unicode(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
except EnvironmentError as e:
if e.errno != ENOENT:
raise
# This code path should (hopefully) never be hit be real conda installs. It's here
# as a backup for tests run under cmd.exe with cygpath not available.
def _translation(found_path): # NOQA
found = found_path.group(1).replace("\\", "/").replace(":", "").replace("//", "/")
return "/" + found.rstrip("/")
joined = ensure_fs_path_encoding(joined)
stdout = re.sub(
r'([a-zA-Z]:[\/\\\\]+(?:[^:*?\"<>|;]+[\/\\\\]*)*)',
_translation,
joined
).replace(";/", ":/").rstrip(";")
else:
stdout, stderr = p.communicate(input=joined)
rc = p.returncode
if rc != 0 or stderr:
message = "\n stdout: %s\n stderr: %s\n rc: %s\n" % (stdout, stderr, rc)
print(message, file=sys.stderr)
raise CalledProcessError(rc, command, message)
if hasattr(stdout, 'decode'):
stdout = stdout.decode('utf-8')
stdout = stdout.strip()
final = stdout and stdout.split(':') or ()
return final[0] if single_path else tuple(final)
def path_identity(paths):
if isinstance(paths, string_types):
return os.path.normpath(paths)
elif paths is None:
return None
else:
return tuple(os.path.normpath(_) for _ in paths)
class PosixActivator(_Activator):
def __init__(self, arguments=None):
self.pathsep_join = ':'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.sh'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.command_join = '\n'
self.unset_var_tmpl = 'unset %s'
self.export_var_tmpl = "export %s='%s'"
self.set_var_tmpl = "%s='%s'"
self.run_script_tmpl = '. "%s"'
self.hook_source_path = join(CONDA_PACKAGE_ROOT, 'shell', 'etc', 'profile.d', 'conda.sh')
super(PosixActivator, self).__init__(arguments)
def _update_prompt(self, set_vars, conda_prompt_modifier):
ps1 = self.environ.get('PS1', '')
if 'POWERLINE_COMMAND' in ps1:
# Defer to powerline (https://github.com/powerline/powerline) if it's in use.
return
current_prompt_modifier = self.environ.get('CONDA_PROMPT_MODIFIER')
if current_prompt_modifier:
ps1 = re.sub(re.escape(current_prompt_modifier), r'', ps1)
# Because we're using single-quotes to set shell variables, we need to handle the
# proper escaping of single quotes that are already part of the string.
# Best solution appears to be https://stackoverflow.com/a/1250279
ps1 = ps1.replace("'", "'\"'\"'")
set_vars.update({
'PS1': conda_prompt_modifier + ps1,
})
def _hook_preamble(self):
result = ''
for key, value in context.conda_exe_vars_dict.items():
if value is None:
# Using `unset_var_tmpl` would cause issues for people running
# with shell flag -u set (error on unset).
# result += join(self.unset_var_tmpl % key) + '\n'
result += join(self.export_var_tmpl % (key, '')) + '\n'
else:
if key in ('PYTHONPATH', 'CONDA_EXE'):
result += join(self.export_var_tmpl % (
key, self.path_conversion(value))) + '\n'
else:
result += join(self.export_var_tmpl % (key, value)) + '\n'
return result
class CshActivator(_Activator):
def __init__(self, arguments=None):
self.pathsep_join = ':'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.csh'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.command_join = ';\n'
self.unset_var_tmpl = 'unsetenv %s'
self.export_var_tmpl = 'setenv %s "%s"'
self.set_var_tmpl = "set %s='%s'"
self.run_script_tmpl = 'source "%s"'
self.hook_source_path = join(CONDA_PACKAGE_ROOT, 'shell', 'etc', 'profile.d', 'conda.csh')
super(CshActivator, self).__init__(arguments)
def _update_prompt(self, set_vars, conda_prompt_modifier):
prompt = self.environ.get('prompt', '')
current_prompt_modifier = self.environ.get('CONDA_PROMPT_MODIFIER')
if current_prompt_modifier:
prompt = re.sub(re.escape(current_prompt_modifier), r'', prompt)
set_vars.update({
'prompt': conda_prompt_modifier + prompt,
})
def _hook_preamble(self):
if on_win:
return ('setenv CONDA_EXE `cygpath %s`\n'
'setenv _CONDA_ROOT `cygpath %s`\n'
'setenv _CONDA_EXE `cygpath %s`\n'
'setenv CONDA_PYTHON_EXE `cygpath %s`'
% (context.conda_exe, context.conda_prefix, context.conda_exe, sys.executable))
else:
return ('setenv CONDA_EXE "%s"\n'
'setenv _CONDA_ROOT "%s"\n'
'setenv _CONDA_EXE "%s"\n'
'setenv CONDA_PYTHON_EXE "%s"'
% (context.conda_exe, context.conda_prefix, context.conda_exe,
sys.executable))
class XonshActivator(_Activator):
def __init__(self, arguments=None):
self.pathsep_join = ':'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.xsh'
self.tempfile_extension = None
self.command_join = '\n'
self.unset_var_tmpl = 'del $%s'
self.export_var_tmpl = "$%s = '%s'"
self.set_var_tmpl = "$%s = '%s'" # TODO: determine if different than export_var_tmpl
self.run_script_tmpl = 'source "%s"'
self.hook_source_path = join(CONDA_PACKAGE_ROOT, 'shell', 'conda.xsh')
super(XonshActivator, self).__init__(arguments)
def _hook_preamble(self):
return '$CONDA_EXE = "%s"' % context.conda_exe
class CmdExeActivator(_Activator):
def __init__(self, arguments=None):
self.pathsep_join = ';'.join
self.sep = '\\'
self.path_conversion = path_identity
self.script_extension = '.bat'
self.tempfile_extension = '.bat'
self.command_join = '\n'
self.unset_var_tmpl = '@SET %s='
self.export_var_tmpl = '@SET "%s=%s"'
self.set_var_tmpl = '@SET "%s=%s"' # TODO: determine if different than export_var_tmpl
self.run_script_tmpl = '@CALL "%s"'
self.hook_source_path = None
# TODO: cmd.exe doesn't get a hook function? Or do we need to do something different?
# Like, for cmd.exe only, put a special directory containing only conda.bat on PATH?
super(CmdExeActivator, self).__init__(arguments)
# def _hook_preamble(self):
# if on_win:
# return '@chcp 65001'
class FishActivator(_Activator):
def __init__(self, arguments=None):
self.pathsep_join = '" "'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.fish'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.command_join = ';\n'
self.unset_var_tmpl = 'set -e %s'
self.export_var_tmpl = 'set -gx %s "%s"'
self.set_var_tmpl = 'set -g %s "%s"'
self.run_script_tmpl = 'source "%s"'
self.hook_source_path = join(CONDA_PACKAGE_ROOT, 'shell', 'etc', 'fish', 'conf.d',
'conda.fish')
super(FishActivator, self).__init__(arguments)
def _hook_preamble(self):
if on_win:
return ('set -gx CONDA_EXE (cygpath "%s")\n'
'set _CONDA_ROOT (cygpath "%s")\n'
'set _CONDA_EXE (cygpath "%s")\n'
'set -gx CONDA_PYTHON_EXE (cygpath "%s")'
% (context.conda_exe, context.conda_prefix, context.conda_exe, sys.executable))
else:
return ('set -gx CONDA_EXE "%s"\n'
'set _CONDA_ROOT "%s"\n'
'set _CONDA_EXE "%s"\n'
'set -gx CONDA_PYTHON_EXE "%s"'
% (context.conda_exe, context.conda_prefix, context.conda_exe, sys.executable))
class PowerShellActivator(_Activator):
def __init__(self, arguments=None):
self.pathsep_join = ';'.join if on_win else ':'.join
self.sep = '\\' if on_win else '/'
self.path_conversion = path_identity
self.script_extension = '.ps1'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.command_join = '\n'
self.unset_var_tmpl = 'Remove-Item Env:/%s'
self.export_var_tmpl = '$Env:%s = "%s"'
self.set_var_tmpl = '$Env:%s = "%s"'
self.run_script_tmpl = '. "%s"'
self.hook_source_path = join(CONDA_PACKAGE_ROOT, 'shell', 'condabin', 'conda-hook.ps1')
super(PowerShellActivator, self).__init__(arguments)
def _hook_preamble(self):
if context.dev:
return dedent("""\
$Env:PYTHONPATH = "{python_path}"
$Env:CONDA_EXE = "{sys_exe}"
$Env:_CE_M = "-m"
$Env:_CE_CONDA = "conda"
$Env:_CONDA_ROOT = "{python_path}{s}conda"
$Env:_CONDA_EXE = "{context.conda_exe}"
""".format(s=os.sep,
python_path=dirname(CONDA_PACKAGE_ROOT),
sys_exe=sys.executable, context=context))
else:
return dedent("""\
$Env:CONDA_EXE = "{context.conda_exe}"
$Env:_CE_M = ""
$Env:_CE_CONDA = ""
$Env:_CONDA_ROOT = "{context.conda_prefix}"
$Env:_CONDA_EXE = "{context.conda_exe}"
""".format(context=context))
def _hook_postamble(self):
if context.changeps1:
return "Add-CondaEnvironmentToPrompt"
return None
activator_map = {
'posix': PosixActivator,
'ash': PosixActivator,
'bash': PosixActivator,
'dash': PosixActivator,
'zsh': PosixActivator,
'csh': CshActivator,
'tcsh': CshActivator,
'xonsh': XonshActivator,
'cmd.exe': CmdExeActivator,
'fish': FishActivator,
'powershell': PowerShellActivator,
}
def main(argv=None):
from .common.compat import init_std_stream_encoding
context.__init__() # On import, context does not include SEARCH_PATH. This line fixes that.
init_std_stream_encoding()
argv = argv or sys.argv
assert len(argv) >= 3
assert argv[1].startswith('shell.')
shell = argv[1].replace('shell.', '', 1)
activator_args = argv[2:]
try:
activator_cls = activator_map[shell]
except KeyError:
raise CondaError("%s is not a supported shell." % shell)
activator = activator_cls(activator_args)
try:
print(activator.execute(), end='')
return 0
except Exception as e:
if isinstance(e, CondaError):
print(text_type(e), file=sys.stderr)
return e.return_code
else:
raise
if __name__ == '__main__':
sys.exit(main())
| |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for cisco plugin
from alembic import op
import sqlalchemy as sa
from neutron.plugins.cisco.common import cisco_constants
segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
name='segment_type')
profile_type = sa.Enum('network', 'policy', name='profile_type')
def upgrade():
op.create_table(
'cisco_policy_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_network_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('segment_type', segment_type, nullable=False),
sa.Column('sub_type', sa.String(length=255), nullable=True),
sa.Column('segment_range', sa.String(length=255), nullable=True),
sa.Column('multicast_ip_index', sa.Integer(), nullable=True,
server_default='0'),
sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
sa.Column('physical_network', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_n1kv_vxlan_allocations',
sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False,
server_default=sa.sql.false()),
sa.Column('network_profile_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_profile_id'],
['cisco_network_profiles.id'],
ondelete='CASCADE',
name='cisco_n1kv_vxlan_allocations_ibfk_1'),
sa.PrimaryKeyConstraint('vxlan_id'))
op.create_table(
'cisco_n1kv_vlan_allocations',
sa.Column('physical_network', sa.String(length=64), nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), autoincrement=False,
nullable=False, server_default=sa.sql.false()),
sa.Column('network_profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id'),
sa.ForeignKeyConstraint(['network_profile_id'],
['cisco_network_profiles.id'],
ondelete='CASCADE',
name='cisco_n1kv_vlan_allocations_ibfk_1'))
op.create_table(
'cisco_credentials',
sa.Column('credential_id', sa.String(length=255), nullable=True),
sa.Column('credential_name', sa.String(length=255), nullable=False),
sa.Column('user_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('type', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('credential_name'))
op.create_table(
'cisco_qos_policies',
sa.Column('qos_id', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('qos_name', sa.String(length=255), nullable=False),
sa.Column('qos_desc', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('tenant_id', 'qos_name'))
op.create_table(
'cisco_n1kv_profile_bindings',
sa.Column('profile_type', profile_type, nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=False,
server_default=cisco_constants.TENANT_ID_NOT_SET),
sa.Column('profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))
op.create_table(
'cisco_n1kv_vmnetworks',
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('port_count', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_policy_profiles.id'], ),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'cisco_n1kv_trunk_segments',
sa.Column('trunk_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment_id', sa.String(length=36), nullable=False),
sa.Column('dot1qtag', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag'))
op.create_table(
'cisco_provider_networks',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=255), nullable=False),
sa.Column('segmentation_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_multi_segments',
sa.Column('multi_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment1_id', sa.String(length=36), nullable=False),
sa.Column('segment2_id', sa.String(length=36), nullable=False),
sa.Column('encap_profile_name', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id',
'segment2_id'))
op.create_table(
'cisco_n1kv_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=32), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.Column('multicast_ip', sa.String(length=32), nullable=True),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_network_profiles.id']),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_port_bindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']),
sa.PrimaryKeyConstraint('port_id'))
op.create_table(
'cisco_csr_identifier_map',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('ipsec_site_conn_id', sa.String(length=64),
primary_key=True),
sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
['ipsec_site_connections.id'],
ondelete='CASCADE')
)
op.create_table(
'cisco_ml2_apic_host_links',
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('ifname', sa.String(length=64), nullable=False),
sa.Column('ifmac', sa.String(length=32), nullable=True),
sa.Column('swid', sa.String(length=32), nullable=False),
sa.Column('module', sa.String(length=32), nullable=False),
sa.Column('port', sa.String(length=32), nullable=False),
sa.PrimaryKeyConstraint('host', 'ifname'))
op.create_table(
'cisco_ml2_apic_names',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('neutron_type', sa.String(length=32), nullable=False),
sa.Column('apic_name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('neutron_id', 'neutron_type'))
op.create_table(
'cisco_ml2_apic_contracts',
sa.Column('tenant_id', sa.String(length=255)),
sa.Column('router_id', sa.String(length=64), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('router_id'))
op.create_table('cisco_hosting_devices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('complementary_id', sa.String(length=36), nullable=True),
sa.Column('device_id', sa.String(length=255), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('management_port_id', sa.String(length=36), nullable=True),
sa.Column('protocol_port', sa.Integer(), nullable=True),
sa.Column('cfg_agent_id', sa.String(length=36), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('status', sa.String(length=16), nullable=True),
sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ),
sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('cisco_port_mappings',
sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
sa.Column('logical_port_id', sa.String(length=36), nullable=False),
sa.Column('port_type', sa.String(length=32), nullable=True),
sa.Column('network_type', sa.String(length=32), nullable=True),
sa.Column('hosting_port_id', sa.String(length=36), nullable=True),
sa.Column('segmentation_id', sa.Integer(), autoincrement=False,
nullable=True),
sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
)
op.create_table('cisco_router_mappings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('auto_schedule', sa.Boolean(), nullable=False),
sa.Column('hosting_device_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['hosting_device_id'],
['cisco_hosting_devices.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Mock ActionScript library
'''
__author__ = 'Ethan Kennerly'
undefined = None
true = True
false = False
null = None
def mock_actionscript_object_example():
'''In order to model the ActionScript client,
and easily port code to ActionScript, there is a mock:
MovieClip, SimpleButton and TextField.
Hacked approximation. not really the same as ActionScript null.
>>> print undefined
None
Refer to attribute or child as dictionary.
>>> root = get_example_stage()
>>> root['title_mc']['username_txt']['text']
'user'
>>> root['title_mc']['username_txt']['currentLabel']
<type 'exceptions.ReferenceError'>
Refer to attribute or child as dictionary.
>>> root = get_example_stage()
>>> root['title_mc']['username_txt']['text']
'user'
>>> root['title_mc']['username_txt']['text'] = 'joris'
>>> root['title_mc']['username_txt']['text']
'joris'
>>> root['title_mc']['username_txt']['t'] = 'joris'
Traceback (most recent call last):
...
ReferenceError
>>> root['title_mc']['username_txt']['t']
<type 'exceptions.ReferenceError'>
Refer to attribute or child as dictionary.
>>> root = get_example_stage()
>>> root['gateway_mc']['currentLabel']
'none'
>>> root['gateway_mc'].currentLabel
'none'
>>> root['gateway_mc']['label']
<type 'exceptions.ReferenceError'>
Refer to attribute or child as attribute.
>>> root = get_example_stage()
>>> root.gateway_mc.currentLabel
'none'
Careful, you define attributes that did not exist,
whereas ActionScript cannot for SimpleButton.
>>> root.gateway_mc.nonesuch = 2
Refer to root of an offspring.
>>> if not root == root.game_over_mc.start_mc.root:
... root.name, root.game_over_mc.start_mc.root.name
Dispatch event log
>>> old_log_level = logging.getLogger().level
>>> logging.getLogger().setLevel(logging.WARNING)
>>> mouseDown = MouseEvent(MouseEvent.MOUSE_DOWN)
>>> root.title_mc.start_btn.dispatchEvent(mouseDown)
trace_event: mouseDown
>>> logging.getLogger().setLevel(old_log_level)
Currently if target has no event listener, then
unlike Flash, this mock model does NOT bubble event to ancestors.
>>> button = SimpleButton()
>>> button.name = 'no_listener_btn'
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_DOWN, trace_event)
>>> root.title_mc.dispatchEvent(mouseDown)
trace_event: mouseDown
Like Flash, multiple events may be added.
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_OVER, trace_event)
>>> root.title_mc.dispatchEvent(mouseDown)
trace_event: mouseDown
>>> mouseOver = MouseEvent(MouseEvent.MOUSE_OVER)
>>> root.title_mc.dispatchEvent(mouseOver)
trace_event: mouseOver
Unlike Flash, adding the same event again overwrites that event.
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_DOWN, trace_event)
>>> root.title_mc.dispatchEvent(mouseDown)
trace_event: mouseDown
>>> def trace_event2(event):
... print 'trace_event2:', event.type
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_DOWN, trace_event2)
>>> root.title_mc.dispatchEvent(mouseDown)
trace_event2: mouseDown
>>> root.title_mc.dispatchEvent(mouseOver)
trace_event: mouseOver
Mouse out is also supported.
>>> root.title_mc.addEventListener(MouseEvent.MOUSE_OUT, trace_event)
>>> mouseOut = MouseEvent(MouseEvent.MOUSE_OUT)
>>> root.title_mc.dispatchEvent(mouseOut)
trace_event: mouseOut
>>> root.title_mc.addChild(button)
>>> old_log_level = logging.getLogger().level
>>> logging.getLogger().setLevel(logging.ERROR)
>>> root.title_mc.no_listener_btn.dispatchEvent(mouseDown)
>>> logging.getLogger().setLevel(old_log_level)
Refer to common root.
>>> root is not None
True
>>> if not root == root.root:
... root, root.root
>>> root == root.title_mc.root
True
>>> root == root.title_mc.no_listener_btn.root
True
Master mock python client dispatches setting label or text to Flash slave.
To reuse the same syntax as setting label or text directly on slave:
On setting an attribute, if root has callback, then execute:
with arguments: owner, property, value.
>>> root.title_mc.gotoAndPlay('hi')
>>> root.title_mc.currentLabel
'hi'
>>> root.title_mc.username_txt.text = 'yuji'
>>> root.title_mc.username_txt.text
'yuji'
>>> from remote_control import note
>>> root._on_set = note
>>> root.title_mc.gotoAndPlay('bye')
{'title_mc': {'currentLabel': 'bye'}}
>>> root.title_mc.currentLabel
'bye'
Setter never returns.
>>> root.title_mc.username_txt.text = 'jade'
And if value is the same, nothing happens.
>>> def note_string(owner, property, value):
... print 'note: ', note(owner, property, value)
>>> root._on_set = note_string
>>> root.title_mc.username_txt.text
'jade'
>>> root.title_mc.username_txt.text = 'jade'
But if value is different, the on_set function is called.
>>> root.title_mc.username_txt.text = 'yuji'
note: {'title_mc': {'username_txt': {'text': 'yuji'}}}
>>> root.title_mc.username_txt.text
'yuji'
Dispatch calls on_set, INSTEAD of responding to event.
>>> root.title_mc.dispatchEvent(mouseDown)
note: {'title_mc': {'dispatchEvent': 'mouseDown'}}
To see child in PyShell autoCompleteList:
If uniquely named, child becomes available as an attribute.
>>> if None is root.title_mc.name: False
>>> if None is root.title_mc.no_listener_btn: False
>>> if None is root.title_mc.__getattribute__('name'): False
>>> if None is root.title_mc.__getattribute__('no_listener_btn'): False
To program this easily, do not change the name!
>>> root.title_mc.no_listener_btn.name = 'a'
>>> hasattr(root.title_mc, 'a')
True
>>> hasattr(root.title_mc, 'no_listener_btn')
True
Upon removal, name is removed, if unique and referring to child.
>>> button = root.title_mc.removeChild(button)
>>> hasattr(root.title_mc, 'a')
False
Beware! If name changed, old attribute still lingers.
>>> hasattr(root.title_mc, 'no_listener_btn')
True
'''
import time
def getTimer():
'''milliseconds since import. emulate flash.utils.getTimer
>>> now = getTimer()
>>> if not 1 <= now: now
>>> if not type(1) == type(now): now
'''
seconds = time.clock()
milliseconds = int(seconds * 1000)
return milliseconds
start = getTimer()
Object = dict
Array = list
def object_example():
'''
>>> type({})
<type 'dict'>
>>> type({}) == Object
True
>>> type({'a': 1}) == Object
True
>>> type(['a', 1]) == Object
False
'''
class String(str):
    r'''Partial mock of ActionScript String
    file:///C:/Program%20Files%20(x86)/Common%20Files/Adobe/Help/en_US/AS3LCR/Flash_10.0/String.html
    Limited to:
    >>> var = s = String('ad\n');
    >>> s.substring(2, 3);
    '\n'
    >>> s.substring(s.length - 1, s.length);
    '\n'
    >>> s.charAt(1);
    'd'
    >>> s
    'ad\n'
    Beware of reassignment.
    >>> s = 'abc'
    >>> s.charAt(2);
    Traceback (most recent call last):
    ...
    AttributeError: 'str' object has no attribute 'charAt'
    >>> s
    'abc'
    >>> s = String('abc')
    >>> s.charAt(2);
    'c'
    >>> s
    'abc'
    '''
    def __init__(self, string):
        str.__init__(string)
        # Keep a plain-str copy plus an ActionScript-style length attribute.
        self._chars = string
        self.length = len(string)
    def charAt(self, index):
        '''The single character at *index*.'''
        return self._chars[index]
    def substring(self, startIndex = 0, endIndex = 0x7ffffff):
        '''Characters from startIndex up to, not including, endIndex.'''
        return self._chars[startIndex:endIndex]
import logging
def get_example_stage():
    '''Build the example display tree exercised by the module doctests.'''
    def named(factory, name):
        # Create a display object and give it its instance name up front,
        # so addChild can expose it as an attribute on the parent.
        child = factory()
        child.name = name
        return child
    self = named(MovieClip, 'root1')
    title_mc = named(MovieClip, 'title_mc')
    self.addChild(title_mc)
    username_txt = named(TextField, 'username_txt')
    username_txt.text = 'user'
    title_mc.addChild(username_txt)
    password_txt = named(TextField, 'password_txt')
    password_txt.text = 'pass'
    title_mc.addChild(password_txt)
    gateway_mc = named(MovieClip, 'gateway_mc')
    gateway_mc.gotoAndPlay('none')
    self.addChild(gateway_mc)
    start_btn = named(SimpleButton, 'start_btn')
    start_btn.addEventListener(MouseEvent.MOUSE_DOWN, trace_event)
    title_mc.addChild(start_btn)
    lobby_mc = named(MovieClip, 'lobby_mc')
    self.addChild(lobby_mc)
    lobby_table_mc = named(MovieClip, 'level_1_mc')
    lobby_mc.addChild(lobby_table_mc)
    enter_btn = named(SimpleButton, 'enter_btn')
    lobby_table_mc.addChild(enter_btn)
    game_over_mc = named(MovieClip, 'game_over_mc')
    game_over_mc.gotoAndPlay('none')
    self.addChild(game_over_mc)
    start_mc = named(MovieClip, 'start_mc')
    game_over_mc.addChild(start_mc)
    start_btn = named(SimpleButton, 'start_btn')
    start_mc.addChild(start_btn)
    save_mc = named(MovieClip, 'save_mc')
    self.addChild(save_mc)
    load_mc = named(MovieClip, 'load_mc')
    self.addChild(load_mc)
    # The four grid cells share the same creation pattern.
    for grid_name in ('_0_0_mc', '_0_1_mc', '_1_0_mc', '_1_1_mc'):
        self.addChild(named(MovieClip, grid_name))
    return self
import text
def load(save_file_name):
    r'''Load a saved message dictionary from *save_file_name*.
    Returns a 'save_not_found' message when the file is absent.
    >>> old_log_level = logging.getLogger().level
    >>> logging.getLogger().setLevel(logging.CRITICAL)
    >>> load('.no_file')
    {'gateway_mc': 'save_not_found'}
    >>> logging.getLogger().setLevel(old_log_level)
    Convert Windows carriage return + line feed to Unix line feed.
    http://www.freenetpages.co.uk/hp/alan.gauld/tutfiles.htm
    http://en.wikipedia.org/wiki/Newline
    >>> windows = '{\r\n\r\n}'
    >>> file = open('user/test_windows_file.news.py', 'w')
    >>> file.write(windows)
    >>> file.close()
    >>> load('user/test_windows_file.news.py')
    {}
    '''
    import os
    if os.path.exists(save_file_name):
        # text.load normalizes line endings (see doctest above).
        unix_text = text.load(save_file_name)
        # SECURITY NOTE(review): eval executes arbitrary expressions from
        # the save file; only load save files from trusted sources.
        message = eval(unix_text)
    else:
        logging.error('load: %s file not found' % save_file_name)
        message = {'gateway_mc': 'save_not_found'}
    return message
def trace_event(event):
    '''Demonstration listener: trace the type of the dispatched event.
    Convenience for examples; not part of the ActionScript API.
    '''
    message = 'trace_event: ' + event.type
    trace(message)
# Begin mocking ActionScript functions
def trace(message):
    '''Mock ActionScript trace(): write *message* to standard output.
    BUG FIX: uses the function form of print so the module also runs
    under Python 3; in Python 2 print(message) prints identically.
    '''
    print(message)
class Event(object):
    '''Mock of flash.events.Event: an event type tag plus its target.'''
    ENTER_FRAME = 'enterFrame'
    def __init__(self, type):
        self.type = type
        self.currentTarget = None  # filled in by EventDispatcher.dispatchEvent
class MouseEvent(Event):
    '''Mock of flash.events.MouseEvent: mouse event type constants.'''
    CLICK = 'click'
    MOUSE_DOWN = 'mouseDown'
    MOUSE_OVER = 'mouseOver'
    MOUSE_OUT = 'mouseOut'
    MOUSE_MOVE = 'mouseMove'
class TextEvent(Event):
    '''Mock of flash.events.TextEvent, carrying the entered text.'''
    TEXT_INPUT = 'textInput'
    def __init__(self, type):
        # BUG FIX: super(self).__init__(type) raised TypeError — the
        # Python 2 style super() call needs (class, instance).
        super(TextEvent, self).__init__(type)
        self.text = ''
class EventDispatcher(object):
    '''Bubble event to eldest ancestor (root).
    Mock of flash.events.EventDispatcher holding at most one listener
    per event type.
    '''
    def __init__(self):
        ##- super(EventDispatcher, self).__init__()
        self.events = {}  # event type -> single listener callable
        self._logger = None
    def addEventListener(self, event_type, respond):
        # Only one listener per type; a second call replaces the first.
        self.events[event_type] = respond
    def dispatchEvent(self, event):
        # NOTE(review): relies on self.name and self.root, which are set by
        # the InteractiveObject subclass, not by this class.
        if not self.events:
            logging.warn('EventDispatcher.dispatchEvent: ' + self.name + ' has no listener')
        if event.type in self.events:
            # XXX Gotcha ActionScript ReferenceError: Error #1074:
            # Illegal write to read-only property currentTarget
            # on flash.events.MouseEvent.
            event.currentTarget = self
            dispatch_log = self.name + '.dispatchEvent(event) # ' + event.type
            logging.info(dispatch_log)
            if self.root:
                if self.root._on_set:
                    # When a recorder is installed on root, record the
                    # dispatch INSTEAD of invoking the listener.
                    self.root._on_set(self, 'dispatchEvent', event.type)
                    return
            self.events[event.type](event)
class InteractiveObject(EventDispatcher):
    '''Because all objects used are InteractiveObject.
    I do not mock DisplayObject.
    Mock mouse position.
    >>> stage = InteractiveObject()
    >>> hasattr(stage, 'mouseX')
    True
    >>> hasattr(stage, 'mouseY')
    True
    >>> hasattr(stage, 'scaleX')
    True
    >>> hasattr(stage, 'scaleY')
    True
    Beware, in ActionScript, mouseX and mouseY are read-only.
    file:///C:/Program%20Files%20(x86)/Common%20Files/Adobe/Help/en_US/AS3LCR/Flash_10.0/flash/display/DisplayObject.html#mouseX
    '''
    def __init__(self):
        super(InteractiveObject, self).__init__()
        self.name = 'instance'  # Flash's default prefix for unnamed instances
        self.mouseEnabled = true
        self.parent = None
        self.root = self  # MovieClip shadows this slot with a property
        self.x = 0
        self.y = 0
        self.mouseX = 0
        self.mouseY = 0
        self.scaleX = 0
        self.scaleY = 0
        self._on_set = None  # optional recorder called as (target, property, value)
    # XXX property and get item give me a headache with reference error
    #def _set_name(self, name):
    #    if name != self._name:
    #if self.parent:
    #    attributes = self.parent.__dict__
    #    if attributes.has_key(self._name):
    #        if attributes[self._name] == self:
    #            attributes.pop(self._name)
    #    if hasName(self):
    #        attributes = self.parent.__dict__
    #        if not attributes.has_key(name):
    #            attributes[name] = self
    #    self._name = name
    #name = property(lambda self: self._name, _set_name)
    def __getitem__(self, item):
        # Dictionary-style read falls back to attribute lookup.
        if self.__dict__.has_key(item):
            return self.__dict__[item]
        if hasattr(self, item):
            return getattr(self, item)
        # Flash is lenient with reference errors when getting.
        logging.debug('InteractiveObject.getitem: ReferenceError %s["%s"]' % (self.name, item))
        ##
        return ReferenceError
    def __setitem__(self, item, value):
        # Unlike reads, writing an unknown property raises, as Flash does.
        if self.__dict__.has_key(item):
            self.__dict__[item] = value
        elif hasattr(self, item):
            setattr(self, item, value)
        else:
            logging.info('InteractiveObject.setitem: ReferenceError %s["%s"]' % (self.name, item))
            raise ReferenceError
class SimpleButton(InteractiveObject):
    '''Mock of flash.display.SimpleButton; events may be dispatched to this.'''
class MovieClip(InteractiveObject):
    '''Mock of flash.display.MovieClip: a timeline label, an ordered
    child list, and name-based child lookup.
    '''
    def __init__(self):
        super(MovieClip, self).__init__()
        self._currentLabel = None
        self._children = []
        self.mouseChildren = true
        self._moved = false
        # Optional nested dict of children to adopt lazily; see
        # remember_children and getChildByName below.
        self._orphanage = None
    currentLabel = property(lambda self: self._currentLabel)
    def gotoAndPlay(self, label):
        '''ActionScript gotcha: Move clip x and y in code. Then try 'goto*'.
        No animation of x and y.
        >>> clip = MovieClip()
        >>> clip.currentLabel
        >>> clip.gotoAndPlay('a')
        >>> clip.currentLabel
        'a'
        >>> clip.x = 1
        >>> old_log_level = logging.getLogger().level
        >>> logging.getLogger().setLevel(logging.CRITICAL)
        >>> clip.gotoAndPlay('b')
        >>> clip.currentLabel
        'b'
        >>> logging.getLogger().setLevel(old_log_level)
        '''
        if self._moved:
            moved_log = 'ActionScript gotcha: After ActionScript has moved %s to %i,%i, timeline cannot move x,y at any label, such as "%s"' \
                % (self.name, self.x, self.y, label)
            logging.debug(moved_log)
        self._currentLabel = label
        if self.root:
            if self.root._on_set:
                # Record the label change when a recorder is installed.
                return self.root._on_set(self, 'currentLabel', label)
    def _get_numChildren(self):
        # Adopt any remembered children before counting.
        if self._orphanage:
            self = remember_children(self, self._orphanage, recurse = False,
                update = False)
            self._orphanage = None
        return len(self._children)
    numChildren = property(_get_numChildren)
    def _get_root(self):
        # NOTE(review): with no parent this raises AttributeError from
        # None.parent; Python then falls back to __getattr__, which finds
        # the 'root' slot set by InteractiveObject.__init__ (the clip itself).
        ancestor = self.parent
        while ancestor.parent:
            ancestor = ancestor.parent
        return ancestor
    root = property(_get_root)
    def addChild(self, display_object):
        self._children.append(display_object)
        display_object.parent = self
        display_object.root = display_object.parent.root
        if hasName(display_object):
            # A uniquely named child also becomes an attribute of the parent.
            if not self.__dict__.has_key(display_object.name):
                self.__dict__[display_object.name] = display_object
    def getChildByName(self, name):
        '''If the movie clip knows an orphanage,
        and child not found refer to orphanage.
        >>> moonhyoung_root = MovieClip()
        >>> orphanage = {'currentLabel': 'login',
        ...     '_1_2_mc': {'currentLabel': 'empty_black',
        ...         'territory_mc': {'currentLabel': 'neutral'}}}
        >>> moonhyoung_root._orphanage = orphanage
        >>> moonhyoung_root.getChildAt(0).name
        '_1_2_mc'
        >>> moonhyoung_root.numChildren
        1
        Beware, current label and other properties of root are not updated.
        >>> moonhyoung_root.currentLabel
        >>> moonhyoung_root._1_2_mc.currentLabel
        'empty_black'
        >>> moonhyoung_root._1_2_mc.territory_mc.currentLabel
        'neutral'
        '''
        for child in self._children:
            if name == child.name:
                return child
        if self._orphanage and self._orphanage.has_key(name):
            self = remember_children(self, self._orphanage, recurse = False,
                update = False)
            self._orphanage = None
            return self.__dict__[name]
    def getChildAt(self, index):
        if self._orphanage:
            self = remember_children(self, self._orphanage, recurse = False,
                update = False)
            self._orphanage = None
        return self._children[index]
    def removeChild(self, orphan):
        for child in self._children:
            if orphan == child:
                self._children.remove(child)
                if self == orphan.parent:
                    orphan.parent = None
                if hasName(orphan):
                    if self.__dict__.has_key(orphan.name):
                        if self.__dict__[orphan.name] == orphan:
                            # BUG FIX: pop the name key; popping the child
                            # object itself raised KeyError because the
                            # attribute is stored under the name string.
                            self.__dict__.pop(orphan.name)
                return child
        else:
            logging.warn('removeChild: orphan not found %s in %s' \
                %(orphan, self) )
            # NOTE(review): deliberate drop into the debugger on misuse.
            import pdb; pdb.set_trace();
    def __getitem__(self, item):
        if self.__dict__.has_key(item):
            return self.__dict__[item]
        if hasattr(self, item):
            return getattr(self, item)
        child = self.getChildByName(item)
        if child:
            return child
        # Flash is lenient with reference errors when getting.
        logging.debug('MovieClip.getitem: ReferenceError %s["%s"]' % (self.name, item))
        return ReferenceError
    def __getattr__(self, item):
        # Called only when normal lookup fails: fall back to child lookup.
        if self.__dict__.has_key(item):
            return self.__dict__[item]
        child = self.getChildByName(item)
        if child:
            return child
        raise AttributeError
    def __setattr__(self, item, value):
        if 'x' == item or 'y' == item:
            # Code-driven movement disables later timeline movement.
            self._moved = true
        self.__dict__[item] = value
class TextField(InteractiveObject):
    '''Mock of flash.text.TextField with a change-notifying text property.'''
    def __init__(self):
        super(TextField, self).__init__()
        self._text = ''
    def __get_text(self):
        return self._text
    def __set_text(self, text):
        # BUG FIX: compare by value, not identity; 'is not' treated an
        # equal-but-distinct string as a change (and, for interned strings,
        # a change as no change), so notifications fired inconsistently.
        if self._text != text:
            self._text = text
            if self.root:
                if self.root._on_set:
                    # Notify the installed recorder of the new text.
                    self.root._on_set(self, 'text', text)
    text = property(__get_text, __set_text)
class Array:
    '''Mock ActionScript Array class to conveniently use indexOf.
    Unused?
    >>> simple_properties = new = Array('scaleX', 'scaleY', 'x', 'y');
    >>> simple_properties.indexOf('x');
    2
    >>> simple_properties.indexOf('z');
    -1
    '''
    def __init__(self, *values):
        self._list = list(values)
    def indexOf(self, searchElement, startIndex = 0):
        '''Index of searchElement at or after startIndex, or -1 if absent.
        BUG FIX: startIndex was accepted but ignored, and the bare
        except also masked unrelated errors; now the search honors
        startIndex and only a missing element yields -1.
        '''
        try:
            return self._list.index(searchElement, startIndex)
        except ValueError:
            return -1
# helper functions not native to ActionScript
def hasName(display_object):
    '''Was the display object given an explicit (non-default) name?
    By default Flash names unnamed instances: instance...
    >>> hasName(None)
    False
    >>> instance = MovieClip()
    >>> hasName(instance)
    False
    >>> instance.name = 'an_instance'
    >>> hasName(instance)
    True
    >>> instance.name = 'instance23'
    >>> hasName(instance)
    False
    >>> instance.name = ''
    >>> hasName(instance)
    False
    '''
    if not display_object:
        return false
    name = display_object.name
    if '' == name:
        return false
    # Names carrying the Flash default 'instance' prefix do not count.
    return not name.startswith('instance')
def isMovieClip(owner):
    '''Duck-type check: a MovieClip, or anything with gotoAndPlay.'''
    if not owner:
        return False
    # bizarre non-equality from cyclical import? server-client
    if type(owner) == MovieClip:
        return True
    return hasattr(owner, 'gotoAndPlay')
def isInteractiveObject(owner):
    '''Duck-type check: an InteractiveObject, or anything with mouseEnabled.'''
    if not owner:
        return False
    # bizarre non-equality from cyclical import? server-client
    if type(owner) == InteractiveObject:
        return True
    return hasattr(owner, 'mouseEnabled')
def isTextField(owner):
    '''Duck-type check: a TextField, or anything with a text attribute.'''
    if not owner:
        return False
    if type(owner) == TextField:
        return True
    return hasattr(owner, 'text')
def isSimpleButton(owner):
    '''Duck-type check: a SimpleButton, or anything named like one (*_btn).'''
    if not owner:
        return False
    if type(owner) == SimpleButton:
        return True
    return hasattr(owner, 'name') and owner.name.endswith('_btn')
def isMovieTextButton(owner):
    '''True for any movie clip, text field, or button stand-in.'''
    if not owner:
        return False
    return any(check(owner)
        for check in (isMovieClip, isTextField, isSimpleButton))
#.as:uncomment: def unicode_to_string(value):
#.as:uncomment: r'''ActionScript only.
#.as:uncomment: >>> unicode_to_string_as('lobby')
#.as:uncomment: 'lobby'
#.as:uncomment: >>> unicode_to_string_as(1)
#.as:uncomment: 1
#.as:uncomment: >>> unicode_to_string_as('\xa0')
#.as:uncomment: '\xa0'
#.as:uncomment: >>> unicode_to_string_as(u'\xa0')
#.as:uncomment: u'\xa0'
#.as:uncomment: '''
#.as:uncomment: return value;
def unicode_to_string(value):
    r'''Coerce a unicode value to a plain string when it converts cleanly.
    Non-unicode values, and unicode that cannot be encoded, pass through
    unchanged.
    >>> unicode_to_string(u'lobby')
    'lobby'
    >>> unicode_to_string(1)
    1
    >>> unicode_to_string(u'\xa0')
    u'\xa0'
    '''
    converted = value
    if type(u'') == type(value):
        try:
            converting = str(value)
        # BUG FIX: catch only encoding failures; the bare except also
        # swallowed KeyboardInterrupt and unrelated programming errors.
        except UnicodeError:
            pass
        else:
            if value == converting:
                converted = converting
    return converted
def resembles_dictionary(owner):
    '''
    Beware that pyamf uses ASObject, not dictionary.
    >>> import pyamf
    >>> dictionary = {'a': {'b': u'2', 'c': u'3'}}
    >>> as_object = pyamf.ASObject({'a': {'b': u'2', 'c': u'3'}})
    Even though ASObject looks like a dictionary,
    decoded ActionScript object fails dictionary type checks.
    >>> type(as_object) == dict
    False
    >>> resembles_dictionary(as_object)
    True
    >>> dictionary == as_object
    True
    '''
    # Exact dict, or anything exposing the dict-like has_key method.
    if type(owner) == type({}):
        return True
    return hasattr(owner, 'has_key')
def remember_children(root, news, create = True, recurse = True, update = True):
    '''Recursively remember children.
    If create, then x,y does not count as moving.
    Optionally do not recurse.
    >>> moonhyoung_root = MovieClip()
    >>> orphanage = {'currentLabel': 'login',
    ...     '_1_2_mc': {'currentLabel': 'empty_black',
    ...         'territory_mc': {'currentLabel': 'neutral'}}}
    >>> moonhyoung_root = remember_children(moonhyoung_root, orphanage, True, recurse = False)
    >>> moonhyoung_root._orphanage
    >>> moonhyoung_root.currentLabel
    'login'
    >>> moonhyoung_root._1_2_mc.currentLabel
    'empty_black'
    >>> moonhyoung_root._1_2_mc._orphanage == orphanage['_1_2_mc']
    True
    Optionally do not overwrite property.
    >>> moonhyoung_root.gotoAndPlay('lobby')
    >>> moonhyoung_root = remember_children(moonhyoung_root, orphanage, True, recurse = False, update = False)
    >>> moonhyoung_root.currentLabel
    'lobby'
    Do not overwrite even if property is None.
    >>> moonhyoung_root.gotoAndPlay(None)
    >>> moonhyoung_root = remember_children(moonhyoung_root, orphanage, True, recurse = False, update = False)
    >>> moonhyoung_root.currentLabel
    '''
    if not root:
        # NOTE(review): 'property' is unbound here, so this logs the
        # builtin property type rather than a news key.
        logging.error('remember_children root missing ' + str(property))
    elif not isMovieClip(root):
        logging.error('remember_children root is not MovieClip ' + str(root))
    else:
        for property, value in news.items():
            if update and property == 'currentLabel':
                label = value
                label = unicode_to_string(label)
                root.gotoAndPlay(label)
            elif update and property == 'x':
                root.x = value
                if create:
                    # Creation-time placement does not count as movement.
                    root._moved = False
            elif update and property == 'y':
                root.y = value
                if create:
                    root._moved = False
            # Parses as: endswith('_mc') or (value and resembles... );
            # assumes every '_mc' key maps to a dict — TODO confirm callers.
            elif property.endswith('_mc') or value \
                and resembles_dictionary(value) \
                and value.has_key('currentLabel'):
                baby_mc = MovieClip()
                baby_mc.name = property
                label = value.get('currentLabel')
                label = unicode_to_string(label)
                baby_mc.gotoAndPlay(label)
                root.addChild(baby_mc)
                if recurse:
                    remember_children(baby_mc, value, recurse = recurse)
                else:
                    # Defer grandchildren: stash the raw dict for lazy
                    # adoption by numChildren/getChildAt/getChildByName.
                    root._orphanage = None
                    baby_mc._orphanage = value
            elif property.endswith('_txt') or value \
                and resembles_dictionary(value) \
                and value.has_key('text'):
                baby_txt = TextField()
                baby_txt.name = property
                baby_txt.text = value.get('text', '')
                baby_txt.text = unicode_to_string(baby_txt.text)
                root.addChild(baby_txt)
            elif property.endswith('_btn'):
                baby_btn = SimpleButton()
                baby_btn.name = property
                root.addChild(baby_btn)
    return root
if __name__ == '__main__':
    # Run all doctests in this module (Python 2 print syntax).
    print '\n\nactionscript.py starts testing...',
    import doctest
    doctest.testmod()
    print 'complete.'
| |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
from __future__ import print_function
import socket
import subprocess
from scapy.modules.six.moves.queue import Queue, Empty
from scapy.pipetool import Source, Drain, Sink
from scapy.config import conf
from scapy.compat import raw
from scapy.utils import ContextManagerSubprocess, PcapReader, PcapWriter
class SniffSource(Source):
    """Read packets from an interface and send them to low exit.

    .. code::

         +-----------+
      >>-|           |->>
         |           |
       >-|  [iface]--|->
         +-----------+

    If neither of the ``iface`` or ``socket`` parameters are specified, then
    Scapy will capture from the first network interface.

    :param iface: A layer 2 interface to sniff packets from. Mutually
                  exclusive with the ``socket`` parameter.
    :param filter: Packet filter to use while capturing. See ``L2listen``.
                   Not used with ``socket`` parameter.
    :param socket: A ``SuperSocket`` to sniff packets from.
    """
    def __init__(self, iface=None, filter=None, socket=None, name=None):
        Source.__init__(self, name=name)
        if (iface or filter) and socket:
            raise ValueError("iface and filter options are mutually exclusive "
                             "with socket")
        self.s = socket
        self.iface = iface
        self.filter = filter
    def start(self):
        # Open the capture socket lazily, unless one was supplied.
        if not self.s:
            self.s = conf.L2listen(iface=self.iface, filter=self.filter)
    def stop(self):
        if self.s:
            self.s.close()
    def fileno(self):
        return self.s.fileno()
    def check_recv(self):
        return True
    def deliver(self):
        try:
            pkt = self.s.recv()
            if pkt is not None:
                self._send(pkt)
        except EOFError:
            # The capture socket is done: mark this source exhausted.
            self.is_exhausted = True
class RdpcapSource(Source):
    """Read packets from a PCAP file send them to low exit.

    .. code::

         +----------+
      >>-|          |->>
         |          |
       >-|  [pcap]--|->
         +----------+
    """
    def __init__(self, fname, name=None):
        Source.__init__(self, name=name)
        self.fname = fname
        # NOTE(review): start() opens the file again and this first reader
        # is never closed before being replaced — confirm whether opening
        # here is needed at all.
        self.f = PcapReader(self.fname)
    def start(self):
        self.f = PcapReader(self.fname)
        self.is_exhausted = False
    def stop(self):
        self.f.close()
    def fileno(self):
        return self.f.fileno()
    def check_recv(self):
        return True
    def deliver(self):
        try:
            p = self.f.recv()
            self._send(p)
        except EOFError:
            # End of the capture file.
            self.is_exhausted = True
class InjectSink(Sink):
    """Packets received on low input are injected to an interface

    .. code::

         +-----------+
      >>-|           |->>
         |           |
       >-|--[iface]  |->
         +-----------+

    Defaults to scapy's configured interface when none is given.
    """
    def __init__(self, iface=None, name=None):
        Sink.__init__(self, name=name)
        self.iface = conf.iface if iface is None else iface
    def start(self):
        self.s = conf.L2socket(iface=self.iface)
    def stop(self):
        self.s.close()
    def push(self, msg):
        self.s.send(msg)
class Inject3Sink(InjectSink):
    """Layer-3 variant of InjectSink: injects without a link-layer header."""
    def start(self):
        self.s = conf.L3socket(iface=self.iface)
class WrpcapSink(Sink):
    """
    Writes :py:class:`Packet` on the low entry to a ``pcap`` file.
    Ignores all messages on the high entry.

    .. note::
        Due to limitations of the ``pcap`` format, all packets **must** be of
        the same link type. This class will not mutate packets to conform with
        the expected link type.

    .. code::

         +----------+
      >>-|          |->>
         |          |
       >-|--[pcap]  |->
         +----------+

    :param fname: Filename to write packets to.
    :type fname: str
    :param linktype: See :py:attr:`linktype`.
    :type linktype: None or int

    .. py:attribute:: linktype

        Set an explicit link-type (``DLT_``) for packets. This must be an
        ``int`` or ``None``.
        This is the same as the :py:func:`wrpcap` ``linktype`` parameter.
        If ``None`` (the default), the linktype will be auto-detected on the
        first packet. This field will *not* be updated with the result of this
        auto-detection.
        This attribute has no effect after calling :py:meth:`PipeEngine.start`.
    """
    def __init__(self, fname, name=None, linktype=None):
        Sink.__init__(self, name=name)
        self.fname = fname
        self.f = None  # PcapWriter, opened in start()
        self.linktype = linktype
    def start(self):
        # The output file is opened here, not in __init__.
        self.f = PcapWriter(self.fname, linktype=self.linktype)
    def stop(self):
        if self.f:
            self.f.flush()
            self.f.close()
    def push(self, msg):
        # Skip falsy messages (e.g. None) rather than writing them.
        if msg:
            self.f.write(msg)
class WiresharkSink(WrpcapSink):
    """
    Streams :py:class:`Packet` from the low entry to Wireshark.

    Packets are written into a ``pcap`` stream (like :py:class:`WrpcapSink`),
    and streamed to a new Wireshark process on its ``stdin``.

    Wireshark is run with the ``-ki -`` arguments, which cause it to treat
    ``stdin`` as a capture device. Arguments in :py:attr:`args` will be
    appended after this.

    Extends :py:mod:`WrpcapSink`.

    .. code::

         +----------+
      >>-|          |->>
         |          |
       >-|--[pcap]  |->
         +----------+

    :param linktype: See :py:attr:`WrpcapSink.linktype`.
    :type linktype: None or int
    :param args: See :py:attr:`args`.
    :type args: None or list[str]

    .. py:attribute:: args

        Additional arguments for the Wireshark process.
        This must be either ``None`` (the default), or a ``list`` of ``str``.
        This attribute has no effect after calling :py:meth:`PipeEngine.start`.
        See :manpage:`wireshark(1)` for more details.
    """
    def __init__(self, name=None, linktype=None, args=None):
        WrpcapSink.__init__(self, fname=None, name=name, linktype=linktype)
        self.args = args
    def start(self):
        # Wireshark must be running first, because PcapWriter will block until
        # data has been read!
        with ContextManagerSubprocess(conf.prog.wireshark):
            args = [conf.prog.wireshark, "-Slki", "-"]
            if self.args:
                args.extend(self.args)
            proc = subprocess.Popen(
                args,
                stdin=subprocess.PIPE,
                stdout=None,
                stderr=None,
            )
        # Point WrpcapSink's writer at Wireshark's stdin pipe.
        self.fname = proc.stdin
        WrpcapSink.start(self)
class UDPDrain(Drain):
    """UDP payloads received on high entry are sent over UDP

    .. code::

         +-------------+
      >>-|--[payload]--|->>
         |      X      |
       >-|----[UDP]----|->
         +-------------+
    """
    def __init__(self, ip="127.0.0.1", port=1234):
        Drain.__init__(self)
        self.ip = ip
        self.port = port
    def push(self, msg):
        from scapy.layers.inet import IP, UDP
        # Only UDP-over-IP packets are unwrapped; anything else is dropped.
        if IP not in msg or msg[IP].proto != 17 or UDP not in msg:
            return
        self._high_send(raw(msg[UDP].payload))
    def high_push(self, msg):
        from scapy.layers.inet import IP, UDP
        # Wrap the raw payload into a fresh UDP datagram.
        datagram = IP(dst=self.ip) / UDP(sport=1234, dport=self.port) / msg
        self._send(datagram)
class FDSourceSink(Source):
    """Use a file descriptor as source and sink

    .. code::

         +-------------+
      >>-|             |->>
         |             |
       >-|-[file desc]-|->
         +-------------+
    """
    def __init__(self, fd, name=None):
        Source.__init__(self, name=name)
        # fd: any file-like object with write()/read()/fileno().
        self.fd = fd
    def push(self, msg):
        self.fd.write(msg)
    def fileno(self):
        return self.fd.fileno()
    def deliver(self):
        self._send(self.fd.read())
class TCPConnectPipe(Source):
    """TCP connect to addr:port and use it as source and sink

    .. code::

         +-------------+
      >>-|             |->>
         |             |
       >-|-[addr:port]-|->
         +-------------+
    """
    # Presumably makes the pipe engine poll this socket with select();
    # defined by scapy's pipetool convention — confirm there.
    __selectable_force_select__ = True
    def __init__(self, addr="", port=0, name=None):
        Source.__init__(self, name=name)
        self.addr = addr
        self.port = port
        self.fd = None  # connected socket, created in start()
    def start(self):
        self.fd = socket.socket()
        self.fd.connect((self.addr, self.port))
    def stop(self):
        if self.fd:
            self.fd.close()
    def push(self, msg):
        # Low-entry messages are written straight to the TCP stream.
        self.fd.send(msg)
    def fileno(self):
        return self.fd.fileno()
    def deliver(self):
        try:
            msg = self.fd.recv(65536)
        except socket.error:
            # Close the socket, then let the error propagate to the engine.
            self.stop()
            raise
        if msg:
            self._send(msg)
class TCPListenPipe(TCPConnectPipe):
    """TCP listen on [addr:]port and use first connection as source and sink;
    send peer address to high output

    .. code::

         +------^------+
      >>-|    +-[peer]-|->>
         |   /         |
       >-|-[addr:port]-|->
         +-------------+
    """
    __selectable_force_select__ = True
    def __init__(self, addr="", port=0, name=None):
        TCPConnectPipe.__init__(self, addr, port, name)
        self.connected = False
        # Messages pushed before a peer connects are queued and replayed.
        self.q = Queue()
    def start(self):
        self.connected = False
        self.fd = socket.socket()
        self.fd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.fd.bind((self.addr, self.port))
        self.fd.listen(1)
    def push(self, msg):
        if self.connected:
            self.fd.send(msg)
        else:
            self.q.put(msg)
    def deliver(self):
        if self.connected:
            try:
                msg = self.fd.recv(65536)
            except socket.error:
                self.stop()
                raise
            if msg:
                self._send(msg)
        else:
            # First readiness event is the incoming connection: accept it,
            # replace the listening socket with the accepted one, then
            # flush everything queued while we were waiting.
            fd, frm = self.fd.accept()
            self._high_send(frm)
            self.fd.close()
            self.fd = fd
            self.connected = True
            self._trigger(frm)
            while True:
                try:
                    self.fd.send(self.q.get(block=False))
                except Empty:
                    break
class UDPClientPipe(TCPConnectPipe):
    """UDP send packets to addr:port and use it as source and sink
    Start trying to receive only once a packet has been send

    .. code::

         +-------------+
      >>-|             |->>
         |             |
       >-|-[addr:port]-|->
         +-------------+
    """
    def __init__(self, addr="", port=0, name=None):
        TCPConnectPipe.__init__(self, addr, port, name)
        self.connected = False
    def start(self):
        self.fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # connect() on a datagram socket just fixes the peer address.
        self.fd.connect((self.addr, self.port))
        self.connected = True
    def push(self, msg):
        self.fd.send(msg)
    def deliver(self):
        # Do not receive before the socket has been set up.
        if not self.connected:
            return
        try:
            msg = self.fd.recv(65536)
        except socket.error:
            self.stop()
            raise
        if msg:
            self._send(msg)
class UDPServerPipe(TCPListenPipe):
    """UDP bind to [addr:]port and use as source and sink
    Use (ip, port) from first received IP packet as destination for all data

    .. code::

         +------^------+
      >>-|    +-[peer]-|->>
         |   /         |
       >-|-[addr:port]-|->
         +-------------+
    """
    def __init__(self, addr="", port=0, name=None):
        TCPListenPipe.__init__(self, addr, port, name)
        self._destination = None  # (ip, port) of the first peer seen
    def start(self):
        self.fd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.fd.bind((self.addr, self.port))
    def push(self, msg):
        if self._destination:
            self.fd.sendto(msg, self._destination)
        else:
            # No peer known yet: queue until the first datagram arrives.
            self.q.put(msg)
    def deliver(self):
        if self._destination:
            try:
                msg = self.fd.recv(65536)
            except socket.error:
                self.stop()
                raise
            if msg:
                self._send(msg)
        else:
            # First datagram: remember its sender as the fixed peer,
            # announce it, then flush the queued messages to it.
            msg, dest = self.fd.recvfrom(65536)
            if msg:
                self._send(msg)
            self._destination = dest
            self._trigger(dest)
            self._high_send(dest)
            while True:
                try:
                    msg = self.q.get(block=False)
                    self.fd.sendto(msg, self._destination)
                except Empty:
                    break
class TriggeredMessage(Drain):
    """Send a preloaded message when triggered and trigger in chain

    .. code::

         +------^------+
      >>-|      | /----|->>
         |      |/     |
       >-|-[ message ]-|->
         +------^------+
    """
    def __init__(self, msg, name=None):
        Drain.__init__(self, name=name)
        self.msg = msg
    def on_trigger(self, trigmsg):
        # Emit the canned message on both exits, then pass the trigger on.
        for emit in (self._send, self._high_send):
            emit(self.msg)
        self._trigger(trigmsg)
class TriggerDrain(Drain):
    """Pass messages and trigger when a condition is met

    .. code::

         +------^------+
      >>-|-[condition]-|->>
         |      |      |
       >-|-[condition]-|->
         +-------------+
    """
    def __init__(self, f, name=None):
        Drain.__init__(self, name=name)
        self.f = f
    def _check(self, msg):
        # Fire a trigger carrying the condition's result when it is truthy.
        result = self.f(msg)
        if result:
            self._trigger(result)
    def push(self, msg):
        self._check(msg)
        self._send(msg)
    def high_push(self, msg):
        self._check(msg)
        self._high_send(msg)
class TriggeredValve(Drain):
    """Let messages alternatively pass or not, changing on trigger

    .. code::

         +------^------+
      >>-|-[pass/stop]-|->>
         |      |      |
       >-|-[pass/stop]-|->
         +------^------+
    """
    def __init__(self, start_state=True, name=None):
        Drain.__init__(self, name=name)
        self.opened = start_state
    def push(self, msg):
        if not self.opened:
            return
        self._send(msg)
    def high_push(self, msg):
        if not self.opened:
            return
        self._high_send(msg)
    def on_trigger(self, msg):
        # Flip the valve, then pass the trigger along the chain.
        self.opened = not self.opened
        self._trigger(msg)
class TriggeredQueueingValve(Drain):
    """Let messages alternatively pass or queued, changing on trigger

    .. code::

         +------^-------+
      >>-|-[pass/queue]-|->>
         |      |       |
       >-|-[pass/queue]-|->
         +------^-------+
    """
    def __init__(self, start_state=True, name=None):
        Drain.__init__(self, name=name)
        self.opened = start_state
        self.q = Queue()
    def start(self):
        # Drop anything queued by a previous run.
        self.q = Queue()
    def push(self, msg):
        if self.opened:
            self._send(msg)
        else:
            self.q.put((True, msg))
    def high_push(self, msg):
        if self.opened:
            # BUG FIX: high-entry messages were forwarded with _send()
            # (the low exit); route them to the high exit, matching the
            # queued replay below, which uses _high_send for them.
            self._high_send(msg)
        else:
            self.q.put((False, msg))
    def on_trigger(self, msg):
        # Flip the valve, propagate the trigger, then replay the queue
        # onto the exits the messages were originally pushed toward.
        self.opened ^= True
        self._trigger(msg)
        while True:
            try:
                low, msg = self.q.get(block=False)
            except Empty:
                break
            else:
                if low:
                    self._send(msg)
                else:
                    self._high_send(msg)
class TriggeredSwitch(Drain):
    r"""Let messages alternatively high or low, changing on trigger

    .. code::

         +------^------+
      >>-|-\    |    /-|->>
         |  [up/down]  |
       >-|-/    |    \-|->
         +------^------+
    """
    def __init__(self, start_state=True, name=None):
        Drain.__init__(self, name=name)
        self.low = start_state
    def push(self, msg):
        # Both entries route to whichever exit is currently selected.
        send = self._send if self.low else self._high_send
        send(msg)
    high_push = push
    def on_trigger(self, msg):
        self.low = not self.low
        self._trigger(msg)
class ConvertPipe(Drain):
    """Packets sent on entry are converted to another type of packet.

    .. code::

         +-------------+
      >>-|--[convert]--|->>
         |             |
       >-|--[convert]--|->
         +-------------+

    See ``Packet.convert_packet``.
    """
    def __init__(self, low_type=None, high_type=None, name=None):
        Drain.__init__(self, name=name)
        self.low_type = low_type
        self.high_type = high_type
    def push(self, msg):
        # A None target type means pass the packet through unconverted.
        converted = self.low_type.convert_packet(msg) if self.low_type else msg
        self._send(converted)
    def high_push(self, msg):
        converted = self.high_type.convert_packet(msg) if self.high_type else msg
        self._high_send(converted)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  static_shape = tensor_util.constant_value(value.dense_shape)
  if static_shape is None:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
    return math_ops.unsorted_segment_sum(
        value.values, value.indices, value.dense_shape[0], name=name)
  num_elements = np.prod(static_shape)
  if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
        "This may consume a large amount of memory." % num_elements)
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)
# Let ops that expect a Tensor accept IndexedSlices via implicit conversion.
ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
  """Initialize the pending count for ops between two lists of Operations.

  'pending_count[op._id]' indicates the number of backprop inputs
  to this operation.

  Args:
    graph: a Graph.
    to_ops: list of Operations.
    from_ops: list of Operations.
    colocate_gradients_with_ops: Python bool. See docstring of gradients().

  Returns:
    A tuple containing: (1) a list of integers indexed by operation id,
    indicating the number of backprop inputs to this operation, and (2)
    a ControlFlowState object which is not None if the ops between from_ops
    and to_ops contain control flow loops.
  """
  # Mark reachable ops from from_ops.
  # 'to_ops' are pre-marked so the forward walk does not expand past them
  # (_MarkReachedOps skips ops that are already marked).
  reached_ops = [False] * (graph._last_id + 1)
  for op in to_ops:
    reached_ops[op._id] = True
  _MarkReachedOps(from_ops, reached_ops)
  # Mark between ops: ops that lie on a path from from_ops to to_ops,
  # found by walking backwards from to_ops through reached ops only.
  between_ops = [False] * (graph._last_id + 1)
  between_op_list = []
  queue = collections.deque()
  queue.extend(to_ops)
  while queue:
    op = queue.popleft()
    # We are interested in this op.
    if reached_ops[op._id]:
      between_ops[op._id] = True
      between_op_list.append(op)
      # Clear the boolean so we won't add the inputs again.
      reached_ops[op._id] = False
      for inp in op.inputs:
        queue.append(inp.op)
  # 'loop_state' is None if there are no while loops.
  loop_state = control_flow_ops.MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)
  # Initialize pending count for between ops: each in-subgraph consumer of
  # an op contributes one backprop input that must arrive before the op's
  # gradient can be computed.
  pending_count = [0] * (graph._last_id + 1)
  for op in between_op_list:
    for x in op.inputs:
      if between_ops[x.op._id]:
        pending_count[x.op._id] += 1
  return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Each `None` entry in `grad_ys` is replaced by a tensor of ones with the
  shape and dtype of the corresponding `y`; non-None entries are dtype-checked
  against their `y`.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If sizes of gradients and inputs don't match
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    if grad_y is None:
      # Default initial gradient: a tensor of ones shaped/typed like y.
      with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
        grad_ys[i] = array_ops.fill(
            array_ops.shape(y), constant_op.constant(
                1, dtype=y.dtype))
      continue
    if y.dtype.is_real or y.dtype.is_integer:
      if not grad_y.dtype.is_real and not grad_y.dtype.is_integer:
        raise TypeError("Gradient type %s generated for real or "
                        "integer-valued tensor %s with type %s must be "
                        "real or integer" %
                        (dtypes.as_dtype(grad_y.dtype).name, y,
                         dtypes.as_dtype(y.dtype).name))
    elif y.dtype.is_complex:
      if not grad_y.dtype.is_complex:
        # Bug fix: this message previously said "must be real", contradicting
        # the check above (and the matching message in
        # _VerifyGeneratedGradients). A complex-valued y needs a complex grad.
        raise TypeError("Gradient type %s generated for complex-valued "
                        "tensor %s with type %s must be complex" %
                        (dtypes.as_dtype(grad_y.dtype).name, y,
                         dtypes.as_dtype(y.dtype).name))
    else:
      raise TypeError("Tensor %s with type %s must be numeric "
                      "to obtain a default gradient" %
                      (y, dtypes.as_dtype(y.dtype).name))
  return grad_ys
def _IsTrainable(tensor):
  """Return True iff `tensor` has a floating-point or complex base dtype."""
  base = dtypes.as_dtype(tensor.dtype).base_dtype
  return base in (dtypes.float16, dtypes.float32, dtypes.float64,
                  dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
  """Verify that gradients are valid in number and type.

  Args:
    grads: List of generated gradients.
    op: Operation for which the gradients where generated.

  Raises:
    ValueError: if sizes of gradients and inputs don't match.
    TypeError: if type of any gradient is not valid for its input.
  """
  if len(grads) != len(op.inputs):
    raise ValueError("Num gradients %d generated for op %s do not match num "
                     "inputs %d" % (len(grads), op.node_def, len(op.inputs)))
  for grad, inp in zip(grads, op.inputs):
    if grad is None:
      # A None gradient simply means "no contribution"; nothing to check.
      continue
    if grad.dtype.is_real:
      if not inp.dtype.is_real:
        raise TypeError("Gradient type %s generated for real-valued op %s "
                        "with type %s must be real" %
                        (dtypes.as_dtype(grad.dtype).name, op.node_def,
                         dtypes.as_dtype(inp.dtype).name))
    elif grad.dtype.is_complex:
      if not inp.dtype.is_complex:
        raise TypeError("Gradient type %s generated for complex-valued op %s"
                        " with type %s must be complex" %
                        (dtypes.as_dtype(grad.dtype).name, op.node_def,
                         dtypes.as_dtype(inp.dtype).name))
    else:
      raise TypeError("Gradient type %s generated for op %s "
                      "with type %s must be either real or complex" %
                      (dtypes.as_dtype(grad.dtype).name, op.node_def,
                       dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
  """Context to colocate with `op` if `colocate_gradients_with_ops`."""
  if not colocate_gradients_with_ops:
    # Colocation disabled: behave as a no-op context.
    yield
  else:
    with ops.colocate_with(op):
      yield
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  # SymbolicGradient inputs: the original call's inputs followed by the
  # gradients flowing into its outputs.
  f_in = list(op.inputs) + out_grads
  f_types = [x.dtype for x in op.inputs]
  # Identify the function being differentiated by name, forwarding its attrs.
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
def gradients(ys,
              xs,
              grad_ys=None,
              name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None):
  """Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.

  `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the partial
  derivatives of `ys` with respect to `xs`. It returns a list of
  `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
  for y in `ys`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`. When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`. A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'gradients'.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operations.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of `sum(dy/dx)` for each x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.
  """
  ys = _AsList(ys)
  xs = _AsList(xs)
  if grad_ys is None:
    grad_ys = [None] * len(ys)
  else:
    grad_ys = _AsList(grad_ys)

  with ops.name_scope(name, "gradients", ys + xs + grad_ys):
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)

    # The approach we take here is as follows: Create a list of all ops in the
    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
    # to ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its input.

    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
                                              from_ops,
                                              colocate_gradients_with_ops)

    # Iterate over the collected ops.
    #
    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into a Add() Operation if there
    # is more than one.
    grads = {}

    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)

    # Initialize queue with to_ops.
    queue = collections.deque()
    # Add the ops in 'to_ops' into the queue.
    to_ops_set = set()
    for op in to_ops:
      # 'ready' handles the case where one output gradient relies on
      # another output's gradient.
      # pylint: disable=protected-access
      ready = (pending_count[op._id] == 0)
      if ready and op._id not in to_ops_set:
        to_ops_set.add(op._id)
        queue.append(op)
      # pylint: enable=protected-access

    if loop_state:
      # Seed zero gradients for trainable loop exits that received none, so
      # while-loop backprop can terminate.
      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
      for y in loop_exits:
        if _IsTrainable(y):
          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
          queue.append(y.op)

    # The set of 'from_ops' with no backprop predecessors inside 'from_ops';
    # differentiation does not proceed past these ops.
    stop_ops = _StopOps(from_ops, pending_count)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)

        grad_fn = None
        # pylint: disable=protected-access
        is_func_call = ops.get_default_graph()._is_function(op.type)
        # An endpoint "has" a gradient if it received a Tensor/IndexedSlices
        # or a non-empty list of partial gradients.
        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
        if has_out_grads and (op._id not in stop_ops):
          if is_func_call:
            grad_fn = ops.get_default_graph()._get_function(
                op.type).python_grad_func
            # pylint: enable=protected-access
          else:
            # A grad_fn must be defined, either as a function or as None
            # for ops that do not have gradients.
            try:
              grad_fn = ops.get_gradient_function(op)
            except LookupError:
              raise LookupError(
                  "No gradient defined for operation '%s' (op type: %s)" %
                  (op.name, op.type))
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)
        if (grad_fn or is_func_call) and has_out_grads:
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, ops.Tensor) and
                not out_grad) and _IsTrainable(op.outputs[i]):
              # Only floating-point outputs get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              if loop_state:
                out_grads[i] = loop_state.ZerosLike(op, i)
              else:
                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with ops.get_default_graph()._original_op(op):
              # pylint: enable=protected-access
              if grad_fn:
                # If grad_fn was found, do not use SymbolicGradient even for
                # functions.
                in_grads = grad_fn(op, *out_grads)
              else:
                # For function call ops, we add a 'SymbolicGradient'
                # node to the graph to compute gradients.
                in_grads = _SymGrad(op, out_grads)
              in_grads = _AsList(in_grads)
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len(
                  [x for x in in_grads if x is not None]) > 1:
                in_grads = control_flow_ops.tuple(in_grads)
          _LogOpGradients(op, out_grads, in_grads)
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagate a list of None backwards.
          in_grads = [None] * len(op.inputs)
        for t_in, in_grad in zip(op.inputs, in_grads):
          if in_grad is not None:
            if isinstance(in_grad, ops.Tensor):
              in_grad.set_shape(t_in.get_shape())
            _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)

      # Update pending count for the inputs of op and enqueue ready ops.
      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)

  if loop_state:
    loop_state.PostProcessing()
  return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
  """Return true iff op has real gradient."""
  for out_grad in _GetGrads(grads, op):
    # A Tensor/IndexedSlices entry is a real gradient by definition.
    if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
      return True
    # Otherwise the entry is a (possibly empty) list of partial gradients.
    if out_grad and isinstance(out_grad, collections.Sequence):
      if any(g is not None for g in out_grad):
        return True
  return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
  """Update pending count for the inputs of op and enqueue ready ops."""
  for x in op.inputs:
    # pylint: disable=protected-access
    pending_count[x.op._id] -= 1
    ready = (pending_count[x.op._id] == 0)
    if loop_state and not ready:
      # NOTE(review): loop switch ops are treated as ready even while grads
      # are still pending -- presumably so while-loop backprop can make
      # progress before all iterations' contributions arrive; confirm
      # against control_flow_ops.
      ready = (pending_count[x.op._id] > 0 and
               control_flow_ops.IsLoopSwitch(x.op))
    # pylint: enable=protected-access
    if ready:
      if control_flow_ops.IsLoopExit(x.op):
        # if x is an exit without real gradient, defer processing them.
        grad_state = loop_state.GetGradState(x.op, before=False)
        grad_state.deferred_exits.append(x)
        grad_state.pending_exits_count -= 1
        if grad_state.pending_exits_count == 0:
          # We now have all the exits so process them.
          has_real_grad = False
          for y in grad_state.deferred_exits:
            if _HasAnyNotNoneGrads(grads, y.op):
              has_real_grad = True
              queue.append(y.op)
            else:
              grad_state.unused_exits.append(y)
          if has_real_grad:
            # For an unused exit, if it has floating-point outputs, backprop
            # a zero gradient. Otherwise, just ignore it.
            for y in grad_state.unused_exits:
              if _IsTrainable(y):
                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
              queue.append(y.op)
          else:
            # All exits are "unused" so use None as gradient.
            for y in grad_state.unused_exits:
              queue.append(y.op)
      else:
        # Ordinary op: all of its consumers' gradients have arrived.
        queue.append(x.op)
def _SetGrad(grads, t, grad):
  """Sets gradient "grad" in "grads" for tensor "t"."""
  op = t.op
  op_grads = grads.get(op)
  if not op_grads:
    # First gradient recorded for this op: start one (initially empty)
    # accumulation list per output endpoint.
    op_grads = [[] for _ in xrange(len(op.outputs))]
    grads[op] = op_grads
  t_grads = op_grads[t.value_index]
  if not isinstance(t_grads, list):
    # Only loop switch ops store a bare gradient instead of a list; for
    # them the entry is overwritten rather than accumulated.
    assert control_flow_ops.IsLoopSwitch(op)
    op_grads[t.value_index] = grad
  else:
    t_grads.append(grad)
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
  """Flatten an IndexedSlices whose `values` may itself be IndexedSlices."""
  assert isinstance(grad, ops.IndexedSlices)
  if isinstance(grad.values, ops.Tensor):
    # Base case: already flat.
    return grad
  assert isinstance(grad.values, ops.IndexedSlices)
  # Flatten the inner slices first, then compose the two index levels by
  # gathering the outer indices at the inner positions.
  inner = _HandleNestedIndexedSlices(grad.values)
  return ops.IndexedSlices(inner.values,
                           array_ops.gather(grad.indices, inner.indices),
                           inner.dense_shape)
def _AccumulatorShape(inputs):
  """Merge the static shapes of all Tensor elements of `inputs` into one."""
  merged = tensor_shape.unknown_shape()
  for item in inputs:
    if isinstance(item, ops.Tensor):
      merged = merged.merge_with(item.get_shape())
  return merged
def _LogOpGradients(op, out_grads, in_grads):
  """Log the in and out grads of an op."""
  logging.vlog(1, "Gradient for '" + op.name + "'")

  def _FilterGrad(x):
    # Keep anything except None and empty lists/tuples.
    if x is None:
      return False
    if isinstance(x, (list, tuple)):
      return bool(x)
    return True

  logging.vlog(1, "  in --> %s",
               ", ".join(x.name for x in out_grads if _FilterGrad(x)))
  logging.vlog(1, "  out --> %s",
               ", ".join(x.name for x in in_grads if _FilterGrad(x)))
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Group the tensors by the device they live on.
  tensors_on_device = collections.defaultdict(list)
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # Sum per device first, then sum the per-device partial sums.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  def _DeviceKey(dev):
    return "" if dev is None else dev

  summands = []
  for dev in sorted(six.iterkeys(tensors_on_device), key=_DeviceKey):
    tensors = tensors_on_device[dev]
    with ops.colocate_with(tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))
  return math_ops.add_n(summands)
class AggregationMethod(object):
  """A class listing aggregation methods used to combine gradients.

  Computing partial derivatives can require aggregating gradient
  contributions. This class lists the various methods that can
  be used to combine gradients in the graph:

  * `ADD_N`: All of the gradient terms are summed as part of one
    operation using the "AddN" op. It has the property that all
    gradients must be ready before any aggregation is performed.
  * `DEFAULT`: The system-chosen default aggregation method.
  """
  # Sum every contribution in a single AddN op.
  ADD_N = 0
  # The default currently aliases ADD_N.
  DEFAULT = ADD_N
  # The following are experimental and may not be supported in future releases.
  # Pairwise-tree summation (see _AggregatedGrads "tree" branch).
  EXPERIMENTAL_TREE = 1
  # AccumulateN-based summation when shapes are fully defined.
  EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    loop_state: An object for maintaining the state of the while loops in the
                graph. It is of type ControlFlowState. None if the graph
                contains no while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`. If the gradients
    for a particular output is a list, this function aggregates it
    before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]:
    raise ValueError("Invalid aggregation_method specified %s." %
                     aggregation_method)
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      # A bare (non-list) entry can only come from a loop switch op; leave
      # it as-is.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert control_flow_ops.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices
    if (isinstance(out_grad, collections.Sequence) and not all([
        isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
        if g is not None
    ])):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
        # NOTE(review): this local deliberately shadows the `tensor_shape`
        # module imported at file scope; only the local is used below.
        tensor_shape = _AccumulatorShape(out_grad)
        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint. When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable. If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad)
        logging.vlog(2, "  _AggregatedGrads %d x %s using %s",
                     len(out_grad), tensor_shape, used)
      else:
        # At least one gradient is an IndexedSlices: flatten any nesting and
        # concatenate all contributions into a single IndexedSlices.
        out_grad = math_ops._as_indexed_slices_list(
            [g for g in out_grad if g is not None])
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat([x.values for x in out_grad], 0),
            array_ops.concat([x.indices for x in out_grad], 0),
            out_grad[0].dense_shape)
    else:  # not out_grad
      # out_grads[i] is [], thus its aggregation is simply None.
      out_grads[i] = None
  return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
             gate_gradients=False, aggregation_method=None):
  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
  with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
  where each tensor is the Hessian of `sum(ys)`. This function currently
  only supports evaluating the Hessian with respect to (a list of) one-
  dimensional tensors.

  The Hessian is a matrix of second-order partial derivatives of a scalar
  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'hessians'.
    colocate_gradients_with_ops: See `gradients()` documentation for details.
    gate_gradients: See `gradients()` documentation for details.
    aggregation_method: See `gradients()` documentation for details.

  Returns:
    A list of Hessian matrices of `sum(y)` for each `x` in `xs`.

  Raises:
    LookupError: if one of the operations between `xs` and `ys` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid or not supported. Currently,
      this function only supports one-dimensional `x` in `xs`.
  """
  xs = _AsList(xs)
  kwargs = {
      'colocate_gradients_with_ops': colocate_gradients_with_ops,
      'gate_gradients': gate_gradients,
      'aggregation_method': aggregation_method
  }
  hessians = []
  for i, x in enumerate(xs):
    # Only rank-1 inputs with a statically known rank are supported.
    ndims = x.get_shape().ndims
    if ndims is None:
      raise ValueError('Cannot compute Hessian because the dimensionality of '
                       'element number %d of `xs` cannot be determined' % i)
    elif ndims != 1:
      raise ValueError('Computing hessians is currently only supported for '
                       'one-dimensional tensors. Element number %d of `xs` has '
                       '%d dimensions.' % (i, ndims))
    with ops.name_scope(name + '_first_derivative'):
      # Gradient of sum(ys) w.r.t. x, unpacked into one tensor per element
      # so each element can be differentiated again.
      first_derivs = array_ops.unstack(gradients(ys, x, **kwargs)[0])
    with ops.name_scope(name + '_second_derivative'):
      # One Hessian row per element of the first derivative.
      rows = [gradients(deriv, x, **kwargs)[0] for deriv in first_derivs]
    # Pack the rows into a matrix and add to the list of hessians.
    hessians.append(array_ops.stack(rows, name=name))
  return hessians
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReportInProductGet(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'custom_field_filter': 'str',
'date_range_custom_from_date': 'str',
'date_range_custom_to_date': 'str',
'date_range_filter': 'str',
'display_order': 'str',
'envelope_date_type_filter': 'str',
'envelope_recipient_name_contains_filter': 'str',
'envelope_status_filter': 'str',
'envelope_subject_contains_filter': 'str',
'fields': 'list[ReportInProductField]',
'last_scheduled_execution_date': 'str',
'last_scheduled_execution_success_date': 'str',
'max_download_rows': 'str',
'max_grid_rows': 'str',
'max_scheduled_rows': 'str',
'period_length_filter': 'str',
'report_customized_id': 'str',
'report_description': 'str',
'report_id': 'str',
'report_name': 'str',
'report_type': 'str',
'run_uri': 'str',
'save_uri': 'str',
'schedule_id': 'str',
'sent_by_details': 'ReportInProductSentByDetails',
'sent_by_filter': 'str',
'sent_by_ids': 'str',
'sort_field_direction': 'str',
'sort_field_name': 'str'
}
attribute_map = {
'custom_field_filter': 'customFieldFilter',
'date_range_custom_from_date': 'dateRangeCustomFromDate',
'date_range_custom_to_date': 'dateRangeCustomToDate',
'date_range_filter': 'dateRangeFilter',
'display_order': 'displayOrder',
'envelope_date_type_filter': 'envelopeDateTypeFilter',
'envelope_recipient_name_contains_filter': 'envelopeRecipientNameContainsFilter',
'envelope_status_filter': 'envelopeStatusFilter',
'envelope_subject_contains_filter': 'envelopeSubjectContainsFilter',
'fields': 'fields',
'last_scheduled_execution_date': 'lastScheduledExecutionDate',
'last_scheduled_execution_success_date': 'lastScheduledExecutionSuccessDate',
'max_download_rows': 'maxDownloadRows',
'max_grid_rows': 'maxGridRows',
'max_scheduled_rows': 'maxScheduledRows',
'period_length_filter': 'periodLengthFilter',
'report_customized_id': 'reportCustomizedId',
'report_description': 'reportDescription',
'report_id': 'reportId',
'report_name': 'reportName',
'report_type': 'reportType',
'run_uri': 'runUri',
'save_uri': 'saveUri',
'schedule_id': 'scheduleId',
'sent_by_details': 'sentByDetails',
'sent_by_filter': 'sentByFilter',
'sent_by_ids': 'sentByIds',
'sort_field_direction': 'sortFieldDirection',
'sort_field_name': 'sortFieldName'
}
def __init__(self, custom_field_filter=None, date_range_custom_from_date=None, date_range_custom_to_date=None, date_range_filter=None, display_order=None, envelope_date_type_filter=None, envelope_recipient_name_contains_filter=None, envelope_status_filter=None, envelope_subject_contains_filter=None, fields=None, last_scheduled_execution_date=None, last_scheduled_execution_success_date=None, max_download_rows=None, max_grid_rows=None, max_scheduled_rows=None, period_length_filter=None, report_customized_id=None, report_description=None, report_id=None, report_name=None, report_type=None, run_uri=None, save_uri=None, schedule_id=None, sent_by_details=None, sent_by_filter=None, sent_by_ids=None, sort_field_direction=None, sort_field_name=None):  # noqa: E501
    """ReportInProductGet - a model defined in Swagger"""  # noqa: E501
    # Capture the constructor arguments before creating any other locals.
    # CPython preserves parameter declaration order in locals(), so the
    # loops below touch the attributes in the same order as the generated
    # per-attribute code did.
    ctor_args = dict(locals())
    del ctor_args['self']
    # First initialise every private backing attribute to None ...
    for attr_name in ctor_args:
        setattr(self, '_' + attr_name, None)
    self.discriminator = None
    # ... then route every explicitly-passed value through its property
    # setter, exactly as the expanded generated code does.
    for attr_name, attr_value in ctor_args.items():
        if attr_value is not None:
            setattr(self, attr_name, attr_value)
@property
def custom_field_filter(self):
    """str: the custom_field_filter of this ReportInProductGet."""
    return self._custom_field_filter

@custom_field_filter.setter
def custom_field_filter(self, custom_field_filter):
    """Set the custom_field_filter (str) of this ReportInProductGet."""
    self._custom_field_filter = custom_field_filter

@property
def date_range_custom_from_date(self):
    """str: the date_range_custom_from_date of this ReportInProductGet."""
    return self._date_range_custom_from_date

@date_range_custom_from_date.setter
def date_range_custom_from_date(self, date_range_custom_from_date):
    """Set the date_range_custom_from_date (str) of this ReportInProductGet."""
    self._date_range_custom_from_date = date_range_custom_from_date

@property
def date_range_custom_to_date(self):
    """str: the date_range_custom_to_date of this ReportInProductGet."""
    return self._date_range_custom_to_date

@date_range_custom_to_date.setter
def date_range_custom_to_date(self, date_range_custom_to_date):
    """Set the date_range_custom_to_date (str) of this ReportInProductGet."""
    self._date_range_custom_to_date = date_range_custom_to_date

@property
def date_range_filter(self):
    """str: the date_range_filter of this ReportInProductGet."""
    return self._date_range_filter

@date_range_filter.setter
def date_range_filter(self, date_range_filter):
    """Set the date_range_filter (str) of this ReportInProductGet."""
    self._date_range_filter = date_range_filter

@property
def display_order(self):
    """str: the display_order of this ReportInProductGet."""
    return self._display_order

@display_order.setter
def display_order(self, display_order):
    """Set the display_order (str) of this ReportInProductGet."""
    self._display_order = display_order
@property
def envelope_date_type_filter(self):
    """str: the envelope_date_type_filter of this ReportInProductGet."""
    return self._envelope_date_type_filter

@envelope_date_type_filter.setter
def envelope_date_type_filter(self, envelope_date_type_filter):
    """Set the envelope_date_type_filter (str) of this ReportInProductGet."""
    self._envelope_date_type_filter = envelope_date_type_filter

@property
def envelope_recipient_name_contains_filter(self):
    """str: the envelope_recipient_name_contains_filter of this ReportInProductGet."""
    return self._envelope_recipient_name_contains_filter

@envelope_recipient_name_contains_filter.setter
def envelope_recipient_name_contains_filter(self, envelope_recipient_name_contains_filter):
    """Set the envelope_recipient_name_contains_filter (str) of this ReportInProductGet."""
    self._envelope_recipient_name_contains_filter = envelope_recipient_name_contains_filter

@property
def envelope_status_filter(self):
    """str: the envelope_status_filter of this ReportInProductGet."""
    return self._envelope_status_filter

@envelope_status_filter.setter
def envelope_status_filter(self, envelope_status_filter):
    """Set the envelope_status_filter (str) of this ReportInProductGet."""
    self._envelope_status_filter = envelope_status_filter

@property
def envelope_subject_contains_filter(self):
    """str: the envelope_subject_contains_filter of this ReportInProductGet."""
    return self._envelope_subject_contains_filter

@envelope_subject_contains_filter.setter
def envelope_subject_contains_filter(self, envelope_subject_contains_filter):
    """Set the envelope_subject_contains_filter (str) of this ReportInProductGet."""
    self._envelope_subject_contains_filter = envelope_subject_contains_filter

@property
def fields(self):
    """list[ReportInProductField]: the fields of this ReportInProductGet."""
    return self._fields

@fields.setter
def fields(self, fields):
    """Set the fields (list[ReportInProductField]) of this ReportInProductGet."""
    self._fields = fields
@property
def last_scheduled_execution_date(self):
    """str: the last_scheduled_execution_date of this ReportInProductGet."""
    return self._last_scheduled_execution_date

@last_scheduled_execution_date.setter
def last_scheduled_execution_date(self, last_scheduled_execution_date):
    """Set the last_scheduled_execution_date (str) of this ReportInProductGet."""
    self._last_scheduled_execution_date = last_scheduled_execution_date

@property
def last_scheduled_execution_success_date(self):
    """str: the last_scheduled_execution_success_date of this ReportInProductGet."""
    return self._last_scheduled_execution_success_date

@last_scheduled_execution_success_date.setter
def last_scheduled_execution_success_date(self, last_scheduled_execution_success_date):
    """Set the last_scheduled_execution_success_date (str) of this ReportInProductGet."""
    self._last_scheduled_execution_success_date = last_scheduled_execution_success_date

@property
def max_download_rows(self):
    """str: the max_download_rows of this ReportInProductGet."""
    return self._max_download_rows

@max_download_rows.setter
def max_download_rows(self, max_download_rows):
    """Set the max_download_rows (str) of this ReportInProductGet."""
    self._max_download_rows = max_download_rows

@property
def max_grid_rows(self):
    """str: the max_grid_rows of this ReportInProductGet."""
    return self._max_grid_rows

@max_grid_rows.setter
def max_grid_rows(self, max_grid_rows):
    """Set the max_grid_rows (str) of this ReportInProductGet."""
    self._max_grid_rows = max_grid_rows

@property
def max_scheduled_rows(self):
    """str: the max_scheduled_rows of this ReportInProductGet."""
    return self._max_scheduled_rows

@max_scheduled_rows.setter
def max_scheduled_rows(self, max_scheduled_rows):
    """Set the max_scheduled_rows (str) of this ReportInProductGet."""
    self._max_scheduled_rows = max_scheduled_rows
@property
def period_length_filter(self):
    """str: the period_length_filter of this ReportInProductGet."""
    return self._period_length_filter

@period_length_filter.setter
def period_length_filter(self, period_length_filter):
    """Set the period_length_filter (str) of this ReportInProductGet."""
    self._period_length_filter = period_length_filter

@property
def report_customized_id(self):
    """str: the report_customized_id of this ReportInProductGet."""
    return self._report_customized_id

@report_customized_id.setter
def report_customized_id(self, report_customized_id):
    """Set the report_customized_id (str) of this ReportInProductGet."""
    self._report_customized_id = report_customized_id

@property
def report_description(self):
    """str: the report_description of this ReportInProductGet."""
    return self._report_description

@report_description.setter
def report_description(self, report_description):
    """Set the report_description (str) of this ReportInProductGet."""
    self._report_description = report_description

@property
def report_id(self):
    """str: the report_id of this ReportInProductGet."""
    return self._report_id

@report_id.setter
def report_id(self, report_id):
    """Set the report_id (str) of this ReportInProductGet."""
    self._report_id = report_id

@property
def report_name(self):
    """str: the report_name of this ReportInProductGet."""
    return self._report_name

@report_name.setter
def report_name(self, report_name):
    """Set the report_name (str) of this ReportInProductGet."""
    self._report_name = report_name
@property
def report_type(self):
    """str: the report_type of this ReportInProductGet."""
    return self._report_type

@report_type.setter
def report_type(self, report_type):
    """Set the report_type (str) of this ReportInProductGet."""
    self._report_type = report_type

@property
def run_uri(self):
    """str: the run_uri of this ReportInProductGet."""
    return self._run_uri

@run_uri.setter
def run_uri(self, run_uri):
    """Set the run_uri (str) of this ReportInProductGet."""
    self._run_uri = run_uri

@property
def save_uri(self):
    """str: the save_uri of this ReportInProductGet."""
    return self._save_uri

@save_uri.setter
def save_uri(self, save_uri):
    """Set the save_uri (str) of this ReportInProductGet."""
    self._save_uri = save_uri

@property
def schedule_id(self):
    """str: the schedule_id of this ReportInProductGet."""
    return self._schedule_id

@schedule_id.setter
def schedule_id(self, schedule_id):
    """Set the schedule_id (str) of this ReportInProductGet."""
    self._schedule_id = schedule_id

@property
def sent_by_details(self):
    """ReportInProductSentByDetails: the sent_by_details of this ReportInProductGet."""
    return self._sent_by_details

@sent_by_details.setter
def sent_by_details(self, sent_by_details):
    """Set the sent_by_details (ReportInProductSentByDetails) of this ReportInProductGet."""
    self._sent_by_details = sent_by_details
@property
def sent_by_filter(self):
    """str: the sent_by_filter of this ReportInProductGet."""
    return self._sent_by_filter

@sent_by_filter.setter
def sent_by_filter(self, sent_by_filter):
    """Set the sent_by_filter (str) of this ReportInProductGet."""
    self._sent_by_filter = sent_by_filter

@property
def sent_by_ids(self):
    """str: the sent_by_ids of this ReportInProductGet."""
    return self._sent_by_ids

@sent_by_ids.setter
def sent_by_ids(self, sent_by_ids):
    """Set the sent_by_ids (str) of this ReportInProductGet."""
    self._sent_by_ids = sent_by_ids

@property
def sort_field_direction(self):
    """str: the sort_field_direction of this ReportInProductGet."""
    return self._sort_field_direction

@sort_field_direction.setter
def sort_field_direction(self, sort_field_direction):
    """Set the sort_field_direction (str) of this ReportInProductGet."""
    self._sort_field_direction = sort_field_direction

@property
def sort_field_name(self):
    """str: the sort_field_name of this ReportInProductGet."""
    return self._sort_field_name

@sort_field_name.setter
def sort_field_name(self, sort_field_name):
    """Set the sort_field_name (str) of this ReportInProductGet."""
    self._sort_field_name = sort_field_name
def to_dict(self):
    """Return the model properties as a plain dict."""
    def _unpack(val):
        # Nested model objects expose to_dict(); everything else is kept
        # as-is.
        return val.to_dict() if hasattr(val, "to_dict") else val

    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [_unpack(item) for item in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {key: _unpack(val) for key, val in value.items()}
        else:
            result[attr] = value
    # Generated models may subclass dict; merge any raw entries through.
    if issubclass(ReportInProductGet, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of the model."""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()

def __eq__(self, other):
    """Return True when ``other`` is an equal ReportInProductGet."""
    if isinstance(other, ReportInProductGet):
        return self.__dict__ == other.__dict__
    return False

def __ne__(self, other):
    """Return True when the two objects are not equal."""
    return not self == other
| |
"""Dispatching code for Selection operations.
"""
import functools
import operator
from collections import OrderedDict
from operator import methodcaller
from typing import Optional
import pandas as pd
from multipledispatch import Dispatcher
from toolz import compose, concat, concatv, first, unique
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.expr.scope import Scope
from ibis.expr.typing import TimeContext
from ..core import execute
from ..dispatch import execute_node
from ..execution import constants, util
from ..execution.util import coerce_to_output
# Multiple-dispatch entry point for projections: the registrations below
# select an implementation based on whether the projected expression is a
# scalar, a column, or a whole table.
compute_projection = Dispatcher(
    'compute_projection',
    doc="""\
Compute a projection, dispatching on whether we're computing a scalar, column,
or table expression.
Parameters
----------
expr : Union[ir.ScalarExpr, ir.ColumnExpr, ir.TableExpr]
parent : ops.Selection
data : pd.DataFrame
scope : Scope
timecontext:Optional[TimeContext]
Returns
-------
value : scalar, pd.Series, pd.DataFrame
Notes
-----
:class:`~ibis.expr.types.ScalarExpr` instances occur when a specific column
projection is a window operation.
""",
)
@compute_projection.register(ir.ScalarExpr, ops.Selection, pd.DataFrame)
def compute_projection_scalar_expr(
    expr,
    parent,
    data,
    scope: Scope = None,
    timecontext: Optional[TimeContext] = None,
    **kwargs,
):
    """Project a scalar expression by broadcasting its value over the index
    of ``data``.
    """
    name = expr._name
    assert name is not None, 'Scalar selection name is None'
    op = expr.op()
    parent_op = parent.table.op()
    available_columns = frozenset(data.columns)
    if scope is None:
        scope = Scope()
    # Bind each root table of the expression to a (possibly suffix-renamed)
    # view of the data so the scalar is computed against the joined result.
    extra_scopes = (
        Scope(
            {
                root: map_new_column_names_to_data(
                    remap_overlapping_column_names(
                        parent_op, root, available_columns
                    ),
                    data,
                )
            },
            timecontext,
        )
        for root in op.root_tables()
    )
    scope = scope.merge_scopes(extra_scopes)
    scalar_value = execute(expr, scope=scope, **kwargs)
    # Repeat the scalar once per row and realign to the original index.
    broadcast = pd.Series([scalar_value], name=name).repeat(len(data.index))
    broadcast.index = data.index
    return broadcast
@compute_projection.register(ir.ColumnExpr, ops.Selection, pd.DataFrame)
def compute_projection_column_expr(
    expr,
    parent,
    data,
    scope: Scope,
    timecontext: Optional[TimeContext],
    **kwargs,
):
    """Project a column expression against ``data``."""
    result_name = getattr(expr, '_name', None)
    op = expr.op()
    parent_op = parent.table.op()
    if isinstance(op, ops.TableColumn):
        # slightly faster path for simple column selection
        name = op.name
        assert isinstance(name, str)
        if name in data:
            return data[name].rename(result_name or name)
        # The bare name is absent: that is only legal when the parent is a
        # join whose overlapping columns were disambiguated with suffixes.
        if not isinstance(parent_op, ops.Join):
            raise KeyError(name)
        (root_table,) = op.root_tables()
        left_root, right_root = ops.distinct_roots(
            parent_op.left, parent_op.right
        )
        suffixes = {
            left_root: constants.LEFT_JOIN_SUFFIX,
            right_root: constants.RIGHT_JOIN_SUFFIX,
        }
        suffixed_name = name + suffixes[root_table]
        return data.loc[:, suffixed_name].rename(result_name or name)
    available_columns = frozenset(data.columns)
    scope = scope.merge_scopes(
        Scope(
            {
                root: map_new_column_names_to_data(
                    remap_overlapping_column_names(
                        parent_op, root, available_columns
                    ),
                    data,
                )
            },
            timecontext,
        )
        for root in op.root_tables()
    )
    result = coerce_to_output(
        execute(expr, scope=scope, timecontext=timecontext, **kwargs),
        expr,
        data.index,
    )
    assert result_name is not None, 'Column selection name is None'
    return result
@compute_projection.register(ir.TableExpr, ops.Selection, pd.DataFrame)
def compute_projection_table_expr(expr, parent, data, **kwargs):
    """Project a whole table expression: either the parent table itself, or
    one side of the join the parent selects from.
    """
    if expr is parent.table:
        return data
    parent_op = parent.table.op()
    # Anything other than the parent table itself must be one side of a
    # join; rename its suffixed columns back to their original names.
    assert isinstance(parent_op, ops.Join)
    assert expr.equals(parent_op.left) or expr.equals(parent_op.right)
    mapping = remap_overlapping_column_names(
        parent_op,
        root_table=expr.op(),
        data_columns=frozenset(data.columns),
    )
    return map_new_column_names_to_data(mapping, data)
def remap_overlapping_column_names(table_op, root_table, data_columns):
    """Return an ``OrderedDict`` mapping possibly suffixed column names to
    column names without suffixes.

    Parameters
    ----------
    table_op : TableNode
        The ``TableNode`` we're selecting from.
    root_table : TableNode
        The root table of the expression we're selecting from.
    data_columns : set or frozenset
        The available columns to select from

    Returns
    -------
    mapping : OrderedDict[str, str] or None
        ``None`` when ``table_op`` is not a join (no remapping needed);
        otherwise a map from possibly-suffixed names to unsuffixed names.
    """
    if not isinstance(table_op, ops.Join):
        return None
    left_root, right_root = ops.distinct_roots(table_op.left, table_op.right)
    # Pick the suffix corresponding to the side of the join this root table
    # came from.
    suffix = {
        left_root: constants.LEFT_JOIN_SUFFIX,
        right_root: constants.RIGHT_JOIN_SUFFIX,
    }[root_table]
    mapping = OrderedDict()
    for name in root_table.schema.names:
        # Whichever spelling (bare or suffixed) survives in the data is the
        # key; skip columns that appear under neither spelling.
        present = {name, name + suffix} & data_columns
        if present:
            mapping[first(present)] = name
    return mapping
def map_new_column_names_to_data(mapping, df):
    """Select and rename the columns of ``df`` according to ``mapping``.

    ``mapping`` maps existing (possibly suffixed) column names to new names;
    a ``None`` mapping means no remapping is required and ``df`` is returned
    unchanged.
    """
    if mapping is None:
        return df
    return df.loc[:, mapping.keys()].rename(columns=mapping)
def _compute_predicates(
    table_op,
    predicates,
    data,
    scope: Scope,
    timecontext: Optional[TimeContext],
    **kwargs,
):
    """Compute the predicates for a table operation.

    Parameters
    ----------
    table_op : TableNode
    predicates : List[ir.ColumnExpr]
    data : pd.DataFrame
    scope : Scope
    timecontext: Optional[TimeContext]
    kwargs : dict

    Yields
    ------
    computed_predicate : pd.Series[bool]
        One boolean mask per input predicate, computed lazily.

    Notes
    -----
    This handles the cases where the predicates are computed columns, in
    addition to the simple case of named columns coming directly from the input
    table.
    """
    for predicate in predicates:
        # Map each root table of the predicate to the data so that we compute
        # predicates on the result instead of any left or right tables if the
        # Selection is on a Join. Project data to only include columns from
        # the root table.
        root_tables = predicate.op().root_tables()
        # handle suffixes
        data_columns = frozenset(data.columns)
        additional_scope = Scope()
        for root_table in root_tables:
            mapping = remap_overlapping_column_names(
                table_op, root_table, data_columns
            )
            # A None mapping means the parent is not a join, so the data can
            # be used as-is without renaming.
            if mapping is not None:
                new_data = data.loc[:, mapping.keys()].rename(columns=mapping)
            else:
                new_data = data
            additional_scope = additional_scope.merge_scope(
                Scope({root_table: new_data}, timecontext)
            )
        scope = scope.merge_scope(additional_scope)
        yield execute(predicate, scope=scope, **kwargs)
# Multiple-dispatch walker that descends an expression tree to its base
# (physical) table nodes; registrations below cover Selection,
# PhysicalTable, Join, and the generic Node fallback.
physical_tables = Dispatcher(
    'physical_tables',
    doc="""\
Return the underlying physical tables nodes of a
:class:`~ibis.expr.types.Node`.
Parameters
----------
op : ops.Node
Returns
-------
tables : List[ops.Node]
""",
)
@physical_tables.register(ops.Selection)
def physical_tables_selection(sel):
    # A Selection's physical roots are those of the table it selects from.
    return physical_tables(sel.table.op())
@physical_tables.register(ops.PhysicalTable)
def physical_tables_physical_table(t):
    """Return the physical roots of a physical table: the table itself."""
    # Base case. PhysicalTable nodes are their own root physical tables.
    return [t]
@physical_tables.register(ops.Join)
def physical_tables_join(join):
    """Return the unique physical roots of both sides of a join, preserving
    first-seen order.
    """
    all_roots = concat(
        physical_tables(side.op()) for side in (join.left, join.right)
    )
    return list(unique(all_roots))
@physical_tables.register(ops.Node)
def physical_tables_node(node):
    """Generic fallback: the unique physical roots of a node's root tables,
    in first-seen order.
    """
    all_roots = concat(physical_tables(t) for t in node.root_tables())
    return list(unique(all_roots))
@execute_node.register(ops.Selection, pd.DataFrame)
def execute_selection_dataframe(
    op, data, scope: Scope, timecontext: Optional[TimeContext], **kwargs
):
    """Execute a Selection node against an in-memory pandas DataFrame.

    Applies, in order: the projection list, the filter predicates, and the
    sort keys; finally drops any temporary columns added for ordering.
    """
    selections = op.selections
    predicates = op.predicates
    sort_keys = op.sort_keys
    result = data
    # Build up the individual pandas structures from column expressions
    if selections:
        data_pieces = []
        for selection in selections:
            pandas_object = compute_projection(
                selection,
                op,
                data,
                scope=scope,
                timecontext=timecontext,
                **kwargs,
            )
            data_pieces.append(pandas_object)
        # Collapse any multi-level indexes produced by the projections so
        # the pieces can be concatenated column-wise against a flat index.
        new_pieces = [
            piece.reset_index(
                level=list(range(1, piece.index.nlevels)), drop=True
            )
            if piece.index.nlevels > 1
            else piece
            for piece in data_pieces
        ]
        # Result series might be trimmed by time context, thus index may
        # have changed. To concat rows properly, we first `sort_index` on
        # each pieces then assign data index manually to series
        for i in range(len(new_pieces)):
            assert len(new_pieces[i].index) == len(data.index)
            new_pieces[i] = new_pieces[i].sort_index()
            new_pieces[i].index = data.index
        result = pd.concat(new_pieces, axis=1)
    if predicates:
        # _compute_predicates yields one boolean mask per predicate; AND
        # them all into a single row filter.
        predicates = _compute_predicates(
            op.table.op(), predicates, data, scope, timecontext, **kwargs
        )
        predicate = functools.reduce(operator.and_, predicates)
        assert len(predicate) == len(
            result
        ), 'Selection predicate length does not match underlying table'
        result = result.loc[predicate]
    if sort_keys:
        result, grouping_keys, ordering_keys = util.compute_sorted_frame(
            result,
            order_by=sort_keys,
            scope=scope,
            timecontext=timecontext,
            **kwargs,
        )
    else:
        grouping_keys = ordering_keys = ()
    # return early if we do not have any temporary grouping or ordering columns
    assert not grouping_keys, 'group by should never show up in Selection'
    if not ordering_keys:
        return result
    # create a sequence of columns that we need to drop
    temporary_columns = pd.Index(
        concatv(grouping_keys, ordering_keys)
    ).difference(data.columns)
    # no reason to call drop if we don't need to
    if temporary_columns.empty:
        return result
    # drop every temporary column we created for ordering or grouping
    return result.drop(temporary_columns, axis=1)
| |
# -*- coding: utf-8 -*-
"""
babel.util
~~~~~~~~~~
Various utility classes and functions.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import codecs
from datetime import timedelta, tzinfo
import os
import re
import textwrap
from babel._compat import izip, imap
missing = object()
def distinct(iterable):
    """Yield the items of an iterable collection, skipping duplicates.

    Unlike a ``set``, the original ordering of the items is preserved.

    >>> list(distinct([1, 2, 1, 3, 4, 4]))
    [1, 2, 3, 4]
    >>> list(distinct('foobar'))
    ['f', 'o', 'b', 'a', 'r']

    :param iterable: the iterable collection providing the data
    """
    seen = set()
    for item in iterable:
        if item in seen:
            continue
        seen.add(item)
        yield item
# Regexp to match python magic encoding line
PYTHON_MAGIC_COMMENT_re = re.compile(
    br'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)

def parse_encoding(fp):
    """Deduce the encoding of a source file from magic comment.

    It does this in the same way as the `Python interpreter`__

    .. __: https://docs.python.org/3.4/reference/lexical_analysis.html#encoding-declarations

    The ``fp`` argument should be a seekable binary file object; its
    position is restored before returning.

    (From Jeff Dairiki)

    :param fp: seekable binary file object
    :return: the declared encoding name, ``'utf-8'`` for BOM-prefixed
             files, or ``None`` when no declaration is found
    :raises SyntaxError: if a UTF-8 BOM coexists with a non-utf-8 magic
                         comment
    """
    pos = fp.tell()
    fp.seek(0)
    try:
        line1 = fp.readline()
        has_bom = line1.startswith(codecs.BOM_UTF8)
        if has_bom:
            line1 = line1[len(codecs.BOM_UTF8):]

        m = PYTHON_MAGIC_COMMENT_re.match(line1)
        if not m:
            try:
                # The historical `parser.suite` check was removed along
                # with the `parser` module in Python 3.10; `ast.parse`
                # raises SyntaxError for the same inputs on all supported
                # versions.
                import ast
                ast.parse(line1.decode('latin-1'))
            except (ImportError, SyntaxError):
                # Either it's a real syntax error, in which case the source
                # is not valid python source, or line2 is a continuation of
                # line1, in which case we don't want to scan line2 for a
                # magic comment.
                pass
            else:
                line2 = fp.readline()
                m = PYTHON_MAGIC_COMMENT_re.match(line2)

        if has_bom:
            if m:
                magic_comment_encoding = m.group(1).decode('latin-1')
                if magic_comment_encoding != 'utf-8':
                    raise SyntaxError(
                        'encoding problem: {0} with BOM'.format(
                            magic_comment_encoding))
            return 'utf-8'
        elif m:
            return m.group(1).decode('latin-1')
        else:
            return None
    finally:
        fp.seek(pos)
def pathmatch(pattern, filename):
    """Extended pathname pattern matching.

    This function is similar to what is provided by the ``fnmatch`` module in
    the Python standard library, but:

     * can match complete (relative or absolute) path names, and not just file
       names, and
     * also supports a convenience pattern ("**") to match files at any
       directory level.

    Examples:

    >>> pathmatch('**.py', 'bar.py')
    True
    >>> pathmatch('**.py', 'foo/bar/baz.py')
    True
    >>> pathmatch('**.py', 'templates/index.html')
    False
    >>> pathmatch('**/templates/*.html', 'templates/index.html')
    True
    >>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
    False

    :param pattern: the glob pattern
    :param filename: the path name of the file to match against
    """
    # Translation table from glob wildcard runs to regexp fragments.
    symbols = {
        '?': '[^/]',
        '?/': '[^/]/',
        '*': '[^/]+',
        '*/': '[^/]+/',
        '**/': '(?:.+/)*?',
        '**': '(?:.+/)*?[^/]+',
    }
    # re.split with a capturing group alternates literal text (even
    # indices) and wildcard runs (odd indices).
    pieces = []
    for position, fragment in enumerate(re.split('([?*]+/?)', pattern)):
        if position % 2 == 0:
            if fragment:
                pieces.append(re.escape(fragment))
        else:
            pieces.append(symbols[fragment])
    normalized = filename.replace(os.sep, '/')
    return re.match(''.join(pieces) + '$', normalized) is not None
class TextWrapper(textwrap.TextWrapper):
    # Overrides the word-splitting regexp of the standard library's
    # TextWrapper so that words are only split on whitespace or on
    # em-dashes ("--" and longer runs), never on single hyphens inside
    # hyphenated words.
    wordsep_re = re.compile(
        r'(\s+|'  # any whitespace
        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))'  # em-dash
    )
def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
    """Simple wrapper around the ``textwrap.wrap`` function in the standard
    library. This version does not wrap lines on hyphens in words.

    :param text: the text to wrap
    :param width: the maximum line width
    :param initial_indent: string that will be prepended to the first line of
                           wrapped output
    :param subsequent_indent: string that will be prepended to all lines save
                              the first of wrapped output
    """
    return TextWrapper(width=width,
                       initial_indent=initial_indent,
                       subsequent_indent=subsequent_indent,
                       break_long_words=False).wrap(text)
class odict(dict):
    """Ordered dict implementation.

    :see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
    """

    def __init__(self, data=None):
        dict.__init__(self, data or {})
        # Explicit key list preserving insertion order.
        self._keys = list(dict.keys(self))

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._keys.remove(key)

    def __setitem__(self, key, item):
        new_key = key not in self
        dict.__setitem__(self, key, item)
        if new_key:
            self._keys.append(key)

    def __iter__(self):
        return iter(self._keys)
    iterkeys = __iter__

    def clear(self):
        dict.clear(self)
        self._keys = []

    def copy(self):
        d = odict()
        d.update(self)
        return d

    def items(self):
        return zip(self._keys, self.values())

    def iteritems(self):
        return izip(self._keys, self.itervalues())

    def keys(self):
        # Return a copy so callers can't corrupt the internal key list.
        return self._keys[:]

    def pop(self, key, default=missing):
        """Remove *key* and return its value, or *default* when given.

        Raises ``KeyError`` when the key is absent and no default was
        supplied.
        """
        try:
            value = dict.pop(self, key)
            self._keys.remove(key)
            return value
        except KeyError as e:
            # Fixed: compare with ``is`` — the sentinel marks "no default
            # given"; an equality check could be fooled by arbitrary
            # objects overriding __eq__.
            if default is missing:
                raise e
            else:
                return default

    def popitem(self, key):
        """Remove *key* and return its ``(key, value)`` pair.

        Fixed: the previous implementation called ``dict.popitem(key)``,
        which passed the key as ``self`` and raised a ``TypeError``.
        """
        value = dict.pop(self, key)  # raises KeyError for missing keys
        self._keys.remove(key)
        return (key, value)

    def setdefault(self, key, failobj=None):
        value = dict.setdefault(self, key, failobj)
        if key not in self._keys:
            self._keys.append(key)
        # Fixed: return the stored value, matching the dict.setdefault
        # contract (the original implicitly returned None).
        return value

    def update(self, dict):
        # NOTE: the parameter shadows the builtin ``dict``; kept for
        # backward compatibility with keyword callers.
        for (key, val) in dict.items():
            self[key] = val

    def values(self):
        return map(self.get, self._keys)

    def itervalues(self):
        return imap(self.get, self._keys)
# os.path.relpath was only added in Python 2.6; fall back to a pure-Python
# implementation on older interpreters.
try:
    relpath = os.path.relpath
except AttributeError:
    def relpath(path, start='.'):
        """Compute the relative path to one path from another.

        >>> relpath('foo/bar.txt', '').replace(os.sep, '/')
        'foo/bar.txt'
        >>> relpath('foo/bar.txt', 'foo').replace(os.sep, '/')
        'bar.txt'
        >>> relpath('foo/bar.txt', 'baz').replace(os.sep, '/')
        '../foo/bar.txt'
        """
        start_list = os.path.abspath(start).split(os.sep)
        path_list = os.path.abspath(path).split(os.sep)

        # Work out how much of the filepath is shared by start and path.
        i = len(os.path.commonprefix([start_list, path_list]))

        # Climb out of the unshared part of *start*, then descend into the
        # unshared part of *path*.
        rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
        return os.path.join(*rel_list)
class FixedOffsetTimezone(tzinfo):
    """A ``tzinfo`` implementation with a fixed offset, expressed in
    minutes east from UTC."""

    def __init__(self, offset, name=None):
        # The offset argument is in minutes east of UTC.
        self._offset = timedelta(minutes=offset)
        self.zone = 'Etc/GMT+%d' % offset if name is None else name

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "%s" %s>' % (self.zone, self._offset)

    def utcoffset(self, dt):
        # The offset never varies with the date for a fixed-offset zone.
        return self._offset

    def tzname(self, dt):
        return self.zone

    def dst(self, dt):
        # A fixed-offset zone never observes daylight saving time.
        return ZERO
import pytz as _pytz
from babel import localtime
# Export the localtime functionality here because that's
# where it was in the past.
UTC = _pytz.utc
LOCALTZ = localtime.LOCALTZ
get_localzone = localtime.get_localzone
STDOFFSET = localtime.STDOFFSET
DSTOFFSET = localtime.DSTOFFSET
DSTDIFF = localtime.DSTDIFF
ZERO = localtime.ZERO
| |
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class VanillaGrad(object):
def __init__(self, pretrained_model, cuda=False):
self.pretrained_model = pretrained_model
self.features = pretrained_model.features
self.cuda = cuda
#self.pretrained_model.eval()
def __call__(self, x, index=None):
output = self.pretrained_model(x)
if index is None:
index = np.argmax(output.data.cpu().numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
if self.cuda:
one_hot = Variable(torch.from_numpy(one_hot).cuda(), requires_grad=True)
else:
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
one_hot.backward(retain_variables=True)
grad = x.grad.data.cpu().numpy()
grad = grad[0, :, :, :]
return grad
class SmoothGrad(VanillaGrad):
def __init__(self, pretrained_model, cuda=False, stdev_spread=0.15,
n_samples=25, magnitude=True):
super(SmoothGrad, self).__init__(pretrained_model, cuda)
"""
self.pretrained_model = pretrained_model
self.features = pretrained_model.features
self.cuda = cuda
self.pretrained_model.eval()
"""
self.stdev_spread = stdev_spread
self.n_samples = n_samples
self.magnitutde = magnitude
def __call__(self, x, index=None):
x = x.data.cpu().numpy()
stdev = self.stdev_spread * (np.max(x) - np.min(x))
total_gradients = np.zeros_like(x)
for i in range(self.n_samples):
noise = np.random.normal(0, stdev, x.shape).astype(np.float32)
x_plus_noise = x + noise
if self.cuda:
x_plus_noise = Variable(torch.from_numpy(x_plus_noise).cuda(), requires_grad=True)
else:
x_plus_noise = Variable(torch.from_numpy(x_plus_noise), requires_grad=True)
output = self.pretrained_model(x_plus_noise)
if index is None:
index = np.argmax(output.data.cpu().numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][index] = 1
if self.cuda:
one_hot = Variable(torch.from_numpy(one_hot).cuda(), requires_grad=True)
else:
one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
one_hot = torch.sum(one_hot * output)
if x_plus_noise.grad is not None:
x_plus_noise.grad.data.zero_()
one_hot.backward(retain_variables=True)
grad = x_plus_noise.grad.data.cpu().numpy()
if self.magnitutde:
total_gradients += (grad * grad)
else:
total_gradients += grad
#if self.visdom:
avg_gradients = total_gradients[0, :, :, :] / self.n_samples
return avg_gradients
class GuidedBackpropReLU(torch.autograd.Function):
    # NOTE(review): this uses the legacy, instance-style autograd Function
    # API (plain forward/backward methods with self.save_for_backward).
    # Modern PyTorch requires @staticmethod forward/backward taking a ctx
    # argument; confirm the targeted torch version before reuse.

    def __init__(self, inplace=False):
        super(GuidedBackpropReLU, self).__init__()
        # `inplace` is only reported by __repr__; the forward pass below
        # always allocates a fresh output tensor regardless.
        self.inplace = inplace

    def forward(self, input):
        """Standard ReLU forward: zero out negative activations."""
        pos_mask = (input > 0).type_as(input)
        # addcmul(zeros, input, mask) == input * mask, elementwise.
        output = torch.addcmul(
            torch.zeros(input.size()).type_as(input),
            input,
            pos_mask)
        self.save_for_backward(input, output)
        return output

    def backward(self, grad_output):
        """Guided-backprop backward: only propagate gradient where BOTH the
        forward activation (pos_mask_1) and the incoming gradient
        (pos_mask_2) are positive."""
        input, output = self.saved_tensors
        pos_mask_1 = (input > 0).type_as(grad_output)
        pos_mask_2 = (grad_output > 0).type_as(grad_output)
        grad_input = torch.addcmul(
            torch.zeros(input.size()).type_as(input),
            torch.addcmul(
                torch.zeros(input.size()).type_as(input), grad_output, pos_mask_1),
            pos_mask_2)
        return grad_input

    def __repr__(self):
        inplace_str = ', inplace' if self.inplace else ''
        return self.__class__.__name__ + ' (' \
            + inplace_str + ')'
class GuidedBackpropGrad(VanillaGrad):
    """Vanilla gradient saliency computed with guided backpropagation:
    every ReLU in the model's feature stack is replaced with
    GuidedBackpropReLU before gradients are taken."""

    def __init__(self, pretrained_model, cuda=False):
        super(GuidedBackpropGrad, self).__init__(pretrained_model, cuda)
        for idx, module in self.features._modules.items():
            # Fixed: the original used `is 'ReLU'`, an identity comparison
            # between strings that is not guaranteed to hold even for
            # equal values; string equality is what is meant here.
            if module.__class__.__name__ == 'ReLU':
                self.features._modules[idx] = GuidedBackpropReLU()
class GuidedBackpropSmoothGrad(SmoothGrad):
    """SmoothGrad saliency computed with guided backpropagation: every
    ReLU in the model's feature stack is replaced with
    GuidedBackpropReLU before gradients are taken."""

    def __init__(self, pretrained_model, cuda=False, stdev_spread=.15,
                 n_samples=25, magnitude=True):
        super(GuidedBackpropSmoothGrad, self).__init__(
            pretrained_model, cuda, stdev_spread, n_samples, magnitude)
        for idx, module in self.features._modules.items():
            # Fixed: `is 'ReLU'` (string identity) -> `== 'ReLU'` (string
            # equality), matching the intent of the check.
            if module.__class__.__name__ == 'ReLU':
                self.features._modules[idx] = GuidedBackpropReLU()
class FeatureExtractor(object):
def __init__(self, model, target_layers):
self.model = model
self.features = model.features
self.target_layers = target_layers
self.gradients = []
def __call__(self, x):
target_activations, output = self.extract_features(x)
output = output.view(output.size(0), -1)
output = self.model.classifier(output)
return target_activations, output
def get_gradients(self):
return self.gradients
def save_gradient(self, grad):
self.gradients.append(grad)
def extract_features(self, x):
outputs = []
for name, module in self.features._modules.items():
x = module(x)
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class GradCam(object):
    """Grad-CAM class-activation mapping: weight the activations of a
    target layer by the spatially-pooled gradients of a class score to
    produce a coarse localization heatmap."""

    def __init__(self, pretrained_model, target_layer_names, cuda):
        """
        Args:
            pretrained_model: model exposing ``features`` and
                ``classifier`` attributes (VGG/AlexNet-style).
            target_layer_names: names of feature submodules whose
                activations/gradients drive the heatmap.
            cuda (bool): move the model and targets to the GPU when True.
        """
        self.pretrained_model = pretrained_model
        self.cuda = cuda
        if self.cuda:
            self.pretrained_model.cuda()
        self.pretrained_model.eval()
        self.extractor = FeatureExtractor(self.pretrained_model, target_layer_names)

    def __call__(self, x, index=None):
        """Return a 224x224 heatmap (numpy array normalized to [0, 1]) for
        class ``index``; defaults to the model's top prediction."""
        features, output = self.extractor(x)
        if index is None:
            index = np.argmax(output.data.cpu().numpy())
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
        if self.cuda:
            one_hot = one_hot.cuda()
        one_hot = torch.sum(one_hot * output)
        self.pretrained_model.zero_grad()
        # Fixed: `retain_variables` was removed from PyTorch (>= 0.4);
        # `retain_graph` is the supported equivalent.
        one_hot.backward(retain_graph=True)
        grads = self.extractor.get_gradients()[-1].data.cpu().numpy()
        target = features[-1].data.cpu().numpy()[0, :]
        # Global-average-pool the gradients to get one weight per channel.
        weights = np.mean(grads, axis=(2, 3))[0, :]
        cam = np.ones(target.shape[1:], dtype=np.float32)
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]
        # Keep positive influence only, upsample, then min-max normalize.
        # NOTE(review): if cam is all zeros after clipping, the final
        # division yields NaNs — confirm inputs make this unreachable.
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (224, 224))
        cam = cam - np.min(cam)
        cam = cam / np.max(cam)
        return cam
| |
# Copyright (C) 2013-2014 SignalFuse, Inc.
# Copyright (C) 2015 SignalFx, Inc.
#
# Docker container orchestration utility.
import bgtunnel
import collections
import datetime
import time
import os
# Import _strptime manually to work around a thread safety issue when using
# strptime() from threads for the first time.
import _strptime # flake8: noqa
import docker
try:
from docker.errors import APIError
except ImportError:
# Fall back to <= 0.3.1 location
from docker.client import APIError
try:
from docker.types import LogConfig
except ImportError:
# Fall back to <= 1.10 location
from docker.utils.types import LogConfig
import multiprocessing.pool
import re
import six
# For Python bug workaround
import threading
import weakref
from . import exceptions
from . import lifecycle
# Valid syntax for port spec definitions
_PORT_SPEC_REGEX = re.compile(r'^(?P<p1>\d+)(?:-(?P<p2>\d+))?(?:/(?P<proto>(tcp|udp)))?$')
_DEFAULT_PORT_PROTOCOL = 'tcp'
# Possible values for the restart policy type.
_VALID_RESTART_POLICIES = ['no', 'always', 'on-failure', 'unless-stopped']
class Entity:
    """Base class for named entities in the orchestrator."""

    def __init__(self, name):
        # The name is fixed at construction time and exposed read-only.
        self._name = name

    @property
    def name(self):
        """Get the name of this entity."""
        return self._name

    def __repr__(self):
        return self.name
class Ship(Entity):
    """A Ship that can host and run Containers.

    Ships are hosts in the infrastructure. A Docker daemon is expected to be
    running on each ship, providing control over the containers that will be
    executed there.
    """

    DEFAULT_DOCKER_PORT = 2375
    DEFAULT_DOCKER_TLS_PORT = 2376
    DEFAULT_API_VERSION = 1.15
    DEFAULT_DOCKER_TIMEOUT = 5

    def __init__(self, name, ip, endpoint=None, docker_port=None,
                 socket_path=None, api_version=None, timeout=None,
                 ssh_tunnel=None, tls=None, tls_verify=False,
                 tls_ca_cert=None, tls_cert=None, tls_key=None,
                 ssl_version=None):
        """Instantiate a new ship.

        Args:
            name (string): the name of the ship.
            ip (string): the IP address or resolvable host name of the host.
            docker_port (int): the port the Docker daemon listens on.
            socket_path (string): the path to the unix socket the Docker
                daemon listens on.
            api_version (string): the API version of the Docker daemon.
            ssh_tunnel (dict): configuration for SSH tunneling to the remote
                Docker daemon.
        """
        Entity.__init__(self, name)
        self._ip = ip
        self._endpoint = endpoint or ip
        # TLS-enabled daemons conventionally use a different default port.
        self._docker_port = int(docker_port or
                                (self.DEFAULT_DOCKER_TLS_PORT if tls
                                 else self.DEFAULT_DOCKER_PORT))
        self._socket_path = os.path.realpath(socket_path) if socket_path else None
        self._tunnel = None

        if ssh_tunnel:
            if 'user' not in ssh_tunnel:
                raise exceptions.EnvironmentConfigurationException(
                    'Missing SSH user for ship {} tunnel configuration'.format(
                        self.name))
            if 'key' not in ssh_tunnel:
                raise exceptions.EnvironmentConfigurationException(
                    'Missing SSH key for ship {} tunnel configuration'.format(
                        self.name))
            self._tunnel = bgtunnel.open(
                ssh_address=self._endpoint,
                ssh_user=ssh_tunnel['user'],
                ssh_port=int(ssh_tunnel.get('port', 22)),
                host_port=self._docker_port,
                silent=True,
                identity_file=ssh_tunnel['key'])
            # Make sure we use https through the tunnel, if tls is enabled
            proto = "https" if (tls or tls_verify) else "http"
            self._backend_url = '{:s}://localhost:{:d}'.format(
                proto, self._tunnel.bind_port)
            # Apparently bgtunnel isn't always ready right away and this
            # drastically cuts down on the timeouts
            time.sleep(1)
        elif self._socket_path is not None:
            self._backend_url = 'unix://{:s}'.format(self._socket_path)
        else:
            proto = "https" if (tls or tls_verify) else "http"
            self._backend_url = '{:s}://{:s}:{:d}'.format(
                proto, self._endpoint, self._docker_port)

        self._tls = docker.tls.TLSConfig(
            verify=tls_verify,
            client_cert=(tls_cert, tls_key),
            ca_cert=tls_ca_cert,
            ssl_version=ssl_version) if tls else None

        self._backend = docker.Client(
            base_url=self._backend_url,
            version=str(api_version or Ship.DEFAULT_API_VERSION),
            timeout=timeout or Ship.DEFAULT_DOCKER_TIMEOUT,
            tls=self._tls)

    @property
    def ip(self):
        """Returns this ship's IP address or hostname."""
        return self._ip

    @property
    def endpoint(self):
        """Returns this ship's Docker endpoint IP address or hostname."""
        return self._endpoint

    @property
    def backend(self):
        """Returns the Docker client wrapper to talk to the Docker daemon on
        this host."""
        return self._backend

    @property
    def address(self):
        """Human-readable address of this ship, including the local SSH
        tunnel port when one is in use."""
        if self._tunnel:
            return '{} (ssh:{})'.format(self.name, self._tunnel.bind_port)
        return self.name

    def get_image_ids(self):
        """Returns a dictionary of tagged images available on the Docker daemon
        running on this ship."""
        images = {}
        for image in self._backend.images():
            tags = image.get('RepoTags', [])
            # Skip untagged images, which Docker reports with the
            # '<none>:<none>' placeholder tag. Fixed: the original used
            # `tags is '<none>:<none>'`, an identity comparison between a
            # list and a string that could never be true.
            if not tags or '<none>:<none>' in tags:
                continue
            for tag in image['RepoTags']:
                images[tag] = image['Id']
        return images

    def __repr__(self):
        if self._tunnel:
            return '{}@{} via ssh://{}@{}:{}->{}'.format(
                self.name, self._ip, self._tunnel.ssh_user,
                self._endpoint, self._tunnel.bind_port, self._docker_port)
        return '{}@{} via {}'.format(self.name, self._ip, self._backend_url)
class Service(Entity):
    """A Service is a collection of Containers running on one or more Ships
    that constitutes a logical grouping of containers that make up an
    infrastructure service.

    Services may depend on each other. This dependency tree is honored when
    services need to be started.
    """

    def __init__(self, name, image, omit=True, schema=None, env=None,
                 env_name='local', lifecycle=None, limits=None, ports=None):
        """Instantiate a new named service/component of the platform using a
        given Docker image.

        By default, a service has no dependencies. Dependencies are resolved
        and added once all Service objects have been instantiated.

        Args:
            name (string): the name of this service.
            image (string): the name of the Docker image the instances of this
                service should use.
            omit (boolean): Whether to include this service in no-argument
                commands or omit it.
            schema (dict): Maestro schema versioning information.
            env (dict): a dictionary of environment variables to use as the
                base environment for all instances of this service.
            env_name (string): name of the Maestro environment.
            lifecycle (dict): a dictionary of lifecycle checks configurations.
            limits (dict): a dictionary of service limits.
            ports (dict): a dictionary of service ports.
        """
        Entity.__init__(self, name)
        self._image = image
        self._omit = omit
        self._schema = schema

        # Base environment every instance inherits, seeded with the
        # Maestro environment name and this service's name.
        self._env = env or {}
        self._env.update({
            'MAESTRO_ENVIRONMENT_NAME': env_name,
            'SERVICE_NAME': self.name,
        })

        self._lifecycle = lifecycle or {}
        self._limits = limits or {}
        self._ports = ports or {}

        # Dependency links are filled in later, once every Service object
        # has been created.
        self._requires = set([])
        self._wants_info = set([])
        self._needed_for = set([])
        self._containers = {}

    @property
    def image(self):
        return self._image

    @property
    def omit(self):
        return self._omit

    @property
    def env(self):
        return self._env

    @property
    def lifecycle(self):
        return self._lifecycle

    @property
    def limits(self):
        return self._limits

    @property
    def ports(self):
        return self._ports

    @property
    def dependencies(self):
        return self._requires

    @property
    def requires(self):
        """Returns the full set of direct and indirect dependencies of this
        service."""
        transitive = self._requires
        for dependency in self._requires:
            transitive = transitive.union(dependency.requires)
        return transitive

    @property
    def wants_info(self):
        """Returns the full set of "soft" dependencies this service wants
        information about through link environment variables."""
        return self._wants_info

    @property
    def needed_for(self):
        """Returns the full set of direct and indirect dependents (aka services
        that depend on this service)."""
        transitive = self._needed_for
        for dependent in self._needed_for:
            transitive = transitive.union(dependent.needed_for)
        return transitive

    @property
    def containers(self):
        """Return an ordered list of instance containers for this service, by
        instance name."""
        return map(lambda instance: self._containers[instance],
                   sorted(self._containers.keys()))

    def add_dependency(self, service):
        """Declare that this service depends on the passed service."""
        self._requires.add(service)

    def add_dependent(self, service):
        """Declare that the passed service depends on this service."""
        self._needed_for.add(service)

    def add_wants_info(self, service):
        """Declare that this service wants information about the passed
        service via link environment variables."""
        self._wants_info.add(service)

    def register_container(self, container):
        """Register a new instance container as part of this service."""
        self._containers[container.name] = container

    def get_link_variables(self, add_internal=False):
        """Return the dictionary of all link variables from each container of
        this service. An additional variable, named '<service_name>_INSTANCES',
        contains the list of container/instance names of the service."""
        prefix = re.sub(r'[^\w]', '_', self.name).upper()
        variables = {}
        for container in self._containers.values():
            for key, value in container.get_link_variables(add_internal).items():
                variables['{}_{}'.format(prefix, key)] = value
        variables['{}_INSTANCES'.format(prefix)] = \
            ','.join(self._containers.keys())
        return variables
class Container(Entity):
"""A Container represents an instance of a particular service that will be
executed inside a Docker container on its target ship/host."""
def __init__(self, name, ship, service, config=None, schema=None):
    """Create a new Container object.

    Args:
        name (string): the instance name (should be unique).
        ship (Ship): the Ship object representing the host this container
            is expected to be executed on.
        service (Service): the Service this container is an instance of.
        config (dict): the YAML-parsed dictionary containing this
            instance's configuration (ports, environment, volumes, etc.)
        schema (dict): Maestro schema versioning information.
    """
    Entity.__init__(self, name)
    config = config or {}

    self._status = None  # The container's status, cached.
    self._ship = ship
    self._service = service
    # The instance config may override the service-level image.
    self._image = config.get('image', service.image)
    self._schema = schema

    # Register this instance container as being part of its parent service.
    self._service.register_container(self)

    # Get command
    # TODO(mpetazzoni): remove deprecated 'cmd' support
    self.command = config.get('command', config.get('cmd'))

    # Parse the port specs; instance-level ports override service-level
    # ones with the same name.
    self.ports = self._parse_ports(dict(self.service.ports, **config.get('ports', {})))

    # Gather environment variables, instance config taking precedence
    # over the service's base environment.
    self.env = dict(service.env)
    self.env.update(config.get('env', {}))

    # Seed the service name, container name and host address as part of the
    # container's environment.
    self.env.update({
        'CONTAINER_NAME': self.name,
        'CONTAINER_HOST_ADDRESS': self.ship.ip,
        'DOCKER_IMAGE': self.image,
        'DOCKER_TAG': self.get_image_details()['tag'],
    })

    # Recursively flatten list-valued environment entries into
    # space-separated strings (e.g. ['a', ['b', 'c']] -> 'a b c').
    def env_list_expand(elt):
        return type(elt) != list and elt \
            or ' '.join(map(env_list_expand, elt))

    for k, v in self.env.items():
        if type(v) == list:
            self.env[k] = env_list_expand(v)

    self.volumes = self._parse_volumes(config.get('volumes', {}))
    # Normalize container-only volumes to a set, accepting scalars.
    self.container_volumes = config.get('container_volumes', [])
    if type(self.container_volumes) != list:
        self.container_volumes = [self.container_volumes]
    self.container_volumes = set(self.container_volumes)

    # Check for conflicts
    for volume in self.volumes.values():
        if volume['bind'] in self.container_volumes:
            raise exceptions.InvalidVolumeConfigurationException(
                'Conflict in {} between bind-mounted volume '
                'and container-only volume on {}'
                .format(self.name, volume['bind']))

    # Contains the list of containers from which volumes should be mounted
    # in this container. Host-locality and volume conflicts are checked by
    # the conductor.
    self.volumes_from = config.get('volumes_from', [])
    if type(self.volumes_from) != list:
        self.volumes_from = [self.volumes_from]
    self.volumes_from = set(self.volumes_from)

    # Get links
    self.links = dict(
        (name, alias) for name, alias in
        config.get('links', {}).items())

    # Should this container run with -privileged?
    self.privileged = config.get('privileged', False)

    # Add or drop privileges
    self.cap_add = config.get('cap_add', None)
    self.cap_drop = config.get('cap_drop', None)

    # Add extra hosts
    self.extra_hosts = config.get('extra_hosts', None)

    # Network mode
    self.network_mode = config.get('net')

    # Restart policy
    self.restart_policy = self._parse_restart_policy(config.get('restart'))

    # DNS settings for the container, always as a list
    self.dns = config.get('dns')
    if isinstance(self.dns, six.string_types):
        self.dns = [self.dns]

    # Stop timeout
    self.stop_timeout = config.get('stop_timeout', 10)

    # Get limits; instance-level limits override service-level ones.
    limits = dict(self.service.limits, **config.get('limits', {}))
    self.cpu_shares = limits.get('cpu')
    self.mem_limit = self._parse_bytes(limits.get('memory'))
    self.memswap_limit = self._parse_bytes(limits.get('swap'))

    # Get logging config.
    self.log_config = self._parse_log_config(
        config.get('log_driver'), config.get('log_opt'))

    # Additional LXC configuration options. See the LXC documentation for a
    # reference of the available settings. Those are only supported if the
    # remote Docker daemon uses the lxc execution driver.
    self.lxc_conf = config.get('lxc_conf', {})

    # Work directory for the container
    self.workdir = config.get('workdir')

    # Reformat port structure
    # NOTE(review): builds exposed-port -> [(interface, port-number)]
    # bindings, stripping the '/proto' suffix — presumably the shape
    # docker-py expects for port_bindings; confirm against docker-py docs.
    ports = collections.defaultdict(list) if self.ports else None
    if ports is not None:
        for port in self.ports.values():
            ports[port['exposed']].append(
                (port['external'][0], port['external'][1].split('/')[0]))

    # Security options
    self.security_opt = config.get('security_opt')

    # Ulimits options
    self.ulimits = self._parse_ulimits(config.get('ulimits', None))

    # host_config now contains all settings previously passed in container
    # start().
    self.host_config = self._ship.backend.create_host_config(
        log_config=self.log_config,
        mem_limit=self.mem_limit,
        memswap_limit=self.memswap_limit,
        binds=self.volumes,
        port_bindings=ports,
        lxc_conf=self.lxc_conf,
        privileged=self.privileged,
        cap_add=self.cap_add,
        cap_drop=self.cap_drop,
        extra_hosts=self.extra_hosts,
        network_mode=self.network_mode,
        restart_policy=self.restart_policy,
        dns=self.dns,
        links=self.links,
        ulimits=self.ulimits,
        volumes_from=list(self.volumes_from),
        security_opt=self.security_opt)

    # With everything defined, build lifecycle state helpers as configured;
    # instance-level checks extend the service-level ones per state.
    lifecycle = dict(self.service.lifecycle)
    for state, checks in config.get('lifecycle', {}).items():
        if state not in lifecycle:
            lifecycle[state] = []
        lifecycle[state].extend(checks)
    self._lifecycle = self._parse_lifecycle(lifecycle)
@property
def ship(self):
    """Returns the Ship this container runs on."""
    return self._ship

@property
def service(self):
    """Returns the Service this container is an instance of."""
    return self._service

@property
def id(self):
    """Returns the ID of this container given by the Docker daemon, or None
    if the container doesn't exist."""
    status = self.status()
    # Docker API versions disagree on the key capitalization, hence the
    # 'ID' then 'Id' double lookup.
    return status and status.get('ID', status.get('Id', None))

@property
def shortid(self):
    """Returns a short representation of this container's ID, or '-' if the
    container is not running."""
    return self.id[:7] if self.id else '-'
def is_running(self):
    """Refreshes the status of this container and tells if it's running or
    not."""
    status = self.status(refresh=True)
    return status and status['State']['Running']

@property
def image(self):
    """Return the full name and tag of the image used by instances of this
    service."""
    return self._image

@property
def short_image(self):
    """Return the abbreviated name (stripped of its registry component,
    when present) of the image used by this service."""
    # str.find returns -1 when no '/' is present, so this degrades to the
    # full image name for unprefixed images.
    return self._image[self._image.find('/')+1:]
def get_image_details(self, image=None):
    """Return a dictionary detailing the image used by this service, with
    its repository name and the requested tag (defaulting to latest if not
    specified)."""
    name = image or self._image
    repository, colon, tag = name.rpartition(':')
    if not colon or '/' in tag:
        # Either there is no colon at all, or the last colon belongs to a
        # registry host:port prefix — the whole name is the repository.
        repository, tag = name, ''
    return {'repository': repository, 'tag': tag or 'latest'}
@property
def shortid_and_tag(self):
    """Returns a string representing the tag of the image this container
    runs on and the short ID of the running container."""
    status = self.status()
    # When the container is unknown, status is None and image stays falsy;
    # get_image_details then falls back to the configured image.
    image = status and status['Config']['Image']
    return '{}:{}'.format(self.get_image_details(image)['tag'], self.shortid)

@property
def started_at(self):
    """Returns the time at which the container was started."""
    status = self.status()
    return status and self._parse_go_time(status['State']['StartedAt'])

@property
def finished_at(self):
    """Returns the time at which the container finished executing."""
    status = self.status()
    return status and self._parse_go_time(status['State']['FinishedAt'])

@property
def hostname(self):
    """Returns a hostname for the container, or None if the container has a
    defined network mode."""
    return self.name if not self.network_mode else None
def status(self, refresh=False):
    """Retrieve the details about this container from the Docker daemon, or
    None if the container doesn't exist.

    Args:
        refresh (bool): when True, bypass the cached status and query the
            Docker daemon again.
    """
    if refresh or not self._status:
        try:
            self._status = self.ship.backend.inspect_container(self.name)
        except APIError:
            # Any daemon error (including 404 for a missing container) is
            # treated as "no status available"; a previously cached value,
            # if any, is left untouched.
            pass

    return self._status
def get_volumes(self):
    """Returns all the declared local volume targets within this container.
    This does not includes volumes from other containers."""
    targets = set(self.container_volumes)
    targets.update(spec['bind'] for spec in self.volumes.values())
    return targets
def get_link_variables(self, add_internal=False):
    """Build and return a dictionary of environment variables providing
    linking information to this container.

    Variables are named
    '<service_name>_<container_name>_{HOST,PORT,INTERNAL_PORT}'.
    """
    def _sanitize(label):
        # Environment variable names may only contain word characters.
        return re.sub(r'[^\w]', '_', label).upper()

    def _port_of(spec_str):
        # Strip the '/tcp' or '/udp' protocol suffix, if present.
        return spec_str.split('/')[0]

    prefix = _sanitize(self.name)
    variables = {'{}_HOST'.format(prefix): self.ship.ip}
    for port_name, port_spec in self.ports.items():
        variables['{}_{}_PORT'.format(prefix, _sanitize(port_name))] = \
            _port_of(port_spec['external'][1])
        if add_internal:
            variables['{}_{}_INTERNAL_PORT'.format(
                prefix, _sanitize(port_name))] = \
                _port_of(port_spec['exposed'])
    return variables
def start_lifecycle_checks(self, state):
    """Check if a particular lifecycle state has been reached by executing
    all its defined checks. If no checks are defined, it is assumed the
    state is reached immediately.

    Returns None when no checks exist for the state; otherwise an
    AsyncResult the caller can poll for the check outcomes.
    """
    if state not in self._lifecycle:
        # Return None to indicate no checks were performed.
        return None

    # HACK: Workaround for Python bug #10015 (also #14881). Fixed in
    # Python >= 2.7.5 and >= 3.3.2.
    thread = threading.current_thread()
    if not hasattr(thread, "_children"):
        thread._children = weakref.WeakKeyDictionary()

    # Run all of this state's checks concurrently in a thread pool.
    pool = multiprocessing.pool.ThreadPool()
    return pool.map_async(lambda check: check.test(self),
                          self._lifecycle[state])
def ping_port(self, port):
    """Ping a single port, by its given name in the port mappings. Returns
    True if the port is opened and accepting connections, False
    otherwise."""
    # The external spec is (interface, 'port/proto'); split off the proto.
    parts = self.ports[port]['external'][1].split('/')
    if parts[1] == 'udp':
        # UDP is connectionless; there is no meaningful way to "ping" it.
        return False

    return lifecycle.TCPPortPinger(self.ship.ip, int(parts[0]), 1).test()
def _parse_bytes(self, s):
    """Convert a human-readable size string ('512', '64m', '2g', ...) into
    a byte count. Falsy or non-string values are passed through untouched.

    Raises EnvironmentConfigurationException for an unrecognized suffix.
    """
    if not s or not isinstance(s, six.string_types):
        return s
    multipliers = {'k': 1024,
                   'm': 1024 * 1024,
                   'g': 1024 * 1024 * 1024}
    unit = s[-1].lower()
    if unit in multipliers:
        return int(s[:-1]) * multipliers[unit]
    if s.isdigit():
        # No suffix: the value is already in bytes.
        return int(s)
    raise exceptions.EnvironmentConfigurationException(
        'Unknown unit suffix {} in {} for container {}!'
        .format(unit, s, self.name))
def _parse_restart_policy(self, spec):
    """Parse the restart policy configured for this container.

    Args:
        spec: the restart policy specification, as extracted from the YAML.
            It can be a string <name>:<max-retries>, or a dictionary with
            the name and retries for the restart policy.

    Returns: A Docker-ready dictionary representing the parsed restart
        policy.
    """
    def _make_policy(name='no', retries=0):
        if name not in _VALID_RESTART_POLICIES:
            raise exceptions.InvalidRestartPolicyConfigurationException(
                'Invalid restart policy {} for container {}; choose one of {}.'
                .format(name, self.name, ', '.join(_VALID_RESTART_POLICIES)))
        return {'Name': name, 'MaximumRetryCount': int(retries)}

    try:
        if isinstance(spec, six.string_types):
            return _make_policy(*spec.split(':', 1))
        elif isinstance(spec, dict):
            # Fixed: was `type(spec) == dict`; isinstance also accepts
            # dict subclasses such as those produced by YAML loaders.
            return _make_policy(**spec)
    except exceptions.InvalidRestartPolicyConfigurationException:
        raise
    except Exception:
        # Fixed: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. Anything else (wrong arity,
        # non-numeric retries, ...) is a configuration error.
        raise exceptions.InvalidRestartPolicyConfigurationException(
            'Invalid restart policy format for container {}: "{}"'
            .format(self.name, spec))

    # Fall-back to default
    return _make_policy()
def _parse_volumes(self, volumes):
    """Parse the volume bindings defined by this container's configuration.

    Args:
        volumes (dict): the configured volume mappings as extracted from
            the YAML file.

    Returns: A dictionary of bindings host -> binding spec, where the
        binding spec specifies the target inside the container and its mode
        (read-only or read-write) in docker-py's format.
    """
    result = {}

    def _parse_spec(src, spec):
        # Short path for obsolete schemas
        # TODO(mpetazzoni): remove when obsoleted
        if self._schema and self._schema.get('schema') == 1:
            # Schema v1 had the mapping reversed: spec is the host path
            # and src the in-container target, always read-write.
            result[spec] = {'bind': src, 'ro': False}
            return

        if isinstance(spec, six.string_types):
            # Simple form: 'host_path: target_path' (default mode).
            result[src] = {'bind': spec}
        elif type(spec) == dict and 'target' in spec:
            # Extended form with an explicit target and optional mode.
            result[src] = {'bind': spec['target'],
                           'mode': spec.get('mode', 'rw')}
        else:
            raise exceptions.InvalidVolumeConfigurationException(
                'Invalid volume specification for container {}: {} -> {}'
                .format(self.name, src, spec))

    for src, spec in volumes.items():
        _parse_spec(src, spec)
    return result
def _parse_log_config(self, log_driver, log_opt):
""" Parse the log config found in the container's configuration.
Args:
log_driver (enum): Should be a valid value as defined by
docker/docker-py, e.g. json-file, syslog, none.
log_opt (dict): Should be a valid dictionary with additional log driver
settings. Values are not interpreted.
Returns: A dictionary that can be passed to to docker-py via the
host_config.LogConfig variable.
"""
if log_driver:
if log_driver not in LogConfig.types._values:
raise exceptions.InvalidLogConfigurationException(
"log_driver must be one of ({0})".format(
', '.join(LogConfig.types._values)
))
if log_opt and not type(log_opt) == dict:
raise exceptions.InvalidLogConfigurationException(
"log_opt must be a dictionary")
if log_opt:
log_opt = dict((k, str(v)) for k, v in log_opt.items())
return {"Type": log_driver, "Config": log_opt}
else:
return {"Type": log_driver}
return None
def _parse_go_time(self, s):
"""Parse a time string found in the container status into a Python
datetime object.
Docker uses Go's Time.String() method to convert a UTC timestamp into a
string, but that representation isn't directly parsable from Python as
it includes nanoseconds: http://golang.org/pkg/time/#Time.String
We don't really care about sub-second precision here anyway, so we
strip it out and parse the datetime up to the second.
Args:
s (string): the time string from the container inspection
dictionary.
Returns: The corresponding Python datetime.datetime object, or None if
the time string clearly represented a non-initialized time (which
seems to be 0001-01-01T00:00:00Z in Go).
"""
if not s:
return None
t = datetime.datetime.strptime(s[:-1].split('.')[0], '%Y-%m-%dT%H:%M:%S')
return t if t.year > 1 else None
def _parse_ports(self, ports):
"""Parse port mapping specifications for this container."""
def parse_port_spec(spec):
if type(spec) == int:
spec = str(spec)
m = _PORT_SPEC_REGEX.match(spec)
if not m:
raise exceptions.InvalidPortSpecException(
('Invalid port specification {}! ' +
'Expected format is <port>, <p1>-<p2> or <port>/{tcp,udp}.')
.format(spec))
s = m.group('p1')
if m.group('p2'):
s += '-' + m.group('p2')
proto = m.group('proto') or _DEFAULT_PORT_PROTOCOL
s += '/' + proto
return s
result = {}
for name, spec in ports.items():
# Single number, interpreted as being a TCP port number and to be
# the same for the exposed port and external port bound on all
# interfaces.
if type(spec) == int:
result[name] = {
'exposed': parse_port_spec(spec),
'external': ('0.0.0.0', parse_port_spec(spec)),
}
# Port spec is a string. This means either a protocol was specified
# with /tcp or /udp, that a port range was specified, or that a
# mapping was provided, with each side of the mapping optionally
# specifying the protocol.
# External port is assumed to be bound on all interfaces as well.
elif type(spec) == str:
parts = list(map(parse_port_spec, spec.split(':')))
if len(parts) == 1:
# If only one port number is provided, assumed external =
# exposed.
parts.append(parts[0])
elif len(parts) > 2:
raise exceptions.InvalidPortSpecException(
('Invalid port spec {} for port {} of {}! ' +
'Format should be "name: external:exposed".').format(
spec, name, self))
if parts[0][-4:] != parts[1][-4:]:
raise exceptions.InvalidPortSpecException(
'Mismatched protocols between {} and {}!'.format(
parts[0], parts[1]))
result[name] = {
'exposed': parts[0],
'external': ('0.0.0.0', parts[1]),
}
# Port spec is fully specified.
elif type(spec) == dict and \
'exposed' in spec and 'external' in spec:
spec['exposed'] = parse_port_spec(spec['exposed'])
if type(spec['external']) != list:
spec['external'] = ('0.0.0.0', spec['external'])
spec['external'] = (spec['external'][0],
parse_port_spec(spec['external'][1]))
result[name] = spec
else:
raise exceptions.InvalidPortSpecException(
'Invalid port spec {} for port {} of {}!'.format(
spec, name, self))
return result
def _parse_lifecycle(self, lifecycles):
"""Parse the lifecycle checks configured for this container and
instantiate the corresponding check helpers, as configured."""
checkers = {}
for state, checks in lifecycles.items():
if not type(checks) == list:
raise exceptions.InvalidLifecycleCheckConfigurationException(
('Invalid {} lifecycle checks configuration; '
'expected list of checks, got {}!')
.format(state, type(checks)))
checkers[state] = list(
map(lambda c: (lifecycle.LifecycleHelperFactory
.from_config(self, c)), checks))
return checkers
def _parse_ulimits(self, ulimits):
"""Parse ulimits"""
if ulimits is None:
return None
result = []
for name, value in ulimits.items():
ulimit = {'name': name}
if isinstance(value, dict):
ulimit.update(value)
elif isinstance(value, int):
ulimit.update({'hard': value, 'soft': value})
else:
continue
result.append(ulimit)
return result
def __repr__(self):
return '{} (on {})'.format(self.name, self.ship.name)
    def __lt__(self, other):
        # Order containers alphabetically by name (used for sorting).
        return self.name < other.name
def __eq__(self, other):
return self.name == other.name
    def __hash__(self):
        # Hash on the name only, mirroring __eq__, so equal containers hash
        # identically (required for use in sets and as dict keys).
        return hash(self.name)
| |
# Copyright 2012 Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, platform, os, shutil, sys, subprocess, tempfile, threading
import time, urllib, urllib2, hashlib, re, base64, uuid, socket, errno
import traceback
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from optparse import OptionParser
from urlparse import urlparse, parse_qs
from threading import Lock
USAGE_EXAMPLE = "%prog"

# The local web server uses the git repo as the document root.
DOC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))

# When True, refuse to run unless the working tree looks like a pdf.js clone.
GIT_CLONE_CHECK = True

DEFAULT_MANIFEST_FILE = 'test_manifest.json'
EQLOG_FILE = 'eq.log'  # reftest-analyzer-compatible log of eq failures
BROWSERLOG_FILE = 'browser.log'  # stdout/stderr capture of launched browsers
REFDIR = 'ref'  # master reference snapshots
TEST_SNAPSHOTS = 'test_snapshots'  # snapshots produced by the current run
TMPDIR = 'tmp'  # masterMode staging area for candidate references
VERBOSE = False

# Seconds a browser may stay silent before being declared hung.
BROWSER_TIMEOUT = 120
SERVER_HOST = "localhost"

# Serializes result handling across the threaded HTTP server's workers.
lock = Lock()
class TestOptions(OptionParser):
    """Command-line option parser for the pdf.js test harness."""

    def __init__(self, **kwargs):
        OptionParser.__init__(self, **kwargs)
        self.add_option("-m", "--masterMode", action="store_true", dest="masterMode",
                        help="Run the script in master mode.", default=False)
        self.add_option("--noPrompts", action="store_true", dest="noPrompts",
                        help="Uses default answers (intended for CLOUD TESTS only!).", default=False)
        self.add_option("--manifestFile", action="store", type="string", dest="manifestFile",
                        help="A JSON file in the form of test_manifest.json (the default).")
        self.add_option("-b", "--browser", action="store", type="string", dest="browser",
                        help="The path to a single browser (right now, only Firefox is supported).")
        self.add_option("--browserManifestFile", action="store", type="string",
                        dest="browserManifestFile",
                        help="A JSON file in the form of those found in resources/browser_manifests")
        self.add_option("--reftest", action="store_true", dest="reftest",
                        help="Automatically start reftest showing comparison test failures, if there are any.",
                        default=False)
        self.add_option("--port", action="store", dest="port", type="int",
                        help="The port the HTTP server should listen on.", default=8080)
        self.add_option("--unitTest", action="store_true", dest="unitTest",
                        help="Run the unit tests.", default=False)
        self.add_option("--fontTest", action="store_true", dest="fontTest",
                        help="Run the font tests.", default=False)
        self.add_option("--noDownload", action="store_true", dest="noDownload",
                        help="Skips test PDFs downloading.", default=False)
        self.add_option("--statsFile", action="store", dest="statsFile", type="string",
                        help="The file where to store stats.", default=None)
        self.add_option("--statsDelay", action="store", dest="statsDelay", type="int",
                        help="The amount of time in milliseconds the browser should wait before starting stats.", default=10000)
        self.set_usage(USAGE_EXAMPLE)

    def verifyOptions(self, options):
        """Validate option combinations and apply defaults.

        Returns the (possibly adjusted) options object; calls self.error()
        (which exits) on conflicting flags.
        """
        if options.reftest and (options.unitTest or options.fontTest):
            self.error("--reftest and --unitTest/--fontTest must not be specified at the same time.")
        if options.masterMode and options.manifestFile:
            self.error("--masterMode and --manifestFile must not be specified at the same time.")
        if not options.manifestFile:
            options.manifestFile = DEFAULT_MANIFEST_FILE
        if options.browser and options.browserManifestFile:
            print "Warning: ignoring browser argument since manifest file was also supplied"
        if not options.browser and not options.browserManifestFile:
            print "Starting server on port %s." % options.port
        if not options.statsFile:
            # Without a stats file there is no reason to delay the browser.
            options.statsDelay = 0
        return options
def prompt(question):
    '''Return True iff the user answered "yes" to |question|.'''
    answer = raw_input(question + ' [yes/no] > ')
    return answer == 'yes'
# File extension -> Content-Type for everything the test server will serve;
# requests for other extensions are rejected with a 404.
MIMEs = {
    '.css': 'text/css',
    '.html': 'text/html',
    '.js': 'application/javascript',
    '.json': 'application/json',
    '.svg': 'image/svg+xml',
    '.pdf': 'application/pdf',
    '.xhtml': 'application/xhtml+xml',
    '.gif': 'image/gif',
    '.ico': 'image/x-icon',
    '.png': 'image/png',
    '.log': 'text/plain',
    '.properties': 'text/plain'
}
class State:
    # Shared, lock-protected bookkeeping for a regression-test run.
    browsers = [ ]
    manifest = { }          # task id -> manifest entry
    taskResults = { }       # browser -> task id -> list (per round) of Results
    remaining = { }         # browser -> number of tasks not yet completed
    results = { }
    done = False            # True once every browser has zero tasks left
    numErrors = 0
    numEqFailures = 0       # eq/text snapshot mismatches
    numEqNoSnapshot = 0     # eq pages lacking a reference snapshot on disk
    numFBFFailures = 0      # forward-back-forward mismatches
    numLoadFailures = 0
    eqLog = None            # lazily-opened reftest-analyzer log file
    saveStats = False
    stats = [ ]
    lastPost = { }          # browser -> unix time of its last POST (watchdog)
class UnitTestState:
    # Shared bookkeeping for a unit/font test run.
    browsers = [ ]
    browsersRunning = 0     # browsers that have not yet asked to quit
    lastPost = { }          # browser -> unix time of last POST (watchdog)
    numErrors = 0
    numRun = 0
class Result:
    """Rendering outcome reported for a single page of a task."""
    def __init__(self, snapshot, failure, page):
        self.snapshot = snapshot  # data: URI of the rendered page image
        self.failure = failure    # failure message, or falsy on success
        self.page = page          # page number within the PDF
class TestServer(ThreadingMixIn, HTTPServer):
    # Thread-per-request HTTP server so a slow request can't block others.
    pass
class TestHandlerBase(BaseHTTPRequestHandler):
# Disable annoying noise by default
def log_request(code=0, size=0):
if VERBOSE:
BaseHTTPRequestHandler.log_request(code, size)
def handle_one_request(self):
try:
BaseHTTPRequestHandler.handle_one_request(self)
except socket.error, v:
if v[0] == errno.ECONNRESET:
# Ignoring connection reset by peer exceptions
if VERBOSE:
print 'Detected connection reset'
elif v[0] == errno.EPIPE:
if VERBOSE:
print 'Detected remote peer disconnected'
elif v[0] == 10053:
if VERBOSE:
print 'An established connection was aborted by the' \
' software in your host machine'
else:
raise
def finish(self,*args,**kw):
# From http://stackoverflow.com/a/14355079/1834797
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def sendFile(self, path, ext):
self.send_response(200)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", MIMEs[ext])
self.send_header("Content-Length", os.path.getsize(path))
self.end_headers()
with open(path, "rb") as f:
self.wfile.write(f.read())
def sendFileRange(self, path, ext, start, end):
file_len = os.path.getsize(path)
if (end is None) or (file_len < end):
end = file_len
if (file_len < start) or (end <= start):
self.send_error(416)
return
chunk_len = end - start
time.sleep(chunk_len / 1000000.0)
self.send_response(206)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", MIMEs[ext])
self.send_header("Content-Length", chunk_len)
self.send_header("Content-Range", 'bytes ' + str(start) + '-' + str(end - 1) + '/' + str(file_len))
self.end_headers()
with open(path, "rb") as f:
f.seek(start)
self.wfile.write(f.read(chunk_len))
def do_GET(self):
url = urlparse(self.path)
# Ignore query string
path, _ = urllib.unquote_plus(url.path), url.query
path = os.path.abspath(os.path.realpath(DOC_ROOT + os.sep + path))
prefix = os.path.commonprefix(( path, DOC_ROOT ))
_, ext = os.path.splitext(path.lower())
if url.path == "/favicon.ico":
self.sendFile(os.path.join(DOC_ROOT, "test", "resources", "favicon.ico"), ext)
return
if os.path.isdir(path):
self.sendIndex(url.path, url.query)
return
pieces = path.split(os.sep);
if pieces[len(pieces) - 2] == 'cmaps':
self.sendFile(path, '.properties');
return
if not (prefix == DOC_ROOT
and os.path.isfile(path)
and ext in MIMEs):
print path
self.send_error(404)
return
if 'Range' in self.headers:
range_re = re.compile(r"^bytes=(\d+)\-(\d+)?")
parsed_range = range_re.search(self.headers.getheader("Range"))
if parsed_range is None:
self.send_error(501)
return
if VERBOSE:
print 'Range requested %s - %s: %s' % (
parsed_range.group(1), parsed_range.group(2))
start = int(parsed_range.group(1))
if parsed_range.group(2) is None:
self.sendFileRange(path, ext, start, None)
else:
end = int(parsed_range.group(2)) + 1
self.sendFileRange(path, ext, start, end)
return
self.sendFile(path, ext)
class UnitTestHandler(TestHandlerBase):
def sendIndex(self, path, query):
print "send index"
def translateFont(self, base64Data):
self.send_response(200)
self.send_header("Content-Type", "text/xml")
self.end_headers()
data = base64.b64decode(base64Data)
taskId = str(uuid.uuid4())
fontPath = 'ttx/' + taskId + '.otf'
resultPath = 'ttx/' + taskId + '.ttx'
with open(fontPath, "wb") as f:
f.write(data)
# When fontTools used directly, we need to snif ttx file
# to check what version of python is used
ttxPath = ''
for path in os.environ["PATH"].split(os.pathsep):
if os.path.isfile(path + os.sep + "ttx"):
ttxPath = path + os.sep + "ttx"
break
if ttxPath == '':
self.wfile.write("<error>TTX was not found</error>")
return
ttxRunner = ''
with open(ttxPath, "r") as f:
firstLine = f.readline()
if firstLine[:2] == '#!' and firstLine.find('python') > -1:
ttxRunner = firstLine[2:].strip()
with open(os.devnull, "w") as fnull:
if ttxRunner != '':
result = subprocess.call([ttxRunner, ttxPath, fontPath], stdout = fnull)
else:
result = subprocess.call([ttxPath, fontPath], stdout = fnull)
os.remove(fontPath)
if not os.path.isfile(resultPath):
self.wfile.write("<error>Output was not generated</error>")
return
with open(resultPath, "rb") as f:
self.wfile.write(f.read())
os.remove(resultPath)
return
def do_POST(self):
with lock:
url = urlparse(self.path)
numBytes = int(self.headers['Content-Length'])
content = self.rfile.read(numBytes)
# Process special utility requests
if url.path == '/ttx':
self.translateFont(content)
return
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
result = json.loads(content)
browser = result['browser']
UnitTestState.lastPost[browser] = int(time.time())
if url.path == "/tellMeToQuit":
tellAppToQuit(url.path, url.query)
UnitTestState.browsersRunning -= 1
UnitTestState.lastPost[browser] = None
return
elif url.path == '/info':
print result['message']
elif url.path == '/submit_task_results':
status, description = result['status'], result['description']
UnitTestState.numRun += 1
if status == 'TEST-UNEXPECTED-FAIL':
UnitTestState.numErrors += 1
message = status + ' | ' + description + ' | in ' + browser
if 'error' in result:
message += ' | ' + result['error']
print message
else:
print 'Error: uknown action' + url.path
class PDFTestHandler(TestHandlerBase):
    """Serves the viewer/test files and collects per-page rendering results
    POSTed back by the test slave page running in each browser."""

    def sendIndex(self, path, query):
        """Send a simple HTML index of the PDFs found in |path|."""
        if not path.endswith("/"):
            # we need trailing slash
            self.send_response(301)
            redirectLocation = path + "/"
            if query:
                redirectLocation += "?" + query
            self.send_header("Location", redirectLocation)
            self.end_headers()
            return

        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        if query == "frame":
            # Wrap the listing in a frameset so clicked PDFs open in the
            # adjacent "pdf" frame.
            self.wfile.write("<html><frameset cols=*,200><frame name=pdf>" +
                             "<frame src='" + path + "'></frameset></html>")
            return

        location = os.path.abspath(os.path.realpath(DOC_ROOT + os.sep + path))
        self.wfile.write("<html><body><h1>PDFs of " + path + "</h1>\n")
        for filename in os.listdir(location):
            if filename.lower().endswith('.pdf'):
                self.wfile.write("<a href='/web/viewer.html?file=" +
                                 urllib.quote_plus(path + filename, '/') + "' target=pdf>" +
                                 filename + "</a><br>\n")
        self.wfile.write("</body></html>")

    def do_POST(self):
        # Results from every browser funnel through here; the lock keeps
        # the shared State bookkeeping consistent across server threads.
        with lock:
            numBytes = int(self.headers['Content-Length'])

            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()

            url = urlparse(self.path)
            if url.path == "/tellMeToQuit":
                tellAppToQuit(url.path, url.query)
                return

            result = json.loads(self.rfile.read(numBytes))
            browser = result['browser']
            # Record activity so the watchdog in runTests() won't time out.
            State.lastPost[browser] = int(time.time())
            if url.path == "/info":
                print result['message']
                return

            id = result['id']
            failure = result['failure']
            round = result['round']
            page = result['page']
            snapshot = result['snapshot']
            taskResults = State.taskResults[browser][id]
            taskResults[round].append(Result(snapshot, failure, page))
            if State.saveStats:
                stat = {
                    'browser': browser,
                    'pdf': id,
                    'page': page,
                    'round': round,
                    'stats': result['stats']
                }
                State.stats.append(stat)

            def isTaskDone():
                # A task is done once every round has reported its last page.
                last_page_num = result['lastPageNum']
                rounds = State.manifest[id]['rounds']
                for round in range(0,rounds):
                    if not taskResults[round]:
                        return False
                    latest_page = taskResults[round][-1]
                    if not latest_page.page == last_page_num:
                        return False
                return True

            if isTaskDone():
                # sort the results since they sometimes come in out of order
                for results in taskResults:
                    results.sort(key=lambda result: result.page)
                check(State.manifest[id], taskResults, browser,
                      self.server.masterMode)
                # Please oh please GC this ...
                del State.taskResults[browser][id]
                State.remaining[browser] -= 1

            checkIfDone()
def checkIfDone():
    """Flag State.done once no browser has any tasks remaining."""
    State.done = all(count == 0 for count in State.remaining.values())
# Applescript hack to quit Chrome on Mac
def tellAppToQuit(path, query):
    """Ask the application named by |query|'s 'path' parameter to quit via
    osascript. No-op on anything other than Mac OS X."""
    if platform.system() != "Darwin":
        return
    d = parse_qs(query)
    path = d['path'][0]
    cmd = """osascript<<END
tell application "%s"
  quit
end tell
END""" % path
    os.system(cmd)
class BaseBrowserCommand(object):
def __init__(self, browserRecord):
self.name = browserRecord["name"]
self.path = browserRecord["path"]
self.tempDir = None
self.process = None
if platform.system() == "Darwin" and (self.path.endswith(".app") or self.path.endswith(".app/")):
self._fixupMacPath()
if not os.path.exists(self.path):
raise Exception("Path to browser '%s' does not exist." % self.path)
def setup(self):
self.tempDir = tempfile.mkdtemp()
self.profileDir = os.path.join(self.tempDir, "profile")
self.browserLog = open(BROWSERLOG_FILE, "w")
def teardown(self):
self.process.terminate()
# If the browser is still running, wait up to ten seconds for it to quit
if self.process and self.process.poll() is None:
checks = 0
while self.process.poll() is None and checks < 20:
checks += 1
time.sleep(.5)
# If it's still not dead, try to kill it
if self.process.poll() is None:
print "Process %s is still running. Killing." % self.name
self.process.kill()
self.process.wait()
if self.tempDir is not None and os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
self.browserLog.close()
def start(self, url):
raise Exception("Can't start BaseBrowserCommand")
class FirefoxBrowserCommand(BaseBrowserCommand):
    """Launches Firefox with a dedicated, pre-seeded test profile."""

    def _fixupMacPath(self):
        # Point inside the .app bundle at the actual binary.
        self.path = os.path.join(self.path, "Contents", "MacOS", "firefox-bin")

    def setup(self):
        super(FirefoxBrowserCommand, self).setup()
        # Seed the temporary profile with the checked-in test fixtures.
        fixtures = os.path.join(DOC_ROOT, "test", "resources", "firefox")
        shutil.copytree(fixtures, self.profileDir)

    def start(self, url):
        args = [self.path]
        if platform.system() == "Darwin":
            args.append("-foreground")
        args.extend(["-no-remote", "-profile", self.profileDir, url])
        self.process = subprocess.Popen(args, stdout = self.browserLog,
                                        stderr = self.browserLog)
class ChromeBrowserCommand(BaseBrowserCommand):
    """Launches Google Chrome with an isolated user-data directory."""

    def _fixupMacPath(self):
        # Point inside the .app bundle at the actual binary.
        self.path = os.path.join(self.path, "Contents", "MacOS", "Google Chrome")

    def start(self, url):
        args = [self.path,
                "--user-data-dir=%s" % self.profileDir,
                "--no-first-run",
                "--disable-sync",
                url]
        self.process = subprocess.Popen(args, stdout = self.browserLog,
                                        stderr = self.browserLog)
def makeBrowserCommand(browser):
    """Instantiate the browser-command class matching a manifest record.

    Matches "firefox" or "chrome" against the record's name or path and
    raises for anything unrecognized.
    """
    path = browser["path"].lower()
    name = browser["name"]
    if name is not None:
        name = name.lower()
    types = {"firefox": FirefoxBrowserCommand,
             "chrome": ChromeBrowserCommand }
    for key in types.keys():
        if (name and name.find(key) > -1) or path.find(key) > -1:
            command = types[key](browser)
            # Fall back to the matched key when no explicit name was given.
            command.name = command.name or key
            return command
    raise Exception("Unrecognized browser: %s" % browser)
def makeBrowserCommands(browserManifestFile):
    """Load a browser manifest JSON file and build a command per entry."""
    with open(browserManifestFile) as bmf:
        return [makeBrowserCommand(entry) for entry in json.load(bmf)]
def downloadLinkedPDF(f):
linkFile = open(f +'.link')
link = linkFile.read()
linkFile.close()
sys.stdout.write('Downloading '+ link +' to '+ f +' ...')
sys.stdout.flush()
response = urllib2.urlopen(link)
with open(f, 'wb') as out:
out.write(response.read())
print 'done'
def downloadLinkedPDFs(manifestList):
    """Download every linked PDF in the manifest that isn't already present.

    Failures are deliberately non-fatal: the traceback is written to an
    `<f>.error` marker file next to an empty placeholder, and downloading
    continues with the next file.
    """
    for item in manifestList:
        f, isLink = item['file'], item.get('link', False)
        if isLink and not os.access(f, os.R_OK):
            try:
                downloadLinkedPDF(f)
            except:
                # Bare except is intentional: any failure (network, disk,
                # interrupt mid-download) should be recorded, not fatal.
                exc_type, exc_value, exc_traceback = sys.exc_info()
                print 'ERROR: Unable to download file "' + f + '".'
                open(f, 'wb').close()
                with open(f + '.error', 'w') as out:
                    out.write('\n'.join(traceback.format_exception(exc_type,
                                                                   exc_value,
                                                                   exc_traceback)))
def verifyPDFs(manifestList):
    """Check that every manifest PDF is readable and matches its MD5.

    Returns True when all files verify; prints one warning per problem and
    returns False otherwise.
    """
    error = False
    for item in manifestList:
        f = item['file']
        if os.path.isfile(f + '.error'):
            # Left behind by a failed download in downloadLinkedPDFs().
            print 'WARNING: File was not downloaded. See "' + f + '.error" file.'
            error = True
        elif os.access(f, os.R_OK):
            fileMd5 = hashlib.md5(open(f, 'rb').read()).hexdigest()
            if 'md5' not in item:
                print 'WARNING: Missing md5 for file "' + f + '".',
                print 'Hash for current file is "' + fileMd5 + '"'
                error = True
                continue
            md5 = item['md5']
            if fileMd5 != md5:
                print 'WARNING: MD5 of file "' + f + '" does not match file.',
                print 'Expected "' + md5 + '" computed "' + fileMd5 + '"'
                error = True
                continue
        else:
            print 'WARNING: Unable to open file for reading "' + f + '".'
            error = True
    return not error
def getTestBrowsers(options):
    """Resolve the browser commands to test with, from either the browser
    manifest file or the single --browser path. Returns an empty list when
    neither option was given (server-only mode)."""
    browsers = []
    if options.browserManifestFile:
        browsers = makeBrowserCommands(options.browserManifestFile)
    elif options.browser:
        record = {"path": options.browser, "name": None}
        browsers = [makeBrowserCommand(record)]

    # If a browser was requested, we must have resolved at least one.
    if options.browserManifestFile or options.browser:
        assert len(browsers) > 0
    return browsers
def setUp(options):
    """Prepare a regression run: sanity-check the checkout, load and verify
    the manifest, and initialize State's per-browser bookkeeping.

    Returns the list of browser commands to run (possibly empty).
    """
    # Only serve files from a pdf.js clone
    assert not GIT_CLONE_CHECK or os.path.isfile('../src/pdf.js') and os.path.isdir('../.git')

    if options.masterMode and os.path.isdir(TMPDIR):
        # Stale masterMode staging area from a previous run.
        print 'Temporary snapshot dir tmp/ is still around.'
        print 'tmp/ can be removed if it has nothing you need.'
        if options.noPrompts or prompt('SHOULD THIS SCRIPT REMOVE tmp/? THINK CAREFULLY'):
            subprocess.call(( 'rm', '-rf', 'tmp' ))

    assert not os.path.isdir(TMPDIR)

    testBrowsers = getTestBrowsers(options)

    with open(options.manifestFile) as mf:
        manifestList = json.load(mf)

    if not options.noDownload:
        downloadLinkedPDFs(manifestList)
        if not verifyPDFs(manifestList):
            print 'Unable to verify the checksum for the files that are used for testing.'
            print 'Please re-download the files, or adjust the MD5 checksum in the manifest for the files listed above.\n'

    # One result-tracking slot per (browser, task, round).
    for b in testBrowsers:
        State.taskResults[b.name] = { }
        State.remaining[b.name] = len(manifestList)
        State.lastPost[b.name] = int(time.time())
        for item in manifestList:
            id, rounds = item['id'], int(item['rounds'])
            State.manifest[id] = item
            taskResults = [ ]
            for r in xrange(rounds):
                taskResults.append([ ])
            State.taskResults[b.name][id] = taskResults

    if options.statsFile != None:
        State.saveStats = True
    return testBrowsers
def setUpUnitTests(options):
    """Prepare a unit/font test run: sanity-check the checkout and seed
    UnitTestState's per-browser watchdog timestamps. Returns the list of
    browser commands to run."""
    # Only serve files from a pdf.js clone
    assert not GIT_CLONE_CHECK or os.path.isfile('../src/pdf.js') and os.path.isdir('../.git')

    browsers = getTestBrowsers(options)
    UnitTestState.browsersRunning = len(browsers)
    for b in browsers:
        UnitTestState.lastPost[b.name] = int(time.time())
    return browsers
def startBrowsers(browsers, options, path):
for b in browsers:
b.setup()
print 'Launching', b.name
host = 'http://%s:%s' % (SERVER_HOST, options.port)
qs = '?browser='+ urllib.quote(b.name) +'&manifestFile='+ urllib.quote(options.manifestFile)
qs += '&path=' + b.path
qs += '&delay=' + str(options.statsDelay)
qs += '&masterMode=' + str(options.masterMode)
b.start(host + path + qs)
def teardownBrowsers(browsers):
    """Best-effort teardown of every browser; a failure to clean one up is
    reported but never aborts cleanup of the rest."""
    for b in browsers:
        try:
            b.teardown()
        except:
            # Bare except is intentional: always try the next browser.
            print "Error cleaning up after browser at ", b.path
            print "Temp dir was ", b.tempDir
            print "Error:", sys.exc_info()[0]
def check(task, results, browser, masterMode):
failed = False
for r in xrange(len(results)):
pageResults = results[r]
for p in xrange(len(pageResults)):
pageResult = pageResults[p]
if pageResult is None:
continue
failure = pageResult.failure
if failure:
failed = True
if os.path.isfile(task['file'] + '.error'):
print 'TEST-SKIPPED | PDF was not downloaded', task['id'], '| in', browser, '| page', p + 1, 'round', r, '|', failure
else:
State.numErrors += 1
print 'TEST-UNEXPECTED-FAIL | test failed', task['id'], '| in', browser, '| page', p + 1, 'round', r, '|', failure
if failed:
return
kind = task['type']
if 'eq' == kind or 'text' == kind:
checkEq(task, results, browser, masterMode)
elif 'fbf' == kind:
checkFBF(task, results, browser)
elif 'load' == kind:
checkLoad(task, results, browser)
else:
assert 0 and 'Unknown test type'
def createDir(dir):
    """Create |dir| (with intermediate directories) if needed; an
    already-existing directory is silently accepted, any other failure is
    reported on stderr."""
    try:
        os.makedirs(dir)
    except OSError as e:
        # EEXIST just means the directory is already there (was a magic 17);
        # anything else is a real failure worth reporting.
        if e.errno != errno.EEXIST:
            sys.stderr.write('Creating ' + dir + ' failed!\n')
def readDataUri(data):
    """Decode the base64 payload of a data: URI, discarding its metadata
    header (everything up to the last comma)."""
    _, payload = data.rsplit(",", 1)
    return base64.b64decode(payload)
def checkEq(task, results, browser, masterMode):
    """Compare rendered snapshots against the reference images for an
    eq/text task, logging reftest-analyzer-compatible output on mismatch
    and, in master mode, staging new snapshots under tmp/."""
    pfx = os.path.join(REFDIR, sys.platform, browser, task['id'])
    testSnapshotDir = os.path.join(TEST_SNAPSHOTS, sys.platform, browser, task['id'])
    # Only the first round's results are compared against references.
    results = results[0]
    taskId = task['id']
    taskType = task['type']

    passed = True
    for result in results:
        page = result.page
        snapshot = readDataUri(result.snapshot)
        ref = None
        eq = True

        path = os.path.join(pfx, str(page) + '.png')
        if not os.access(path, os.R_OK):
            # No reference on disk yet; just a warning unless we're in the
            # mode that creates references.
            State.numEqNoSnapshot += 1
            if not masterMode:
                print 'WARNING: no reference snapshot', path
        else:
            f = open(path, 'rb')
            ref = f.read()
            f.close()

            # Byte-exact comparison of the PNG data.
            eq = (ref == snapshot)
            if not eq:
                print 'TEST-UNEXPECTED-FAIL |', taskType, taskId, '| in', browser, '| rendering of page', page, '!= reference rendering'

                if not State.eqLog:
                    # Lazily open the eq failure log on first mismatch.
                    State.eqLog = open(EQLOG_FILE, 'w')
                eqLog = State.eqLog

                # Dump both images so they can be inspected side by side.
                createDir(testSnapshotDir)
                testSnapshotPath = os.path.join(testSnapshotDir, str(page) + '.png')
                handle = open(testSnapshotPath, 'wb')
                handle.write(snapshot)
                handle.close()
                refSnapshotPath = os.path.join(testSnapshotDir, str(page) + '_ref.png')
                handle = open(refSnapshotPath, 'wb')
                handle.write(ref)
                handle.close()

                # NB: this follows the format of Mozilla reftest
                # output so that we can reuse its reftest-analyzer
                # script
                eqLog.write('REFTEST TEST-UNEXPECTED-FAIL | ' + browser +'-'+ taskId +'-page'+ str(page) + ' | image comparison (==)\n')
                eqLog.write('REFTEST IMAGE 1 (TEST): ' + testSnapshotPath + '\n')
                eqLog.write('REFTEST IMAGE 2 (REFERENCE): ' + refSnapshotPath + '\n')

                passed = False
                State.numEqFailures += 1

        if masterMode and (ref is None or not eq):
            # Stage this snapshot as a candidate new reference image.
            tmpTaskDir = os.path.join(TMPDIR, sys.platform, browser, task['id'])
            createDir(tmpTaskDir)

            handle = open(os.path.join(tmpTaskDir, str(page)) + '.png', 'wb')
            handle.write(snapshot)
            handle.close()

    if passed:
        print 'TEST-PASS |', taskType, 'test', task['id'], '| in', browser
def checkFBF(task, results, browser):
    """Forward-back-forward check: the first and second rendering rounds
    must produce identical snapshots for every page."""
    round0, round1 = results[0], results[1]
    assert len(round0) == len(round1)

    passed = True
    for page in xrange(len(round1)):
        r0Page, r1Page = round0[page], round1[page]
        if r0Page is None:
            # No more results for this round.
            break
        if r0Page.snapshot != r1Page.snapshot:
            print 'TEST-UNEXPECTED-FAIL | forward-back-forward test', task['id'], '| in', browser, '| first rendering of page', page + 1, '!= second'
            passed = False
            State.numFBFFailures += 1
    if passed:
        print 'TEST-PASS | forward-back-forward test', task['id'], '| in', browser
def checkLoad(task, results, browser):
    # Load just checks for absence of failure, so if we got here the
    # test has passed
    print 'TEST-PASS | load test', task['id'], '| in', browser
def processResults(options):
    """Print a pass/fail summary of the run and optionally dump the
    collected per-page stats to options.statsFile as JSON."""
    print ''
    # eq failures are reported but aren't "fatal" for reference updating.
    numFatalFailures = (State.numErrors + State.numFBFFailures)
    if 0 == State.numEqFailures and 0 == numFatalFailures:
        print 'All regression tests passed.'
    else:
        print 'OHNOES! Some tests failed!'
        if 0 < State.numErrors:
            print ' errors:', State.numErrors
        if 0 < State.numEqFailures:
            print ' different ref/snapshot:', State.numEqFailures
        if 0 < State.numFBFFailures:
            print ' different first/second rendering:', State.numFBFFailures

    if options.statsFile != None:
        with open(options.statsFile, 'w') as sf:
            sf.write(json.dumps(State.stats, sort_keys=True, indent=4))
        print 'Wrote stats file: ' + options.statsFile
def maybeUpdateRefImages(options, browser):
    """In master mode, offer to promote the snapshots staged in tmp/ to the
    master references in ref/ — but only when no non-eq test failed."""
    if options.masterMode and (0 < State.numEqFailures or 0 < State.numEqNoSnapshot):
        print "Some eq tests failed or didn't have snapshots."
        print 'Checking to see if master references can be updated...'
        numFatalFailures = (State.numErrors + State.numFBFFailures)
        if 0 < numFatalFailures:
            print ' No. Some non-eq tests failed.'
        else:
            print ' Yes! The references in tmp/ can be synced with ref/.'
            if options.reftest:
                # Let the user inspect the differences before deciding.
                startReftest(browser, options)
            if options.noPrompts or prompt('Would you like to update the master copy in ref/?'):
                sys.stdout.write(' Updating ref/ ... ')
                if not os.path.exists('ref'):
                    subprocess.check_call('mkdir ref', shell = True)
                subprocess.check_call('cp -Rf tmp/* ref/', shell = True)
                print 'done'
            else:
                print ' OK, not updating.'
def startReftest(browser, options):
    """Open the reftest-analyzer page on the eq failure log in |browser|
    and block until the browser exits."""
    url = "http://%s:%s" % (SERVER_HOST, options.port)
    url += "/test/resources/reftest-analyzer.html"
    url += "#web=/test/eq.log"
    try:
        browser.setup()
        browser.start(url)
        print "Waiting for browser..."
        browser.process.wait()
    finally:
        teardownBrowsers([browser])
    print "Completed reftest usage."
def runTests(options, browsers):
try:
shutil.rmtree(TEST_SNAPSHOTS);
except OSError, e:
if e.errno != 2: # folder doesn't exist
print >>sys.stderr, 'Deleting', dir, 'failed!'
t1 = time.time()
try:
startBrowsers(browsers, options, '/test/test_slave.html')
while not State.done:
for b in State.lastPost:
if State.remaining[b] > 0 and int(time.time()) - State.lastPost[b] > BROWSER_TIMEOUT:
print 'TEST-UNEXPECTED-FAIL | test failed', b, "has not responded in", BROWSER_TIMEOUT, "s"
State.numErrors += State.remaining[b]
State.remaining[b] = 0
checkIfDone()
time.sleep(1)
processResults(options)
finally:
teardownBrowsers(browsers)
t2 = time.time()
print "Runtime was", int(t2 - t1), "seconds"
if State.eqLog:
State.eqLog.close();
if options.masterMode:
maybeUpdateRefImages(options, browsers[0])
elif options.reftest and State.numEqFailures > 0:
print "\nStarting reftest harness to examine %d eq test failures." % State.numEqFailures
startReftest(browsers[0], options)
def runUnitTests(options, browsers, url, name):
    """Run the unit/font test page at |url| in every browser and wait for
    all of them to report back (or be declared hung by the watchdog)."""
    t1 = time.time()
    try:
        startBrowsers(browsers, options, url)
        while UnitTestState.browsersRunning > 0:
            # A browser that stays silent longer than BROWSER_TIMEOUT is
            # declared hung and counted as one error.
            for b in UnitTestState.lastPost:
                if UnitTestState.lastPost[b] != None and int(time.time()) - UnitTestState.lastPost[b] > BROWSER_TIMEOUT:
                    print 'TEST-UNEXPECTED-FAIL | test failed', b, "has not responded in", BROWSER_TIMEOUT, "s"
                    UnitTestState.lastPost[b] = None
                    UnitTestState.browsersRunning -= 1
                    UnitTestState.numErrors += 1
            time.sleep(1)
        print ''
        print 'Ran', UnitTestState.numRun, 'tests'
        if UnitTestState.numErrors > 0:
            print 'OHNOES! Some', name, 'tests failed!'
            print ' ', UnitTestState.numErrors, 'of', UnitTestState.numRun, 'failed'
        else:
            print 'All', name, 'tests passed.'
    finally:
        teardownBrowsers(browsers)
    t2 = time.time()
    print '', name, 'tests runtime was', int(t2 - t1), 'seconds'
def main():
optionParser = TestOptions()
options, args = optionParser.parse_args()
options = optionParser.verifyOptions(options)
if options == None:
sys.exit(1)
if options.unitTest or options.fontTest:
httpd = TestServer((SERVER_HOST, options.port), UnitTestHandler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.setDaemon(True)
httpd_thread.start()
browsers = setUpUnitTests(options)
if len(browsers) > 0:
if options.unitTest:
runUnitTests(options, browsers, '/test/unit/unit_test.html', 'unit')
if options.fontTest:
runUnitTests(options, browsers, '/test/font/font_test.html', 'font')
else:
httpd = TestServer((SERVER_HOST, options.port), PDFTestHandler)
httpd.masterMode = options.masterMode
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.setDaemon(True)
httpd_thread.start()
browsers = setUp(options)
if len(browsers) > 0:
runTests(options, browsers)
else:
# just run the server
print "Running HTTP server. Press Ctrl-C to quit."
try:
while True:
time.sleep(1)
except (KeyboardInterrupt):
print "\nExiting."
# Script entry point.
if __name__ == '__main__':
    main()
| |
# Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from absl import flags
import numpy as np
import tensorflow as tf
from astronet.ops import dataset_ops
from astronet.util import configdict
# Command-line flags for this test module.
FLAGS = flags.FLAGS
flags.DEFINE_string("test_srcdir", "", "Test source directory.")
# Path of the test TFRecord file, relative to --test_srcdir.
_TEST_TFRECORD_FILE = "astronet/ops/test_data/test_dataset.tfrecord"
class DatasetOpsTest(tf.test.TestCase):
  """Tests for the padding and batch-size utilities in dataset_ops."""

  def testPadTensorToBatchSize(self):
    """Verifies pad_tensor_to_batch_size for 0-d, 1-d and 2-d Tensors."""
    with self.test_session():
      # Cannot pad a 0-dimensional Tensor.
      tensor_0d = tf.constant(1)
      with self.assertRaises(ValueError):
        dataset_ops.pad_tensor_to_batch_size(tensor_0d, 10)

      # 1-dimensional Tensor. Un-padded batch size is 5.
      tensor_1d = tf.range(5, dtype=tf.int32)
      self.assertEqual([5], tensor_1d.shape)
      self.assertAllEqual([0, 1, 2, 3, 4], tensor_1d.eval())

      # Invalid to pad Tensor with batch size 5 to batch size 3.
      tensor_1d_pad3 = dataset_ops.pad_tensor_to_batch_size(tensor_1d, 3)
      with self.assertRaises(tf.errors.InvalidArgumentError):
        tensor_1d_pad3.eval()

      # Padding to the existing batch size is a no-op.
      tensor_1d_pad5 = dataset_ops.pad_tensor_to_batch_size(tensor_1d, 5)
      self.assertEqual([5], tensor_1d_pad5.shape)
      self.assertAllEqual([0, 1, 2, 3, 4], tensor_1d_pad5.eval())

      # Padding appends zeros along dimension 0.
      tensor_1d_pad8 = dataset_ops.pad_tensor_to_batch_size(tensor_1d, 8)
      self.assertEqual([8], tensor_1d_pad8.shape)
      self.assertAllEqual([0, 1, 2, 3, 4, 0, 0, 0], tensor_1d_pad8.eval())

      # 2-dimensional Tensor. Un-padded batch size is 3.
      tensor_2d = tf.reshape(tf.range(9, dtype=tf.int32), [3, 3])
      self.assertEqual([3, 3], tensor_2d.shape)
      self.assertAllEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], tensor_2d.eval())

      # Invalid to pad Tensor with batch size 3 to batch size 2.
      # (Comment fixed: it previously said "batch size 2 to batch size 2".)
      tensor_2d_pad2 = dataset_ops.pad_tensor_to_batch_size(tensor_2d, 2)
      with self.assertRaises(tf.errors.InvalidArgumentError):
        tensor_2d_pad2.eval()

      tensor_2d_pad3 = dataset_ops.pad_tensor_to_batch_size(tensor_2d, 3)
      self.assertEqual([3, 3], tensor_2d_pad3.shape)
      self.assertAllEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                          tensor_2d_pad3.eval())

      # Rows of zeros are appended to reach batch size 4.
      tensor_2d_pad4 = dataset_ops.pad_tensor_to_batch_size(tensor_2d, 4)
      self.assertEqual([4, 3], tensor_2d_pad4.shape)
      self.assertAllEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 0, 0]],
                          tensor_2d_pad4.eval())

  def testPadDatasetToBatchSizeNoWeights(self):
    """Padding a dataset without weights adds a 0/1 "weights" feature."""
    values = {"labels": np.arange(10, dtype=np.int32)}
    dataset = tf.data.Dataset.from_tensor_slices(values).batch(4)
    self.assertItemsEqual(["labels"], dataset.output_shapes.keys())
    self.assertFalse(dataset.output_shapes["labels"].is_fully_defined())

    dataset_pad = dataset_ops.pad_dataset_to_batch_size(dataset, 4)
    # A "weights" feature is synthesized and all shapes become static.
    self.assertItemsEqual(["labels", "weights"],
                          dataset_pad.output_shapes.keys())
    self.assertEqual([4], dataset_pad.output_shapes["labels"])
    self.assertEqual([4], dataset_pad.output_shapes["weights"])

    next_batch = dataset_pad.make_one_shot_iterator().get_next()
    next_labels = next_batch["labels"]
    next_weights = next_batch["weights"]
    with self.test_session() as sess:
      labels, weights = sess.run([next_labels, next_weights])
      self.assertAllEqual([0, 1, 2, 3], labels)
      self.assertAllClose([1, 1, 1, 1], weights)

      labels, weights = sess.run([next_labels, next_weights])
      self.assertAllEqual([4, 5, 6, 7], labels)
      self.assertAllClose([1, 1, 1, 1], weights)

      # Final partial batch: padded rows get weight 0.
      labels, weights = sess.run([next_labels, next_weights])
      self.assertAllEqual([8, 9, 0, 0], labels)
      self.assertAllClose([1, 1, 0, 0], weights)

      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run([next_labels, next_weights])

  def testPadDatasetToBatchSizeWithWeights(self):
    """Existing "weights" values are preserved; padded rows get weight 0."""
    values = {
        "labels": np.arange(10, dtype=np.int32),
        "weights": 100 + np.arange(10, dtype=np.int32)
    }
    dataset = tf.data.Dataset.from_tensor_slices(values).batch(4)
    self.assertItemsEqual(["labels", "weights"], dataset.output_shapes.keys())
    self.assertFalse(dataset.output_shapes["labels"].is_fully_defined())
    self.assertFalse(dataset.output_shapes["weights"].is_fully_defined())

    dataset_pad = dataset_ops.pad_dataset_to_batch_size(dataset, 4)
    self.assertItemsEqual(["labels", "weights"],
                          dataset_pad.output_shapes.keys())
    self.assertEqual([4], dataset_pad.output_shapes["labels"])
    self.assertEqual([4], dataset_pad.output_shapes["weights"])

    next_batch = dataset_pad.make_one_shot_iterator().get_next()
    next_labels = next_batch["labels"]
    next_weights = next_batch["weights"]
    with self.test_session() as sess:
      labels, weights = sess.run([next_labels, next_weights])
      self.assertAllEqual([0, 1, 2, 3], labels)
      self.assertAllEqual([100, 101, 102, 103], weights)

      labels, weights = sess.run([next_labels, next_weights])
      self.assertAllEqual([4, 5, 6, 7], labels)
      self.assertAllEqual([104, 105, 106, 107], weights)

      # Final partial batch: original weights kept, padded rows zeroed.
      labels, weights = sess.run([next_labels, next_weights])
      self.assertAllEqual([8, 9, 0, 0], labels)
      self.assertAllEqual([108, 109, 0, 0], weights)

      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run([next_labels, next_weights])

  def testSetBatchSizeSingleTensor1d(self):
    """set_batch_size makes a 1-d dataset's batch dimension static."""
    dataset = tf.data.Dataset.range(4).batch(2)
    self.assertFalse(dataset.output_shapes.is_fully_defined())
    dataset = dataset_ops.set_batch_size(dataset, 2)
    self.assertEqual([2], dataset.output_shapes)

    next_batch = dataset.make_one_shot_iterator().get_next()
    with self.test_session() as sess:
      batch_value = sess.run(next_batch)
      self.assertAllEqual([0, 1], batch_value)
      batch_value = sess.run(next_batch)
      self.assertAllEqual([2, 3], batch_value)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(next_batch)

  def testSetBatchSizeSingleTensor2d(self):
    """set_batch_size fixes only the batch dim; inner dims stay as-is."""
    values = np.arange(12, dtype=np.int32).reshape([4, 3])
    dataset = tf.data.Dataset.from_tensor_slices(values).batch(2)
    self.assertFalse(dataset.output_shapes.is_fully_defined())
    dataset = dataset_ops.set_batch_size(dataset, 2)
    self.assertEqual([2, 3], dataset.output_shapes)

    next_batch = dataset.make_one_shot_iterator().get_next()
    with self.test_session() as sess:
      batch_value = sess.run(next_batch)
      self.assertAllEqual([[0, 1, 2], [3, 4, 5]], batch_value)
      batch_value = sess.run(next_batch)
      self.assertAllEqual([[6, 7, 8], [9, 10, 11]], batch_value)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(next_batch)

  def testSetBatchSizeNested(self):
    """set_batch_size applies through nested dict structures."""
    values = {
        "a": 100 + np.arange(4, dtype=np.int32),
        "nest": {
            "b": np.arange(12, dtype=np.int32).reshape([4, 3]),
            "c": np.arange(4, dtype=np.int32)
        }
    }
    dataset = tf.data.Dataset.from_tensor_slices(values).batch(2)
    self.assertItemsEqual(["a", "nest"], dataset.output_shapes.keys())
    self.assertItemsEqual(["b", "c"], dataset.output_shapes["nest"].keys())
    self.assertFalse(dataset.output_shapes["a"].is_fully_defined())
    self.assertFalse(dataset.output_shapes["nest"]["b"].is_fully_defined())
    self.assertFalse(dataset.output_shapes["nest"]["c"].is_fully_defined())

    dataset = dataset_ops.set_batch_size(dataset, 2)
    self.assertItemsEqual(["a", "nest"], dataset.output_shapes.keys())
    self.assertItemsEqual(["b", "c"], dataset.output_shapes["nest"].keys())
    self.assertEqual([2], dataset.output_shapes["a"])
    self.assertEqual([2, 3], dataset.output_shapes["nest"]["b"])
    self.assertEqual([2], dataset.output_shapes["nest"]["c"])

    next_batch = dataset.make_one_shot_iterator().get_next()
    next_a = next_batch["a"]
    next_b = next_batch["nest"]["b"]
    next_c = next_batch["nest"]["c"]
    with self.test_session() as sess:
      a, b, c = sess.run([next_a, next_b, next_c])
      self.assertAllEqual([100, 101], a)
      self.assertAllEqual([[0, 1, 2], [3, 4, 5]], b)
      self.assertAllEqual([0, 1], c)
      a, b, c = sess.run([next_a, next_b, next_c])
      self.assertAllEqual([102, 103], a)
      self.assertAllEqual([[6, 7, 8], [9, 10, 11]], b)
      self.assertAllEqual([2, 3], c)
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(next_batch)
class BuildDatasetTest(tf.test.TestCase):
  """Tests for dataset_ops.build_dataset using a small on-disk TFRecord."""

  def setUp(self):
    super(BuildDatasetTest, self).setUp()

    # The test dataset contains 10 tensorflow.Example protocol buffers. The i-th
    # Example contains the following features:
    #   global_view = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
    #   local_view = [0.0, 1.0, 2.0, 3.0]
    #   aux_feature = 100 + i
    #   label_str = "PC" if i % 3 == 0 else "AFP" if i % 3 == 1 else "NTP"
    self._file_pattern = os.path.join(FLAGS.test_srcdir, _TEST_TFRECORD_FILE)

    # Minimal input configuration matching the features above.
    self._input_config = configdict.ConfigDict({
        "features": {
            "global_view": {
                "is_time_series": True,
                "length": 8
            },
            "local_view": {
                "is_time_series": True,
                "length": 4
            },
            "aux_feature": {
                "is_time_series": False,
                "length": 1
            }
        }
    })

  def testNonExistentFileRaisesValueError(self):
    """A file pattern matching nothing should fail fast."""
    with self.assertRaises(ValueError):
      dataset_ops.build_dataset(
          file_pattern="nonexistent",
          input_config=self._input_config,
          batch_size=4)

  def testBuildWithoutLabels(self):
    """Without labels, batches contain only the feature dictionaries."""
    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4,
        include_labels=False)

    # We can use a one-shot iterator without labels because we don't have the
    # stateful hash map for label ids.
    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()

    # Expect features only.
    self.assertItemsEqual(["time_series_features", "aux_features"],
                          features.keys())

    with self.test_session() as sess:
      # Batch 1.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
                                           f["aux_features"]["aux_feature"])

      # Batch 2.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
                                           f["aux_features"]["aux_feature"])

      # Batch 3 is the final, partial batch (10 examples, batch size 4).
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[108], [109]],
                                           f["aux_features"]["aux_feature"])

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(features)

  def testLabels1(self):
    """Labels are mapped through label_map: PC->0, AFP->1, NTP->2."""
    self._input_config["label_feature"] = "label_str"
    self._input_config["label_map"] = {"PC": 0, "AFP": 1, "NTP": 2}

    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4)

    # We need an initializable iterator when using labels because of the
    # stateful label id hash table.
    iterator = dataset.make_initializable_iterator()
    inputs = iterator.get_next()
    init_op = tf.tables_initializer()

    # Expect features and labels.
    self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
                          inputs.keys())
    labels = inputs["labels"]

    with self.test_session() as sess:
      sess.run([init_op, iterator.initializer])

      # Fetch 3 batches.
      np.testing.assert_array_equal([0, 1, 2, 0], sess.run(labels))
      np.testing.assert_array_equal([1, 2, 0, 1], sess.run(labels))
      np.testing.assert_array_equal([2, 0], sess.run(labels))

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(labels)

  def testLabels2(self):
    """Multiple label strings may map to the same id (AFP, NTP -> 0)."""
    self._input_config["label_feature"] = "label_str"
    self._input_config["label_map"] = {"PC": 1, "AFP": 0, "NTP": 0}

    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4)

    # We need an initializable iterator when using labels because of the
    # stateful label id hash table.
    iterator = dataset.make_initializable_iterator()
    inputs = iterator.get_next()
    init_op = tf.tables_initializer()

    # Expect features and labels.
    self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
                          inputs.keys())
    labels = inputs["labels"]

    with self.test_session() as sess:
      sess.run([init_op, iterator.initializer])

      # Fetch 3 batches.
      np.testing.assert_array_equal([1, 0, 0, 1], sess.run(labels))
      np.testing.assert_array_equal([0, 0, 1, 0], sess.run(labels))
      np.testing.assert_array_equal([0, 1], sess.run(labels))

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(labels)

  def testBadLabelIdsRaisesValueError(self):
    """Label ids must be contiguous integers starting at 0."""
    self._input_config["label_feature"] = "label_str"

    # Label ids should be contiguous integers starting at 0.
    self._input_config["label_map"] = {"PC": 1, "AFP": 2, "NTP": 3}
    with self.assertRaises(ValueError):
      dataset_ops.build_dataset(
          file_pattern=self._file_pattern,
          input_config=self._input_config,
          batch_size=4)

  def testUnknownLabel(self):
    """A label string missing from label_map fails at lookup time."""
    self._input_config["label_feature"] = "label_str"

    # label_map does not include "NTP".
    self._input_config["label_map"] = {"PC": 1, "AFP": 0}

    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4)

    # We need an initializable iterator when using labels because of the
    # stateful label id hash table.
    iterator = dataset.make_initializable_iterator()
    inputs = iterator.get_next()
    init_op = tf.tables_initializer()

    # Expect features and labels.
    self.assertItemsEqual(["time_series_features", "aux_features", "labels"],
                          inputs.keys())
    labels = inputs["labels"]

    with self.test_session() as sess:
      sess.run([init_op, iterator.initializer])

      # Unknown label "NTP".
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(labels)

  def testReverseTimeSeries(self):
    """With reverse_time_series_prob=1, every time series is reversed."""
    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4,
        reverse_time_series_prob=1,
        include_labels=False)

    # We can use a one-shot iterator without labels because we don't have the
    # stateful hash map for label ids.
    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()

    # Expect features only.
    self.assertItemsEqual(["time_series_features", "aux_features"],
                          features.keys())

    with self.test_session() as sess:
      # Batch 1. Time-series features are reversed; aux features are not.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [3, 2, 1, 0],
          [3, 2, 1, 0],
          [3, 2, 1, 0],
          [3, 2, 1, 0],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
                                           f["aux_features"]["aux_feature"])

      # Batch 2.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [3, 2, 1, 0],
          [3, 2, 1, 0],
          [3, 2, 1, 0],
          [3, 2, 1, 0],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
                                           f["aux_features"]["aux_feature"])

      # Batch 3.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [7, 6, 5, 4, 3, 2, 1, 0],
          [7, 6, 5, 4, 3, 2, 1, 0],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [3, 2, 1, 0],
          [3, 2, 1, 0],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[108], [109]],
                                           f["aux_features"]["aux_feature"])

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(features)

  def testRepeat(self):
    # NOTE(review): this body is identical to testBuildWithoutLabels and does
    # not pass any repeat-related argument to build_dataset, so it does not
    # actually exercise repeat behavior. Presumably a repeat/num_epochs option
    # was intended here — confirm against build_dataset's signature.
    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4,
        include_labels=False)

    # We can use a one-shot iterator without labels because we don't have the
    # stateful hash map for label ids.
    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()

    # Expect features only.
    self.assertItemsEqual(["time_series_features", "aux_features"],
                          features.keys())

    with self.test_session() as sess:
      # Batch 1.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
                                           f["aux_features"]["aux_feature"])

      # Batch 2.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
                                           f["aux_features"]["aux_feature"])

      # Batch 3.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[108], [109]],
                                           f["aux_features"]["aux_feature"])

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(features)

  def testTPU(self):
    # NOTE(review): like testRepeat, this body is identical to
    # testBuildWithoutLabels and passes no TPU-specific argument (e.g. a
    # use_tpu flag) to build_dataset — confirm what this was meant to cover.
    dataset = dataset_ops.build_dataset(
        file_pattern=self._file_pattern,
        input_config=self._input_config,
        batch_size=4,
        include_labels=False)

    # We can use a one-shot iterator without labels because we don't have the
    # stateful hash map for label ids.
    iterator = dataset.make_one_shot_iterator()
    features = iterator.get_next()

    # Expect features only.
    self.assertItemsEqual(["time_series_features", "aux_features"],
                          features.keys())

    with self.test_session() as sess:
      # Batch 1.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[100], [101], [102], [103]],
                                           f["aux_features"]["aux_feature"])

      # Batch 2.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[104], [105], [106], [107]],
                                           f["aux_features"]["aux_feature"])

      # Batch 3.
      f = sess.run(features)
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3, 4, 5, 6, 7],
          [0, 1, 2, 3, 4, 5, 6, 7],
      ], f["time_series_features"]["global_view"])
      np.testing.assert_array_almost_equal([
          [0, 1, 2, 3],
          [0, 1, 2, 3],
      ], f["time_series_features"]["local_view"])
      np.testing.assert_array_almost_equal([[108], [109]],
                                           f["aux_features"]["aux_feature"])

      # No more batches.
      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run(features)
# Test entry point.
if __name__ == "__main__":
  tf.test.main()
| |
"""
Neural network library, drawing inspiration from Torch's nn and nngraph
"""
import cgt
from cgt import core
import numpy as np
from .nn_ops.im2col import im2col
from .nn_ops.cross_channel_lrn import cross_channel_lrn #pylint: disable=W0611
from .nn_ops import cudnn_ops #pylint: disable=W0611
from .nn_ops.max_pool_2d import MaxPool
from collections import namedtuple
class Module(object):
    """A reusable subgraph, analogous to a module in torch's nn/nngraph.

    Wraps a core.Composition built from the given input and output nodes;
    calling the module splices a copy of that subgraph onto new inputs.
    """
    def __init__(self, inputs, outputs):
        self.c = core.Composition(inputs, outputs)

    def __call__(self, inputs):
        """Apply the wrapped composition to `inputs`."""
        return self.c.expand(inputs)

    def get_parameters(self):
        """Return the data nodes (learnable parameters) inside this module."""
        return [node for node in self.c.get_nodes() if node.is_data()]

    def expand(self, inputs):
        """Alias for __call__: expand the composition onto `inputs`."""
        return self.c.expand(inputs)
def is_parameter(node):
    """True iff `node` is a data node whose props flag it as a parameter."""
    flagged = node.is_data() and node.props["is_parameter"]
    return flagged
def get_parameters(loss):
    """Collect every parameter node in the graph that `loss` depends on."""
    return [node for node in cgt.core.topsorted([loss]) if is_parameter(node)]
def parameter(val, name=None, device=None):
    """Create a shared variable from `val` and mark it as a parameter.

    The shape is fixed ("all" dims) so downstream shape inference can rely
    on it; props["is_parameter"] makes it discoverable by get_parameters.
    """
    node = cgt.shared(val, name=name, device=device, fixed_shape_mask="all")
    node.props["is_parameter"] = True
    return node
# ================================================================
# Math functions
# ================================================================
def rectify(x):
    """ReLU: keep non-negative entries of x, zero out the rest."""
    nonneg_mask = x >= 0
    return x * nonneg_mask
def _nu_softplus(x,out):
absx = np.abs(x)
out[:] = (absx+x)/2 + np.log(1 + np.exp(-absx))
def softplus(x):
    """Elementwise softplus log(1+exp(x)) as a cgt graph op."""
    grad_fn = lambda x, g, gy: gy/(cgt.exp(-x)+1.0)
    unary_info = core.UnaryInfo(
        "SoftPlus", _nu_softplus, True, 'f', grad_fn,
        "(x > 0) ? (x + log(exp(-x) + 1)) : log(1+exp(x))")
    return core.Result(core.ElwiseUnary("softplus", unary_info), [x])
def softmax(x, axis=1):
    """Softmax of x along `axis`.

    NOTE(review): there is no max-subtraction before exp, so very large
    inputs can overflow; a stabilization step appears to have been
    considered and left disabled — confirm before using with large logits.
    """
    exps = cgt.exp(x)
    totals = exps.sum(axis=axis, keepdims=True)
    return cgt.broadcast("/", exps, totals, "xx,x1")
def logsoftmax(x, axis=1):
    """Log of the softmax of x along `axis`."""
    probs = softmax(x, axis=axis)
    return cgt.log(probs)
def binary_crossentropy(x, y):
    """Elementwise binary cross-entropy between predictions x and targets y."""
    pos_term = y * cgt.log(x)
    neg_term = (1 - y) * cgt.log(1 - x)
    return -(pos_term + neg_term)
def zero_one_loss(x, y):
    """Per-example 0/1 accuracy indicator: argmax(x) equals integer label y."""
    assert x.ndim == 2 and y.ndim in (1,2) and core.dtype_kind(y.dtype)=='i'
    predictions = x.argmax(axis=1, keepdims=False)
    return cgt.equal(predictions, y.flatten())
def dropout(x, p=0):
    """Inverted dropout: zero each unit with probability p, rescale by 1/(1-p)."""
    if p == 0:
        return x
    keep_mask = cgt.greater(cgt.rand(*cgt.shape(x)), p)
    masked = x * keep_mask
    return masked / (1.0 - p)
# ================================================================
# Image processing functions
# ================================================================
# Pooling geometry: kernel size, zero padding, and stride per spatial dim.
PoolInfo = namedtuple("PoolInfo", ["kernel_h", "kernel_w", "pad_h", "pad_w", "stride_h", "stride_w"])
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0,0), stride=(1,1)):
    """2D convolution.

    Name-suffix convention: x_BKRC is (batch B, in-channels K, rows R,
    cols C); f_LKrc is (out-channels L, in-channels K, kernel rows r,
    kernel cols c). On GPU this dispatches to cuDNN (with an explicit zero
    bias input); on CPU it uses im2col followed by one matrix multiply.
    """
    devtype = cgt.get_config()["default_device"].devtype
    L,K,r,c = f_LKrc.shape
    if devtype == "gpu":
        # Zero bias tensor required by the cuDNN forward op.
        # NOTE(review): the name says 1K11 but the shape uses L (out-channels);
        # shape (1,L,1,1) looks correct, so presumably the name should be b_1L11.
        b_1K11 = cgt.zeros((1,L,1,1), cgt.floatX)
        return core.Result(cudnn_ops.CudnnConvForward(pad[0],pad[1],stride[0],stride[1]), [x_BKRC, f_LKrc, b_1K11])
    else:
        assert devtype == "cpu"
        # im2col lays each receptive field out as a row of length Z = K*r*c,
        # so the convolution becomes (B*m*n, Z) x (L, Z)^T.
        col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
        f_LZ = f_LKrc.reshape([L, K*r*c])
        B,m,n,Z = col_BmnZ.shape
        col_Bmn_Z = col_BmnZ.reshape([B*m*n, Z])
        # Mul22(False, True): multiply with the second operand transposed.
        col_Bmn_L = core.Result(core.Mul22(False,True), [col_Bmn_Z, f_LZ])
        # Back to (B, L, m, n) layout.
        return col_Bmn_L.reshape([B,m,n,L]).transpose([0,3,1,2])
def max_pool_2d(x, kernelshape, pad = (0,0), stride=(1,1)):
    """Spatial max pooling over x; dispatches to cuDNN on GPU devices."""
    kh, kw = kernelshape
    ph, pw = pad
    sh, sw = stride
    pool_info = PoolInfo(kh, kw, ph, pw, sh, sw)
    devtype = cgt.get_config()["default_device"].devtype
    if devtype == "gpu":
        return core.Result(cudnn_ops.CudnnPoolForward(pool_info), [x])
    # CPU path: MaxPool returns a tuple; the pooled output is element 0.
    return core.Result(MaxPool(pool_info), [x])[0]
# ================================================================
# Initializations
# ================================================================
# Weight-initializer specs, consumed by init_array below.
IIDGaussian = namedtuple("IIDGaussian", ["mean", "std"])
IIDGaussian.__new__.__defaults__ = (0, 1)  # defaults to standard normal
IIDUniform = namedtuple("IIDUniform", ["low", "high"])
Constant = namedtuple("Constant", ["constant"])
XavierNormal = namedtuple("XavierNormal", ["scale"])
XavierUniform = namedtuple("XavierUniform", ["scale"])
XavierNormal.__new__.__defaults__ = (1,)
XavierUniform.__new__.__defaults__ = (1,)
HeNormal = namedtuple("HeNormal", ["scale"])
HeUniform = namedtuple("HeUniform", ['scale'])
def init_array(init, shape):
    """Sample a numpy array of `shape` according to the initializer spec.

    Args:
        init: one of IIDGaussian, IIDUniform, Constant, XavierNormal,
            XavierUniform, HeNormal, HeUniform.
        shape: tuple of ints.

    Returns:
        np.ndarray of dtype cgt.floatX.

    Raises:
        ValueError: if `init` is not a recognized initializer.
    """
    if isinstance(init, IIDGaussian):
        return (np.random.randn(*shape)*init.std + init.mean).astype(cgt.floatX)
    elif isinstance(init, IIDUniform):
        return (np.random.rand(*shape)*(init.high-init.low) + init.low).astype(cgt.floatX)
    elif isinstance(init, Constant):
        return init.constant*np.ones(shape, cgt.floatX)
    elif isinstance(init, XavierNormal):
        std = get_xavier_weight(init, shape)
        return (np.random.randn(*shape)*std).astype(cgt.floatX)
    elif isinstance(init, XavierUniform):
        # Uniform on [-sqrt(3)*std, sqrt(3)*std], which has std dev `std`.
        # Fixed: low/high were previously swapped (high negative, low
        # positive); the sampled distribution was the same by symmetry, but
        # this now matches the HeUniform branch below.
        std = get_xavier_weight(init, shape)
        low = -np.sqrt(3) * std
        high = np.sqrt(3) * std
        return (np.random.rand(*shape)*(high-low) + low).astype(cgt.floatX)
    elif isinstance(init, HeNormal):
        std = get_he_weight(init, shape)
        return (np.random.randn(*shape)*std).astype(cgt.floatX)
    elif isinstance(init, HeUniform):
        std = get_he_weight(init, shape)
        low = -np.sqrt(3) * std
        high = np.sqrt(3) * std
        return (np.random.rand(*shape)*(high-low) + low).astype(cgt.floatX)
    else:
        raise ValueError("Invalid initializer %s"%init)
def get_xavier_weight(init, shape):
    r"""Return the Xavier/Glorot standard deviation for weights of `shape`.

    For relu activation scale (init.scale) should be sqrt(2); for sigmoid
    and tanh, 1.0. Math depends on the underlying distribution and the
    activation; for uniform with RELU one obtains
        a = \sqrt{\frac{6}{fan_{in}+fan_{out}}},  W \sim U[-a, a].

    Reference: Xavier Glorot and Yoshua Bengio (2010): Understanding the
    difficulty of training deep feedforward neural networks. AISTATS.

    Raises:
        RuntimeError: if `shape` has fewer than two dimensions.
    """
    if len(shape) < 2:
        # Fixed error-message grammar (was "must at least 2").
        raise RuntimeError("Shape length must be at least 2")
    n1, n2 = shape[:2]
    # Receptive-field size for convolution weights; 1 for plain matrices.
    field_size = np.prod(shape[2:])
    scale = init.scale
    std = scale * np.sqrt(2.0 / ((n1 + n2) * field_size))
    return std
def get_he_weight(init, shape):
    """Return the He standard deviation scale*sqrt(1/fan_in) for `shape`.

    Use sqrt(2) for RELU and 1 for sigmoid/linear/tanh as init.scale.

    Reference: Kaiming He et al. (2015): Delving deep into rectifiers:
    Surpassing human-level performance on imagenet classification.
    arXiv preprint arXiv:1502.01852.

    Raises:
        RuntimeError: if `shape` has fewer than two dimensions.
    """
    if len(shape) < 2:
        raise RuntimeError("This initializer does not work with shapes of length less than two")
    # fan_in: input width for matrices, channels * receptive field for convs.
    fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
    return init.scale * np.sqrt(1.0 / fan_in)
# ================================================================
# Layer constructors
# ================================================================
class Affine(object):
    """
    Fully-connected layer, like torch's nn.Linear: y = x.dot(W) + b.
    """
    def __init__(self, input_size, output_size, name=None, weight_init=Constant(0), bias_init=Constant(0)):
        in_size = int(input_size)
        out_size = int(output_size)
        prefix = name if name is not None else "unnamed"
        # Weight is (in, out); bias is a (1, out) row broadcast over the batch.
        self.weight = parameter(init_array(weight_init, (in_size, out_size)),
            name=prefix + ".W")
        self.bias = parameter(init_array(bias_init, (1, out_size)),
            name=prefix + ".b")

    def __call__(self, x):
        return cgt.broadcast("+", x.dot(self.weight), self.bias, "xx,1x")
class SpatialConvolution(object):
    """2D convolution layer.

    Holds a weight tensor of shape (output_channels, input_channels, kh, kw)
    and a bias of shape (1, output_channels, 1, 1) broadcast over batch and
    spatial dimensions.
    """
    def __init__(self, input_channels, output_channels, kernelshape, pad, stride=(1,1), name=None, weight_init=Constant(0), bias_init=Constant(0)):
        # type conversion
        input_channels = int(input_channels)
        output_channels = int(output_channels)
        self.kernelshape = tuple(map(int, kernelshape))
        self.pad = tuple(map(int,pad))
        self.stride = tuple(map(int,stride))
        name = "unnamed" if name is None else name
        self.weight = parameter(init_array(weight_init, (output_channels, input_channels) + self.kernelshape),
            name=name+".W")
        self.bias = parameter(init_array(bias_init, (1, output_channels, 1, 1)),
            name=name+".b")
    def __call__(self, x):
        # Convolve, then add the per-channel bias (broadcast over B, H, W).
        tmp = conv2d(x, self.weight, self.kernelshape, self.pad, self.stride)
        return cgt.broadcast("+", tmp, self.bias, "xxxx,1x11")
# ================================================================
# Optimization
# ================================================================
def sgd(cost, params, learning_rate):
    """Stochastic Gradient Descent (SGD) updates.

    Math:
        * ``param := param - learning_rate * gradient``

    Parameters
    ----------
    cost : a scalar loss.
    params : a list of cgt shared variables. We generate update
        expressions w.r.t. these variables.
    learning_rate : float

    Returns
    -------
    list of tuples of the form (param, new_param)
    """
    grads = cgt.grad(cost, params)
    return [(param, param - learning_rate * grad)
            for param, grad in zip(params, grads)]
def momentum(cost, params, learning_rate, mu=0.9):
    """Stochastic Gradient Descent (SGD) updates with momentum.

    Math:
        * ``velocity := mu * velocity - learning_rate * grad``
        * ``param := param + velocity``

    Parameters
    ----------
    cost : a scalar loss.
    params : a list of cgt shared variables. We generate update
        expressions w.r.t. these variables.
    learning_rate : float
        Tunes the size of the update step.
    mu : float
        Tunes the weight given to the velocity term.

    Returns
    -------
    list of tuples of the form (param, new_param) and (velocity, new_velocity)
    """
    grads = cgt.grad(cost, params)
    updates = []
    for param, grad in zip(params, grads):
        assert isinstance(param.op, core.GetData)
        # One velocity accumulator per parameter, initialized to zero.
        velocity = cgt.shared(np.zeros(param.op.get_shape(), dtype=param.dtype))
        velocity_next = mu * velocity - learning_rate * grad
        updates.append((velocity, velocity_next))
        updates.append((param, param + velocity_next))
    return updates
def nesterov_momentum(cost, params, learning_rate, mu=0.9):
    """Stochastic Gradient Descent (SGD) updates with Nesterov momentum.

    Math:
        * ``new_velocity := mu * velocity - learning_rate * grad``
        * ``param := param - mu * velocity + (1 + mu) * new_velocity``

    See http://arxiv.org/abs/1212.0901v2, first part of eq 7. At each step
    we're returning the "peeked-ahead parameters".

    Parameters
    ----------
    cost : a scalar loss.
    params : a list of cgt shared variables. We generate update
        expressions w.r.t. these variables.
    learning_rate : float
        Tunes the size of the update step.
    mu : float
        Tunes the weight given to the velocity term.

    Returns
    -------
    list of tuples of the form (param, updates), (velocity, velocity_update)
    """
    grads = cgt.grad(cost, params)
    updates = []
    for param, grad in zip(params, grads):
        assert isinstance(param.op, core.GetData)
        # One velocity accumulator per parameter, initialized to zero.
        velocity = cgt.shared(np.zeros(param.op.get_shape(), dtype=param.dtype))
        velocity_next = mu * velocity - learning_rate * grad
        peeked_param = param - mu * velocity + (mu + 1) * velocity_next
        updates.append((velocity, velocity_next))
        updates.append((param, peeked_param))
    return updates
def adagrad(cost, params, learning_rate=1.0, epsilon=1e-6):
    """Adagrad updates.

    The learning rate is scaled by dividing it by the square root of the
    sum of accumulated squared gradients.
    Math:
    * ``accu_new = accu + grad ** 2``
    * ``param = param - (learning_rate * grad) / cgt.sqrt(accu_new + epsilon)``

    Parameters
    ----------
    cost : a scalar loss.
    params : a list of cgt shared variables. We generate update
        expressions w.r.t. these variables.
    learning_rate : float
        Tunes the size of the update step.
    epsilon : float
        Avoids division close to zero.

    Returns
    -------
    list of tuples of the form
    (accumulated_grads, accumulated_grads_new), (param, updates)

    References
    ----------
    .. [1] Duchi, J., Hazan, E., & Singer, Y. (2011):
        Adaptive subgradient methods for online learning and stochastic
        optimization. JMLR, 12:2121-2159.
    """
    gradients = cgt.grad(cost, params)
    result = []
    for p, g in zip(params, gradients):
        # updates can only be generated for raw shared-data parameters
        assert isinstance(p.op, core.GetData)
        # per-parameter running sum of squared gradients
        sq_sum = cgt.shared(np.zeros(p.op.get_shape(), dtype=p.dtype))
        sq_sum_next = sq_sum + g ** 2
        result.append((sq_sum, sq_sum_next))
        result.append((p, p - (learning_rate * g) / cgt.sqrt(sq_sum_next + epsilon)))
    return result
def rmsprop(cost, params, learning_rate=1.0, rho=0.9, epsilon=1e-6):
    """RMSProp updates.

    Divides the learning rate by a moving average of RMS gradients. See [1].
    Math:
    * ``accu_new = rho * accu + (1 - rho) * grad ** 2``
    * ``param = param - (learning_rate * grad / cgt.sqrt(accu_new + epsilon))``

    Parameters
    ----------
    cost : a scalar loss.
    params : a list of cgt shared variables. We generate update
        expressions w.r.t. these variables.
    learning_rate : float
        Tunes the size of the update step.
    rho : float
        Controls decay of the gradient moving average.
    epsilon : float
        Avoids division by 0 while scaling. Small constant.

    Returns
    -------
    list of tuples of the form
    (accumulated_RMS_grads, accumulated_RMS_grads_new), (param, updates)

    References
    ----------
    .. [1] Yann N. Dauphin, Harm de Vries, Junyoung Chung, Yoshua Bengio (2015):
        RMSProp and equilibrated adaptive learning rates for non-convex
        optimization. arXiv:1502.04390 http://arxiv.org/abs/1502.04390
    """
    gradients = cgt.grad(cost, params)
    result = []
    for p, g in zip(params, gradients):
        # updates can only be generated for raw shared-data parameters
        assert isinstance(p.op, core.GetData)
        # exponentially-decayed running average of squared gradients
        avg_sq = cgt.shared(np.zeros(p.op.get_shape(), dtype=p.dtype))
        avg_sq_next = rho * avg_sq + (1 - rho) * g ** 2
        result.append((avg_sq, avg_sq_next))
        result.append((p, p - (learning_rate * g / cgt.sqrt(avg_sq_next + epsilon))))
    return result
def adadelta(cost, params, learning_rate=1.0, rho=0.95, epsilon=1e-6):
    """Adadelta updates.
    The learning rate is scaled by the ratio of accumulated gradients to the
    ratio of accumulated step sizes.
    Math:
    * ``accu_new = rho * accu + (1 - rho) * grad ** 2``
    * ``update = (grad * cgt.sqrt(delta_accu + epsilon) / cgt.sqrt(accu_new + epsilon))``
    * ``param = param - learning_rate * update``
    * ``delta_accu_new = rho * delta_accu + (1 - rho) * update ** 2``
    Parameters
    ----------
    cost : a scalar loss.
    params : a list of cgt shared variables. We generate update
        expressions w.r.t. these variables.
    learning_rate : float
        Tunes the size of the update step.
    rho : float
        Controls decay of gradient moving average.
    epsilon : float
        Avoid division by 0 while scaling. Small constant.
    Returns
    -------
    list of tuples of the form
    (accumulated_grads, accumulated_grads_new), (param, updates),
    (step_accum, step_accum_new) -- in that order per parameter.
    References
    ----------
    .. [1] Zeiler, M. D. (2012):
        ADADELTA: An Adaptive Learning Rate Method.
        arXiv Preprint arXiv:1212.5701.
    """
    updates = []
    grads = cgt.grad(cost, params)
    for param, grad in zip(params, grads):
        # updates can only be generated for raw shared-data parameters
        assert isinstance(param.op, core.GetData)
        # two accumulators per parameter: squared gradients and squared steps
        accu = cgt.shared(np.zeros(param.op.get_shape(), dtype=param.dtype))
        delta_accu = cgt.shared(np.zeros(param.op.get_shape(), dtype=param.dtype))
        accu_new = rho * accu + (1 - rho) * grad ** 2
        updates.append((accu, accu_new))
        # step modulated by the ratio of the two running averages;
        # epsilon keeps the division and the first step well-defined
        update = (grad * cgt.sqrt(delta_accu + epsilon) / cgt.sqrt(accu_new + epsilon))
        updates.append((param, param - learning_rate * update))
        delta_accu_new = rho * delta_accu + (1 - rho) * update ** 2
        updates.append((delta_accu, delta_accu_new))
    return updates
| |
import json
import time
from django.shortcuts import render, get_object_or_404, render_to_response, \
redirect
from django.utils import formats
from django.db import transaction
from django.db.models import F
from django.http import HttpResponse, Http404
from django.views.generic import View
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from votes.models import Vote, Option, Status, ActiveVote, PassiveVote
from filters.models import UserFilter
from users.models import UserProfile, Admin
from settings.models import VotingSystem
from django.contrib.auth.models import User
from votes.forms import EditVoteForm, EditVoteFilterForm, \
EditVoteOptionsForm, GetVoteOptionForm, EditVoteOptionForm, PasswordForm, \
EditScheduleForm, AdminSelectForm
# Template paths shared by the views in this module.
VOTE_ERROR_TEMPLATE = "vote/vote_msg.html"  # generic error / notice page
VOTE_RESULT_TEMPLATE = "vote/vote_result.html"  # public results page
VOTE_EDIT_TEMPLATE = "vote/vote_edit.html"  # admin vote-editing page
SYS_EDIT_TEMPLATE = "vote/user_list.html"  # voting-system admin management page
def get_vote_and_system_or_404(system_name, vote_name):
    """
    Resolve a voting system and one of its votes by machine name.

    Raises Http404 when either lookup fails; otherwise returns the
    ``(system, vote)`` tuple.
    """
    voting_system = get_object_or_404(VotingSystem, machine_name=system_name)
    matching_vote = get_object_or_404(Vote, machine_name=vote_name,
                                      system=voting_system)
    return (voting_system, matching_vote)
def system_home(request, system_name):
    """
    Overview page for a single voting system.

    Admins of the system see every vote plus closed/public results and an
    informational alert; everyone else only sees open votes and results
    that have been made public.
    """
    system = get_object_or_404(VotingSystem, machine_name=system_name)
    ctx = {'vs': system}
    user = request.user
    if user.is_authenticated() and system.isAdmin(user.profile):
        # privileged view: everything, with a notice about visibility
        ctx.update({
            'votes': Vote.objects.filter(system=system),
            'results': Vote.objects.filter(
                system=system,
                status__stage__in=[Status.PUBLIC, Status.CLOSE]),
            'alert_type': 'info',
            'alert_head': 'Non-public items shown',
            'alert_text': 'Some items shown here may not be public.',
            'is_vs_admin': True,
        })
    else:
        # public view: only open votes and published results
        ctx['votes'] = Vote.objects.filter(system=system,
                                           status__stage=Status.OPEN)
        ctx['results'] = Vote.objects.filter(system=system,
                                             status__stage=Status.PUBLIC)
    return render(request, "vote/vote_system_overview.html", ctx)
@login_required
def admin(request, system_name, alert_type=None, alert_head=None,
          alert_text=None):
    """
    Render the admin-management page for a voting system.

    Only admins of the system may view it (PermissionDenied otherwise).
    The optional ``alert_*`` parameters let other views redisplay this
    page with a status message.
    """
    # get the voting system
    ctx = {}
    vs = get_object_or_404(VotingSystem, machine_name=system_name)
    # raise an error if the user trying to access is not an admin
    if not vs.isAdmin(request.user.profile):
        raise PermissionDenied
    ctx['vs'] = vs
    # add an alert state if needed
    if alert_head or alert_text or alert_type:
        ctx['alert_type'] = alert_type
        ctx['alert_head'] = alert_head
        ctx['alert_text'] = alert_text
    # all the admins
    ctx['admins'] = vs.admin_set.all()
    # set instead of list: the original list scan made the comprehension
    # below O(admins * users); membership here is O(1) per user
    admin_users = set(a.user for a in ctx['admins'])
    ctx['not_admins'] = [u for u in User.objects.all() if u not in admin_users]
    return render(request, SYS_EDIT_TEMPLATE, ctx)
@login_required
def admin_add(request, system_name):
    """
    Grant admin rights on a voting system to a user (POST only).

    Redisplays the admin page with a success or failure alert.
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    # get the voting system
    vs = get_object_or_404(VotingSystem, machine_name=system_name)
    # raise an error if the user trying to access is not an admin
    if not vs.isAdmin(request.user.profile):
        raise PermissionDenied
    try:
        # parse the form and look up the selected user
        form = AdminSelectForm(request.POST)
        if not form.is_valid():
            raise Exception
        user = User.objects.filter(username=form.cleaned_data["username"])[0]
    # narrowed from a bare ``except:`` so system-exiting exceptions
    # (KeyboardInterrupt, SystemExit) are no longer swallowed
    except Exception:
        return admin(request, system_name=system_name,
                     alert_head='Grant Failed',
                     alert_text='Invalid data submitted')
    try:
        sa = Admin(user=user, system=vs)
        sa.save()
    except Exception as e:
        return admin(request, system_name=system_name,
                     alert_head='Grant Failed', alert_text=str(e))
    return admin(request, system_name=system_name, alert_type="success",
                 alert_head="Grant succeeded",
                 alert_text="User added to admins. ")
@login_required
def admin_remove(request, system_name):
    """
    Revoke a user's admin rights on a voting system (POST only).

    Admins cannot remove themselves. Redisplays the admin page with a
    success or failure alert.
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    # get the voting system
    vs = get_object_or_404(VotingSystem, machine_name=system_name)
    # raise an error if the user trying to access is not an admin
    if not vs.isAdmin(request.user.profile):
        raise PermissionDenied
    try:
        # parse the form and look up the selected user
        form = AdminSelectForm(request.POST)
        if not form.is_valid():
            raise Exception
        user = User.objects.filter(username=form.cleaned_data["username"])[0]
    # narrowed from a bare ``except:`` so system-exiting exceptions
    # are no longer swallowed
    except Exception:
        return admin(request, system_name=system_name,
                     alert_head='Removing failed',
                     alert_text='Invalid data submitted')
    try:
        the_admin = Admin.objects.filter(system=vs, user=user)[0]
        # refuse self-removal
        if the_admin.user.username == request.user.username:
            raise Exception("Don't torture yourself. ")
        the_admin.delete()
    except Exception as e:
        return admin(request, system_name=system_name,
                     alert_head='Removing failed. ', alert_text=str(e))
    return admin(request, system_name=system_name, alert_type="success",
                 alert_head="Removing succeeded",
                 alert_text="User is no longer an admin. ")
def get_vote_props(ctx, vote):
    """
    Populate ``ctx`` with status flags for ``vote`` and return it.

    Adds the read-only flag, one boolean per lifecycle stage, the
    formatted schedule times (when set), and the ``can_*`` flags the edit
    template uses to enable actions.
    """
    status = vote.status
    stage = status.stage
    ctx["vote_readonly"] = not vote.canBeModified()
    # one boolean per lifecycle stage
    ctx["vote_is_init"] = stage == Status.INIT
    ctx["vote_is_staged"] = stage == Status.STAGED
    ctx["vote_is_open"] = stage == Status.OPEN
    ctx["vote_is_closed"] = stage == Status.CLOSE
    ctx["vote_is_public"] = stage == Status.PUBLIC
    # scheduled times: a has-flag always, the formatted value only when set
    time_format = "%Y-%m-%d %H:%M:%S"
    for label, value in (("open", status.open_time),
                         ("close", status.close_time),
                         ("public", status.public_time)):
        present = value is not None
        ctx["vote_has_%s_time" % label] = present
        if present:
            ctx["vote_%s_time" % label] = value.strftime(time_format)
    # allowed actions derived from the stage/time flags above
    ctx["can_set_stage"] = ctx["vote_is_init"]
    ctx["can_set_time"] = ctx["vote_is_init"]
    # NB: key spelling "eligibile" is kept -- templates depend on it
    ctx["can_update_eligibile"] = (ctx["vote_is_staged"] or
                                   ctx["vote_is_open"] or
                                   ctx["vote_is_closed"])
    ctx["can_set_open"] = (ctx["vote_is_staged"] and
                           not ctx["vote_has_open_time"])
    ctx["can_set_close"] = (ctx["vote_is_open"] and
                            not ctx["vote_has_close_time"])
    ctx["can_set_public"] = (ctx["vote_is_closed"] and
                             not ctx["vote_has_public_time"])
    return ctx
def vote_edit_context(request, system_name, vote_name):
    """
    Returns context and basic parameters for vote editing.

    Resolves the system and vote (Http404 on failure), refreshes the
    vote's time-driven state, enforces that the requesting user
    administrates the system (PermissionDenied otherwise), and builds the
    template context shared by all the vote-editing views below.

    Returns a ``(system, vote, ctx)`` tuple.
    """
    (system, vote) = get_vote_and_system_or_404(system_name, vote_name)
    # touch the vote (let scheduled stage transitions take effect first)
    vote.touch()
    # raise an error if the user trying to access is not an admin
    if not system.isAdmin(request.user.profile):
        raise PermissionDenied
    # make a context
    ctx = {}
    # get all the systems this user can edit
    (admin_systems, other_systems) = request.user.profile.getSystems()
    # add the vote to the system
    ctx['vote'] = vote
    ctx['vote_options'] = vote.option_set.order_by("number")
    # absolute URLs shown in the edit page for sharing
    ctx['vote_uri'] = request.build_absolute_uri(
        reverse('votes:vote', kwargs={
            "system_name": system.machine_name,
            "vote_name": vote.machine_name
        })
    )
    ctx['results_uri'] = request.build_absolute_uri(
        reverse('votes:results', kwargs={
            "system_name": system.machine_name,
            "vote_name": vote.machine_name
        })
    )
    # and the filters
    ctx['admin_systems'] = admin_systems
    ctx['other_systems'] = other_systems
    # reload the stages (stage flags, times, can_* permissions)
    ctx = get_vote_props(ctx, vote)
    return (system, vote, ctx)
@login_required
def vote_add(request, system_name):
    """
    Create a blank vote in the system and redirect to its edit page.
    """
    vs = get_object_or_404(VotingSystem, machine_name=system_name)
    # only admins of this system may create votes
    if not vs.isAdmin(request.user.profile):
        raise PermissionDenied
    status = Status()
    status.save()
    vote = Vote()
    vote.name = "Untitled Vote 1"
    # timestamp keeps the auto-generated machine name unique
    vote.machine_name = "new_" + str(int(time.time()))
    vote.system = vs
    vote.status = status
    vote.creator = request.user
    vote.min_votes = 0
    vote.max_votes = 0
    vote.save()
    return redirect('votes:edit', system_name=system_name,
                    vote_name=vote.machine_name)
@login_required
def vote_delete(request, system_name, vote_name):
    """
    Delete a vote (when the user may) and return to the system overview.

    ``vote_edit_context`` performs the 404 / permission checks; if the
    user is not allowed to delete, the vote is silently left in place.
    """
    # original bound unused ``system`` and ``ctx`` locals; only the vote
    # itself is needed here
    vote = vote_edit_context(request, system_name, vote_name)[1]
    if vote.canDelete(request.user.profile):
        vote.delete()
    return redirect('votes:system', system_name=system_name)
@login_required
def vote_edit(request, system_name, vote_name):
    """
    Show the vote-editing page; on POST, save name / machine name /
    description.

    Saving is refused when the vote is read-only (already opened).
    """
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    if request.method == "POST":
        # if the vote is read-only, do not save
        if ctx["vote_readonly"]:
            ctx['alert_head'] = 'Saving failed'
            ctx['alert_text'] = 'Nice try. A vote that has been opened can ' \
                                'not be edited. '
            return render(request, VOTE_EDIT_TEMPLATE, ctx)
        # try to parse the form
        try:
            form = EditVoteForm(request.POST)
            if not form.is_valid():
                raise Exception
        except Exception:  # narrowed from a bare except
            ctx['alert_head'] = 'Saving failed'
            ctx['alert_text'] = 'Invalid data submitted'
            return render(request, VOTE_EDIT_TEMPLATE, ctx)
        # write the name and value, then save it in the database
        try:
            vote.name = form.cleaned_data['name']
            vote.machine_name = form.cleaned_data['machine_name']
            vote.description = form.cleaned_data['description']
            # validate before persisting
            vote.clean()
            vote.save()
        except Exception as e:
            # restore the machine name so template links still resolve
            vote.machine_name = vote_name
            ctx['alert_head'] = 'Saving failed'
            ctx['alert_text'] = str(e)
            return render(request, VOTE_EDIT_TEMPLATE, ctx)
        # we did it, we saved
        ctx['alert_type'] = 'success'
        ctx['alert_head'] = 'Saving succeeded'  # fixed 'suceeded' typo
        ctx['alert_text'] = 'Form saved'
    # render the template
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_filter(request, system_name, vote_name):
    """
    Associate an eligibility filter with a vote (POST only).

    Refused when the vote is read-only (already opened).
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # now try and parse the form
    try:
        form = EditVoteFilterForm(request.POST)
        if not form.is_valid():
            raise Exception
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # write filter, then save to db.
    try:
        # store the filter by id
        vote.filter = \
            UserFilter.objects.filter(id=form.cleaned_data["filter_id"])[0]
        # and try to clean + save
        vote.clean()
        vote.save()
    except Exception as e:
        # restore the machine name so template links still resolve
        vote.machine_name = vote_name
        ctx['alert_head'] = 'Saving filter failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # Woo, we made it
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Saving succeeded'  # fixed 'suceeded' typo
    ctx['alert_text'] = 'Associated filter has been updated. '
    # so render the basic template
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_stage(request, system_name, vote_name):
    """
    Stage an INIT vote: recount eligible voters and mark it STAGED
    (POST only).

    Requires the admin's password (re-submitted via PasswordForm) so that
    eligibility can be queried upstream.
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # staging is only allowed from the init stage
    if not ctx["can_set_stage"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'A vote can only be staged when it is in init ' \
                            'stage. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # now try and parse the form
    try:
        form = PasswordForm(request.POST)
        if not form.is_valid():
            raise Exception
        # read username + password
        username = request.user.username
        password = form.cleaned_data['password']
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # recount eligibility, then advance the stage
    try:
        vote.update_eligibility(username, password)
        vote.status.stage = Status.STAGED
        vote.status.save()
    except Exception as e:
        vote.machine_name = vote_name
        ctx['alert_head'] = 'Staging vote failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # reload the vote props
    ctx = get_vote_props(ctx, vote)
    # done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Status updated'
    ctx['alert_text'] = 'Vote has been staged. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_time(request, system_name, vote_name):
    """
    Save the open / close / public schedule of an INIT vote (POST only).
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # timings may only be changed while the vote is in init stage
    if not ctx["can_set_time"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Timings can not be changed'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # now try and parse the form
    try:
        form = EditScheduleForm(request.POST)
        if not form.is_valid():
            raise Exception
        # read the open / close / public times
        open_time = form.cleaned_data["open_time"]
        close_time = form.cleaned_data["close_time"]
        public_time = form.cleaned_data["public_time"]
    except Exception:
        # dropped the leftover debug ``print(e, form.errors)``: it wrote to
        # stdout and could raise NameError if the form constructor itself
        # failed (``form`` would be unbound in this handler)
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # store the schedule
    try:
        vote.status.open_time = open_time
        vote.status.close_time = close_time
        vote.status.public_time = public_time
        vote.status.save()
    except Exception as e:
        ctx['alert_head'] = 'Updating times failed. '
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # reload the vote props
    ctx = get_vote_props(ctx, vote)
    # done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Times updated'
    ctx['alert_text'] = 'Scheduling has been saved'
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_update(request, system_name, vote_name):
    """
    Re-count the voters eligible for a vote (POST only).

    Only allowed while the vote is staged, open or closed; requires the
    admin's password so eligibility can be queried upstream.
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # eligibility can only be updated in certain stages
    if not ctx["can_update_eligibile"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Given vote eligibility is already fixed. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # now try and parse the form
    try:
        form = PasswordForm(request.POST)
        if not form.is_valid():
            raise Exception
        # read username + password
        username = request.user.username
        password = form.cleaned_data['password']
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # recount the eligible voters
    try:
        vote.update_eligibility(username, password)
    except Exception as e:
        ctx['alert_head'] = 'Updating eligibility failed. '
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # reload the vote props
    ctx = get_vote_props(ctx, vote)
    # done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Eligibility updated'
    ctx['alert_text'] = 'People have been re-counted. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_open(request, system_name, vote_name):
    """
    Manually switch a staged vote to the OPEN stage (POST only).

    Only allowed when the vote is staged and has no scheduled open time.
    """
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # guard: manual opening must be permitted
    if not ctx["can_set_open"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'A vote can only be set to open if there is no ' \
                            'open time and it is already staged. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # advance the stage
    try:
        vote.status.stage = Status.OPEN
        vote.status.save()
    except Exception as e:
        ctx['alert_head'] = 'Opening vote failed'
        ctx['alert_text'] = str(e)
    else:
        # refresh the stage flags and report success
        ctx = get_vote_props(ctx, vote)
        ctx['alert_type'] = 'success'
        ctx['alert_head'] = 'Status updated'
        ctx['alert_text'] = 'Vote has been opened. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_close(request, system_name, vote_name):
    """
    Manually switch an open vote to the CLOSE stage (POST only).

    Only allowed when the vote is open and has no scheduled close time.
    """
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # guard: manual closing must be permitted
    if not ctx["can_set_close"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'A vote can only be set to close if there is no ' \
                            'close time and it is already open. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # advance the stage
    try:
        vote.status.stage = Status.CLOSE
        vote.status.save()
    except Exception as e:
        ctx['alert_head'] = 'Closing vote failed'
        ctx['alert_text'] = str(e)
    else:
        # refresh the stage flags and report success
        ctx = get_vote_props(ctx, vote)
        ctx['alert_type'] = 'success'
        ctx['alert_head'] = 'Status updated'
        ctx['alert_text'] = 'Vote has been closed. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_public(request, system_name, vote_name):
    """
    Manually publish the results of a closed vote (POST only).

    Only allowed when the vote is closed and has no scheduled public time.
    """
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # guard: manual publishing must be permitted
    if not ctx["can_set_public"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'A vote can only be set to public if there is ' \
                            'no public time and it is already closed. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # advance the stage
    try:
        vote.status.stage = Status.PUBLIC
        vote.status.save()
    except Exception as e:
        ctx['alert_head'] = 'Making vote public failed'
        ctx['alert_text'] = str(e)
    else:
        # refresh the stage flags and report success
        ctx = get_vote_props(ctx, vote)
        ctx['alert_type'] = 'success'
        ctx['alert_head'] = 'Status updated'
        ctx['alert_text'] = 'Vote has been made public. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_option(request, system_name, vote_name):
    """
    Save the option configuration of a vote (POST only): the minimum and
    maximum number of selectable options and the auto-open flag.

    Refused when the vote is read-only (already opened).
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # now try and parse the form
    try:
        form = EditVoteOptionsForm(request.POST)
        if not form.is_valid():
            raise Exception
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    min_votes = form.cleaned_data["min_votes"]
    max_votes = form.cleaned_data["max_votes"]
    auto_open_options = form.cleaned_data["auto_open_options"]
    count = vote.option_set.count()
    # validate min/max against the available options, then store them
    try:
        # check range for min votes
        if min_votes < 0 or min_votes > count:
            raise Exception(
                "Minimum number of votes must be between 0 and the number of "
                "available options. ")
        # check range for max votes
        if max_votes < 0 or max_votes > count:
            raise Exception(
                "Maximum number of votes must be between 0 and the number of "
                "available options. ")
        if min_votes > max_votes:
            raise Exception(
                "The maximum number of votes may not be smaller than the "
                "minimum number of votes. ")
        vote.min_votes = min_votes
        vote.max_votes = max_votes
        vote.auto_open_options = auto_open_options
        # and try to clean + save
        vote.clean()
        vote.save()
    except Exception as e:
        # restore the machine name so template links still resolve
        vote.machine_name = vote_name
        ctx['alert_head'] = 'Saving vote failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # Woo, we made it
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Saving succeeded'  # fixed 'suceeded' typo
    ctx['alert_text'] = 'Option configuration updated. '
    # so render the basic template
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_options_add(request, system_name, vote_name):
    """
    Append a new (blank) option to a vote (POST only).

    Refused when the vote is read-only (already opened).
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # try to add an option
    try:
        vote.addOption()
    except Exception:
        # the exception was never used (generic message shown), so the
        # unused ``as e`` binding has been dropped
        ctx['alert_head'] = 'Adding option failed'
        ctx['alert_text'] = 'Something went wrong. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Adding option succeeded'  # fixed 'suceeded' typo
    ctx['alert_text'] = 'New option added. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_options_edit(request, system_name, vote_name):
    """
    Save the fields of a single option of a vote (POST only).

    The option must belong to this vote. Refused when the vote is
    read-only (already opened).
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        form = EditVoteOptionForm(request.POST)
        if not form.is_valid():
            raise Exception
    except Exception:  # unused ``as e`` binding dropped
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # try to find the option and check it belongs to this vote
    try:
        option = Option.objects.filter(id=form.cleaned_data["option_id"])[0]
        if option.id not in vote.option_set.values_list('id', flat=True):
            raise Exception
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. That option does not exist. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # try and store the values
    try:
        option.name = form.cleaned_data["name"]
        option.description = form.cleaned_data["description"]
        option.personal_link = form.cleaned_data["personal_link"]
        option.picture_url = form.cleaned_data["picture_url"]
        option.link_name = form.cleaned_data["link_name"]
        option.clean()
        option.save()
    except Exception as e:
        ctx['alert_head'] = 'Saving option failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # Woo, we made it
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Saving succeeded'  # fixed 'suceeded' typo
    ctx['alert_text'] = 'Option saved'
    # so render the basic template
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_options_remove(request, system_name, vote_name):
    """
    Remove a single option from a vote (POST only).

    Refused when the vote is read-only (already opened).
    """
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        form = GetVoteOptionForm(request.POST)
        if not form.is_valid():
            raise Exception
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        # find the option
        option = Option.objects.filter(id=form.cleaned_data["option_id"])[0]
    except Exception:  # narrowed from a bare except
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. That option does not exist. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and try to remove it
    try:
        vote.deleteOption(option)
    except Exception as e:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Options updated'
    ctx['alert_text'] = 'Option removed. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_options_down(request, system_name, vote_name):
    # Move a single option of a vote "down" in the displayed order
    # (POST only). Refused when the vote is read-only (already opened).
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx[
            'alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        form = GetVoteOptionForm(request.POST)
        if not form.is_valid():
            raise Exception
    except:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        # find the option
        option = Option.objects.filter(id=form.cleaned_data["option_id"])[0]
    except:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. That option does not exist. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and try to move it
    try:
        # NOTE(review): this "down" view calls moveUpOption() while
        # vote_options_up calls moveDownOption(). Presumably "up"/"down"
        # on the model refer to the option's number rather than its screen
        # position — confirm against Vote.moveUpOption before changing.
        vote.moveUpOption(option)
    except Exception as e:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Options updated'
    ctx['alert_text'] = 'Option moved down. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
@login_required
def vote_options_up(request, system_name, vote_name):
    # Move a single option of a vote "up" in the displayed order
    # (POST only). Refused when the vote is read-only (already opened).
    # you may only use POST
    if request.method != "POST":
        raise Http404
    (system, vote, ctx) = vote_edit_context(request, system_name, vote_name)
    # if the vote is read-only, do not save
    if ctx["vote_readonly"]:
        ctx['alert_head'] = 'Saving failed'
        ctx[
            'alert_text'] = 'Nice try. A vote that has been opened can not ' \
                            'be edited. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        form = GetVoteOptionForm(request.POST)
        if not form.is_valid():
            raise Exception
    except:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Invalid data submitted'
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    try:
        # find the option
        option = Option.objects.filter(id=form.cleaned_data["option_id"])[0]
    except:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = 'Nice try. That option does not exist. '
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and try to move it
    try:
        # NOTE(review): this "up" view calls moveDownOption() while
        # vote_options_down calls moveUpOption(). Presumably "up"/"down"
        # on the model refer to the option's number rather than its screen
        # position — confirm against Vote.moveDownOption before changing.
        vote.moveDownOption(option)
    except Exception as e:
        ctx['alert_head'] = 'Saving failed'
        ctx['alert_text'] = str(e)
        return render(request, VOTE_EDIT_TEMPLATE, ctx)
    # and done
    ctx['alert_type'] = 'success'
    ctx['alert_head'] = 'Options updated'
    ctx['alert_text'] = 'Option moved up. '
    return render(request, VOTE_EDIT_TEMPLATE, ctx)
def results(request, system_name, vote_name):
    # Show the results of a vote.
    #
    # Public results are visible to everyone; CLOSE-stage results are
    # visible to system admins only; otherwise an error page is rendered.
    ctx = {}
    # grab vote and system
    (system, vote) = get_vote_and_system_or_404(system_name, vote_name)
    vote.touch()
    # set options and the vote
    ctx['vote'] = vote
    # options ordered by descending count, annotated with their share of
    # voters. NOTE(review): if passivevote.num_voters can be 0 this
    # division will fail at query time — confirm it is always positive
    # once a vote has been staged.
    ctx['options'] = vote.option_set.order_by('-count').annotate(
        percent=(F("count") * 100 / vote.passivevote.num_voters))
    if vote.status.stage != Status.PUBLIC:
        # closed (but not yet public) results: admins may still see them
        if vote.status.stage == Status.CLOSE and \
                request.user.is_authenticated():
            if vote.system.isAdmin(request.user.profile):
                ctx['alert_type'] = 'info'
                ctx['alert_head'] = 'Non-public'
                ctx['alert_text'] = 'The results are not public yet. You ' \
                                    'can see the results because you are ' \
                                    'admin.'
                return render(request, VOTE_RESULT_TEMPLATE, ctx)
        # everyone else gets an error page
        ctx['alert_type'] = 'danger'
        ctx['alert_head'] = 'Non-public'
        ctx[
            'alert_text'] = 'The results are not public yet. Please come ' \
                            'back later.'
        return render(request, VOTE_ERROR_TEMPLATE, ctx)
    # render the stuff
    return render(request, VOTE_RESULT_TEMPLATE, ctx)
class VoteView(View):
    """Shows a vote's ballot page (GET) and records a submitted ballot (POST)."""
    def __init__(self, preview=False):
        # preview=True renders the ballot read-only: voting is disabled below.
        self.preview = preview
    @method_decorator(login_required)
    def get(self, request, system_name, vote_name):
        """Render the ballot, or an error page when the user may not vote."""
        (system, vote) = get_vote_and_system_or_404(system_name, vote_name)
        vote.touch()
        filter = vote.filter
        status = vote.status
        error = False
        ctx = {}
        ctx['vote'] = vote
        ctx['preview'] = self.preview
        options = vote.option_set.order_by("number")
        ctx['options'] = options
        # TODO Check status of vote
        # Eligibility check: the vote's filter is matched against the user's
        # profile details (JSON blob fetched from CampusNet).
        try:
            user_details = json.loads(request.user.profile.details)
            if not filter:
                ctx['alert_head'] = "No filter given."
                ctx['alert_text'] = "This vote has not been configured " \
                                    "properly."
                error = True
            elif not filter.matches(user_details):
                ctx['alert_head'] = "Not eligible"
                ctx['alert_text'] = "You are not eligible for this vote. " \
                                    "Tough luck."
                error = True
        except UserProfile.DoesNotExist:
            ctx['alert_head'] = "User details invalid."
            ctx['alert_text'] = "Your user details could not be retrieved " \
                                "from CampusNet. Please log out and try " \
                                "again later."
            error = True
        # Existence check only; the fetched ActiveVote object itself is unused.
        # Note: this warning does not set error=True, so the page still renders.
        try:
            av = ActiveVote.objects.get(user=request.user, vote=vote)
            ctx['alert_type'] = "warning"
            ctx['alert_head'] = "You have already voted."
            ctx['alert_text'] = "Every user can only vote once. You have."
        except ActiveVote.DoesNotExist:
            pass
        if (status.stage not in [Status.OPEN, Status.PUBLIC]):
            error = True
            ctx['alert_type'] = "danger"
            ctx['alert_head'] = "Not open"
            ctx['alert_text'] = "This vote is not open. Come back later."
        if self.preview:
            error = True
            ctx['alert_type'] = "info"
            ctx['alert_head'] = "Preview"
            ctx['alert_text'] = "This is a preview, so you can't vote here."
        if not error or self.preview:
            if status.stage == Status.PUBLIC:
                # Results already public: send the user straight to them.
                return redirect('votes:results',
                                system_name=system.machine_name,
                                vote_name=vote.machine_name)
            return render(request, "vote/vote_vote.html", context=ctx)
        else:
            return render(request, VOTE_ERROR_TEMPLATE, context=ctx,
                          status=403)
    def render_error_response(self, ctx):
        # Shared helper for POST validation failures.
        return render_to_response(VOTE_ERROR_TEMPLATE, context=ctx)
    @method_decorator(login_required)
    def post(self, request, system_name, vote_name):
        """Validate a submitted ballot and count it inside one transaction."""
        ctx = {}
        # Make sure all the POST params are present
        if 'vote_id' not in request.POST:
            ctx['alert_head'] = "Something happened."
            ctx['alert_text'] = "Go back to the start and try again."
            return self.render_error_response(ctx)
        if 'options_selected' not in request.POST:
            ctx['alert_head'] = "Something happened."
            ctx['alert_text'] = "Go back and start over."
            return self.render_error_response(ctx)
        options = json.loads(request.POST['options_selected'])
        (system, vote) = get_vote_and_system_or_404(system_name, vote_name)
        vote.touch()
        filter = vote.filter
        error = False
        # Only options that actually belong to this vote survive this filter,
        # so forged option ids are silently dropped.
        options_obj = Option.objects.filter(id__in=options, vote__id=vote.id)
        pv = PassiveVote.objects.get(vote=vote)
        ctx['vote'] = vote
        ctx['options'] = options_obj
        if not (len(options_obj) >= vote.min_votes and len(
                options_obj) <= vote.max_votes):
            ctx['alert_head'] = "Invalid selection."
            ctx['alert_text'] = "Invalid number of options selected."
            return self.render_error_response(ctx)
        # Same eligibility checks as in get(); failures are detected below
        # via the presence of 'alert_head' in ctx.
        try:
            user_details = json.loads(request.user.profile.details)
            if not filter:
                ctx['alert_head'] = "No filter given."
                ctx['alert_text'] = "This vote has not been configured " \
                                    "properly."
            elif not filter.matches(user_details):
                ctx['alert_head'] = "Not eligible"
                ctx['alert_text'] = "You are not eligible for this vote. " \
                                    "Tough luck."
        except UserProfile.DoesNotExist:
            ctx['alert_head'] = "User details invalid."
            ctx['alert_text'] = "Your user details could not be retrieved " \
                                "from CampusNet. Please log out and try " \
                                "again later."
        if 'alert_head' in ctx:
            return self.render_error_response(ctx)
        # TODO Check status of vote before counting vote
        # TODO Do the actual vote counting
        # Steps (all in one transaction)
        # 1. Check if user has already voted
        # 2. Increase count of selected options
        # 3. Create ActiveVote with vote and user
        with transaction.atomic():
            try:
                av = ActiveVote.objects.get(user=request.user, vote=vote)
                ctx['alert_head'] = "You have already voted."
                ctx['alert_text'] = "Every user can only vote once. You have."
                return self.render_error_response(ctx)
            except ActiveVote.DoesNotExist:
                av = ActiveVote(user=request.user, vote=vote)
                av.save()
                pv.num_voters += 1
                pv.save()
                for opt in options_obj:
                    # F() expression: increment in SQL to avoid lost updates
                    # from concurrent ballots.
                    opt.count = F('count') + 1
                    opt.save()
        ctx = {}
        ctx['page_title'] = "Vote Done"
        ctx['alert_type'] = "success"
        ctx['alert_head'] = "You voted!"
        ctx['alert_text'] = "Your votes have been counted."
        # NOTE(review): the success page reuses VOTE_ERROR_TEMPLATE —
        # presumably a generic alert template despite its name; confirm.
        return render_to_response(VOTE_ERROR_TEMPLATE, context=ctx)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various high level TF models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.ops import autoencoder_ops
from tensorflow.contrib.learn.python.learn.ops import dnn_ops
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def linear_regression_zero_init(X, y):
  """Linear regression subgraph whose weights and bias start at zero.

  Convenience wrapper around linear_regression() for convex use cases
  where a deterministic all-zero initialization is desirable.

  Args:
    X: tensor or placeholder for input features.
    y: tensor or placeholder for target.

  Returns:
    Predictions and loss tensors.
  """
  return linear_regression(X, y, init_mean=0.0, init_stddev=0.0)
def logistic_regression_zero_init(X, y):
  """Logistic regression subgraph whose weights and bias start at zero.

  Convenience wrapper around logistic_regression() for convex use cases
  where a deterministic all-zero initialization is desirable.

  Args:
    X: tensor or placeholder for input features.
    y: tensor or placeholder for target.

  Returns:
    Predictions and loss tensors.
  """
  return logistic_regression(X, y, init_mean=0.0, init_stddev=0.0)
def linear_regression(X, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    X: tensor or placeholder for input features.
    y: tensor or placeholder for target.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard devation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddv. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initialzer will be used.
  """
  with vs.variable_scope('linear_regression'):
    logging_ops.histogram_summary('linear_regression.X', X)
    logging_ops.histogram_summary('linear_regression.y', y)
    # Scalar target if y is rank-1, otherwise one output per target column.
    y_shape = y.get_shape()
    output_shape = 1 if len(y_shape) == 1 else y_shape[1]
    input_shape = X.get_shape()[1]
    if init_mean is None:
      # No explicit initialization requested: rely on the scope default
      # (uniform unit scaling).
      weights = vs.get_variable('weights', [input_shape, output_shape])
      bias = vs.get_variable('bias', [output_shape])
    else:
      initializer = init_ops.random_normal_initializer(init_mean, init_stddev)
      weights = vs.get_variable('weights', [input_shape, output_shape],
                                initializer=initializer)
      bias = vs.get_variable('bias', [output_shape],
                             initializer=initializer)
    logging_ops.histogram_summary('linear_regression.weights', weights)
    logging_ops.histogram_summary('linear_regression.bias', bias)
    return losses_ops.mean_squared_error_regressor(X, y, weights, bias)
def logistic_regression(X,
                        y,
                        class_weight=None,
                        init_mean=None,
                        init_stddev=1.0):
  """Creates logistic regression TensorFlow subgraph.

  Args:
    X: tensor or placeholder for input features,
       shape should be [batch_size, n_features].
    y: tensor or placeholder for target,
       shape should be [batch_size, n_classes].
    class_weight: tensor, [n_classes], where for each class
                  it has weight of the class. If not provided
                  will check if graph contains tensor `class_weight:0`.
                  If that is not provided either all ones are used.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard devation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddv. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initialzer will be used.
  """
  with vs.variable_scope('logistic_regression'):
    logging_ops.histogram_summary('logistic_regression.X', X)
    logging_ops.histogram_summary('logistic_regression.y', y)
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable('weights',
                                [X.get_shape()[1], y.get_shape()[-1]])
      bias = vs.get_variable('bias', [y.get_shape()[-1]])
    else:
      weights = vs.get_variable('weights',
                                [X.get_shape()[1], y.get_shape()[-1]],
                                initializer=init_ops.random_normal_initializer(
                                    init_mean, init_stddev))
      bias = vs.get_variable('bias', [y.get_shape()[-1]],
                             initializer=init_ops.random_normal_initializer(
                                 init_mean, init_stddev))
    logging_ops.histogram_summary('logistic_regression.weights', weights)
    logging_ops.histogram_summary('logistic_regression.bias', bias)
    # If no class weight provided, try to retrieve one from pre-defined
    # tensor name in the graph.
    # Bug fix: compare against None explicitly.  The previous truthiness
    # test (`if not class_weight:`) applied Python bool() to a tf.Tensor,
    # which raises a TypeError, so the function crashed whenever a
    # class_weight tensor was actually supplied.
    if class_weight is None:
      try:
        class_weight = ops.get_default_graph().get_tensor_by_name(
            'class_weight:0')
      except KeyError:
        pass

    return losses_ops.softmax_classifier(X,
                                         y,
                                         weights,
                                         bias,
                                         class_weight=class_weight)
def get_dnn_model(hidden_units, target_predictor_fn, dropout=None):
  """Returns a function that wires a DNN in front of target_predictor_fn.

  Args:
    hidden_units: List of values of hidden units for layers.
    target_predictor_fn: Function that will predict target from input
                         features. This can be logistic regression,
                         linear regression or any other model,
                         that takes X, y and returns predictions and loss
                         tensors.
    dropout: When not none, causes dropout regularization to be used,
             with the specified probability of removing a given coordinate.

  Returns:
    A function that creates the subgraph.
  """
  def dnn_estimator(X, y):
    """DNN estimator with target predictor function on top."""
    hidden = dnn_ops.dnn(X, hidden_units, dropout=dropout)
    return target_predictor_fn(hidden, y)
  return dnn_estimator
def get_autoencoder_model(hidden_units, target_predictor_fn,
                          activation, add_noise=None, dropout=None):
  """Returns a function that creates a Autoencoder TensorFlow subgraph.

  Args:
    hidden_units: List of values of hidden units for layers.
    target_predictor_fn: Function that will predict target from input
                         features. This can be logistic regression,
                         linear regression or any other model,
                         that takes X, y and returns predictions and loss
                         tensors.
    activation: activation function used to map inner latent layer onto
                reconstruction layer.
    add_noise: a function that adds noise to tensor_in,
               e.g. def add_noise(x):
                        return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
    dropout: When not none, causes dropout regularization to be used,
             with the specified probability of removing a given coordinate.

  Returns:
    A function that creates the subgraph.
  """
  def dnn_autoencoder_estimator(X):
    """Autoencoder estimator with target predictor function on top."""
    encoding, reconstruction = autoencoder_ops.dnn_autoencoder(
        X, hidden_units, activation,
        add_noise=add_noise, dropout=dropout)
    predictions_and_loss = target_predictor_fn(X, reconstruction)
    return encoding, reconstruction, predictions_and_loss
  return dnn_autoencoder_estimator
## This will be in Tensorflow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops_.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops_.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw,
                      cell_bw,
                      inputs,
                      initial_state_fw=None,
                      initial_state_bw=None,
                      dtype=None,
                      sequence_length=None,
                      scope=None):
  """Creates a bidirectional recurrent neural network.

  Similar to the unidirectional case (rnn) but takes input and builds
  independent forward and backward RNNs with the final forward and backward
  outputs depth-concatenated, such that the output will have the format
  [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
  forward and backward cell must match. The initial state for both directions
  is zero by default (but can be set optionally) and no intermediate states
  are ever returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not
  given.

  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: A length T list of inputs, each a tensor of shape
            [batch_size, cell.input_size].
    initial_state_fw: (optional) An initial state for the forward RNN.
                      This must be a tensor of appropriate type and shape
                      [batch_size x cell.state_size].
    initial_state_bw: (optional) Same as for initial_state_fw.
    dtype: (optional) The data type for the initial state. Required if
           either of the initial states are not provided.
    sequence_length: (optional) An int64 vector (tensor) of size
                     [batch_size],
                     containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to "BiRNN"

  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input), which
        are depth-concatenated forward and backward outputs
      state is the concatenated final state of the forward and backward RNN

  Raises:
    TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  if not isinstance(cell_fw, nn.rnn_cell.RNNCell):
    raise TypeError('cell_fw must be an instance of RNNCell')
  if not isinstance(cell_bw, nn.rnn_cell.RNNCell):
    raise TypeError('cell_bw must be an instance of RNNCell')
  if not isinstance(inputs, list):
    raise TypeError('inputs must be a list')
  if not inputs:
    raise ValueError('inputs must not be empty')
  name = scope or 'BiRNN'
  # Forward direction
  with vs.variable_scope(name + '_FW'):
    output_fw, state_fw = nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
                                 sequence_length)
  # Backward direction: feed the time-reversed inputs through the backward
  # cell, then flip its outputs back so they line up with the forward pass
  # timestep-for-timestep.
  with vs.variable_scope(name + '_BW'):
    tmp, state_bw = nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length),
                           initial_state_bw, dtype, sequence_length)
  output_bw = _reverse_seq(tmp, sequence_length)
  # Concat each of the forward/backward outputs
  outputs = [array_ops_.concat(1, [fw, bw])
             for fw, bw in zip(output_fw, output_bw)]
  return outputs, array_ops_.concat(1, [state_fw, state_bw])
# End of Tensorflow 0.7
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
                  target_predictor_fn, sequence_length, initial_state):
  """Returns a function that creates a RNN TensorFlow subgraph.

  Args:
    rnn_size: The size for rnn cell, e.g. size of your word embeddings.
    cell_type: The type of rnn cell, including rnn, gru, and lstm.
    num_layers: The number of layers of the rnn model.
    input_op_fn: Function that will transform the input tensor, such as
                 creating word embeddings, byte list, etc. This takes
                 an argument X for input and returns transformed X.
    bidirectional: boolean, Whether this is a bidirectional rnn.
    target_predictor_fn: Function that will predict target from input
                         features. This can be logistic regression,
                         linear regression or any other model,
                         that takes X, y and returns predictions and loss
                         tensors.
    sequence_length: If sequence_length is provided, dynamic calculation is
                     performed.
                     This saves computational time when unrolling past max
                     sequence length.
                     Required for bidirectional RNNs.
    initial_state: An initial state for the RNN. This must be a tensor of
                   appropriate type and shape [batch_size x cell.state_size].

  Returns:
    A function that creates the subgraph.

  Raises:
    ValueError: (raised by the returned estimator at graph-build time) if
      cell_type is not one of 'rnn', 'gru' or 'lstm'.
  """
  def rnn_estimator(X, y):
    """RNN estimator with target predictor function on top."""
    X = input_op_fn(X)
    # Map the cell_type string onto the matching cell constructor.
    if cell_type == 'rnn':
      cell_fn = nn.rnn_cell.BasicRNNCell
    elif cell_type == 'gru':
      cell_fn = nn.rnn_cell.GRUCell
    elif cell_type == 'lstm':
      cell_fn = nn.rnn_cell.BasicLSTMCell
    else:
      raise ValueError('cell_type {} is not supported. '.format(cell_type))
    if bidirectional:
      # forward direction cell
      rnn_fw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
      # backward direction cell
      rnn_bw_cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
      # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
      # NOTE(review): both directions are seeded with the same initial_state
      # tensor — confirm that is intended rather than separate states.
      _, encoding = bidirectional_rnn(rnn_fw_cell,
                                      rnn_bw_cell,
                                      X,
                                      dtype=dtypes.float32,
                                      sequence_length=sequence_length,
                                      initial_state_fw=initial_state,
                                      initial_state_bw=initial_state)
    else:
      cell = nn.rnn_cell.MultiRNNCell([cell_fn(rnn_size)] * num_layers)
      _, encoding = nn.rnn(cell,
                           X,
                           dtype=dtypes.float32,
                           sequence_length=sequence_length,
                           initial_state=initial_state)
    # Only the final state ("encoding") is fed to the target predictor.
    return target_predictor_fn(encoding, y)
  return rnn_estimator
| |
#!/usr/bin/env python
import os
import re
import sys
import argparse
from lib.util import execute, get_electron_version, parse_version, scoped_cwd, \
is_nightly, is_beta, is_stable, get_next_nightly, get_next_beta, \
get_next_stable_from_pre, get_next_stable_from_stable, clean_parse_version
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
parser = argparse.ArgumentParser(
description='Bump version numbers. Must specify at least one of the three'
+' options:\n'
+' --bump=patch to increment patch version, or\n'
+' --stable to promote current beta to stable, or\n'
+' --version={version} to set version number directly\n'
+'Note that you can use both --bump and --stable '
+'simultaneously.',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'--version',
default=None,
dest='new_version',
help='new version number'
)
parser.add_argument(
'--bump',
action='store',
default=None,
dest='bump',
help='increment [stable | beta | nightly]'
)
parser.add_argument(
'--dry-run',
action='store_true',
default= False,
dest='dry_run',
help='just to check that version number is correct'
)
args = parser.parse_args()
curr_version = get_electron_version()
if args.bump not in ['stable', 'beta', 'nightly']:
raise Exception('bump must be set to either stable, beta or nightly')
if is_nightly(curr_version):
if args.bump == 'nightly':
version = get_next_nightly(curr_version)
elif args.bump == 'beta':
version = get_next_beta(curr_version)
elif args.bump == 'stable':
version = get_next_stable_from_pre(curr_version)
else:
not_reached()
elif is_beta(curr_version):
if args.bump == 'nightly':
version = get_next_nightly(curr_version)
elif args.bump == 'beta':
version = get_next_beta(curr_version)
elif args.bump == 'stable':
version = get_next_stable_from_pre(curr_version)
else:
not_reached()
elif is_stable(curr_version):
if args.bump == 'nightly':
version = get_next_nightly(curr_version)
elif args.bump == 'beta':
raise Exception("You can\'t bump to a beta from stable")
elif args.bump == 'stable':
version = get_next_stable_from_stable(curr_version)
else:
not_reached()
else:
raise Exception("Invalid current version: " + curr_version)
if args.new_version == None and args.bump == None and args.stable == False:
parser.print_help()
return 1
versions = clean_parse_version(version)
suffix = ''
if '-' in version:
suffix = '-' + version.split('-')[1]
versions[3] = parse_version(version)[3]
version = version.split('-')[0]
if args.dry_run:
print 'new version number would be: {0}\n'.format(version + suffix)
return 0
with scoped_cwd(SOURCE_ROOT):
update_electron_gyp(version, suffix)
update_win_rc(version, versions)
update_version_h(versions, suffix)
update_info_plist(version)
update_package_json(version, suffix)
tag_version(version, suffix)
print 'Bumped to version: {0}'.format(version + suffix)
def not_reached():
  """Fail loudly if supposedly dead code ever executes."""
  raise Exception('Unreachable code was reached')
def increase_version(versions, index):
  """Increment versions[index] in place and reset all later parts to '0'.

  Operates on a 4-component [major, minor, patch, pre] list of strings and
  returns the same (mutated) list for convenience.
  """
  versions[index] = str(int(versions[index]) + 1)
  for later in range(index + 1, 4):
    versions[later] = '0'
  return versions
def update_electron_gyp(version, suffix):
  """Rewrite the version% line in electron.gyp with the new version."""
  pattern = re.compile(" *'version%' *: *'[0-9.]+(-beta[0-9.]*)?(-dev)?"
                       + "(-nightly[0-9.]*)?'")
  with open('electron.gyp', 'r') as f:
    lines = f.readlines()
  for i, line in enumerate(lines):
    if pattern.match(line):
      lines[i] = " 'version%': '{0}',\n".format(version + suffix)
  with open('electron.gyp', 'w') as f:
    f.write(''.join(lines))
def update_win_rc(version, versions):
  """Rewrite the FILEVERSION/PRODUCTVERSION fields in the Windows .rc file."""
  pattern_fv = re.compile(' FILEVERSION [0-9,]+')
  pattern_pv = re.compile(' PRODUCTVERSION [0-9,]+')
  pattern_fvs = re.compile(' *VALUE "FileVersion", "[0-9.]+"')
  pattern_pvs = re.compile(' *VALUE "ProductVersion", "[0-9.]+"')
  win_rc = os.path.join('atom', 'browser', 'resources', 'win', 'atom.rc')
  with open(win_rc, 'r') as f:
    lines = f.readlines()
  # Comma-separated numeric form used by the FILEVERSION/PRODUCTVERSION rows.
  comma_versions = ','.join(str(v) for v in versions)
  for i, line in enumerate(lines):
    if pattern_fv.match(line):
      lines[i] = ' FILEVERSION {0}\r\n'.format(comma_versions)
    elif pattern_pv.match(line):
      lines[i] = ' PRODUCTVERSION {0}\r\n'.format(comma_versions)
    elif pattern_fvs.match(line):
      lines[i] = ' VALUE "FileVersion", "{0}"\r\n'.format(version)
    elif pattern_pvs.match(line):
      lines[i] = ' VALUE "ProductVersion", "{0}"\r\n'.format(version)
  with open(win_rc, 'w') as f:
    f.write(''.join(lines))
def update_version_h(versions, suffix):
  """Rewrite the ATOM_*_VERSION defines in atom_version.h."""
  version_h = os.path.join('atom', 'common', 'atom_version.h')
  with open(version_h, 'r') as f:
    lines = f.readlines()
  for i, line in enumerate(lines):
    if 'ATOM_MAJOR_VERSION' in line:
      # The four version defines sit on consecutive lines.
      lines[i] = '#define ATOM_MAJOR_VERSION {0}\n'.format(versions[0])
      lines[i + 1] = '#define ATOM_MINOR_VERSION {0}\n'.format(versions[1])
      lines[i + 2] = '#define ATOM_PATCH_VERSION {0}\n'.format(versions[2])
      if suffix:
        lines[i + 3] = '#define ATOM_PRE_RELEASE_VERSION {0}\n'.format(suffix)
      else:
        # No prerelease: leave the define commented out.
        lines[i + 3] = '// #define ATOM_PRE_RELEASE_VERSION\n'
  with open(version_h, 'w') as f:
    f.write(''.join(lines))
def update_info_plist(version):
  """Rewrite CFBundleVersion / CFBundleShortVersionString in Info.plist."""
  info_plist = os.path.join('atom', 'browser', 'resources', 'mac', 'Info.plist')
  with open(info_plist, 'r') as f:
    lines = f.readlines()
  for i, line in enumerate(lines):
    # In a plist the <string> value follows on the line after the key.
    if 'CFBundleVersion' in line or 'CFBundleShortVersionString' in line:
      lines[i + 1] = ' <string>{0}</string>\n'.format(version)
  with open(info_plist, 'w') as f:
    f.write(''.join(lines))
def update_package_json(version, suffix):
  """Rewrite the first "version" field found in package.json."""
  package_json = 'package.json'
  with open(package_json, 'r') as f:
    lines = f.readlines()
  for i, line in enumerate(lines):
    if 'version' in line:
      lines[i] = ' "version": "{0}",\n'.format(version + suffix)
      # Only the first matching line is replaced.
      break
  with open(package_json, 'w') as f:
    f.write(''.join(lines))
def tag_version(version, suffix):
  """Commit all outstanding changes ('-a') with a bump message, skipping
  commit hooks ('-n')."""
  message = 'Bump v{0}'.format(version + suffix)
  execute(['git', 'commit', '-a', '-m', message, '-n'])
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import apiclient
import oauth2client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient import discovery
import httplib2
import oauth2client
import os
import ItemCache
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser], add_help=False)
except:
flags = None
OAUTH2_SCOPE = 'https://www.googleapis.com/auth/drive'
DIR_MIME = 'application/vnd.google-apps.folder'
class _GPIter(object):
def __init__(self, keys):
self.keys = keys
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.count < len(self.keys):
result = self.keys[self.count]
self.count += 1
return result
raise StopIteration
class _GPhotoCache(object):
    """Two-way on-disk cache mapping Drive file ids <-> (title, parent id).

    ``cache`` maps a file id to ``(title, parent_id)``; ``rcache`` maps the
    composite key ``"title::parent_id"`` back to the file id so lookups work
    in both directions without extra API calls.
    """

    def __init__(self):
        self.cache = ItemCache.ItemDiskCache('GPhoto.cache', '/tmp', None)
        self.rcache = ItemCache.ItemDiskCache('GPhoto.rcache', '/tmp', None)
        # Seed the Drive root so path resolution can start from "/".
        self.cache['root'] = ("/", "")
        self.rcache[self._rkey("/", "")] = 'root'

    def __len__(self):
        return len(self.cache)

    def __getitem__(self, file_id):
        return self.cache[file_id]

    def __setitem__(self, file_id, value):
        # Keep the forward and reverse indices in sync on every insert.
        (title, parent_id) = value
        self.cache[file_id] = (title, parent_id)
        self.rcache[self._rkey(title, parent_id)] = file_id

    def __delitem__(self, file_id):
        (title, parent_id) = self.cache[file_id]
        del self.cache[file_id]
        del self.rcache[self._rkey(title, parent_id)]

    def __iter__(self):
        return _GPIter(self.cache.keys())

    def __contains__(self, file_id):
        return file_id in self.cache

    def _rkey(self, title, parent_id):
        # Composite reverse-lookup key; a title is assumed unique within one
        # parent folder.
        return str(title + '::' + parent_id)

    def find(self, title, parent_id):
        """Return the file id cached for (title, parent_id), or None."""
        try:
            return self.rcache[self._rkey(title, parent_id)]
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            pass
        # Reverse entry missing: fall back to a linear scan of the forward
        # cache, repairing the reverse index on a hit.
        for cached_id in self.cache:
            (cached_title, cached_parent) = self.cache[cached_id]
            if (parent_id == cached_parent) and (title == cached_title):
                self.rcache[self._rkey(title, parent_id)] = cached_id
                return cached_id
        return None
class GPhoto(object):
    """Small Google Drive v2 client with an on-disk id/title cache.

    Wraps OAuth2 authentication plus a handful of Drive operations (create
    folder, upload file, resolve slash-separated paths), using _GPhotoCache
    to limit API round trips.
    """
    def __init__(self, oauth2json = None, oauth2storage = None):
        # oauth2json: path to the OAuth2 client-secrets JSON file.
        # oauth2storage: path to the credential storage file.
        self.oauth2json = oauth2json
        self.oauth2storage = oauth2storage
        self.store = None
        self.creds = None
        self.service = None
        self.cache = _GPhotoCache()
    def auth(self, oauth2json = None, oauth2storage = None):
        """Load or obtain OAuth2 credentials and build the Drive v2 service.

        Raises:
            ValueError: if oauth2json/oauth2storage were never provided.
        """
        if oauth2json is not None:
            self.oauth2json = oauth2json
        if oauth2storage is not None:
            self.oauth2storage = oauth2storage
        if self.oauth2json is None:
            raise ValueError('Attribute oauth2json needs to be defined')
        if self.oauth2storage is None:
            raise ValueError('Attribute oauth2storage needs to be defined')
        self.store = Storage(self.oauth2storage)
        self.creds = self.store.get()
        if self.creds is None or self.creds.invalid:
            # No (valid) cached credentials: run the interactive OAuth2 flow.
            flow = oauth2client.client.flow_from_clientsecrets(self.oauth2json, OAUTH2_SCOPE)
            if flags is not None:
                self.creds = oauth2client.tools.run_flow(flow, self.store, flags.parse_args())
            else:
                # Fallback for old oauth2client releases without run_flow.
                self.creds = oauth2client.tools.run(flow, self.store)
            self.store.put(self.creds)
        self.service = discovery.build('drive', 'v2', http = self.creds.authorize(httplib2.Http()))
    def create_dir(self, folder_title, parent_folder = 'root'):
        """Create a Drive folder under parent_folder, or return the cached one.

        Returns a dict shaped like the Drive files.insert response.
        """
        # Check whether the directory does not already exists in cache
        dir_id = self.cache.find(folder_title, parent_folder)
        if dir_id is not None:
            dentry = self.cache[dir_id]
            # Synthesize a metadata dict matching the API response shape.
            return {
                'id': dir_id,
                'title': dentry[0],
                'parents': [{"id": dentry[1]}],
                'mimeType': DIR_MIME,
            }
        # The directory has not been found in the cache, lets ask Google
        body = {
            "title": folder_title,
            "parents": [{"id": parent_folder}],
            "mimeType": DIR_MIME,
        }
        directory = self.service.files().insert(body = body).execute()
        self.cache[directory['id']] = (folder_title, parent_folder)
        return directory
    def upload_file(self, filename, parent_folder = 'root'):
        """Upload a local file into parent_folder.

        Returns the Drive file resource, or None when the upload fails with
        an HttpError (callers must check for None).
        """
        media_body = apiclient.http.MediaFileUpload(filename, resumable = True)
        basename = os.path.basename(filename)
        body = {
            "title": basename,
            "parents": [{"id": parent_folder}],
        }
        try:
            f = self.service.files().insert(body = body, media_body = media_body).execute()
            self.cache[f['id']] = (basename, parent_folder)
            return f
        except apiclient.errors.HttpError as error:
            # Upload errors are deliberately swallowed; caller sees None.
            return None
    def get_id(self, path):
        """Resolve a slash-separated Drive path to a file id, or None."""
        path_list = path.split('/')
        pid = 'root'
        # Walk the path one component at a time, each resolved under the
        # previous component's id.
        for p in path_list:
            fid = self.get_file_id(p, pid)
            if fid is None:
                return None
            pid = fid
        return fid
    def get_file_id(self, title, parent_id = 'root'):
        """Return the id of child `title` under parent_id, or None."""
        # Try cache
        cache = self.cache.find(title, parent_id)
        if cache is not None:
            return cache
        # Not found in cache - ask Google
        page_token = None
        while True:
            try:
                param = {}
                if page_token:
                    param['pageToken'] = page_token
                children = self.service.children().list(folderId = parent_id, **param).execute()
                for child in children.get('items', []):
                    # Cache every sibling we fetch to save later lookups.
                    # NOTE(review): already-cached children are skipped here
                    # and thus never compared against `title`; this relies on
                    # the earlier cache.find having covered them — verify.
                    if child['id'] not in self.cache:
                        ch = self.service.files().get(fileId = child['id']).execute()
                        self.cache[ch['id']] = (ch['title'], parent_id)
                        if ch['title'] == title:
                            return child['id']
                page_token = children.get('nextPageToken')
                if not page_token:
                    break
            except apiclient.errors.HttpError as error:
                raise
        return None
    def file_exists(self, file_name, root_dir = 'root'):
        """Recursively check whether a slash-separated path exists in Drive."""
        fn = file_name.split('/')
        file_id = self.get_file_id(fn[0], root_dir)
        if file_id is None:
            return False
        if len(fn) == 1:
            # Check existence of the file
            return file_id is not None
        # Go one level deeper
        fn.pop(0)
        return self.file_exists('/'.join(fn), file_id)
# Manual smoke test: authenticate with local credentials and probe a few
# Drive paths (earlier experiments kept below, commented out).
if __name__ == "__main__":
    oauth2json = os.path.expanduser('~/.gp.json')
    oauth2storage = os.path.expanduser('~/.gp')
    gp = GPhoto(oauth2json = oauth2json, oauth2storage = oauth2storage)
    gp.auth()
    #d = gp.create_dir("BufGuf")
    #gp.upload_file(os.path.expanduser('~/fscheck.sh'), d['id'])
    #gp.upload_file(os.path.expanduser('~/readiness-f23-alpha.txt'), d['id'])
    #print 'U-Temp', gp.file_exists('U-Temp')
    #print 'Work/Links', gp.file_exists('Work/Links')
    #print 'Personal', gp.file_exists('Personal')
    #print 'Personal/Blbost', gp.file_exists('Personal/Blbost')
    #print 'Pictures/Foto/2015/07/29', gp.file_exists('Pictures/Foto/2015/07/29')
    print('Pictures/Foto/2015/07/29/IMG_2552.JPG', gp.file_exists('Pictures/Foto/2015/07/29/IMG_2552.JPG'))
    print('Pictures/Foto/2015/07/29', gp.file_exists('Pictures/Foto/2015/07/29'))
    print('Pictures/Foto/2015/07/29/', gp.file_exists('Pictures/Foto/2015/07/29/'))
    print('Pictures/Foto/2015/07/29/IMG_2552.jpg', gp.file_exists('Pictures/Foto/2015/07/29/IMG_2552.jpg'))
    print('Pictures/Foto/2015/07/29/IMG2552.JPG', gp.file_exists('Pictures/Foto/2015/07/29/IMG2552.JPG'))
| |
#!/opt/conda/default/bin/python3
import json
import os
import subprocess as sp
import sys
import errno
from subprocess import check_output
# This init action only supports Python 3.
assert sys.version_info > (3, 0), sys.version_info

if sys.version_info >= (3, 7):
    def safe_call(*args, **kwargs):
        """Run *args* as a subprocess, capturing output; raise on failure."""
        sp.run(args, capture_output=True, check=True, **kwargs)
else:
    def safe_call(*args, **kwargs):
        """Run *args*; on failure, print the combined output and re-raise."""
        try:
            sp.check_output(args, stderr=sp.STDOUT, **kwargs)
        except sp.CalledProcessError as err:
            print(err.output.decode())
            raise err
def get_metadata(key):
    """Fetch a Dataproc instance-metadata attribute and return it as text."""
    raw = check_output(
        ['/usr/share/google/get_metadata_value', 'attributes/{}'.format(key)])
    return raw.decode()
def mkdir_if_not_exists(path):
    """Create *path* (including parents), ignoring 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
# get role of machine (master or worker)
role = get_metadata('dataproc-role')
# Everything below only runs on the cluster master; workers need no setup.
if role == 'Master':
    # additional packages to install
    pip_pkgs = [
        'setuptools',
        'mkl<2020',
        'lxml<5',
        'google-cloud-storage==1.25.*',
        'https://github.com/hail-is/jgscm/archive/v0.1.12+hail.zip',
        'ipykernel==4.10.*',
        'ipywidgets==7.4.*',
        'jupyter-console==6.0.*',
        'nbconvert==5.5.*',
        'notebook==5.7.*',
        'qtconsole==4.5.*'
    ]
    # add user-requested packages
    try:
        user_pkgs = get_metadata('PKGS')
    except Exception:
        # The PKGS metadata attribute is optional; ignore a failed lookup.
        pass
    else:
        # Packages arrive as a single '|'-separated metadata string.
        pip_pkgs.extend(user_pkgs.split('|'))
    print('pip packages are {}'.format(pip_pkgs))
    command = ['pip', 'install']
    command.extend(pip_pkgs)
    safe_call(*command)
    print('getting metadata')
    wheel_path = get_metadata('WHEEL')
    wheel_name = wheel_path.split('/')[-1]
    print('copying wheel')
    safe_call('gsutil', 'cp', wheel_path, f'/home/hail/{wheel_name}')
    # --no-dependencies: the pinned dependencies were installed above.
    safe_call('pip', 'install', '--no-dependencies', f'/home/hail/{wheel_name}')
    print('setting environment')
    spark_lib_base = '/usr/lib/spark/python/lib/'
    files_to_add = [os.path.join(spark_lib_base, x) for x in os.listdir(spark_lib_base) if x.endswith('.zip')]
    # Environment every Spark/Jupyter process should see.
    env_to_set = {
        'PYTHONHASHSEED': '0',
        'PYTHONPATH': ':'.join(files_to_add),
        'SPARK_HOME': '/usr/lib/spark/',
        'PYSPARK_PYTHON': '/opt/conda/default/bin/python',
        'PYSPARK_DRIVER_PYTHON': '/opt/conda/default/bin/python',
    }
    # VEP ENV
    try:
        vep_config_uri = get_metadata('VEP_CONFIG_URI')
    except Exception:
        # VEP is optional; only export the config URI when provided.
        pass
    else:
        env_to_set["VEP_CONFIG_URI"] = vep_config_uri
    print('setting environment')
    for e, value in env_to_set.items():
        # Append each export to both the system environment and spark-env.sh.
        safe_call('/bin/sh', '-c',
                  'set -ex; echo "export {}={}" | tee -a /etc/environment /usr/lib/spark/conf/spark-env.sh'.format(e, value))
    # Locate the installed hail package to find its bundled Spark jar.
    hail_jar = sp.check_output([
        '/bin/sh', '-c',
        'set -ex; python3 -m pip show hail | grep Location | sed "s/Location: //"'
    ]).decode('ascii').strip() + '/hail/backend/hail-all-spark.jar'
    conf_to_set = [
        'spark.executorEnv.PYTHONHASHSEED=0',
        'spark.app.name=Hail',
        # the below are necessary to make 'submit' work
        'spark.jars={}'.format(hail_jar),
        'spark.driver.extraClassPath={}'.format(hail_jar),
        'spark.executor.extraClassPath=./hail-all-spark.jar',
    ]
    print('setting spark-defaults.conf')
    with open('/etc/spark/conf/spark-defaults.conf', 'a') as out:
        out.write('\n')
        for c in conf_to_set:
            out.write(c)
            out.write('\n')
    # Update python3 kernel spec with the environment variables and the hail
    # spark monitor
    try:
        with open('/opt/conda/default/share/jupyter/kernels/python3/kernel.json', 'r') as f:
            python3_kernel = json.load(f)
    except:  # noqa: E722
        # No (readable) existing kernel spec: fall back to a minimal default.
        python3_kernel = {
            'argv': [
                '/opt/conda/default/bin/python',
                '-m',
                'ipykernel',
                '-f',
                '{connection_file}'
            ],
            'display_name': 'Python 3',
            'language': 'python',
        }
    # Merge (existing env) <- (our env) <- (spark monitor settings).
    python3_kernel['env'] = {
        **python3_kernel.get('env', dict()),
        **env_to_set,
        'HAIL_SPARK_MONITOR': '1',
        'SPARK_MONITOR_UI': 'http://localhost:8088/proxy/%APP_ID%',
    }
    # write python3 kernel spec file to default Jupyter kernel directory
    mkdir_if_not_exists('/opt/conda/default/share/jupyter/kernels/python3/')
    with open('/opt/conda/default/share/jupyter/kernels/python3/kernel.json', 'w') as f:
        json.dump(python3_kernel, f)
    # some old notebooks use the "Hail" kernel, so create that too
    hail_kernel = {
        **python3_kernel,
        'display_name': 'Hail'
    }
    mkdir_if_not_exists('/opt/conda/default/share/jupyter/kernels/hail/')
    with open('/opt/conda/default/share/jupyter/kernels/hail/kernel.json', 'w') as f:
        json.dump(hail_kernel, f)
    # create Jupyter configuration file
    mkdir_if_not_exists('/opt/conda/default/etc/jupyter/')
    with open('/opt/conda/default/etc/jupyter/jupyter_notebook_config.py', 'w') as f:
        opts = [
            'c.Application.log_level = "DEBUG"',
            'c.NotebookApp.ip = "127.0.0.1"',
            'c.NotebookApp.open_browser = False',
            'c.NotebookApp.port = 8123',
            'c.NotebookApp.token = ""',
            'c.NotebookApp.contents_manager_class = "jgscm.GoogleStorageContentManager"'
        ]
        f.write('\n'.join(opts) + '\n')
    print('copying spark monitor')
    spark_monitor_gs = 'gs://hail-common/sparkmonitor-3b2bc8c22921f5c920fc7370f3a160d820db1f51/sparkmonitor-0.0.11-py3-none-any.whl'
    spark_monitor_wheel = '/home/hail/' + spark_monitor_gs.split('/')[-1]
    safe_call('gsutil', 'cp', spark_monitor_gs, spark_monitor_wheel)
    safe_call('pip', 'install', spark_monitor_wheel)
    # setup jupyter-spark extension
    safe_call('/opt/conda/default/bin/jupyter', 'serverextension', 'enable', '--user', '--py', 'sparkmonitor')
    safe_call('/opt/conda/default/bin/jupyter', 'nbextension', 'install', '--user', '--py', 'sparkmonitor')
    safe_call('/opt/conda/default/bin/jupyter', 'nbextension', 'enable', '--user', '--py', 'sparkmonitor')
    safe_call('/opt/conda/default/bin/jupyter', 'nbextension', 'enable', '--user', '--py', 'widgetsnbextension')
    safe_call("""ipython profile create && echo "c.InteractiveShellApp.extensions.append('sparkmonitor.kernelextension')" >> $(ipython profile locate default)/ipython_kernel_config.py""", shell=True)
    # create systemd service file for Jupyter notebook server process
    with open('/lib/systemd/system/jupyter.service', 'w') as f:
        opts = [
            '[Unit]',
            'Description=Jupyter Notebook',
            'After=hadoop-yarn-resourcemanager.service',
            '[Service]',
            'Type=simple',
            'User=root',
            'Group=root',
            'WorkingDirectory=/home/hail/',
            'ExecStart=/opt/conda/default/bin/python /opt/conda/default/bin/jupyter notebook --allow-root',
            'Restart=always',
            'RestartSec=1',
            '[Install]',
            'WantedBy=multi-user.target'
        ]
        f.write('\n'.join(opts) + '\n')
    # add Jupyter service to autorun and start it
    safe_call('systemctl', 'daemon-reload')
    safe_call('systemctl', 'enable', 'jupyter')
    safe_call('service', 'jupyter', 'start')
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
# Feature-detect whether this ProtocolBuffer runtime supports proto2
# extensions; pick the matching base class for generated messages.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.datastore_v3_pb import *
import google.appengine.datastore.datastore_v3_pb
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
class Request(ProtocolBuffer.ProtocolMessage):
  """Generated proto2 message: apphosting.ext.remote_api.Request.

  Describes one remote API invocation: the target service and method
  names, the serialized request payload, and an optional request id.
  The encode/decode methods are machine-generated and mirror the wire
  tags (service_name=2, method=3, request=4, request_id=5); edit with care.
  """

  # Presence flag + default value per field (proto2 "has" semantics: an
  # explicitly-set empty string is distinct from an unset field).
  has_service_name_ = 0
  service_name_ = ""
  has_method_ = 0
  method_ = ""
  has_request_ = 0
  request_ = ""
  has_request_id_ = 0
  request_id_ = ""

  def __init__(self, contents=None):
    """Optionally deserialize from an already-encoded byte string."""
    if contents is not None: self.MergeFromString(contents)

  # --- Generated getter / setter / clear / has accessors per field ---
  def service_name(self): return self.service_name_

  def set_service_name(self, x):
    self.has_service_name_ = 1
    self.service_name_ = x

  def clear_service_name(self):
    if self.has_service_name_:
      self.has_service_name_ = 0
      self.service_name_ = ""

  def has_service_name(self): return self.has_service_name_

  def method(self): return self.method_

  def set_method(self, x):
    self.has_method_ = 1
    self.method_ = x

  def clear_method(self):
    if self.has_method_:
      self.has_method_ = 0
      self.method_ = ""

  def has_method(self): return self.has_method_

  def request(self): return self.request_

  def set_request(self, x):
    self.has_request_ = 1
    self.request_ = x

  def clear_request(self):
    if self.has_request_:
      self.has_request_ = 0
      self.request_ = ""

  def has_request(self): return self.has_request_

  def request_id(self): return self.request_id_

  def set_request_id(self, x):
    self.has_request_id_ = 1
    self.request_id_ = x

  def clear_request_id(self):
    if self.has_request_id_:
      self.has_request_id_ = 0
      self.request_id_ = ""

  def has_request_id(self): return self.has_request_id_

  def MergeFrom(self, x):
    """Copy every set field of x into self (x must be a distinct object)."""
    assert x is not self
    if (x.has_service_name()): self.set_service_name(x.service_name())
    if (x.has_method()): self.set_method(x.method())
    if (x.has_request()): self.set_request(x.request())
    if (x.has_request_id()): self.set_request_id(x.request_id())

  def Equals(self, x):
    """Field-by-field equality; presence flags must match too."""
    if x is self: return 1
    if self.has_service_name_ != x.has_service_name_: return 0
    if self.has_service_name_ and self.service_name_ != x.service_name_: return 0
    if self.has_method_ != x.has_method_: return 0
    if self.has_method_ and self.method_ != x.method_: return 0
    if self.has_request_ != x.has_request_: return 0
    if self.has_request_ and self.request_ != x.request_: return 0
    if self.has_request_id_ != x.has_request_id_: return 0
    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all required fields are set; optionally collect reasons."""
    initialized = 1
    if (not self.has_service_name_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: service_name not set.')
    if (not self.has_method_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: method not set.')
    if (not self.has_request_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: request not set.')
    return initialized

  def ByteSize(self):
    """Serialized size assuming all required fields are set."""
    n = 0
    n += self.lengthString(len(self.service_name_))
    n += self.lengthString(len(self.method_))
    n += self.lengthString(len(self.request_))
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    # + 3 accounts for the one-byte tags of the three required fields.
    return n + 3

  def ByteSizePartial(self):
    """Serialized size counting only the fields actually set."""
    n = 0
    if (self.has_service_name_):
      n += 1
      n += self.lengthString(len(self.service_name_))
    if (self.has_method_):
      n += 1
      n += self.lengthString(len(self.method_))
    if (self.has_request_):
      n += 1
      n += self.lengthString(len(self.request_))
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    return n

  def Clear(self):
    self.clear_service_name()
    self.clear_method()
    self.clear_request()
    self.clear_request_id()

  def OutputUnchecked(self, out):
    """Serialize without checking required fields.

    Tag bytes 18/26/34/42 encode field numbers 2-5 with wire type 2
    (length-delimited).
    """
    out.putVarInt32(18)
    out.putPrefixedString(self.service_name_)
    out.putVarInt32(26)
    out.putPrefixedString(self.method_)
    out.putVarInt32(34)
    out.putPrefixedString(self.request_)
    if (self.has_request_id_):
      out.putVarInt32(42)
      out.putPrefixedString(self.request_id_)

  def OutputPartial(self, out):
    """Serialize only the fields that are set (may omit required ones)."""
    if (self.has_service_name_):
      out.putVarInt32(18)
      out.putPrefixedString(self.service_name_)
    if (self.has_method_):
      out.putVarInt32(26)
      out.putPrefixedString(self.method_)
    if (self.has_request_):
      out.putVarInt32(34)
      out.putPrefixedString(self.request_)
    if (self.has_request_id_):
      out.putVarInt32(42)
      out.putPrefixedString(self.request_id_)

  def TryMerge(self, d):
    """Decode fields from decoder d, skipping unknown tags."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 18:
        self.set_service_name(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_method(d.getPrefixedString())
        continue
      if tt == 34:
        self.set_request(d.getPrefixedString())
        continue
      if tt == 42:
        self.set_request_id(d.getPrefixedString())
        continue
      # Tag 0 is never valid on the wire.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering of all set fields."""
    res=""
    if self.has_service_name_: res+=prefix+("service_name: %s\n" % self.DebugFormatString(self.service_name_))
    if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
    if self.has_request_: res+=prefix+("request: %s\n" % self.DebugFormatString(self.request_))
    if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
    return res

  # Invoked at class-definition time (plain function, no self) to build
  # the dense tag->value tables below.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants.
  kservice_name = 2
  kmethod = 3
  krequest = 4
  krequest_id = 5

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    2: "service_name",
    3: "method",
    4: "request",
    5: "request_id",
  }, 5)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Request'
class ApplicationError(ProtocolBuffer.ProtocolMessage):
  """Generated proto2 message: apphosting.ext.remote_api.ApplicationError.

  Carries an application-level error from a remote API call: a required
  numeric code (tag 1) and a required human-readable detail string (tag 2).
  Machine-generated; the encode/decode logic mirrors the wire tags.
  """

  # Presence flag + default value per field.
  has_code_ = 0
  code_ = 0
  has_detail_ = 0
  detail_ = ""

  def __init__(self, contents=None):
    """Optionally deserialize from an already-encoded byte string."""
    if contents is not None: self.MergeFromString(contents)

  # --- Generated accessors ---
  def code(self): return self.code_

  def set_code(self, x):
    self.has_code_ = 1
    self.code_ = x

  def clear_code(self):
    if self.has_code_:
      self.has_code_ = 0
      self.code_ = 0

  def has_code(self): return self.has_code_

  def detail(self): return self.detail_

  def set_detail(self, x):
    self.has_detail_ = 1
    self.detail_ = x

  def clear_detail(self):
    if self.has_detail_:
      self.has_detail_ = 0
      self.detail_ = ""

  def has_detail(self): return self.has_detail_

  def MergeFrom(self, x):
    """Copy every set field of x into self (x must be a distinct object)."""
    assert x is not self
    if (x.has_code()): self.set_code(x.code())
    if (x.has_detail()): self.set_detail(x.detail())

  def Equals(self, x):
    if x is self: return 1
    if self.has_code_ != x.has_code_: return 0
    if self.has_code_ and self.code_ != x.code_: return 0
    if self.has_detail_ != x.has_detail_: return 0
    if self.has_detail_ and self.detail_ != x.detail_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Both fields are required."""
    initialized = 1
    if (not self.has_code_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: code not set.')
    if (not self.has_detail_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: detail not set.')
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthVarInt64(self.code_)
    n += self.lengthString(len(self.detail_))
    # + 2 accounts for the two one-byte field tags.
    return n + 2

  def ByteSizePartial(self):
    n = 0
    if (self.has_code_):
      n += 1
      n += self.lengthVarInt64(self.code_)
    if (self.has_detail_):
      n += 1
      n += self.lengthString(len(self.detail_))
    return n

  def Clear(self):
    self.clear_code()
    self.clear_detail()

  def OutputUnchecked(self, out):
    # Tag 8 = field 1 varint; tag 18 = field 2 length-delimited.
    out.putVarInt32(8)
    out.putVarInt32(self.code_)
    out.putVarInt32(18)
    out.putPrefixedString(self.detail_)

  def OutputPartial(self, out):
    if (self.has_code_):
      out.putVarInt32(8)
      out.putVarInt32(self.code_)
    if (self.has_detail_):
      out.putVarInt32(18)
      out.putPrefixedString(self.detail_)

  def TryMerge(self, d):
    """Decode fields from decoder d, skipping unknown tags."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_code(d.getVarInt32())
        continue
      if tt == 18:
        self.set_detail(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_code_: res+=prefix+("code: %s\n" % self.DebugFormatInt32(self.code_))
    if self.has_detail_: res+=prefix+("detail: %s\n" % self.DebugFormatString(self.detail_))
    return res

  # Invoked at class-definition time (plain function, no self).
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kcode = 1
  kdetail = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "code",
    2: "detail",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.ApplicationError'
class Response(ProtocolBuffer.ProtocolMessage):
  """Generated proto2 message: apphosting.ext.remote_api.Response.

  Result of a remote API call.  Exactly one of response (tag 1),
  exception (tag 2), application_error (tag 3, submessage) or
  java_exception (tag 4) is expected to be populated by the server.
  Machine-generated; edit with care.
  """

  # Presence flag + default value per field.  application_error_ is a
  # lazily constructed ApplicationError submessage (None until touched).
  has_response_ = 0
  response_ = ""
  has_exception_ = 0
  exception_ = ""
  has_application_error_ = 0
  application_error_ = None
  has_java_exception_ = 0
  java_exception_ = ""

  def __init__(self, contents=None):
    # Lock guarding the lazy creation of the submessage below.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- Generated accessors ---
  def response(self): return self.response_

  def set_response(self, x):
    self.has_response_ = 1
    self.response_ = x

  def clear_response(self):
    if self.has_response_:
      self.has_response_ = 0
      self.response_ = ""

  def has_response(self): return self.has_response_

  def exception(self): return self.exception_

  def set_exception(self, x):
    self.has_exception_ = 1
    self.exception_ = x

  def clear_exception(self):
    if self.has_exception_:
      self.has_exception_ = 0
      self.exception_ = ""

  def has_exception(self): return self.has_exception_

  def application_error(self):
    """Return the submessage, creating it lazily (double-checked lock)."""
    if self.application_error_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.application_error_ is None: self.application_error_ = ApplicationError()
      finally:
        self.lazy_init_lock_.release()
    return self.application_error_

  def mutable_application_error(self): self.has_application_error_ = 1; return self.application_error()

  def clear_application_error(self):
    # Clear in place rather than dropping the object, to stay thread-safe.
    if self.has_application_error_:
      self.has_application_error_ = 0;
      if self.application_error_ is not None: self.application_error_.Clear()

  def has_application_error(self): return self.has_application_error_

  def java_exception(self): return self.java_exception_

  def set_java_exception(self, x):
    self.has_java_exception_ = 1
    self.java_exception_ = x

  def clear_java_exception(self):
    if self.has_java_exception_:
      self.has_java_exception_ = 0
      self.java_exception_ = ""

  def has_java_exception(self): return self.has_java_exception_

  def MergeFrom(self, x):
    """Copy every set field of x into self (x must be a distinct object)."""
    assert x is not self
    if (x.has_response()): self.set_response(x.response())
    if (x.has_exception()): self.set_exception(x.exception())
    if (x.has_application_error()): self.mutable_application_error().MergeFrom(x.application_error())
    if (x.has_java_exception()): self.set_java_exception(x.java_exception())

  def Equals(self, x):
    if x is self: return 1
    if self.has_response_ != x.has_response_: return 0
    if self.has_response_ and self.response_ != x.response_: return 0
    if self.has_exception_ != x.has_exception_: return 0
    if self.has_exception_ and self.exception_ != x.exception_: return 0
    if self.has_application_error_ != x.has_application_error_: return 0
    if self.has_application_error_ and self.application_error_ != x.application_error_: return 0
    if self.has_java_exception_ != x.has_java_exception_: return 0
    if self.has_java_exception_ and self.java_exception_ != x.java_exception_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """All fields are optional; only the submessage can be uninitialized."""
    initialized = 1
    if (self.has_application_error_ and not self.application_error_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
    if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
    if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSize())
    if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
    if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
    if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSizePartial())
    if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
    return n

  def Clear(self):
    self.clear_response()
    self.clear_exception()
    self.clear_application_error()
    self.clear_java_exception()

  def OutputUnchecked(self, out):
    # Tags 10/18/26/34 = fields 1-4, wire type 2 (length-delimited).
    if (self.has_response_):
      out.putVarInt32(10)
      out.putPrefixedString(self.response_)
    if (self.has_exception_):
      out.putVarInt32(18)
      out.putPrefixedString(self.exception_)
    if (self.has_application_error_):
      out.putVarInt32(26)
      out.putVarInt32(self.application_error_.ByteSize())
      self.application_error_.OutputUnchecked(out)
    if (self.has_java_exception_):
      out.putVarInt32(34)
      out.putPrefixedString(self.java_exception_)

  def OutputPartial(self, out):
    if (self.has_response_):
      out.putVarInt32(10)
      out.putPrefixedString(self.response_)
    if (self.has_exception_):
      out.putVarInt32(18)
      out.putPrefixedString(self.exception_)
    if (self.has_application_error_):
      out.putVarInt32(26)
      out.putVarInt32(self.application_error_.ByteSizePartial())
      self.application_error_.OutputPartial(out)
    if (self.has_java_exception_):
      out.putVarInt32(34)
      out.putPrefixedString(self.java_exception_)

  def TryMerge(self, d):
    """Decode fields from decoder d, skipping unknown tags."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_response(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_exception(d.getPrefixedString())
        continue
      if tt == 26:
        # Length-delimited submessage: decode from a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_application_error().TryMerge(tmp)
        continue
      if tt == 34:
        self.set_java_exception(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_response_: res+=prefix+("response: %s\n" % self.DebugFormatString(self.response_))
    if self.has_exception_: res+=prefix+("exception: %s\n" % self.DebugFormatString(self.exception_))
    if self.has_application_error_:
      res+=prefix+"application_error <\n"
      res+=self.application_error_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_java_exception_: res+=prefix+("java_exception: %s\n" % self.DebugFormatString(self.java_exception_))
    return res

  # Invoked at class-definition time (plain function, no self).
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kresponse = 1
  kexception = 2
  kapplication_error = 3
  kjava_exception = 4

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "response",
    2: "exception",
    3: "application_error",
    4: "java_exception",
  }, 4)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Response'
class TransactionRequest_Precondition(ProtocolBuffer.ProtocolMessage):
  """Generated nested group: TransactionRequest.Precondition.

  A precondition on one entity: its key (required submessage, tag 2) and
  an optional content hash (tag 3).  Encoded as a proto2 *group*, hence
  the STARTGROUP/ENDGROUP (11/12) framing handled by the parent message
  and the ENDGROUP check in TryMerge below.
  """

  # Presence flags; key_ is always constructed eagerly in __init__.
  has_key_ = 0
  has_hash_ = 0
  hash_ = ""

  def __init__(self, contents=None):
    self.key_ = Reference()
    if contents is not None: self.MergeFromString(contents)

  # --- Generated accessors ---
  def key(self): return self.key_

  def mutable_key(self): self.has_key_ = 1; return self.key_

  def clear_key(self):self.has_key_ = 0; self.key_.Clear()

  def has_key(self): return self.has_key_

  def hash(self): return self.hash_

  def set_hash(self, x):
    self.has_hash_ = 1
    self.hash_ = x

  def clear_hash(self):
    if self.has_hash_:
      self.has_hash_ = 0
      self.hash_ = ""

  def has_hash(self): return self.has_hash_

  def MergeFrom(self, x):
    """Copy every set field of x into self (x must be a distinct object)."""
    assert x is not self
    if (x.has_key()): self.mutable_key().MergeFrom(x.key())
    if (x.has_hash()): self.set_hash(x.hash())

  def Equals(self, x):
    if x is self: return 1
    if self.has_key_ != x.has_key_: return 0
    if self.has_key_ and self.key_ != x.key_: return 0
    if self.has_hash_ != x.has_hash_: return 0
    if self.has_hash_ and self.hash_ != x.hash_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """key is required and must itself be initialized."""
    initialized = 1
    if (not self.has_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: key not set.')
    elif not self.key_.IsInitialized(debug_strs): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(self.key_.ByteSize())
    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
    # + 1 accounts for the key field's one-byte tag.
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_key_):
      n += 1
      n += self.lengthString(self.key_.ByteSizePartial())
    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
    return n

  def Clear(self):
    self.clear_key()
    self.clear_hash()

  def OutputUnchecked(self, out):
    # Tag 18 = key (field 2, length-delimited); 26 = hash (field 3).
    out.putVarInt32(18)
    out.putVarInt32(self.key_.ByteSize())
    self.key_.OutputUnchecked(out)
    if (self.has_hash_):
      out.putVarInt32(26)
      out.putPrefixedString(self.hash_)

  def OutputPartial(self, out):
    if (self.has_key_):
      out.putVarInt32(18)
      out.putVarInt32(self.key_.ByteSizePartial())
      self.key_.OutputPartial(out)
    if (self.has_hash_):
      out.putVarInt32(26)
      out.putPrefixedString(self.hash_)

  def TryMerge(self, d):
    # Loop until the enclosing group's ENDGROUP tag (12) is seen.
    while 1:
      tt = d.getVarInt32()
      if tt == 12: break
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_key().TryMerge(tmp)
        continue
      if tt == 26:
        self.set_hash(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_key_:
      res+=prefix+"key <\n"
      res+=self.key_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_hash_: res+=prefix+("hash: %s\n" % self.DebugFormatString(self.hash_))
    return res
class TransactionRequest(ProtocolBuffer.ProtocolMessage):
  """Generated proto2 message: apphosting.ext.remote_api.TransactionRequest.

  A batched transactional mutation: a repeated group of entity
  preconditions (tag group 1), optional puts (field 4) and deletes
  (field 5) sub-requests, and an allow_multiple_eg flag (field 6).
  Machine-generated; edit with care.
  """

  # Presence flags + defaults.  puts_/deletes_ are lazily constructed
  # submessages (None until touched); precondition_ is built in __init__.
  has_puts_ = 0
  puts_ = None
  has_deletes_ = 0
  deletes_ = None
  has_allow_multiple_eg_ = 0
  allow_multiple_eg_ = 0

  def __init__(self, contents=None):
    self.precondition_ = []
    # Lock guarding lazy creation of puts_/deletes_.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- Repeated field: precondition ---
  def precondition_size(self): return len(self.precondition_)

  def precondition_list(self): return self.precondition_

  def precondition(self, i):
    return self.precondition_[i]

  def mutable_precondition(self, i):
    return self.precondition_[i]

  def add_precondition(self):
    """Append and return a fresh, empty Precondition element."""
    x = TransactionRequest_Precondition()
    self.precondition_.append(x)
    return x

  def clear_precondition(self):
    self.precondition_ = []

  def puts(self):
    """Return the puts submessage, creating it lazily (double-checked lock)."""
    if self.puts_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.puts_ is None: self.puts_ = PutRequest()
      finally:
        self.lazy_init_lock_.release()
    return self.puts_

  def mutable_puts(self): self.has_puts_ = 1; return self.puts()

  def clear_puts(self):
    # Clear in place rather than dropping the object, to stay thread-safe.
    if self.has_puts_:
      self.has_puts_ = 0;
      if self.puts_ is not None: self.puts_.Clear()

  def has_puts(self): return self.has_puts_

  def deletes(self):
    """Return the deletes submessage, creating it lazily."""
    if self.deletes_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.deletes_ is None: self.deletes_ = DeleteRequest()
      finally:
        self.lazy_init_lock_.release()
    return self.deletes_

  def mutable_deletes(self): self.has_deletes_ = 1; return self.deletes()

  def clear_deletes(self):
    if self.has_deletes_:
      self.has_deletes_ = 0;
      if self.deletes_ is not None: self.deletes_.Clear()

  def has_deletes(self): return self.has_deletes_

  def allow_multiple_eg(self): return self.allow_multiple_eg_

  def set_allow_multiple_eg(self, x):
    self.has_allow_multiple_eg_ = 1
    self.allow_multiple_eg_ = x

  def clear_allow_multiple_eg(self):
    if self.has_allow_multiple_eg_:
      self.has_allow_multiple_eg_ = 0
      self.allow_multiple_eg_ = 0

  def has_allow_multiple_eg(self): return self.has_allow_multiple_eg_

  def MergeFrom(self, x):
    """Copy every set field of x into self (x must be a distinct object)."""
    assert x is not self
    for i in xrange(x.precondition_size()): self.add_precondition().CopyFrom(x.precondition(i))
    if (x.has_puts()): self.mutable_puts().MergeFrom(x.puts())
    if (x.has_deletes()): self.mutable_deletes().MergeFrom(x.deletes())
    if (x.has_allow_multiple_eg()): self.set_allow_multiple_eg(x.allow_multiple_eg())

  def Equals(self, x):
    if x is self: return 1
    if len(self.precondition_) != len(x.precondition_): return 0
    for e1, e2 in zip(self.precondition_, x.precondition_):
      if e1 != e2: return 0
    if self.has_puts_ != x.has_puts_: return 0
    if self.has_puts_ and self.puts_ != x.puts_: return 0
    if self.has_deletes_ != x.has_deletes_: return 0
    if self.has_deletes_ and self.deletes_ != x.deletes_: return 0
    if self.has_allow_multiple_eg_ != x.has_allow_multiple_eg_: return 0
    if self.has_allow_multiple_eg_ and self.allow_multiple_eg_ != x.allow_multiple_eg_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Every precondition and any set submessage must be initialized."""
    initialized = 1
    for p in self.precondition_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_puts_ and not self.puts_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_deletes_ and not self.deletes_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    # Each repeated-group element costs 2 bytes of STARTGROUP/ENDGROUP tags.
    n += 2 * len(self.precondition_)
    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSize()
    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSize())
    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSize())
    if (self.has_allow_multiple_eg_): n += 2
    return n

  def ByteSizePartial(self):
    n = 0
    n += 2 * len(self.precondition_)
    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSizePartial()
    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSizePartial())
    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSizePartial())
    if (self.has_allow_multiple_eg_): n += 2
    return n

  def Clear(self):
    self.clear_precondition()
    self.clear_puts()
    self.clear_deletes()
    self.clear_allow_multiple_eg()

  def OutputUnchecked(self, out):
    # 11/12 = STARTGROUP/ENDGROUP for field 1; 34/42 = fields 4/5
    # length-delimited; 48 = field 6 varint (bool).
    for i in xrange(len(self.precondition_)):
      out.putVarInt32(11)
      self.precondition_[i].OutputUnchecked(out)
      out.putVarInt32(12)
    if (self.has_puts_):
      out.putVarInt32(34)
      out.putVarInt32(self.puts_.ByteSize())
      self.puts_.OutputUnchecked(out)
    if (self.has_deletes_):
      out.putVarInt32(42)
      out.putVarInt32(self.deletes_.ByteSize())
      self.deletes_.OutputUnchecked(out)
    if (self.has_allow_multiple_eg_):
      out.putVarInt32(48)
      out.putBoolean(self.allow_multiple_eg_)

  def OutputPartial(self, out):
    for i in xrange(len(self.precondition_)):
      out.putVarInt32(11)
      self.precondition_[i].OutputPartial(out)
      out.putVarInt32(12)
    if (self.has_puts_):
      out.putVarInt32(34)
      out.putVarInt32(self.puts_.ByteSizePartial())
      self.puts_.OutputPartial(out)
    if (self.has_deletes_):
      out.putVarInt32(42)
      out.putVarInt32(self.deletes_.ByteSizePartial())
      self.deletes_.OutputPartial(out)
    if (self.has_allow_multiple_eg_):
      out.putVarInt32(48)
      out.putBoolean(self.allow_multiple_eg_)

  def TryMerge(self, d):
    """Decode fields from decoder d, skipping unknown tags."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 11:
        # Group element: the nested TryMerge consumes up to its ENDGROUP.
        self.add_precondition().TryMerge(d)
        continue
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_puts().TryMerge(tmp)
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_deletes().TryMerge(tmp)
        continue
      if tt == 48:
        self.set_allow_multiple_eg(d.getBoolean())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    cnt=0
    for e in self.precondition_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Precondition%s {\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_puts_:
      res+=prefix+"puts <\n"
      res+=self.puts_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_deletes_:
      res+=prefix+"deletes <\n"
      res+=self.deletes_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_allow_multiple_eg_: res+=prefix+("allow_multiple_eg: %s\n" % self.DebugFormatBool(self.allow_multiple_eg_))
    return res

  # Invoked at class-definition time (plain function, no self).
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  kPreconditionGroup = 1
  kPreconditionkey = 2
  kPreconditionhash = 3
  kputs = 4
  kdeletes = 5
  kallow_multiple_eg = 6

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "Precondition",
    2: "key",
    3: "hash",
    4: "puts",
    5: "deletes",
    6: "allow_multiple_eg",
  }, 6)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STARTGROUP,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.NUMERIC,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.TransactionRequest'
# Placeholder emitted by the generator for extension-runtime-only setup;
# nothing is needed for these messages.
if _extension_runtime:
  pass

# Public API of this generated module.
__all__ = ['Request','ApplicationError','Response','TransactionRequest','TransactionRequest_Precondition']
| |
"""Config flow to configure Philips Hue."""
from __future__ import annotations
import asyncio
from typing import Any
from urllib.parse import urlparse
import aiohue
from aiohue.discovery import discover_nupnp, normalize_bridge_id
import async_timeout
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.components import ssdp
from homeassistant.const import CONF_HOST, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import aiohttp_client
from .bridge import authenticate_bridge
from .const import (
CONF_ALLOW_HUE_GROUPS,
CONF_ALLOW_UNREACHABLE,
DEFAULT_ALLOW_HUE_GROUPS,
DEFAULT_ALLOW_UNREACHABLE,
DOMAIN,
LOGGER,
)
from .errors import AuthenticationRequired, CannotConnect
# Manufacturer URLs that genuine Hue bridges report over SSDP (old and new).
HUE_MANUFACTURERURL = ("http://www.philips.com", "http://www.philips-hue.com")
# Friendly names of devices that merely emulate a Hue bridge; skip them.
HUE_IGNORED_BRIDGE_NAMES = ["Home Assistant Bridge", "Espalexa"]
# Sentinel "bridge id" offered in the picker for manual host entry.
HUE_MANUAL_BRIDGE_ID = "manual"
class HueFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a Hue config flow.

    Entry points: user-initiated setup (async_step_user), SSDP and HomeKit
    discovery, and YAML import.  All paths converge on async_step_link,
    which performs the physical link-button authentication.
    """

    VERSION = 1

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return HueOptionsFlowHandler(config_entry)

    def __init__(self):
        """Initialize the Hue flow."""
        # Bridge currently being set up (chosen or discovered).
        self.bridge: aiohue.Bridge | None = None
        # Bridges found via nUPnP discovery, keyed by normalized bridge id.
        self.discovered_bridges: dict[str, aiohue.Bridge] | None = None

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user."""
        # This is for backwards compatibility.
        return await self.async_step_init(user_input)

    @core.callback
    def _async_get_bridge(self, host: str, bridge_id: str | None = None):
        """Return an aiohue.Bridge for host, normalizing bridge_id if given."""
        if bridge_id is not None:
            bridge_id = normalize_bridge_id(bridge_id)
        return aiohue.Bridge(
            host,
            websession=aiohttp_client.async_get_clientsession(self.hass),
            bridge_id=bridge_id,
        )

    async def async_step_init(self, user_input=None):
        """Handle a flow start.

        Shows a picker of discovered bridges plus a manual-entry option; on
        selection proceeds to the link step.
        """
        # Check if user chooses manual entry
        if user_input is not None and user_input["id"] == HUE_MANUAL_BRIDGE_ID:
            return await self.async_step_manual()
        if (
            user_input is not None
            and self.discovered_bridges is not None
            and user_input["id"] in self.discovered_bridges
        ):
            self.bridge = self.discovered_bridges[user_input["id"]]
            await self.async_set_unique_id(self.bridge.id, raise_on_progress=False)
            return await self.async_step_link()
        # Find / discover bridges
        # NOTE(review): synchronous `with` on async_timeout.timeout is only
        # supported by older async_timeout releases (v4+ requires
        # `async with`) — confirm the pinned dependency version.
        try:
            with async_timeout.timeout(5):
                bridges = await discover_nupnp(
                    websession=aiohttp_client.async_get_clientsession(self.hass)
                )
        except asyncio.TimeoutError:
            return self.async_abort(reason="discover_timeout")
        if bridges:
            # Find already configured hosts
            already_configured = self._async_current_ids(False)
            bridges = [
                bridge for bridge in bridges if bridge.id not in already_configured
            ]
            self.discovered_bridges = {bridge.id: bridge for bridge in bridges}
        # Nothing (new) discovered: fall back to manual host entry.
        if not self.discovered_bridges:
            return await self.async_step_manual()
        return self.async_show_form(
            step_id="init",
            data_schema=vol.Schema(
                {
                    vol.Required("id"): vol.In(
                        {
                            **{bridge.id: bridge.host for bridge in bridges},
                            HUE_MANUAL_BRIDGE_ID: "Manually add a Hue Bridge",
                        }
                    )
                }
            ),
        )

    async def async_step_manual(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle manual bridge setup (user types the host)."""
        if user_input is None:
            return self.async_show_form(
                step_id="manual",
                data_schema=vol.Schema({vol.Required(CONF_HOST): str}),
            )
        # Abort if an entry for this host already exists.
        self._async_abort_entries_match({"host": user_input["host"]})
        self.bridge = self._async_get_bridge(user_input[CONF_HOST])
        return await self.async_step_link()

    async def async_step_link(self, user_input=None):
        """Attempt to link with the Hue bridge.

        Given a configured host, will ask the user to press the link button
        to connect to the bridge.
        """
        if user_input is None:
            return self.async_show_form(step_id="link")
        bridge = self.bridge
        assert bridge is not None
        errors = {}
        try:
            await authenticate_bridge(self.hass, bridge)
        except AuthenticationRequired:
            # Link button not pressed (yet): re-show the form with an error.
            errors["base"] = "register_failed"
        except CannotConnect:
            LOGGER.error("Error connecting to the Hue bridge at %s", bridge.host)
            return self.async_abort(reason="cannot_connect")
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Unknown error connecting with Hue bridge at %s", bridge.host
            )
            errors["base"] = "linking"
        if errors:
            return self.async_show_form(step_id="link", errors=errors)
        # Can happen if we come from import or manual entry
        if self.unique_id is None:
            await self.async_set_unique_id(
                normalize_bridge_id(bridge.id), raise_on_progress=False
            )
        return self.async_create_entry(
            title=bridge.config.name,
            data={CONF_HOST: bridge.host, CONF_USERNAME: bridge.username},
        )

    async def async_step_ssdp(self, discovery_info):
        """Handle a discovered Hue bridge.

        This flow is triggered by the SSDP component. It will check if the
        host is already configured and delegate to the import step if not.
        """
        # Filter out non-Hue bridges #1
        if (
            discovery_info.get(ssdp.ATTR_UPNP_MANUFACTURER_URL)
            not in HUE_MANUFACTURERURL
        ):
            return self.async_abort(reason="not_hue_bridge")
        # Filter out non-Hue bridges #2
        if any(
            name in discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "")
            for name in HUE_IGNORED_BRIDGE_NAMES
        ):
            return self.async_abort(reason="not_hue_bridge")
        # Both location and serial are needed to identify the bridge.
        if (
            ssdp.ATTR_SSDP_LOCATION not in discovery_info
            or ssdp.ATTR_UPNP_SERIAL not in discovery_info
        ):
            return self.async_abort(reason="not_hue_bridge")
        host = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname
        bridge = self._async_get_bridge(host, discovery_info[ssdp.ATTR_UPNP_SERIAL])
        await self.async_set_unique_id(bridge.id)
        # Known bridge: just refresh the stored host, don't reload.
        self._abort_if_unique_id_configured(
            updates={CONF_HOST: bridge.host}, reload_on_update=False
        )
        self.bridge = bridge
        return await self.async_step_link()

    async def async_step_homekit(self, discovery_info):
        """Handle a discovered Hue bridge on HomeKit.

        The bridge ID communicated over HomeKit differs, so we cannot use that
        as the unique identifier. Therefore, this method uses discovery without
        a unique ID.
        """
        self.bridge = self._async_get_bridge(discovery_info[CONF_HOST])
        await self._async_handle_discovery_without_unique_id()
        return await self.async_step_link()

    async def async_step_import(self, import_info):
        """Import a new bridge as a config entry.

        This flow is triggered by `async_setup` for both configured and
        discovered bridges. Triggered for any bridge that does not have a
        config entry yet (based on host).

        This flow is also triggered by `async_step_discovery`.
        """
        # Check if host exists, abort if so.
        self._async_abort_entries_match({"host": import_info["host"]})
        self.bridge = self._async_get_bridge(import_info["host"])
        return await self.async_step_link()
class HueOptionsFlowHandler(config_entries.OptionsFlow):
    """Options flow that lets the user toggle Hue integration settings."""

    def __init__(self, config_entry):
        """Store the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Show the options form, or persist the submitted options."""
        if user_input is not None:
            # Form was submitted: save the chosen options.
            return self.async_create_entry(title="", data=user_input)

        # Pre-fill each toggle with the currently stored option, falling
        # back to the integration default when never set.
        current = self.config_entry.options
        schema_fields = {
            vol.Optional(
                CONF_ALLOW_HUE_GROUPS,
                default=current.get(CONF_ALLOW_HUE_GROUPS, DEFAULT_ALLOW_HUE_GROUPS),
            ): bool,
            vol.Optional(
                CONF_ALLOW_UNREACHABLE,
                default=current.get(CONF_ALLOW_UNREACHABLE, DEFAULT_ALLOW_UNREACHABLE),
            ): bool,
        }
        return self.async_show_form(
            step_id="init", data_schema=vol.Schema(schema_fields)
        )
| |
#!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import tempfile
import traceback
import unittest
import auto_push
from auto_push import LastReleaseBailout
import auto_roll
import common_includes
from common_includes import *
import create_release
from create_release import CreateRelease
import merge_to_branch
from merge_to_branch import *
import push_to_candidates
from push_to_candidates import *
import chromium_roll
from chromium_roll import ChromiumRoll
import releases
from releases import Releases
from auto_tag import AutoTag
# Configuration overrides used by the script tests below. DEFAULT_CWD is
# filled in per-test (see ScriptTest.setUp, which points it at a fresh
# temporary directory); the remaining entries redirect the scripts' state
# and scratch files into /tmp so runs never touch a real checkout.
TEST_CONFIG = {
  "DEFAULT_CWD": None,
  "BRANCHNAME": "test-prepare-push",
  "CANDIDATESBRANCH": "test-candidates-push",
  "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-candidates-tempfile",
  "CHANGELOG_ENTRY_FILE":
      "/tmp/test-v8-push-to-candidates-tempfile-changelog-entry",
  "PATCH_FILE": "/tmp/test-v8-push-to-candidates-tempfile-patch",
  "COMMITMSG_FILE": "/tmp/test-v8-push-to-candidates-tempfile-commitmsg",
  "CHROMIUM": "/tmp/test-v8-push-to-candidates-tempfile-chromium",
  "SETTINGS_LOCATION": None,
  "ALREADY_MERGING_SENTINEL_FILE":
      "/tmp/test-merge-to-branch-tempfile-already-merging",
  "TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
}

# Standard author/reviewer arguments shared by auto-push style test runs.
AUTO_PUSH_ARGS = [
  "-a", "author@chromium.org",
  "-r", "reviewer@chromium.org",
]
class ToplevelTest(unittest.TestCase):
  """Tests for the pure helper functions of the release scripts.

  These exercise changelog/bug-reference formatting and release sorting
  without any git or filesystem side effects.

  Fix: use assertEqual instead of the deprecated assertEquals alias
  (identical behavior; silences DeprecationWarning on modern unittest).
  """

  def testSortBranches(self):
    S = releases.SortBranches
    self.assertEqual(["3.1", "2.25"], S(["2.25", "3.1"])[0:2])
    self.assertEqual(["3.0", "2.25"], S(["2.25", "3.0", "2.24"])[0:2])
    self.assertEqual(["3.11", "3.2"], S(["3.11", "3.2", "2.24"])[0:2])

  def testFilterDuplicatesAndReverse(self):
    F = releases.FilterDuplicatesAndReverse
    self.assertEqual([], F([]))
    self.assertEqual([["100", "10"]], F([["100", "10"]]))
    self.assertEqual([["99", "9"], ["100", "10"]],
                     F([["100", "10"], ["99", "9"]]))
    self.assertEqual([["98", "9"], ["100", "10"]],
                     F([["100", "10"], ["99", "9"], ["98", "9"]]))
    self.assertEqual([["98", "9"], ["99", "10"]],
                     F([["100", "10"], ["99", "10"], ["98", "9"]]))

  def testBuildRevisionRanges(self):
    B = releases.BuildRevisionRanges
    self.assertEqual({}, B([]))
    self.assertEqual({"10": "100"}, B([["100", "10"]]))
    self.assertEqual({"10": "100", "9": "99:99"},
                     B([["100", "10"], ["99", "9"]]))
    self.assertEqual({"10": "100", "9": "97:99"},
                     B([["100", "10"], ["98", "9"], ["97", "9"]]))
    self.assertEqual({"10": "100", "9": "99:99", "3": "91:98"},
                     B([["100", "10"], ["99", "9"], ["91", "3"]]))
    self.assertEqual({"13": "101", "12": "100:100", "9": "94:97",
                      "3": "91:93, 98:99"},
                     B([["101", "13"], ["100", "12"], ["98", "3"],
                        ["94", "9"], ["91", "3"]]))

  def testMakeComment(self):
    self.assertEqual("# Line 1\n# Line 2\n#",
                     MakeComment(" Line 1\n Line 2\n"))
    self.assertEqual("#Line 1\n#Line 2",
                     MakeComment("Line 1\n Line 2"))

  def testStripComments(self):
    self.assertEqual(" Line 1\n Line 3\n",
                     StripComments(" Line 1\n# Line 2\n Line 3\n#\n"))
    self.assertEqual("\nLine 2 ### Test\n #",
                     StripComments("###\n# \n\n# Line 1\nLine 2 ### Test\n #"))

  def testMakeChangeLogBodySimple(self):
    commits = [
        ["Title text 1",
         "Title text 1\n\nBUG=\n",
         "author1@chromium.org"],
        ["Title text 2.",
         "Title text 2\n\nBUG=1234\n",
         "author2@chromium.org"],
    ]
    self.assertEqual(" Title text 1.\n"
                     " (author1@chromium.org)\n\n"
                     " Title text 2 (Chromium issue 1234).\n"
                     " (author2@chromium.org)\n\n",
                     MakeChangeLogBody(commits))

  def testMakeChangeLogBodyEmpty(self):
    self.assertEqual("", MakeChangeLogBody([]))

  def testMakeChangeLogBodyAutoFormat(self):
    # Only commits with a positive LOG= line make it into the auto-formatted
    # changelog body.
    commits = [
        ["Title text 1!",
         "Title text 1\nLOG=y\nBUG=\n",
         "author1@chromium.org"],
        ["Title text 2",
         "Title text 2\n\nBUG=1234\n",
         "author2@chromium.org"],
        ["Title text 3",
         "Title text 3\n\nBUG=1234\nLOG = Yes\n",
         "author3@chromium.org"],
        ["Title text 3",
         "Title text 4\n\nBUG=1234\nLOG=\n",
         "author4@chromium.org"],
    ]
    self.assertEqual(" Title text 1.\n\n"
                     " Title text 3 (Chromium issue 1234).\n\n",
                     MakeChangeLogBody(commits, True))

  def testRegressWrongLogEntryOnTrue(self):
    body = """
Check elimination: Learn from if(CompareMap(x)) on true branch.

BUG=
R=verwaest@chromium.org

Committed: https://code.google.com/p/v8/source/detail?r=18210
"""
    self.assertEqual("", MakeChangeLogBody([["title", body, "author"]], True))

  def testMakeChangeLogBugReferenceEmpty(self):
    self.assertEqual("", MakeChangeLogBugReference(""))
    self.assertEqual("", MakeChangeLogBugReference("LOG="))
    self.assertEqual("", MakeChangeLogBugReference(" BUG ="))
    self.assertEqual("", MakeChangeLogBugReference("BUG=none\t"))

  def testMakeChangeLogBugReferenceSimple(self):
    self.assertEqual("(issue 987654)",
                     MakeChangeLogBugReference("BUG = v8:987654"))
    self.assertEqual("(Chromium issue 987654)",
                     MakeChangeLogBugReference("BUG=987654 "))

  def testMakeChangeLogBugReferenceFromBody(self):
    self.assertEqual("(Chromium issue 1234567)",
                     MakeChangeLogBugReference("Title\n\nTBR=\nBUG=\n"
                                               " BUG=\tchromium:1234567\t\n"
                                               "R=somebody\n"))

  def testMakeChangeLogBugReferenceMultiple(self):
    # All issues should be sorted and grouped. Multiple references to the same
    # issue should be filtered.
    self.assertEqual("(issues 123, 234, Chromium issue 345)",
                     MakeChangeLogBugReference("Title\n\n"
                                               "BUG=v8:234\n"
                                               " BUG\t= 345, \tv8:234,\n"
                                               "BUG=v8:123\n"
                                               "R=somebody\n"))
    self.assertEqual("(Chromium issues 123, 234)",
                     MakeChangeLogBugReference("Title\n\n"
                                               "BUG=234,,chromium:123 \n"
                                               "R=somebody\n"))
    self.assertEqual("(Chromium issues 123, 234)",
                     MakeChangeLogBugReference("Title\n\n"
                                               "BUG=chromium:234, , 123\n"
                                               "R=somebody\n"))
    self.assertEqual("(issues 345, 456)",
                     MakeChangeLogBugReference("Title\n\n"
                                               "\t\tBUG=v8:345,v8:456\n"
                                               "R=somebody\n"))
    self.assertEqual("(issue 123, Chromium issues 345, 456)",
                     MakeChangeLogBugReference("Title\n\n"
                                               "BUG=chromium:456\n"
                                               "BUG = none\n"
                                               "R=somebody\n"
                                               "BUG=456,v8:123, 345"))

  # TODO(machenbach): These tests don't make much sense when the formatting is
  # done later.
  def testMakeChangeLogBugReferenceLong(self):
    # -----------------00--------10--------20--------30--------
    self.assertEqual("(issues 234, 1234567890, 1234567"
                     "8901234567890, Chromium issues 12345678,"
                     " 123456789)",
                     MakeChangeLogBugReference("BUG=v8:234\n"
                                               "BUG=v8:1234567890\n"
                                               "BUG=v8:12345678901234567890\n"
                                               "BUG=123456789\n"
                                               "BUG=12345678\n"))
    # -----------------00--------10--------20--------30--------
    self.assertEqual("(issues 234, 1234567890, 1234567"
                     "8901234567890, Chromium issues"
                     " 123456789, 1234567890)",
                     MakeChangeLogBugReference("BUG=v8:234\n"
                                               "BUG=v8:12345678901234567890\n"
                                               "BUG=v8:1234567890\n"
                                               "BUG=123456789\n"
                                               "BUG=1234567890\n"))
    # -----------------00--------10--------20--------30--------
    self.assertEqual("(Chromium issues 234, 1234567890"
                     ", 12345678901234567, "
                     "1234567890123456789)",
                     MakeChangeLogBugReference("BUG=234\n"
                                               "BUG=12345678901234567\n"
                                               "BUG=1234567890123456789\n"
                                               "BUG=1234567890\n"))
def Cmd(*args, **kwargs):
  """Convenience function returning a shell command test expectation.

  The last positional argument doubles as the canned return value of the
  mocked command; cwd defaults to the test's fake working directory.
  """
  expectation = dict(
      name="command",
      args=args,
      ret=args[-1],
      cb=kwargs.get("cb"),
      cwd=kwargs.get("cwd", TEST_CONFIG["DEFAULT_CWD"]),
  )
  return expectation
def RL(text, cb=None):
  """Convenience function returning a readline test expectation.

  `text` is the canned line the mocked readline will produce; `cb` is an
  optional callback run when the expectation is consumed.
  """
  expectation = dict(name="readline", ret=text, cb=cb, cwd=None)
  expectation["args"] = []
  return expectation
def URL(*args, **kwargs):
  """Convenience function returning a readurl test expectation.

  All positional arguments except the last describe the expected request
  (url and optional params); the last one is the canned response.
  """
  expected_request, response = args[:-1], args[-1]
  return dict(name="readurl", args=expected_request, ret=response,
              cb=kwargs.get("cb"), cwd=None)
class SimpleMock(object):
  """Replays a scripted list of expected side-effect calls.

  Tests install a "recipe" (a list of expectation dicts built with Cmd/RL/URL)
  and every side effect performed by the scripts under test is matched against
  the next entry, returning that entry's canned result.
  """

  def __init__(self):
    # Scripted expectations and the index of the last consumed entry.
    self._recipe = []
    self._index = -1

  def Expect(self, recipe):
    """Install the list of expectation dicts to replay, in order."""
    self._recipe = recipe

  def Call(self, name, *args, **kwargs):  # pragma: no cover
    """Match one actual call against the next expectation.

    Returns the expectation's canned result (or raises it, if it is an
    Exception). Raises NoRetryException on exhausted recipe, wrong action
    name, wrong cwd, argument mismatch, or a failing context callback.
    """
    self._index += 1
    try:
      expected_call = self._recipe[self._index]
    except IndexError:
      # Recipe exhausted: more calls were made than expectations scripted.
      raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
    if not isinstance(expected_call, dict):
      raise NoRetryException("Found wrong expectation type for %s %s" %
                             (name, " ".join(args)))
    if expected_call["name"] != name:
      raise NoRetryException("Expected action: %s %s - Actual: %s" %
                             (expected_call["name"], expected_call["args"], name))
    # Check if the given working directory matches the expected one.
    if expected_call["cwd"] != kwargs.get("cwd"):
      raise NoRetryException("Expected cwd: %s in %s %s - Actual: %s" %
                             (expected_call["cwd"],
                              expected_call["name"],
                              expected_call["args"],
                              kwargs.get("cwd")))
    # The number of arguments in the expectation must match the actual
    # arguments.
    if len(args) > len(expected_call['args']):
      raise NoRetryException("When calling %s with arguments, the "
                             "expectations must consist of at least as many arguments." %
                             name)
    # Compare expected and actual arguments.
    for (expected_arg, actual_arg) in zip(expected_call['args'], args):
      if expected_arg != actual_arg:
        raise NoRetryException("Expected: %s - Actual: %s" %
                               (expected_arg, actual_arg))
    # The expected call contains an optional callback for checking the context
    # at the time of the call.
    if expected_call['cb']:
      try:
        expected_call['cb']()
      except:
        tb = traceback.format_exc()
        raise NoRetryException("Caught exception from callback: %s" % tb)
    # If the return value is an exception, raise it instead of returning.
    if isinstance(expected_call['ret'], Exception):
      raise expected_call['ret']
    return expected_call['ret']

  def AssertFinished(self):  # pragma: no cover
    """Fail unless every scripted expectation was consumed."""
    if self._index < len(self._recipe) - 1:
      raise NoRetryException("Called mock too seldom: %d vs. %d" %
                             (self._index, len(self._recipe)))
class ScriptTest(unittest.TestCase):
def MakeEmptyTempFile(self):
handle, name = tempfile.mkstemp()
os.close(handle)
self._tmp_files.append(name)
return name
def MakeEmptyTempDirectory(self):
name = tempfile.mkdtemp()
self._tmp_files.append(name)
return name
def WriteFakeVersionFile(self, major=3, minor=22, build=4, patch=0):
version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
if not os.path.exists(os.path.dirname(version_file)):
os.makedirs(os.path.dirname(version_file))
with open(version_file, "w") as f:
f.write(" // Some line...\n")
f.write("\n")
f.write("#define V8_MAJOR_VERSION %s\n" % major)
f.write("#define V8_MINOR_VERSION %s\n" % minor)
f.write("#define V8_BUILD_NUMBER %s\n" % build)
f.write("#define V8_PATCH_LEVEL %s\n" % patch)
f.write(" // Some line...\n")
f.write("#define V8_IS_CANDIDATE_VERSION 0\n")
def MakeStep(self):
"""Convenience wrapper."""
options = ScriptsBase(TEST_CONFIG, self, self._state).MakeOptions([])
return MakeStep(step_class=Step, state=self._state,
config=TEST_CONFIG, side_effect_handler=self,
options=options)
def RunStep(self, script=PushToCandidates, step_class=Step, args=None):
"""Convenience wrapper."""
args = args if args is not None else ["-m"]
return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
def Call(self, fun, *args, **kwargs):
print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
print "%s %s" % (cmd, args)
print "in %s" % cwd
return self._mock.Call("command", cmd + " " + args, cwd=cwd)
def ReadLine(self):
return self._mock.Call("readline")
def ReadURL(self, url, params):
if params is not None:
return self._mock.Call("readurl", url, params)
else:
return self._mock.Call("readurl", url)
def Sleep(self, seconds):
pass
def GetDate(self):
return "1999-07-31"
def GetUTCStamp(self):
return "1000000"
def Expect(self, *args):
"""Convenience wrapper."""
self._mock.Expect(*args)
def setUp(self):
self._mock = SimpleMock()
self._tmp_files = []
self._state = {}
TEST_CONFIG["DEFAULT_CWD"] = self.MakeEmptyTempDirectory()
def tearDown(self):
if os.path.exists(TEST_CONFIG["PERSISTFILE_BASENAME"]):
shutil.rmtree(TEST_CONFIG["PERSISTFILE_BASENAME"])
# Clean up temps. Doesn't work automatically.
for name in self._tmp_files:
if os.path.isfile(name):
os.remove(name)
if os.path.isdir(name):
shutil.rmtree(name)
self._mock.AssertFinished()
def testGitMock(self):
self.Expect([Cmd("git --version", "git version 1.2.3"),
Cmd("git dummy", "")])
self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
self.assertEquals("", self.MakeStep().Git("dummy"))
def testCommonPrepareDefault(self):
self.Expect([
Cmd("git status -s -uno", ""),
Cmd("git status -s -b -uno", "## some_branch"),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
self.MakeStep().CommonPrepare()
self.MakeStep().PrepareBranch()
self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareNoConfirm(self):
self.Expect([
Cmd("git status -s -uno", ""),
Cmd("git status -s -b -uno", "## some_branch"),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("n"),
])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareDeleteBranchFailure(self):
self.Expect([
Cmd("git status -s -uno", ""),
Cmd("git status -s -b -uno", "## some_branch"),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
self.assertEquals("some_branch", self._state["current_branch"])
def testInitialEnvironmentChecks(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
os.environ["EDITOR"] = "vi"
self.Expect([
Cmd("which vi", "/usr/bin/vi"),
])
self.MakeStep().InitialEnvironmentChecks(TEST_CONFIG["DEFAULT_CWD"])
def testTagTimeout(self):
self.Expect([
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\"Title\" origin/candidates", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\"Title\" origin/candidates", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\"Title\" origin/candidates", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep=\"Title\" origin/candidates", ""),
])
args = ["--branch", "candidates", "ab12345"]
self._state["version"] = "tag_name"
self._state["commit_title"] = "Title"
self.assertRaises(Exception,
lambda: self.RunStep(MergeToBranch, TagRevision, args))
def testReadAndPersistVersion(self):
self.WriteFakeVersionFile(build=5)
step = self.MakeStep()
step.ReadAndPersistVersion()
self.assertEquals("3", step["major"])
self.assertEquals("22", step["minor"])
self.assertEquals("5", step["build"])
self.assertEquals("0", step["patch"])
def testRegex(self):
self.assertEqual("(issue 321)",
re.sub(r"BUG=v8:(.*)$", r"(issue \1)", "BUG=v8:321"))
self.assertEqual("(Chromium issue 321)",
re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", "BUG=321"))
cl = " too little\n\ttab\ttab\n too much\n trailing "
cl = MSub(r"\t", r" ", cl)
cl = MSub(r"^ {1,7}([^ ])", r" \1", cl)
cl = MSub(r"^ {9,80}([^ ])", r" \1", cl)
cl = MSub(r" +$", r"", cl)
self.assertEqual(" too little\n"
" tab tab\n"
" too much\n"
" trailing", cl)
self.assertEqual("//\n#define V8_BUILD_NUMBER 3\n",
MSub(r"(?<=#define V8_BUILD_NUMBER)(?P<space>\s+)\d*$",
r"\g<space>3",
"//\n#define V8_BUILD_NUMBER 321\n"))
def testPreparePushRevision(self):
# Tests the default push hash used when the --revision option is not set.
self.Expect([
Cmd("git log -1 --format=%H HEAD", "push_hash")
])
self.RunStep(PushToCandidates, PreparePushRevision)
self.assertEquals("push_hash", self._state["push_hash"])
def testPrepareChangeLog(self):
self.WriteFakeVersionFile()
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
self.Expect([
Cmd("git log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
Cmd("git log -1 --format=%s rev1", "Title text 1"),
Cmd("git log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
Cmd("git log -1 --format=%an rev1", "author1@chromium.org"),
Cmd("git log -1 --format=%s rev2", "Title text 2."),
Cmd("git log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
Cmd("git log -1 --format=%an rev2", "author2@chromium.org"),
Cmd("git log -1 --format=%s rev3", "Title text 3"),
Cmd("git log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
Cmd("git log -1 --format=%an rev3", "author3@chromium.org"),
Cmd("git log -1 --format=%s rev4", "Title text 4"),
Cmd("git log -1 --format=%B rev4",
("Title\n\nBUG=456\nLOG=Y\n\n"
"Review URL: https://codereview.chromium.org/9876543210\n")),
URL("https://codereview.chromium.org/9876543210/description",
"Title\n\nBUG=456\nLOG=N\n\n"),
Cmd("git log -1 --format=%an rev4", "author4@chromium.org"),
])
self._state["last_push_master"] = "1234"
self._state["push_hash"] = "push_hash"
self._state["version"] = "3.22.5"
self.RunStep(PushToCandidates, PrepareChangeLog)
actual_cl = FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
expected_cl = """1999-07-31: Version 3.22.5
Title text 1.
Title text 3 (Chromium issue 321).
Performance and stability improvements on all platforms.
#
# The change log above is auto-generated. Please review if all relevant
# commit messages from the list below are included.
# All lines starting with # will be stripped.
#
# Title text 1.
# (author1@chromium.org)
#
# Title text 2 (Chromium issue 123).
# (author2@chromium.org)
#
# Title text 3 (Chromium issue 321).
# (author3@chromium.org)
#
# Title text 4 (Chromium issue 456).
# (author4@chromium.org)
#
#"""
self.assertEquals(expected_cl, actual_cl)
def testEditChangeLog(self):
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
TextToFile(" New \n\tLines \n", TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
os.environ["EDITOR"] = "vi"
self.Expect([
RL(""), # Open editor.
Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""),
])
self.RunStep(PushToCandidates, EditChangeLog)
self.assertEquals("New\n Lines",
FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"]))
TAGS = """
4425.0
0.0.0.0
3.9.6
3.22.4
test_tag
"""
# Version as tag: 3.22.4.0. Version on master: 3.22.6.
# Make sure that the latest version is 3.22.6.0.
def testIncrementVersion(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
self.RunStep(PushToCandidates, IncrementVersion)
self.assertEquals("3", self._state["new_major"])
self.assertEquals("22", self._state["new_minor"])
self.assertEquals("7", self._state["new_build"])
self.assertEquals("0", self._state["new_patch"])
def _TestSquashCommits(self, change_log, expected_msg):
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
with open(TEST_CONFIG["CHANGELOG_ENTRY_FILE"], "w") as f:
f.write(change_log)
self.Expect([
Cmd("git diff origin/candidates hash1", "patch content"),
])
self._state["push_hash"] = "hash1"
self._state["date"] = "1999-11-11"
self.RunStep(PushToCandidates, SquashCommits)
self.assertEquals(FileToText(TEST_CONFIG["COMMITMSG_FILE"]), expected_msg)
patch = FileToText(TEST_CONFIG["PATCH_FILE"])
self.assertTrue(re.search(r"patch content", patch))
def testSquashCommitsUnformatted(self):
change_log = """1999-11-11: Version 3.22.5
Log text 1.
Chromium issue 12345
Performance and stability improvements on all platforms.\n"""
commit_msg = """Version 3.22.5 (based on hash1)
Log text 1. Chromium issue 12345
Performance and stability improvements on all platforms."""
self._TestSquashCommits(change_log, commit_msg)
def testSquashCommitsFormatted(self):
change_log = """1999-11-11: Version 3.22.5
Long commit message that fills more than 80 characters (Chromium issue
12345).
Performance and stability improvements on all platforms.\n"""
commit_msg = """Version 3.22.5 (based on hash1)
Long commit message that fills more than 80 characters (Chromium issue 12345).
Performance and stability improvements on all platforms."""
self._TestSquashCommits(change_log, commit_msg)
def testSquashCommitsQuotationMarks(self):
change_log = """Line with "quotation marks".\n"""
commit_msg = """Line with "quotation marks"."""
self._TestSquashCommits(change_log, commit_msg)
def testBootstrapper(self):
work_dir = self.MakeEmptyTempDirectory()
class FakeScript(ScriptsBase):
def _Steps(self):
return []
# Use the test configuration without the fake testing default work dir.
fake_config = dict(TEST_CONFIG)
del(fake_config["DEFAULT_CWD"])
self.Expect([
Cmd("fetch v8", "", cwd=work_dir),
])
FakeScript(fake_config, self).Run(["--work-dir", work_dir])
def _PushToCandidates(self, force=False, manual=False):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
# The version file on master has build level 5, while the version
# file from candidates has build level 4.
self.WriteFakeVersionFile(build=5)
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
master_change_log = "2014-03-17: Sentinel\n"
TextToFile(master_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
os.environ["EDITOR"] = "vi"
commit_msg_squashed = """Version 3.22.5 (squashed - based on push_hash)
Log text 1 (issue 321).
Performance and stability improvements on all platforms."""
commit_msg = """Version 3.22.5 (based on push_hash)
Log text 1 (issue 321).
Performance and stability improvements on all platforms."""
def ResetChangeLog():
"""On 'git co -b new_branch origin/candidates',
and 'git checkout -- ChangeLog',
the ChangLog will be reset to its content on candidates."""
candidates_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
TextToFile(candidates_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
def ResetToCandidates():
ResetChangeLog()
self.WriteFakeVersionFile()
def CheckVersionCommit():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
self.assertTrue(
re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the candidates branch got correctly
# modified.
change_log = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertEquals(
"""1999-07-31: Version 3.22.5
Log text 1 (issue 321).
Performance and stability improvements on all platforms.
1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n""",
change_log)
force_flag = " -f" if not manual else ""
expectations = []
if not force:
expectations.append(Cmd("which vi", "/usr/bin/vi"))
expectations += [
Cmd("git status -s -uno", ""),
Cmd("git status -s -b -uno", "## some_branch\n"),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd(("git new-branch %s --upstream origin/master" %
TEST_CONFIG["BRANCHNAME"]), ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash",
"Version 3.22.4 (based on abc3)\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
]
if manual:
expectations.append(RL("")) # Open editor.
if not force:
expectations.append(
Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""))
expectations += [
Cmd("git fetch", ""),
Cmd("git checkout -f origin/master", ""),
Cmd("git diff origin/candidates push_hash", "patch content\n"),
Cmd(("git new-branch %s --upstream origin/candidates" %
TEST_CONFIG["CANDIDATESBRANCH"]), "", cb=ResetToCandidates),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
Cmd("git checkout -f origin/candidates -- ChangeLog", "",
cb=ResetChangeLog),
Cmd("git checkout -f origin/candidates -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
Cmd("git commit -am \"%s\"" % commit_msg_squashed, ""),
]
if manual:
expectations.append(RL("Y")) # Sanity check.
expectations += [
Cmd("git cl land -f --bypass-hooks", ""),
Cmd("git checkout -f master", ""),
Cmd("git fetch", ""),
Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
Cmd(("git new-branch %s --upstream origin/candidates" %
TEST_CONFIG["CANDIDATESBRANCH"]), "", cb=ResetToCandidates),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git cl land -f --bypass-hooks", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
"\"Version 3.22.5 (based on push_hash)\""
" origin/candidates", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin 3.22.5", ""),
Cmd("git checkout -f some_branch", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
]
self.Expect(expectations)
args = ["-a", "author@chromium.org", "--revision", "push_hash"]
if force: args.append("-f")
if manual: args.append("-m")
else: args += ["-r", "reviewer@chromium.org"]
PushToCandidates(TEST_CONFIG, self).Run(args)
cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
# Note: The version file is on build number 5 again in the end of this test
# since the git command that merges to master is mocked out.
def testPushToCandidatesManual(self):
self._PushToCandidates(manual=True)
def testPushToCandidatesSemiAutomatic(self):
self._PushToCandidates()
def testPushToCandidatesForced(self):
self._PushToCandidates(force=True)
def testCreateRelease(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
# The version file on master has build level 5.
self.WriteFakeVersionFile(build=5)
master_change_log = "2014-03-17: Sentinel\n"
TextToFile(master_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
commit_msg = """Version 3.22.5
Log text 1 (issue 321).
Performance and stability improvements on all platforms."""
def ResetChangeLog():
last_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
TextToFile(last_change_log,
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
def CheckVersionCommit():
commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
self.assertTrue(
re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the candidates branch got correctly
# modified.
change_log = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertEquals(
"""1999-07-31: Version 3.22.5
Log text 1 (issue 321).
Performance and stability improvements on all platforms.
1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n""",
change_log)
expectations = [
Cmd("git fetch origin "
"+refs/heads/*:refs/heads/* "
"+refs/pending/*:refs/pending/* "
"+refs/pending-tags/*:refs/pending-tags/*", ""),
Cmd("git checkout -f origin/master", ""),
Cmd("git branch", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
Cmd("git reset --hard origin/master", ""),
Cmd("git checkout -b work-branch push_hash", ""),
Cmd("git checkout -f 3.22.4 -- ChangeLog", "", cb=ResetChangeLog),
Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git push origin "
"refs/heads/work-branch:refs/pending/heads/3.22.5 "
"push_hash:refs/pending-tags/heads/3.22.5 "
"push_hash:refs/heads/3.22.5", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
"\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin 3.22.5", ""),
Cmd("git checkout -f origin/master", ""),
Cmd("git branch", "* master\n work-branch\n"),
Cmd("git branch -D work-branch", ""),
Cmd("git gc", ""),
]
self.Expect(expectations)
args = ["-a", "author@chromium.org",
"-r", "reviewer@chromium.org",
"--revision", "push_hash"]
CreateRelease(TEST_CONFIG, self).Run(args)
cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
# Note: The version file is on build number 5 again in the end of this test
# since the git command that merges to master is mocked out.
C_V8_22624_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
"""
C_V8_123455_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123455 123
"""
C_V8_123456_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
ROLL_COMMIT_MSG = """Update V8 to version 3.22.4 (based on abc).
Summary of changes available at:
https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
Please follow these instructions for assigning/CC'ing issues:
https://code.google.com/p/v8-wiki/wiki/TriagingIssues
TBR=g_name@chromium.org,reviewer@chromium.org"""
def testChromiumRoll(self):
  """Rolls a V8 revision into a fake Chromium checkout.

  Sets up a fake Chromium checkout whose DEPS pins an old V8 revision,
  replays the expected command sequence (the mocked roll-dep rewrites
  DEPS via the WriteDeps callback), and verifies DEPS afterwards.
  """
  # Setup fake directory structures.
  TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
  TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
  chrome_dir = TEST_CONFIG["CHROMIUM"]
  os.makedirs(os.path.join(chrome_dir, "v8"))

  # Write fake deps file.
  TextToFile("Some line\n \"v8_revision\": \"123444\",\n some line",
             os.path.join(chrome_dir, "DEPS"))

  def WriteDeps():
    # Callback for the mocked roll-dep command: simulates the roll
    # updating the pinned v8 revision in DEPS.
    TextToFile("Some line\n \"v8_revision\": \"22624\",\n some line",
               os.path.join(chrome_dir, "DEPS"))

  expectations = [
    Cmd("git fetch origin", ""),
    Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
    Cmd("git log -1 --format=%s roll_hsh",
        "Version 3.22.4 (based on abc)\n"),
    Cmd("git describe --tags roll_hsh", "3.22.4"),
    Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
    # --sheriff makes the script look up the current sheriff's name.
    URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
        "document.write('g_name')"),
    Cmd("git status -s -uno", "", cwd=chrome_dir),
    Cmd("git checkout -f master", "", cwd=chrome_dir),
    Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
    Cmd("git pull", "", cwd=chrome_dir),
    Cmd("git fetch origin", ""),
    Cmd("git new-branch v8-roll-roll_hsh", "", cwd=chrome_dir),
    Cmd("roll-dep v8 roll_hsh", "rolled", cb=WriteDeps, cwd=chrome_dir),
    Cmd(("git commit -am \"%s\" "
         "--author \"author@chromium.org <author@chromium.org>\"" %
         self.ROLL_COMMIT_MSG),
        "", cwd=chrome_dir),
    Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
        cwd=chrome_dir),
  ]
  self.Expect(expectations)

  args = ["-a", "author@chromium.org", "-c", chrome_dir,
          "--sheriff",
          "-r", "reviewer@chromium.org",
          "--last-roll", "last_roll_hsh",
          "roll_hsh"]
  ChromiumRoll(TEST_CONFIG, self).Run(args)

  # The mocked roll-dep must have rewritten DEPS to the new revision.
  deps = FileToText(os.path.join(chrome_dir, "DEPS"))
  self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
def testCheckLastPushRecently(self):
  """LastReleaseBailout returns 0 (bail out) when nothing landed between
  the last release's base commit and the current candidate."""
  self.Expect([
    Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
    Cmd("git tag", self.TAGS),
    Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
    Cmd("git log -1 --format=%s release_hash",
        "Version 3.22.4 (based on abc3)\n"),
    # Empty range: no new commits since the last release.
    Cmd("git log --format=%H abc3..abc123", "\n"),
  ])
  self._state["candidate"] = "abc123"
  self.assertEquals(0, self.RunStep(
      auto_push.AutoPush, LastReleaseBailout, AUTO_PUSH_ARGS))
def testAutoPush(self):
  """AutoPush with --push records the roll ref's head commit as the new
  release candidate in the persisted state file."""
  self.Expect([
    Cmd("git fetch", ""),
    Cmd("git fetch origin +refs/heads/roll:refs/heads/roll", ""),
    Cmd("git show-ref -s refs/heads/roll", "abc123\n"),
    Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
    Cmd("git tag", self.TAGS),
    Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
    Cmd("git log -1 --format=%s release_hash",
        "Version 3.22.4 (based on abc3)\n"),
    # Non-empty range: there is something new to push.
    Cmd("git log --format=%H abc3..abc123", "some_stuff\n"),
  ])
  auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
  state = json.loads(FileToText("%s-state.json"
                                % TEST_CONFIG["PERSISTFILE_BASENAME"]))
  self.assertEquals("abc123", state["candidate"])
def testAutoRollExistingRoll(self):
  """AutoRoll bails out (returns 0) when an 'Update V8' roll CL is
  already open on the code review server."""
  self.Expect([
    URL("https://codereview.chromium.org/search",
        "owner=author%40chromium.org&limit=30&closed=3&format=json",
        ("{\"results\": [{\"subject\": \"different\"},"
         "{\"subject\": \"Update V8 to Version...\"}]}")),
  ])
  result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
      AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
  self.assertEquals(0, result)
# Snippet from the original DEPS file.
# Used by the auto-roll tests below: "v8_revision" is the pinned revision
# the roll scripts read out of Chromium's DEPS.
FAKE_DEPS = """
vars = {
"v8_revision": "abcd123455",
}
deps = {
"src/v8":
(Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
Var("v8_revision"),
}
"""
def testAutoRollUpToDate(self):
  """AutoRoll exits cleanly when Chromium's DEPS already pins the newest
  V8 release (pinned revision and newest tag both describe to 3.22.4)."""
  TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
  TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
  self.Expect([
    URL("https://codereview.chromium.org/search",
        "owner=author%40chromium.org&limit=30&closed=3&format=json",
        ("{\"results\": [{\"subject\": \"different\"}]}")),
    Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
    Cmd("git rev-list --max-age=740800 --tags",
        "bad_tag\nhash_234\nhash_123"),
    # bad_tag yields no version and must be skipped.
    Cmd("git describe --tags bad_tag", ""),
    Cmd("git describe --tags hash_234", "3.22.4"),
    Cmd("git describe --tags hash_123", "3.22.3"),
    Cmd("git describe --tags abcd123455", "3.22.4"),
    Cmd("git describe --tags hash_234", "3.22.4"),
    Cmd("git describe --tags hash_123", "3.22.3"),
  ])
  result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
      AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
  self.assertEquals(0, result)
def testAutoRoll(self):
  """AutoRoll with --roll proceeds when DEPS pins an older revision
  (3.22.3.1) than the newest release tag (3.22.4)."""
  TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
  TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
  self.Expect([
    URL("https://codereview.chromium.org/search",
        "owner=author%40chromium.org&limit=30&closed=3&format=json",
        ("{\"results\": [{\"subject\": \"different\"}]}")),
    Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
    Cmd("git rev-list --max-age=740800 --tags",
        "bad_tag\nhash_234\nhash_123"),
    Cmd("git describe --tags bad_tag", ""),
    Cmd("git describe --tags hash_234", "3.22.4"),
    Cmd("git describe --tags hash_123", "3.22.3"),
    # Pinned revision is behind the newest release -> roll happens.
    Cmd("git describe --tags abcd123455", "3.22.3.1"),
    Cmd("git describe --tags hash_234", "3.22.4"),
  ])
  result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
      AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
  self.assertEquals(0, result)
def testMergeToBranch(self):
  """Exercises MergeToBranch end to end.

  Merges three commits (plus auto-detected ports) onto the candidates
  branch, simulating a git outage mid-run and verifying that the script
  resumes correctly from persisted state (-s 4).
  """
  TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
  TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
  self.WriteFakeVersionFile(build=5)
  os.environ["EDITOR"] = "vi"
  extra_patch = self.MakeEmptyTempFile()

  def VerifyPatch(patch):
    # Callback asserting the temporary patch file holds exactly the
    # expected patch text when the mocked "git apply" runs.
    return lambda: self.assertEquals(patch,
        FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))

  # Expected commit message for the merge commit.
  msg = """Version 3.22.5.1 (cherry-pick)
Merged ab12345
Merged ab23456
Merged ab34567
Merged ab45678
Merged ab56789
Title4
Title2
Title3
Title1
Revert "Something"
BUG=123,234,345,456,567,v8:123
LOG=N
"""

  def VerifyLand():
    # Callback for the mocked "git cl land": checks the commit message
    # and that the version file was bumped to patch level 1.
    commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
    self.assertEquals(msg, commit)
    version = FileToText(
        os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
    self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
    self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
    self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+1", version))
    self.assertTrue(
        re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))

  self.Expect([
    Cmd("git status -s -uno", ""),
    Cmd("git status -s -b -uno", "## some_branch\n"),
    Cmd("git fetch", ""),
    Cmd("git branch", " branch1\n* branch2\n"),
    Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
        TEST_CONFIG["BRANCHNAME"], ""),
    # Port lookup: each requested commit is searched for "Port <hash>"
    # commits on master that should be merged along with it.
    Cmd(("git log --format=%H --grep=\"Port ab12345\" "
         "--reverse origin/master"),
        "ab45678\nab23456"),
    Cmd("git log -1 --format=%s ab45678", "Title1"),
    Cmd("git log -1 --format=%s ab23456", "Title2"),
    Cmd(("git log --format=%H --grep=\"Port ab23456\" "
         "--reverse origin/master"),
        ""),
    Cmd(("git log --format=%H --grep=\"Port ab34567\" "
         "--reverse origin/master"),
        "ab56789"),
    Cmd("git log -1 --format=%s ab56789", "Title3"),
    RL("Y"),  # Automatically add corresponding ports (ab34567, ab56789)?
    # Simulate git being down which stops the script.
    Cmd("git log -1 --format=%s ab12345", None),
    # Restart script in the failing step.
    Cmd("git log -1 --format=%s ab12345", "Title4"),
    Cmd("git log -1 --format=%s ab23456", "Title2"),
    Cmd("git log -1 --format=%s ab34567", "Title3"),
    Cmd("git log -1 --format=%s ab45678", "Title1"),
    Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""),
    # Full messages are fetched to collect BUG= lines for the merge CL.
    Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"),
    Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"),
    Cmd("git log -1 ab34567", "Title3\nLOG=n\nBUG=567, 456"),
    Cmd("git log -1 ab45678", "Title1\nBUG="),
    Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"),
    # Each commit's patch is extracted and applied in turn.
    Cmd("git log -1 -p ab12345", "patch4"),
    Cmd(("git apply --index --reject \"%s\"" %
         TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
        "", cb=VerifyPatch("patch4")),
    Cmd("git log -1 -p ab23456", "patch2"),
    Cmd(("git apply --index --reject \"%s\"" %
         TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
        "", cb=VerifyPatch("patch2")),
    Cmd("git log -1 -p ab34567", "patch3"),
    Cmd(("git apply --index --reject \"%s\"" %
         TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
        "", cb=VerifyPatch("patch3")),
    Cmd("git log -1 -p ab45678", "patch1"),
    Cmd(("git apply --index --reject \"%s\"" %
         TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
        "", cb=VerifyPatch("patch1")),
    Cmd("git log -1 -p ab56789", "patch5\n"),
    Cmd(("git apply --index --reject \"%s\"" %
         TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
        "", cb=VerifyPatch("patch5\n")),
    Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
    RL("Y"),  # Automatically increment patch level?
    Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
    RL("reviewer@chromium.org"),  # V8 reviewer.
    Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
        "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
    Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
    RL("LGTM"),  # Enter LGTM for V8 CL.
    Cmd("git cl presubmit", "Presubmit successfull\n"),
    Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
        cb=VerifyLand),
    # First lookup of the landed commit fails, second succeeds.
    Cmd("git fetch", ""),
    Cmd("git log -1 --format=%H --grep=\""
        "Version 3.22.5.1 (cherry-pick)"
        "\" refs/remotes/origin/candidates",
        ""),
    Cmd("git fetch", ""),
    Cmd("git log -1 --format=%H --grep=\""
        "Version 3.22.5.1 (cherry-pick)"
        "\" refs/remotes/origin/candidates",
        "hsh_to_tag"),
    Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
    Cmd("git push origin 3.22.5.1", ""),
    Cmd("git checkout -f some_branch", ""),
    Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
  ])

  # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the
  # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567.
  args = ["-f", "-p", extra_patch, "--branch", "candidates",
          "ab12345", "ab23456", "ab34567"]

  # The first run of the script stops because of git being down.
  self.assertRaises(GitFailedException,
      lambda: MergeToBranch(TEST_CONFIG, self).Run(args))

  # Test that state recovery after restarting the script works.
  args += ["-s", "4"]
  MergeToBranch(TEST_CONFIG, self).Run(args)
def testReleases(self):
  """Exercises the Releases script.

  Walks mocked V8 release tags and a fake Chromium checkout's DEPS
  history, then verifies the generated CSV and JSON release reports.
  """
  # Canned Chromium-side commit messages referenced by the DEPS walk.
  c_hash1_commit_log = """Update V8 to Version 4.2.71.
Cr-Commit-Position: refs/heads/master@{#5678}
"""
  c_hash2_commit_log = """Revert something.
BUG=12345
Reason:
> Some reason.
> Cr-Commit-Position: refs/heads/master@{#12345}
> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4
Review URL: https://codereview.chromium.org/12345
Cr-Commit-Position: refs/heads/master@{#4567}
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
"""
  c_hash3_commit_log = """Simple.
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
"""
  # Canned V8-side release commit messages.
  c_hash_234_commit_log = """Version 3.3.1.1 (cherry-pick).
Merged abc12.
Review URL: fake.com
Cr-Commit-Position: refs/heads/candidates@{#234}
"""
  c_hash_123_commit_log = """Version 3.3.1.0
git-svn-id: googlecode@123 0039-1c4b
"""
  c_hash_345_commit_log = """Version 3.4.0.
Cr-Commit-Position: refs/heads/candidates@{#345}
"""
  c_hash_456_commit_log = """Version 4.2.71.
Cr-Commit-Position: refs/heads/4.2.71@{#1}
"""

  json_output = self.MakeEmptyTempFile()
  csv_output = self.MakeEmptyTempFile()
  self.WriteFakeVersionFile()

  TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
  chrome_dir = TEST_CONFIG["CHROMIUM"]
  chrome_v8_dir = os.path.join(chrome_dir, "v8")
  os.makedirs(chrome_v8_dir)

  def WriteDEPS(revision):
    TextToFile("Line\n \"v8_revision\": \"%s\",\n line\n" % revision,
               os.path.join(chrome_dir, "DEPS"))
  WriteDEPS(567)

  def ResetVersion(major, minor, build, patch=0):
    # Callback rewriting the fake version file after a mocked checkout.
    return lambda: self.WriteFakeVersionFile(major=major,
                                             minor=minor,
                                             build=build,
                                             patch=patch)

  def ResetDEPS(revision):
    # Callback rewriting DEPS after a mocked checkout.
    return lambda: WriteDEPS(revision)

  self.Expect([
    Cmd("git status -s -uno", ""),
    Cmd("git status -s -b -uno", "## some_branch\n"),
    Cmd("git fetch", ""),
    Cmd("git branch", " branch1\n* branch2\n"),
    Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
    Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
    Cmd("git rev-list --max-age=395200 --tags",
        "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"),
    # bad_tag's describe output is not a release version; it is skipped.
    Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"),
    Cmd("git describe --tags hash_234", "3.3.1.1"),
    Cmd("git describe --tags hash_123", "3.21.2"),
    Cmd("git describe --tags hash_345", "3.22.3"),
    Cmd("git describe --tags hash_456", "4.2.71"),
    # For each release: inspect the version-file change, the containing
    # branch and the commit message, then restore the working tree.
    Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
    Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 3, 1, 1)),
    Cmd("git branch -r --contains hash_234", " branch-heads/3.3\n"),
    Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
    Cmd("git log -1 --format=%s hash_234", ""),
    Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
    Cmd("git log -1 --format=%ci hash_234", "18:15"),
    Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 22, 5)),
    Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
    Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 21, 2)),
    Cmd("git branch -r --contains hash_123", " branch-heads/3.21\n"),
    Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
    Cmd("git log -1 --format=%s hash_123", ""),
    Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
    Cmd("git log -1 --format=%ci hash_123", "03:15"),
    Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 22, 5)),
    Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
    Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 22, 3)),
    Cmd("git branch -r --contains hash_345", " origin/candidates\n"),
    Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
    Cmd("git log -1 --format=%s hash_345", ""),
    Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
    Cmd("git log -1 --format=%ci hash_345", ""),
    Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 22, 5)),
    Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE),
    Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "",
        cb=ResetVersion(4, 2, 71)),
    Cmd("git branch -r --contains hash_456", " origin/4.2.71\n"),
    Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
    Cmd("git log -1 --format=%H 4.2.71", "hash_456"),
    Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"),
    Cmd("git log -1 --format=%H hash_456^", "master_456"),
    Cmd("git log -1 --format=%B master_456",
        "Cr-Commit-Position: refs/heads/master@{#456}"),
    Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
    Cmd("git log -1 --format=%ci hash_456", "02:15"),
    Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
        cb=ResetVersion(3, 22, 5)),
    # Chromium side: walk DEPS changes to map chromium revisions to the
    # v8 revisions they pinned.
    Cmd("git status -s -uno", "", cwd=chrome_dir),
    Cmd("git checkout -f master", "", cwd=chrome_dir),
    Cmd("git pull", "", cwd=chrome_dir),
    Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], "",
        cwd=chrome_dir),
    Cmd("git fetch origin", "", cwd=chrome_v8_dir),
    Cmd("git log --format=%H --grep=\"V8\"",
        "c_hash0\nc_hash1\nc_hash2\nc_hash3\n",
        cwd=chrome_dir),
    Cmd("git diff --name-only c_hash0 c_hash0^", "", cwd=chrome_dir),
    Cmd("git diff --name-only c_hash1 c_hash1^", "DEPS", cwd=chrome_dir),
    Cmd("git checkout -f c_hash1 -- DEPS", "",
        cb=ResetDEPS("hash_456"),
        cwd=chrome_dir),
    Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log,
        cwd=chrome_dir),
    Cmd("git diff --name-only c_hash2 c_hash2^", "DEPS", cwd=chrome_dir),
    Cmd("git checkout -f c_hash2 -- DEPS", "",
        cb=ResetDEPS("hash_345"),
        cwd=chrome_dir),
    Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
        cwd=chrome_dir),
    Cmd("git diff --name-only c_hash3 c_hash3^", "DEPS", cwd=chrome_dir),
    Cmd("git checkout -f c_hash3 -- DEPS", "", cb=ResetDEPS("deadbeef"),
        cwd=chrome_dir),
    Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
        cwd=chrome_dir),
    Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS("hash_567"),
        cwd=chrome_dir),
    Cmd("git branch -r", " weird/123\n branch-heads/7\n", cwd=chrome_dir),
    Cmd("git checkout -f branch-heads/7 -- DEPS", "",
        cb=ResetDEPS("hash_345"),
        cwd=chrome_dir),
    Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS("hash_567"),
        cwd=chrome_dir),
    Cmd("git checkout -f master", "", cwd=chrome_dir),
    Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
    Cmd("git checkout -f some_branch", ""),
    Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
  ])

  args = ["-c", TEST_CONFIG["CHROMIUM"],
          "--json", json_output,
          "--csv", csv_output,
          "--max-releases", "1"]
  Releases(TEST_CONFIG, self).Run(args)

  # Check expected output.
  csv = ("4.2.71,4.2.71,1,5678,\r\n"
         "3.22.3,candidates,345,4567:5677,\r\n"
         "3.21.2,3.21,123,,\r\n"
         "3.3.1.1,3.3,234,,abc12\r\n")
  self.assertEquals(csv, FileToText(csv_output))

  expected_json = [
    {
      "revision": "1",
      "revision_git": "hash_456",
      "master_position": "456",
      "master_hash": "master_456",
      "patches_merged": "",
      "version": "4.2.71",
      "chromium_revision": "5678",
      "branch": "4.2.71",
      "review_link": "",
      "date": "02:15",
      "chromium_branch": "",
      # FIXME(machenbach): Fix revisions link for git.
      "revision_link": "https://code.google.com/p/v8/source/detail?r=1",
    },
    {
      "revision": "345",
      "revision_git": "hash_345",
      "master_position": "",
      "master_hash": "",
      "patches_merged": "",
      "version": "3.22.3",
      "chromium_revision": "4567:5677",
      "branch": "candidates",
      "review_link": "",
      "date": "",
      "chromium_branch": "7",
      "revision_link": "https://code.google.com/p/v8/source/detail?r=345",
    },
    {
      "revision": "123",
      "revision_git": "hash_123",
      "patches_merged": "",
      "master_position": "",
      "master_hash": "",
      "version": "3.21.2",
      "chromium_revision": "",
      "branch": "3.21",
      "review_link": "",
      "date": "03:15",
      "chromium_branch": "",
      "revision_link": "https://code.google.com/p/v8/source/detail?r=123",
    },
    {
      "revision": "234",
      "revision_git": "hash_234",
      "patches_merged": "abc12",
      "master_position": "",
      "master_hash": "",
      "version": "3.3.1.1",
      "chromium_revision": "",
      "branch": "3.3",
      "review_link": "fake.com",
      "date": "18:15",
      "chromium_branch": "",
      "revision_link": "https://code.google.com/p/v8/source/detail?r=234",
    },
  ]
  self.assertEquals(expected_json, json.loads(FileToText(json_output)))
class SystemTest(unittest.TestCase):
  """Tests that run steps directly rather than through a mocked script."""

  def testReload(self):
    """Step.Reload strips the svn log envelope and appends a
    'Committed:' link derived from the revision number."""
    options = ScriptsBase(
        TEST_CONFIG, DEFAULT_SIDE_EFFECT_HANDLER, {}).MakeOptions([])
    step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
                    options=options,
                    side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER)
    body = step.Reload(
"""------------------------------------------------------------------------
r17997 | machenbach@chromium.org | 2013-11-22 11:04:04 +0100 (...) | 6 lines
Prepare push to trunk. Now working on version 3.23.11.
R=danno@chromium.org
Review URL: https://codereview.chromium.org/83173002
------------------------------------------------------------------------""")
    self.assertEquals(
"""Prepare push to trunk. Now working on version 3.23.11.
R=danno@chromium.org
Committed: https://code.google.com/p/v8/source/detail?r=17997""", body)
| |
import os
import json
import sys
import subprocess
import logging
import argparse
from os.path import dirname, abspath, join
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), "../.."))
sys.path.insert(0, ROOT)
from tools.utils import run_cmd, delete_dir_files, mkdir, copy_file
def del_file(name):
    """ Delete every file called *name* in the rtos/cmsis/features
    directories of mbed-os.

    Args:
    name - name of the file
    """
    roots = (join(ROOT, 'rtos'), join(ROOT, 'cmsis'), join(ROOT, 'features'))
    # Collect all matches first, then remove them.
    matches = [os.path.join(dirpath, name)
               for tree in roots
               for dirpath, _, filenames in os.walk(tree)
               if name in filenames]
    for victim in matches:
        os.remove(victim)
        rel_log.debug("Deleted: %s", os.path.relpath(victim, ROOT))
def copy_folder(src, dest):
    """ Copy the plain files (not subdirectories) of a folder into the
    listed mbed-os path.

    Args:
    src - src folder path
    dest - destination folder path
    """
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        # Skip subdirectories - only first-level files are imported.
        if not os.path.isfile(src_path):
            continue
        dst_path = os.path.join(dest, entry)
        mkdir(os.path.dirname(dst_path))
        copy_file(src_path, dst_path)
def run_cmd_with_output(command, exit_on_failure=False):
    """ Run a system command through the shell and capture its output.

    The command may be passed either as a single shell string
    (e.g. "git remote -v") or as a list of tokens
    (e.g. ['git', 'remote', '-v']).  A list is joined into one string
    because the command is executed with shell=True, where a list
    argument would only execute its first token on POSIX systems.

    Args:
    command - system command as a string or a list of tokens
    exit_on_failure - If True exit the program on failure (default = False)

    Returns:
    returncode - 0 on success, else the failing command's return code
    output - The output of the command if it was successful, else empty string
    """
    # Normalise to one string; also fixes logging, which previously did
    # ' '.join(command) on a *string* and printed it one letter at a time.
    cmd_str = command if isinstance(command, str) else ' '.join(command)
    rel_log.debug('[Exec] %s', cmd_str)
    returncode = 0
    output = ""
    try:
        output = subprocess.check_output(cmd_str, shell=True)
    except subprocess.CalledProcessError as e:
        returncode = e.returncode
        if exit_on_failure:
            rel_log.error("The command %s failed with return code: %s",
                          cmd_str, returncode)
            sys.exit(1)
    return returncode, output
def get_curr_sha(repo_path):
    """ Gets the latest SHA for the specified repo

    Args:
    repo_path - path to the repository

    Returns:
    sha - last commit SHA (abbreviated)
    """
    cwd = os.getcwd()
    os.chdir(abspath(repo_path))
    try:
        cmd = "git log --pretty=format:%h -n 1"
        _, sha = run_cmd_with_output(cmd, exit_on_failure=True)
    finally:
        # Restore the caller's working directory even if the git command
        # fails (exit_on_failure raises SystemExit).
        os.chdir(cwd)
    return sha
def branch_exists(name):
    """ Check if branch already exists in mbed-os local repository.
    It will not verify if branch is present in remote repository.

    Args:
    name - branch name

    Returns:
    True - If branch is already present, False otherwise
    """
    cmd = "git branch"
    _, output = run_cmd_with_output(cmd, exit_on_failure=False)
    # Compare whole branch names per line instead of a substring search,
    # so e.g. "feature_x" does not falsely match "feature_x_old".
    for line in output.splitlines():
        # The current branch is prefixed with "* "; drop the marker.
        if line.replace('*', ' ').strip() == name:
            return True
    return False
def branch_checkout(name):
    """
    Check out the requested branch in the current repository.

    Args:
    name - branch name
    """
    run_cmd_with_output("git checkout " + name, exit_on_failure=False)
def get_last_cherry_pick_sha(branch):
    """
    Return the SHA recorded by the most recent cherry-pick commit on a
    branch.  Cherry-picks made with 'git cherry-pick -x' append a
    '(cherry picked from commit <sha>)' line to the commit message.

    Args:
    branch - Hash to be verified.
    Returns - SHA if found, else None
    """
    run_cmd_with_output("git checkout " + branch, exit_on_failure=False)
    _, output = run_cmd_with_output("git log -n 1", exit_on_failure=True)
    for line in output.split('\n'):
        if 'cherry picked from' in line:
            # The matching line ends in "<sha>)"; strip the trailing ')'.
            return line.split(' ')[-1][:-1]
    return None
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-l', '--log-level',
help="Level for providing logging output",
default='INFO')
parser.add_argument('-r', '--repo-path',
help="Git Repository to be imported",
default=None,
required=True)
parser.add_argument('-c', '--config-file',
help="Configuration file",
default=None,
required=True)
args = parser.parse_args()
level = getattr(logging, args.log_level.upper())
# Set logging level
logging.basicConfig(level=level)
rel_log = logging.getLogger("Importer")
if (args.repo_path is None) or (args.config_file is None):
rel_log.error("Repository path and config file required as input. Use \"--help\" for more info.")
exit(1)
json_file = os.path.abspath(args.config_file)
if not os.path.isfile(json_file):
rel_log.error("%s not found.", args.config_file)
exit(1)
repo = os.path.abspath(args.repo_path)
if not os.path.exists(repo):
rel_log.error("%s not found.", args.repo_path)
exit(1)
sha = get_curr_sha(repo)
if not sha:
rel_log.error("Could not obtain latest SHA")
exit(1)
rel_log.info("%s SHA = %s", os.path.basename(repo), sha)
branch = 'feature_' + os.path.basename(repo) + '_' + sha
commit_msg = "[" + os.path.basename(repo) + "]" + ": Updated to " + sha
# Read configuration data
with open(json_file, 'r') as config:
json_data = json.load(config)
'''
Check if branch exists already, in case branch is present
we will skip all file transfer and merge operations and will
jump to cherry-pick
'''
if branch_exists(branch):
rel_log.info("Branch present = %s", branch)
else:
data_files = json_data["files"]
data_folders = json_data["folders"]
## Remove all files listed in .json from mbed-os repo to avoid duplications
for file in data_files:
src_file = file['src_file']
del_file(os.path.basename(src_file))
for folder in data_folders:
dest_folder = folder['dest_folder']
delete_dir_files(dest_folder)
rel_log.debug("Deleted = %s", folder)
rel_log.info("Removed files/folders listed in json file")
## Copy all the CMSIS files listed in json file to mbed-os
for file in data_files:
repo_file = os.path.join(repo, file['src_file'])
mbed_path = os.path.join(ROOT, file['dest_file'])
mkdir(os.path.dirname(mbed_path))
copy_file(repo_file, mbed_path)
rel_log.debug("Copied = %s", mbed_path)
for folder in data_folders:
repo_folder = os.path.join(repo, folder['src_folder'])
mbed_path = os.path.join(ROOT, folder['dest_folder'])
copy_folder(repo_folder, mbed_path)
rel_log.debug("Copied = %s", mbed_path)
## Create new branch with all changes
create_branch = "git checkout -b "+ branch
run_cmd_with_output(create_branch, exit_on_failure=True)
rel_log.info("Branch created = %s", branch)
add_files = "git add -A"
run_cmd_with_output(add_files, exit_on_failure=True)
commit_branch = "git commit -m \"" + commit_msg + "\""
run_cmd_with_output(commit_branch, exit_on_failure=True)
rel_log.info("Commit added = %s", mbed_path)
## Checkout the feature branch
branch_checkout(branch)
commit_sha = json_data["commit_sha"]
last_sha = get_last_cherry_pick_sha(branch)
if not last_sha:
## Apply commits specific to mbed-os changes
for sha in commit_sha:
cherry_pick_sha = "git cherry-pick -x " + sha
run_cmd_with_output(cherry_pick_sha, exit_on_failure=True)
rel_log.info("Commit added = %s", cherry_pick_sha)
## Few commits are already applied, check the next in sequence
## and skip to last applied
else:
found = False
for sha in commit_sha:
if sha == last_sha:
found = True
continue
if found is True:
cherry_pick_sha = "git cherry-pick -x " + sha
run_cmd_with_output(cherry_pick_sha, exit_on_failure=True)
| |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GYP and GN
MB is a wrapper script for GYP and GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import pprint
import re
import shutil
import sys
import subprocess
import tempfile
def main(args):
  """Parse the command line and dispatch to the selected subcommand."""
  wrapper = MetaBuildWrapper()
  wrapper.ParseArgs(args)
  # Each subparser stores its handler in args.func.
  return wrapper.args.func()
class MetaBuildWrapper(object):
def __init__(self):
p = os.path
d = os.path.dirname
self.chromium_src_dir = p.normpath(d(d(d(p.abspath(__file__)))))
self.default_config = p.join(self.chromium_src_dir, 'tools', 'mb',
'mb_config.pyl')
self.executable = sys.executable
self.platform = sys.platform
self.sep = os.sep
self.args = argparse.Namespace()
self.configs = {}
self.masters = {}
self.mixins = {}
self.private_configs = []
self.common_dev_configs = []
self.unsupported_configs = []
def ParseArgs(self, argv):
  """Build the argparse interface and parse argv into self.args.

  Each subcommand stores its implementing Cmd* method in 'func', so
  main() can simply call self.args.func().
  """
  def AddCommonOptions(subp):
    # Options shared by every subcommand that resolves a config.
    subp.add_argument('-b', '--builder',
                      help='builder name to look up config from')
    subp.add_argument('-m', '--master',
                      help='master name to look up config from')
    subp.add_argument('-c', '--config',
                      help='configuration to analyze')
    subp.add_argument('-f', '--config-file', metavar='PATH',
                      default=self.default_config,
                      help='path to config file '
                           '(default is //tools/mb/mb_config.pyl)')
    subp.add_argument('-g', '--goma-dir', default=self.ExpandUser('~/goma'),
                      help='path to goma directory (default is %(default)s).')
    subp.add_argument('-n', '--dryrun', action='store_true',
                      help='Do a dry run (i.e., do nothing, just print '
                           'the commands that will run)')
    subp.add_argument('-v', '--verbose', action='store_true',
                      help='verbose logging')

  parser = argparse.ArgumentParser(prog='mb')
  subps = parser.add_subparsers()

  subp = subps.add_parser('analyze',
                          help='analyze whether changes to a set of files '
                               'will cause a set of binaries to be rebuilt.')
  AddCommonOptions(subp)
  subp.add_argument('--swarming-targets-file',
                    help='save runtime dependencies for targets listed '
                         'in file.')
  subp.add_argument('path', nargs=1,
                    help='path build was generated into.')
  subp.add_argument('input_path', nargs=1,
                    help='path to a file containing the input arguments '
                         'as a JSON object.')
  subp.add_argument('output_path', nargs=1,
                    help='path to a file containing the output arguments '
                         'as a JSON object.')
  subp.set_defaults(func=self.CmdAnalyze)

  subp = subps.add_parser('gen',
                          help='generate a new set of build files')
  AddCommonOptions(subp)
  subp.add_argument('--swarming-targets-file',
                    help='save runtime dependencies for targets listed '
                         'in file.')
  subp.add_argument('path', nargs=1,
                    help='path to generate build into')
  subp.set_defaults(func=self.CmdGen)

  subp = subps.add_parser('lookup',
                          help='look up the command for a given config or '
                               'builder')
  AddCommonOptions(subp)
  subp.set_defaults(func=self.CmdLookup)

  subp = subps.add_parser('validate',
                          help='validate the config file')
  # 'validate' does not need the common options; only the config file.
  subp.add_argument('-f', '--config-file', metavar='PATH',
                    default=self.default_config,
                    help='path to config file '
                         '(default is //tools/mb/mb_config.pyl)')
  subp.set_defaults(func=self.CmdValidate)

  subp = subps.add_parser('help',
                          help='Get help on a subcommand.')
  subp.add_argument(nargs='?', action='store', dest='subcommand',
                    help='The command to get help for.')
  subp.set_defaults(func=self.CmdHelp)

  self.args = parser.parse_args(argv)
def CmdAnalyze(self):
  """Run the 'analyze' subcommand with the resolved config."""
  vals = self.GetConfig()
  mb_type = vals['type']
  if mb_type == 'gn':
    return self.RunGNAnalyze(vals)
  if mb_type == 'gyp':
    return self.RunGYPAnalyze(vals)
  raise MBErr('Unknown meta-build type "%s"' % vals['type'])
def CmdGen(self):
  """Run the 'gen' subcommand: clobber if needed, then generate files."""
  vals = self.GetConfig()
  self.ClobberIfNeeded(vals)
  mb_type = vals['type']
  if mb_type == 'gn':
    return self.RunGNGen(vals)
  if mb_type == 'gyp':
    return self.RunGYPGen(vals)
  raise MBErr('Unknown meta-build type "%s"' % vals['type'])
def CmdLookup(self):
  """Print the command line (and env) that would be used for a config."""
  vals = self.GetConfig()
  env = None
  if vals['type'] == 'gn':
    cmd = self.GNCmd('gen', '_path_', vals['gn_args'])
  elif vals['type'] == 'gyp':
    cmd, env = self.GYPCmd('_path_', vals)
  else:
    raise MBErr('Unknown meta-build type "%s"' % vals['type'])
  self.PrintCmd(cmd, env)
  return 0
def CmdHelp(self):
  """Show help for a given subcommand, or the general help text."""
  # Re-entering ParseArgs with --help makes argparse print and exit.
  argv = ['--help'] if not self.args.subcommand else [self.args.subcommand,
                                                      '--help']
  self.ParseArgs(argv)
def CmdValidate(self):
  """Validate the mb config file.

  Checks that the file parses, that no config is listed in more than one
  category, that every referenced config and mixin exists, and that every
  defined config and mixin is referenced.  Raises MBErr with the full
  list of problems; prints an ok message and returns 0 when clean.
  """
  errs = []

  # Read the file to make sure it parses.
  self.ReadConfigFile()

  # Figure out the whole list of configs and ensure that no config is
  # listed in more than one category.
  all_configs = {}
  for config in self.common_dev_configs:
    all_configs[config] = 'common_dev_configs'

  for config in self.private_configs:
    if config in all_configs:
      # Fix: report the category this config was first seen in.  The old
      # code indexed all_configs with the literal string 'config' and
      # raised KeyError whenever a duplicate was actually found.
      errs.append('config "%s" listed in "private_configs" also '
                  'listed in "%s"' % (config, all_configs[config]))
    else:
      all_configs[config] = 'private_configs'

  for config in self.unsupported_configs:
    if config in all_configs:
      # Fix: same literal-key bug as above.
      errs.append('config "%s" listed in "unsupported_configs" also '
                  'listed in "%s"' % (config, all_configs[config]))
    else:
      all_configs[config] = 'unsupported_configs'

  for master in self.masters:
    for builder in self.masters[master]:
      config = self.masters[master][builder]
      if config in all_configs and all_configs[config] not in self.masters:
        errs.append('Config "%s" used by a bot is also listed in "%s".' %
                    (config, all_configs[config]))
      else:
        all_configs[config] = master

  # Check that every referenced config actually exists.
  for config, loc in all_configs.items():
    if not config in self.configs:
      errs.append('Unknown config "%s" referenced from "%s".' %
                  (config, loc))

  # Check that every actual config is actually referenced.
  for config in self.configs:
    if not config in all_configs:
      errs.append('Unused config "%s".' % config)

  # Figure out the whole list of mixins, and check that every mixin
  # listed by a config or another mixin actually exists.
  referenced_mixins = set()
  for config, mixins in self.configs.items():
    for mixin in mixins:
      if not mixin in self.mixins:
        errs.append('Unknown mixin "%s" referenced by config "%s".' %
                    (mixin, config))
      referenced_mixins.add(mixin)

  for mixin in self.mixins:
    for sub_mixin in self.mixins[mixin].get('mixins', []):
      if not sub_mixin in self.mixins:
        errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
                    (sub_mixin, mixin))
      referenced_mixins.add(sub_mixin)

  # Check that every mixin defined is actually referenced somewhere.
  for mixin in self.mixins:
    if not mixin in referenced_mixins:
      errs.append('Unreferenced mixin "%s".' % mixin)

  if errs:
    raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
                '\n ' + '\n '.join(errs))

  self.Print('mb config file %s looks ok.' % self.args.config_file)
  return 0
def GetConfig(self):
self.ReadConfigFile()
config = self.ConfigFromArgs()
if not config in self.configs:
raise MBErr('Config "%s" not found in %s' %
(config, self.args.config_file))
return self.FlattenConfig(config)
def ReadConfigFile(self):
if not self.Exists(self.args.config_file):
raise MBErr('config file not found at %s' % self.args.config_file)
try:
contents = ast.literal_eval(self.ReadFile(self.args.config_file))
except SyntaxError as e:
raise MBErr('Failed to parse config file "%s": %s' %
(self.args.config_file, e))
self.common_dev_configs = contents['common_dev_configs']
self.configs = contents['configs']
self.masters = contents['masters']
self.mixins = contents['mixins']
self.private_configs = contents['private_configs']
self.unsupported_configs = contents['unsupported_configs']
def ConfigFromArgs(self):
if self.args.config:
if self.args.master or self.args.builder:
raise MBErr('Can not specific both -c/--config and -m/--master or '
'-b/--builder')
return self.args.config
if not self.args.master or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(-m/--master and -b/--builder)')
if not self.args.master in self.masters:
raise MBErr('Master name "%s" not found in "%s"' %
(self.args.master, self.args.config_file))
if not self.args.builder in self.masters[self.args.master]:
raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
(self.args.builder, self.args.master, self.args.config_file))
return self.masters[self.args.master][self.args.builder]
def FlattenConfig(self, config):
mixins = self.configs[config]
vals = {
'type': None,
'gn_args': [],
'gyp_defines': '',
'gyp_crosscompile': False,
}
visited = []
self.FlattenMixins(mixins, vals, visited)
return vals
def FlattenMixins(self, mixins, vals, visited):
for m in mixins:
if m not in self.mixins:
raise MBErr('Unknown mixin "%s"' % m)
# TODO: check for cycles in mixins.
visited.append(m)
mixin_vals = self.mixins[m]
if 'type' in mixin_vals:
vals['type'] = mixin_vals['type']
if 'gn_args' in mixin_vals:
if vals['gn_args']:
vals['gn_args'] += ' ' + mixin_vals['gn_args']
else:
vals['gn_args'] = mixin_vals['gn_args']
if 'gyp_crosscompile' in mixin_vals:
vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
if 'gyp_defines' in mixin_vals:
if vals['gyp_defines']:
vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
else:
vals['gyp_defines'] = mixin_vals['gyp_defines']
if 'mixins' in mixin_vals:
self.FlattenMixins(mixin_vals['mixins'], vals, visited)
return vals
  def ClobberIfNeeded(self, vals):
    """Wipe the build directory if its build type changed.

    The build type ('gn' vs 'gyp') of the previous run is recorded in an
    'mb_type' file inside the build directory; a mismatch, or a missing
    marker, triggers a clobber before the marker is rewritten.
    """
    path = self.args.path[0]
    build_dir = self.ToAbsPath(path)
    mb_type_path = self.PathJoin(build_dir, 'mb_type')
    needs_clobber = False
    new_mb_type = vals['type']
    if self.Exists(build_dir):
      if self.Exists(mb_type_path):
        old_mb_type = self.ReadFile(mb_type_path)
        if old_mb_type != new_mb_type:
          self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
                     (old_mb_type, new_mb_type, path))
          needs_clobber = True
      else:
        # There is no 'mb_type' file in the build directory, so this probably
        # means that the prior build(s) were not done through mb, and we
        # have no idea if this was a GYP build or a GN build. Clobber it
        # to be safe.
        self.Print("%s/mb_type missing, clobbering to be safe" % path)
        needs_clobber = True
    # A dry run reports what would happen but must not touch the disk.
    if self.args.dryrun:
      return
    if needs_clobber:
      self.RemoveDirectory(build_dir)
    self.MaybeMakeDirectory(build_dir)
    # Always (re)write the marker so the next run sees the current type.
    self.WriteFile(mb_type_path, new_mb_type)
  def RunGNGen(self, vals):
    """Run `gn gen` and, if requested, emit .isolate files for swarming.

    When --swarming-targets-file is given, GN is additionally asked to
    write runtime-deps lists for each test target, which are then turned
    into <target>.isolate and <target>.isolated.gen.json files.

    Returns:
      The gn exit code (non-zero on failure).
    """
    path = self.args.path[0]
    cmd = self.GNCmd('gen', path, vals['gn_args'], extra_args=['--check'])
    swarming_targets = []
    if self.args.swarming_targets_file:
      # We need GN to generate the list of runtime dependencies for
      # the compile targets listed (one per line) in the file so
      # we can run them via swarming. We use ninja_to_gn.pyl to convert
      # the compile targets to the matching GN labels.
      contents = self.ReadFile(self.args.swarming_targets_file)
      swarming_targets = contents.splitlines()
      gn_isolate_map = ast.literal_eval(self.ReadFile(self.PathJoin(
          self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
      gn_labels = []
      for target in swarming_targets:
        if not target in gn_isolate_map:
          raise MBErr('test target "%s" not found in %s' %
                      (target, '//testing/buildbot/gn_isolate_map.pyl'))
        gn_labels.append(gn_isolate_map[target]['label'])
      gn_runtime_deps_path = self.ToAbsPath(path, 'runtime_deps')
      # Since GN hasn't run yet, the build directory may not even exist.
      self.MaybeMakeDirectory(self.ToAbsPath(path))
      self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
      cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
    ret, _, _ = self.Run(cmd)
    if ret:
      # If `gn gen` failed, we should exit early rather than trying to
      # generate isolates. Run() will have already logged any error output.
      self.Print('GN gen failed: %d' % ret)
      return ret
    # Turn each runtime-deps file GN produced into isolate inputs.
    for target in swarming_targets:
      if gn_isolate_map[target]['type'] == 'gpu_browser_test':
        # GPU browser tests all reuse the browser_tests binary.
        runtime_deps_target = 'browser_tests'
      elif gn_isolate_map[target]['type'] == 'script':
        # For script targets, the build target is usually a group,
        # for which gn generates the runtime_deps next to the stamp file
        # for the label, which lives under the obj/ directory.
        label = gn_isolate_map[target]['label']
        runtime_deps_target = 'obj/%s.stamp' % label.replace(':', '/')
      else:
        runtime_deps_target = target
      if self.platform == 'win32':
        deps_path = self.ToAbsPath(path,
                                   runtime_deps_target + '.exe.runtime_deps')
      else:
        deps_path = self.ToAbsPath(path,
                                   runtime_deps_target + '.runtime_deps')
      if not self.Exists(deps_path):
        raise MBErr('did not generate %s' % deps_path)
      command, extra_files = self.GetIsolateCommand(target, vals,
                                                    gn_isolate_map)
      runtime_deps = self.ReadFile(deps_path).splitlines()
      isolate_path = self.ToAbsPath(path, target + '.isolate')
      self.WriteFile(isolate_path,
                     pprint.pformat({
                       'variables': {
                         'command': command,
                         'files': sorted(runtime_deps + extra_files),
                       }
                     }) + '\n')
      # Companion .isolated.gen.json telling the isolate tool what to do.
      self.WriteJSON(
          {
            'args': [
              '--isolated',
              self.ToSrcRelPath('%s%s%s.isolated' % (path, self.sep, target)),
              '--isolate',
              self.ToSrcRelPath('%s%s%s.isolate' % (path, self.sep, target)),
            ],
            'dir': self.chromium_src_dir,
            'version': 1,
          },
          isolate_path + 'd.gen.json',
      )
    return ret
def GNCmd(self, subcommand, path, gn_args='', extra_args=None):
if self.platform == 'linux2':
subdir = 'linux64'
elif self.platform == 'darwin':
subdir = 'mac'
else:
subdir = 'win'
gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, 'gn')
cmd = [gn_path, subcommand, path]
gn_args = gn_args.replace("$(goma_dir)", self.args.goma_dir)
if gn_args:
cmd.append('--args=%s' % gn_args)
if extra_args:
cmd.extend(extra_args)
return cmd
def RunGYPGen(self, vals):
path = self.args.path[0]
output_dir = self.ParseGYPConfigPath(path)
cmd, env = self.GYPCmd(output_dir, vals)
ret, _, _ = self.Run(cmd, env=env)
return ret
  def RunGYPAnalyze(self, vals):
    """Run GYP in analyzer mode to compute what a patch affects.

    Reads the analyze input JSON from --input-path and writes the
    analyzer result to --output-path; both are echoed when --verbose.

    Returns:
      The gyp exit code.
    """
    output_dir = self.ParseGYPConfigPath(self.args.path[0])
    if self.args.verbose:
      inp = self.ReadInputJSON(['files', 'targets'])
      self.Print()
      self.Print('analyze input:')
      self.PrintJSON(inp)
      self.Print()
    cmd, env = self.GYPCmd(output_dir, vals)
    # Switch gyp to the analyzer generator and point it at the in/out files.
    cmd.extend(['-f', 'analyzer',
                '-G', 'config_path=%s' % self.args.input_path[0],
                '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
    ret, _, _ = self.Run(cmd, env=env)
    if not ret and self.args.verbose:
      outp = json.loads(self.ReadFile(self.args.output_path[0]))
      self.Print()
      self.Print('analyze output:')
      self.PrintJSON(outp)
      self.Print()
    return ret
def GetIsolateCommand(self, target, vals, gn_isolate_map):
# This needs to mirror the settings in //build/config/ui.gni:
# use_x11 = is_linux && !use_ozone.
# TODO(dpranke): Figure out how to keep this in sync better.
use_x11 = (self.platform == 'linux2' and
not 'target_os="android"' in vals['gn_args'] and
not 'use_ozone=true' in vals['gn_args'])
asan = 'is_asan=true' in vals['gn_args']
msan = 'is_msan=true' in vals['gn_args']
tsan = 'is_tsan=true' in vals['gn_args']
executable_suffix = '.exe' if self.platform == 'win32' else ''
test_type = gn_isolate_map[target]['type']
cmdline = []
extra_files = []
if use_x11 and test_type == 'windowed_test_launcher':
extra_files = [
'xdisplaycheck',
'../../testing/test_env.py',
'../../testing/xvfb.py',
]
cmdline = [
'../../testing/xvfb.py',
'.',
'./' + str(target),
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
'./' + str(target) + executable_suffix,
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type == 'gpu_browser_test':
extra_files = [
'../../testing/test_env.py'
]
gtest_filter = gn_isolate_map[target]['gtest_filter']
cmdline = [
'../../testing/test_env.py',
'./browser_tests' + executable_suffix,
'--test-launcher-bot-mode',
'--enable-gpu',
'--test-launcher-jobs=1',
'--gtest_filter=%s' % gtest_filter,
]
elif test_type == 'script':
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
'../../' + self.ToSrcRelPath(gn_isolate_map[target]['script'])
] + gn_isolate_map[target].get('args', [])
elif test_type in ('raw'):
extra_files = []
cmdline = [
'./' + str(target) + executable_suffix,
] + gn_isolate_map[target].get('args')
else:
self.WriteFailureAndRaise('No command line for %s found (test type %s).'
% (target, test_type), output_path=None)
return cmdline, extra_files
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
self.ToSrcRelPath(build_path),
*comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
# TODO: Support normal paths in addition to source-absolute paths.
assert(path.startswith('//'))
return path[2:].replace('/', self.sep)
def ParseGYPConfigPath(self, path):
rpath = self.ToSrcRelPath(path)
output_dir, _, _ = rpath.rpartition(self.sep)
return output_dir
def GYPCmd(self, output_dir, vals):
gyp_defines = vals['gyp_defines']
goma_dir = self.args.goma_dir
# GYP uses shlex.split() to split the gyp defines into separate arguments,
# so we can support backslashes and and spaces in arguments by quoting
# them, even on Windows, where this normally wouldn't work.
if '\\' in goma_dir or ' ' in goma_dir:
goma_dir = "'%s'" % goma_dir
gyp_defines = gyp_defines.replace("$(goma_dir)", goma_dir)
cmd = [
self.executable,
self.PathJoin('build', 'gyp_chromium'),
'-G',
'output_dir=' + output_dir,
]
# Ensure that we have an environment that only contains
# the exact values of the GYP variables we need.
env = os.environ.copy()
if 'GYP_CHROMIUM_NO_ACTION' in env:
del env['GYP_CHROMIUM_NO_ACTION']
if 'GYP_CROSSCOMPILE' in env:
del env['GYP_CROSSCOMPILE']
env['GYP_DEFINES'] = gyp_defines
if vals['gyp_crosscompile']:
env['GYP_CROSSCOMPILE'] = '1'
return cmd, env
  def RunGNAnalyze(self, vals):
    """Compute which of the input targets are affected by the patch.

    Runs `gn refs` twice over the modified files: once with --as=output
    to match ninja-level outputs, once over labels to match bare target
    names. Writes the analyze result JSON to --output-path.

    Returns:
      0 on success (including 'no dependency'); gn's exit code otherwise.
    """
    # analyze runs before 'gn gen' now, so we need to run gn gen
    # in order to ensure that we have a build directory.
    ret = self.RunGNGen(vals)
    if ret:
      return ret
    inp = self.ReadInputJSON(['files', 'targets'])
    if self.args.verbose:
      self.Print()
      self.Print('analyze input:')
      self.PrintJSON(inp)
      self.Print()
    output_path = self.args.output_path[0]
    # Bail out early if a GN file was modified, since 'gn refs' won't know
    # what to do about it.
    if any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']):
      self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
      return 0
    # Bail out early if 'all' was asked for, since 'gn refs' won't recognize it.
    if 'all' in inp['targets']:
      self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
      return 0
    # This shouldn't normally happen, but could due to unusual race conditions,
    # like a try job that gets scheduled before a patch lands but runs after
    # the patch has landed.
    if not inp['files']:
      self.Print('Warning: No files modified in patch, bailing out early.')
      self.WriteJSON({'targets': [],
                      'build_targets': [],
                      'status': 'No dependency'}, output_path)
      return 0
    ret = 0
    # gn refs reads the file list from a response file to avoid command
    # line length limits.
    response_file = self.TempFile()
    response_file.write('\n'.join(inp['files']) + '\n')
    response_file.close()
    matching_targets = []
    try:
      # First pass: match against build outputs.
      cmd = self.GNCmd('refs', self.args.path[0]) + [
          '@%s' % response_file.name, '--all', '--as=output']
      ret, out, _ = self.Run(cmd, force_verbose=False)
      if ret and not 'The input matches no targets' in out:
        self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
                                  output_path)
      build_dir = self.ToSrcRelPath(self.args.path[0]) + self.sep
      for output in out.splitlines():
        build_output = output.replace(build_dir, '')
        if build_output in inp['targets']:
          matching_targets.append(build_output)
      # Second pass: match against GN labels.
      cmd = self.GNCmd('refs', self.args.path[0]) + [
          '@%s' % response_file.name, '--all']
      ret, out, _ = self.Run(cmd, force_verbose=False)
      if ret and not 'The input matches no targets' in out:
        self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
                                  output_path)
      for label in out.splitlines():
        build_target = label[2:]
        # We want to accept 'chrome/android:chrome_public_apk' and
        # just 'chrome_public_apk'. This may result in too many targets
        # getting built, but we can adjust that later if need be.
        for input_target in inp['targets']:
          if (input_target == build_target or
              build_target.endswith(':' + input_target)):
            matching_targets.append(input_target)
    finally:
      self.RemoveFile(response_file.name)
    if matching_targets:
      # TODO: it could be that a target X might depend on a target Y
      # and both would be listed in the input, but we would only need
      # to specify target X as a build_target (whereas both X and Y are
      # targets). I'm not sure if that optimization is generally worth it.
      self.WriteJSON({'targets': sorted(set(matching_targets)),
                      'build_targets': sorted(set(matching_targets)),
                      'status': 'Found dependency'}, output_path)
    else:
      self.WriteJSON({'targets': [],
                      'build_targets': [],
                      'status': 'No dependency'}, output_path)
    if self.args.verbose:
      outp = json.loads(self.ReadFile(output_path))
      self.Print()
      self.Print('analyze output:')
      self.PrintJSON(outp)
      self.Print()
    return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path, force_verbose=True)
raise MBErr(msg)
def WriteJSON(self, obj, path, force_verbose=False):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
force_verbose=force_verbose)
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
  def PrintCmd(self, cmd, env):
    """Print `cmd` (plus relevant GYP_* env vars) as a copy-pastable line.

    Uses cmd.exe-style quoting on Windows and shell quoting elsewhere.
    """
    if self.platform == 'win32':
      env_prefix = 'set '
      env_quoter = QuoteForSet
      shell_quoter = QuoteForCmd
    else:
      env_prefix = ''
      # NOTE(review): pipes.quote is deprecated (removed in Python 3.13);
      # shlex.quote is the modern equivalent -- confirm before upgrading.
      env_quoter = pipes.quote
      shell_quoter = pipes.quote
    def print_env(var):
      # Only echo variables that are actually set for this command.
      if env and var in env:
        self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
    print_env('GYP_CROSSCOMPILE')
    print_env('GYP_DEFINES')
    if cmd[0] == self.executable:
      # Show a portable 'python' instead of the absolute interpreter path.
      cmd = ['python'] + cmd[1:]
    self.Print(*[shell_quoter(arg) for arg in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
def Print(self, *args, **kwargs):
# This function largely exists so it can be overridden for testing.
print(*args, **kwargs)
  def Run(self, cmd, env=None, force_verbose=True):
    """Run `cmd`, echoing the command and its output per verbosity flags.

    In --dryrun mode the command is printed but never executed.

    Returns:
      (returncode, stdout, stderr); (0, '', '') for a dry run.
    """
    # This function largely exists so it can be overridden for testing.
    if self.args.dryrun or self.args.verbose or force_verbose:
      self.PrintCmd(cmd, env)
    if self.args.dryrun:
      return 0, '', ''
    ret, out, err = self.Call(cmd, env=env)
    if self.args.verbose or force_verbose:
      if out:
        self.Print(out, end='')
      if err:
        self.Print(err, end='', file=sys.stderr)
    return ret, out, err
def Call(self, cmd, env=None):
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
return p.returncode, out, err
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def PathJoin(self, *comps):
# This function largely exists so it can be overriden for testing.
return os.path.join(*comps)
def ReadFile(self, path):
# This function largely exists so it can be overriden for testing.
with open(path) as fp:
return fp.read()
def RemoveFile(self, path):
# This function largely exists so it can be overriden for testing.
os.remove(path)
  def RemoveDirectory(self, abs_path):
    """Recursively delete `abs_path` using the platform-native mechanism."""
    if self.platform == 'win32':
      # In other places in chromium, we often have to retry this command
      # because we're worried about other processes still holding on to
      # file handles, but when MB is invoked, it will be early enough in the
      # build that there should be no other processes to interfere. We
      # can change this if need be.
      self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
    else:
      shutil.rmtree(abs_path, ignore_errors=True)
def TempFile(self, mode='w'):
# This function largely exists so it can be overriden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents, force_verbose=False):
# This function largely exists so it can be overriden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
  """Fatal MB error; caught at the top level and printed to the user."""
# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
# details of this next section, which handles escaping command lines
# so that they can be copied and pasted into a cmd window.
# Characters that must be caret-escaped inside a cmd.exe `set` statement.
UNSAFE_FOR_SET = set('^<>&|')
# The `set`-unsafe characters plus the ones cmd.exe treats specially
# everywhere else on a command line.
UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
# Everything above plus the double quote.
ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
def QuoteForSet(arg):
  """Caret-escape `arg` so it is safe inside a cmd.exe `set` statement."""
  if any(ch in UNSAFE_FOR_SET for ch in arg):
    return ''.join('^' + ch if ch in UNSAFE_FOR_SET else ch for ch in arg)
  return arg
def QuoteForCmd(arg):
  """Escape `arg` so it survives both CommandLineToArgvW and cmd.exe."""
  # First, escape the arg so that CommandLineToArgvW will parse it properly
  # (from //tools/gyp/pylib/gyp/msvs_emulation.py:23).
  if arg == '' or ' ' in arg or '"' in arg:
    escaped = re.sub(r'(\\*)"', lambda m: 2 * m.group(1) + '\\"', arg)
    arg = '"%s"' % escaped
  # Then check to see if the arg contains any metacharacters other than
  # double quotes; if it does, caret-escape everything (including the
  # double quotes) for safety.
  if any(ch in UNSAFE_FOR_CMD for ch in arg):
    arg = ''.join('^' + ch if ch in ALL_META_CHARS else ch for ch in arg)
  return arg
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except MBErr as e:
    print(e)
    sys.exit(1)
  except KeyboardInterrupt:
    # Bug fix: print() takes `file=`, not `stream=`; the old keyword made
    # this handler itself raise TypeError instead of exiting cleanly.
    print("interrupted, exiting", file=sys.stderr)
    sys.exit(130)
| |
import ast
import hashlib
import uuid
import arrow
from flask import current_app
from flask import request
from flask_login import AnonymousUserMixin
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from werkzeug.security import check_password_hash
from werkzeug.security import generate_password_hash
from .. import login_manager
class Permission(object):
    """
    Bit-flag permission constants.

    Each task occupies one bit position, so a single byte can describe
    eight tasks; roles combine them with bitwise OR.
    """
    VIEW = 0b00000001
    EDIT = 0b00000010
    INSERT = 0b00000100
    DELETE = 0b00001000
    # TASK_TBD = 0b00010000
    # TASK_TBD = 0b00100000
    # TASK_TBD = 0b01000000
    ADMINISTER = 0b10000000  # highest bit; the full-access mask is 0xff
class Base(object):
    """Base model persisted as a Redis list of attribute dicts."""

    # NOTE(review): this class-level key is always shadowed per-instance in
    # __init__; it only matters if an instance skips __init__. Confirm it
    # can be removed.
    key = str(uuid.uuid4())

    def __init__(self, *args, **kwargs):
        # Every instance gets its own UUID primary key.
        self.key = str(uuid.uuid4())
        super(Base, self).__init__(*args, **kwargs)

    def save(self):
        """Push this object's attribute dict onto its table's Redis list.

        Returns the object on success, None otherwise.
        """
        __tablename__ = self.__tablename__
        count = current_app.redis.lpush(
            __tablename__, vars(self))
        if count:
            return self

    @classmethod
    def get(cls, key, default=None):
        """Return the stored instance whose 'key' matches, or `default`."""
        table = current_app.redis.lrange(cls.__tablename__, 0, -1)
        if not table:
            return default
        # Bug fix: removed a leftover debug print(len(table)) that spammed
        # stdout on every lookup.
        for record in table:
            record = ast.literal_eval(record)
            if record.get('key', None) and record['key'] == key:
                obj = cls(**record)
                obj.key = key
                return obj
        return default
class Role(Base):
    """A named permission set that users can be assigned to."""

    __tablename__ = 'roles'

    def __init__(self, name, default=False, *args, **kwargs):
        super(Role, self).__init__()
        self.name = name
        # Whether this role is assigned to new users by default.
        self.default = default
        self.permissions = {}
        self.users = []

    @staticmethod
    def insert_roles():
        """Update or create all Roles."""
        roles = {
            'Sales': (Permission.VIEW |
                      Permission.INSERT, True),
            'Manager': (Permission.VIEW |
                        Permission.EDIT |
                        Permission.INSERT, False),
            'Administrator': (int('11111111', 2), False)
        }
        for r in roles:
            # NOTE(review): Base.get() takes a positional `key`, not a
            # `name` keyword -- this call looks like it raises TypeError.
            # Confirm and add a get-by-name lookup to Base.
            role = Role.get(name=r)
            if role is None:
                role = Role(name=r)
            role.permissions = roles[r][0]
            role.default = roles[r][1]
            role.save()

    @staticmethod
    def add_user(role_name, user):
        """Append `user` to the named role and persist the change."""
        # NOTE(review): same Base.get(name=...) mismatch as insert_roles.
        role = Role.get(name=role_name)
        role.users.append(user)
        # Bug fix: `role.save` (no parentheses) only referenced the bound
        # method and never actually persisted the change.
        role.save()

    def __str__(self):
        return '<Role %r>' % self.name
class User(UserMixin, Base):
    """An account stored in Redis, with role-based permissions."""

    __tablename__ = 'users'

    def __init__(self, email, first_name, last_name,
                 role=None, *args, **kwargs):
        super(User, self).__init__()
        self.email = email.strip()
        self.first_name = first_name.strip()
        self.last_name = last_name.strip()
        self.password_hash = kwargs.get('password_hash', None)
        self.role = role
        self.about_me = kwargs.get('about_me', '').strip()
        self.avatar_hash = kwargs.get('avatar_hash', None)
        self.authenticated = True
        # TODO: assign a default role (admin for DASHBOARD_ADMIN emails)
        # when none is given; the previous commented-out code relied on
        # Role.get(permissions=...)/Role.get(default=...), which Base.get
        # does not support.
        if self.email is not None and self.avatar_hash is None:
            # Cache the Gravatar hash so it isn't recomputed per request.
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()

    @property
    def password(self):
        """Write-only: reading the plaintext password is an error."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    @property
    def is_authenticated(self):
        """Return True if the user is authenticated."""
        return self.authenticated

    @property
    def is_anonymous(self):
        """False, as anonymous users aren't supported.

        Bug fix: this was a @staticmethod taking `self`, so calling
        `user.is_anonymous()` raised TypeError and a bare attribute access
        was always truthy. Flask-Login expects a property here, matching
        is_authenticated above.
        """
        return False

    @property
    def is_active(self):
        """True, as all users are active (see the is_anonymous note)."""
        return True

    def verify_password(self, password):
        """Check `password` against the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """
        Generate a JSON Web Signature token with an expiration.
        """
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.key})

    def generate_reset_token(self, expiration=3600):
        """Generate a signed password-reset token."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.key})

    def reset_password(self, token, new_password):
        """Set a new password if `token` is a valid reset token for us."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except ValueError:
            # NOTE(review): itsdangerous raises BadSignature/BadData, not
            # ValueError -- confirm this clause actually catches failures.
            return False
        if data.get('reset') != self.key:
            return False
        self.password = new_password
        self.save()
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Generate a signed token for changing the email address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.key, 'new_email': new_email})

    def change_email(self, token):
        """Update the email address if `token` is valid and the new
        address is not already in use."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except ValueError:
            # NOTE(review): see reset_password about the exception type.
            return False
        if data.get('change_email') != self.key:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        # Bug fix: Base.get() has no `email` keyword (the old
        # self.get(email=...) raised TypeError); use the class's
        # get_email() lookup instead.
        if self.get_email(new_email) is not None:
            return False
        self.email = new_email
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        self.save()
        return True

    @classmethod
    def get_email(cls, email, default=None):
        """Return the stored user whose email matches, or `default`."""
        table = current_app.redis.lrange(cls.__tablename__, 0, -1)
        if not table:
            return default
        for record in table:
            record = ast.literal_eval(record)
            if record.get('email', None) and record['email'] == email:
                obj = cls(**record)
                obj.key = record.get('key')
                return obj
        return default

    def can(self, permissions):
        """True if this user's role grants every bit in `permissions`."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def get_id(self):
        """Flask-Login user identifier."""
        return self.key

    def ping(self):
        """Record the time of the user's latest request."""
        self.last_seen = str(arrow.now('Africa/Lagos'))
        self.save()

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL matching the request's scheme."""
        # match the security of the client request
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://www.gravatar.com/avatar'
        hash = self.avatar_hash or hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)

    def __str__(self):
        return '<User %r>' % self.key
class AnonymousUser(AnonymousUserMixin):
    """Unauthenticated visitor: no permissions, never an administrator."""

    def can(self, permissions):
        return False

    def is_administrator(self):
        return False
# Register AnonymousUser as the class assigned to 'current_user' when the user
# is not logged in. This will enable the app to call 'current_user.can()'
# without having to first check if the user is logged in.
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: return the User for `user_id`, or None."""
    return User.get(user_id)
| |
# -*- coding: utf-8 -*-
"""The source file generator for tool source files."""
import os
import textwrap
from yaldevtools.source_generators import interface
class ToolSourceFileGenerator(interface.SourceFileGenerator):
"""Tool source file generator."""
def _GenerateGetoptString(self, tool_options):
"""Generates a getopt string.
Args:
tool_options (list[tuple[str, str, st]])): tool options.
Returns:
str: getopt string.
"""
getopt_string_segments = []
for option, argument, _ in tool_options:
getopt_string = option
if argument:
getopt_string = '{0:s}:'.format(getopt_string)
getopt_string_segments.append(getopt_string)
return ''.join(getopt_string_segments)
def _GenerateGetoptSwitch(self, project_configuration, tool_options):
"""Generates a getopt switch.
Args:
project_configuration (ProjectConfiguration): project configuration.
tool_options (list[tuple[str, str, st]])): tool options.
Returns:
str: getopt switch.
"""
largest_argument_length = 0
for _, argument, _ in tool_options:
largest_argument_length = max(largest_argument_length, len(argument))
# TODO: move getopt_switch into templates.
getopt_switch = []
for option, argument, _ in tool_options:
if getopt_switch:
getopt_switch.append('')
if argument:
getopt_switch.extend([
'\t\t\tcase (system_integer_t) \'{0:s}\':'.format(option),
'\t\t\t\toption_{0:s} = optarg;'.format(argument),
'',
'\t\t\t\tbreak;'])
elif option == 'h':
getopt_switch.extend([
'\t\t\tcase (system_integer_t) \'{0:s}\':'.format(option),
'\t\t\t\tusage_fprint(',
'\t\t\t\t stdout );',
'',
'\t\t\t\treturn( EXIT_SUCCESS );'])
elif option == 'v':
getopt_switch.extend([
'\t\t\tcase (system_integer_t) \'{0:s}\':'.format(option),
'\t\t\t\tverbose = 1;',
'',
'\t\t\t\tbreak;'])
elif option == 'V':
getopt_switch.extend([
'\t\t\tcase (system_integer_t) \'{0:s}\':'.format(option),
'\t\t\t\t{0:s}_output_copyright_fprint('.format(
project_configuration.tools_directory),
'\t\t\t\t stdout );',
'',
'\t\t\t\treturn( EXIT_SUCCESS );'])
return '\n'.join(getopt_switch)
  def _GenerateInfoHandleHeaderFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates an info handle header file.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'info_handle')
    # The first section overwrites the file; every later one appends.
    template_filename = os.path.join(template_directory, 'header.h')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename)
    template_filename = os.path.join(template_directory, 'includes.h')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')
    template_mappings['info_tool_source_type'] = (
        project_configuration.info_tool_source_type)
    for template_name in (
        'struct.h', 'initialize.h', 'free.h', 'signal_abort.h'):
      template_filename = os.path.join(template_directory, template_name)
      self._GenerateSection(
          template_filename, template_mappings, output_writer, output_filename,
          access_mode='a')
    # TODO: add condition
    template_filename = os.path.join(template_directory, 'set_ascii_codepage.h')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')
    for template_name in ('open.h', 'close.h'):
      template_filename = os.path.join(template_directory, template_name)
      self._GenerateSection(
          template_filename, template_mappings, output_writer, output_filename,
          access_mode='a')
    # Remove the temporary mapping so later generators are unaffected.
    del template_mappings['info_tool_source_type']
    template_filename = os.path.join(template_directory, 'footer.h')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')
  def _GenerateInfoHandleSourceFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates an info handle source file.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'info_handle')
    # The first section overwrites the file; every later one appends.
    template_filename = os.path.join(template_directory, 'header.c')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename)
    template_filename = os.path.join(template_directory, 'includes.c')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')
    template_mappings['info_tool_source_type'] = (
        project_configuration.info_tool_source_type)
    for template_name in ('initialize.c', 'free.c', 'signal_abort.c'):
      template_filename = os.path.join(template_directory, template_name)
      self._GenerateSection(
          template_filename, template_mappings, output_writer, output_filename,
          access_mode='a')
    # TODO: add condition
    template_filename = os.path.join(template_directory, 'set_ascii_codepage.c')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')
    for template_name in ('open.c', 'close.c'):
      template_filename = os.path.join(template_directory, template_name)
      self._GenerateSection(
          template_filename, template_mappings, output_writer, output_filename,
          access_mode='a')
    # Remove the temporary mapping so later generators are unaffected.
    del template_mappings['info_tool_source_type']
def _GenerateInfoTool(
self, project_configuration, template_mappings, output_writer):
"""Generates an info tool.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
output_writer (OutputWriter): output writer.
"""
info_tool_name = '{0:s}info'.format(
project_configuration.library_name_suffix)
info_tool_filename = '{0:s}.c'.format(info_tool_name)
info_tool_filename = os.path.join(
project_configuration.tools_directory, info_tool_filename)
if os.path.exists(info_tool_filename):
output_filename = os.path.join(
project_configuration.tools_directory, 'info_handle.h')
self._GenerateInfoHandleHeaderFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'info_handle.c')
self._GenerateInfoHandleSourceFile(
project_configuration, template_mappings, output_writer,
output_filename)
self._GenerateInfoToolSourceFile(
project_configuration, template_mappings, info_tool_name,
output_writer, info_tool_filename)
def _GenerateInfoToolSourceFile(
self, project_configuration, template_mappings, info_tool_name,
output_writer, output_filename):
"""Generates an info tool source file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
info_tool_name (str): name of the info tool.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'yalinfo')
info_tool_options = self._GetInfoToolOptions(
project_configuration, info_tool_name)
template_mappings['info_tool_name'] = info_tool_name
template_mappings['info_tool_source_description'] = (
project_configuration.info_tool_source_description)
template_mappings['info_tool_source_type'] = (
project_configuration.info_tool_source_type)
template_filename = os.path.join(template_directory, 'header.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
template_filename = os.path.join(template_directory, 'includes.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
self._GenerateInfoToolSourceUsageFunction(
project_configuration, template_mappings, info_tool_name,
info_tool_options, output_writer, output_filename)
template_filename = os.path.join(template_directory, 'signal_handler.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
self._GenerateInfoToolSourceMainFunction(
project_configuration, template_mappings, info_tool_name,
info_tool_options, output_writer, output_filename)
del template_mappings['info_tool_name']
del template_mappings['info_tool_source_description']
del template_mappings['info_tool_source_type']
self._SortIncludeHeaders(project_configuration, output_filename)
self._SortVariableDeclarations(output_filename)
def _GenerateInfoToolSourceMainFunction(
self, project_configuration, template_mappings, info_tool_name,
info_tool_options, output_writer, output_filename):
"""Generates an info tool source main function.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
info_tool_name (str): name of the info tool.
info_tool_options (list[tuple[str, str, st]])): info tool options.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'yalinfo')
variable_declarations = self._GenerateMainFunctionVariableDeclarations(
info_tool_options)
getopt_string = self._GenerateGetoptString(info_tool_options)
getopt_switch = self._GenerateGetoptSwitch(
project_configuration, info_tool_options)
template_mappings['info_tool_getopt_string'] = getopt_string
template_mappings['info_tool_options_switch'] = getopt_switch
template_mappings['info_tool_options_variable_declarations'] = (
variable_declarations)
template_filename = os.path.join(template_directory, 'main-start.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
del template_mappings['info_tool_getopt_string']
del template_mappings['info_tool_options_switch']
del template_mappings['info_tool_options_variable_declarations']
# TODO: add condition
template_filename = os.path.join(
template_directory, 'main-option_codepage.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
template_filename = os.path.join(template_directory, 'main-end.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
  def _GenerateInfoToolSourceUsageFunction(
      self, project_configuration, template_mappings, info_tool_name,
      info_tool_options, output_writer, output_filename):
    """Generates an info tool source usage function.

    Builds the C fprintf statements for the usage() function: one set of
    lines per option description and a wrapped "Usage: ..." banner, then
    renders the usage.c template with them.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      info_tool_name (str): name of the info tool.
      info_tool_options (list[tuple[str, str, str]]): info tool options.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'yalinfo')

    # Option descriptions are wrapped so the generated C source stays
    # within 80 columns.
    alignment_padding = ' '
    width = 80 - len(alignment_padding)
    text_wrapper = textwrap.TextWrapper(width=width)

    options_details = []
    options_usage = []
    options_without_arguments = []
    for option, argument, description in info_tool_options:
      description_lines = text_wrapper.wrap(description)

      # The first wrapped line carries the "-X:" option prefix; any
      # continuation lines are emitted as bare string literals.
      description_line = description_lines.pop(0)
      details = '\tfprintf( stream, "\\t-{0:s}:{1:s}{2:s}\\n"'.format(
          option, alignment_padding, description_line)

      # TODO: determine indentation size
      for description_line in description_lines:
        options_details.append(details)
        details = '\t "\\t {0:s}{1:s}\\n"'.format(
            alignment_padding, description_line)

      # Close the fprintf call on the last line of this option.
      details = '{0:s} );'.format(details)
      options_details.append(details)

      # Options without an argument are folded into a single "[ -abc ]"
      # usage group; options with an argument each get their own group.
      if not argument:
        options_without_arguments.append(option)
      else:
        usage = '[ -{0:s} {1:s} ]'.format(option, argument)
        options_usage.append(usage)

    usage = '[ -{0:s} ]'.format(''.join(options_without_arguments))
    options_usage.append(usage)

    if project_configuration.info_tool_source_type:
      options_usage.append(project_configuration.info_tool_source_type)

    usage = 'Usage: {0:s} '.format(info_tool_name)
    usage_length = len(usage)

    # Continuation lines of the usage banner are aligned under the text
    # that follows "Usage: <name> ".
    alignment_padding = ' ' * usage_length
    options_usage = ' '.join(options_usage)

    width = 80 - usage_length
    text_wrapper = textwrap.TextWrapper(width=width)

    usage_lines = text_wrapper.wrap(options_usage)

    info_tool_usage = []
    usage_line = usage_lines.pop(0)
    usage = '\tfprintf( stream, "{0:s}{1:s}\\n"'.format(usage, usage_line)

    for usage_line in usage_lines:
      info_tool_usage.append(usage)
      usage = '\t "{0:s}{1:s}\\n"'.format(
          alignment_padding, usage_line)

    # Strip the closing quote of the last line, then terminate the
    # fprintf with an extra trailing newline and the closing ");".
    usage = '{0:s}\\n" );'.format(usage[:-1])
    info_tool_usage.append(usage)

    template_mappings['info_tool_options'] = '\n'.join(options_details)
    template_mappings['info_tool_usage'] = '\n'.join(info_tool_usage)

    template_filename = os.path.join(template_directory, 'usage.c')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')

    del template_mappings['info_tool_options']
    del template_mappings['info_tool_usage']
def _GenerateMainFunctionVariableDeclarations(self, tool_options):
"""Generates the variable declarations of the main function.
Args:
tool_options (list[tuple[str, str, st]])): tool options.
Returns:
str: variable declarations.
"""
largest_argument_length = 0
for _, argument, _ in tool_options:
largest_argument_length = max(largest_argument_length, len(argument))
variable_declarations = []
for _, argument, _ in tool_options:
if argument:
alignment_padding = ' ' * (largest_argument_length - len(argument))
variable_declaration = (
'\tsystem_character_t *option_{0:s}{1:s} = NULL;').format(
argument, alignment_padding)
variable_declarations.append(variable_declaration)
return '\n'.join(sorted(variable_declarations))
def _GenerateMountDokanHeaderFile(
self, project_configuration, template_mappings, output_writer,
output_filename):
"""Generates a mount dokan header file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'mount_dokan')
template_filename = os.path.join(template_directory, 'mount_dokan.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
self._SortIncludeHeaders(project_configuration, output_filename)
def _GenerateMountDokanSourceFile(
self, project_configuration, template_mappings, mount_tool_name,
output_writer, output_filename):
"""Generates a mount dokan source file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
mount_tool_name (str): name of the mount tool.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'mount_dokan')
template_mappings['mount_tool_name'] = mount_tool_name
template_filename = os.path.join(template_directory, 'mount_dokan.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
del template_mappings['mount_tool_name']
self._SortIncludeHeaders(project_configuration, output_filename)
def _GenerateMountFileEntryHeaderFile(
self, project_configuration, template_mappings, output_writer,
output_filename):
"""Generates a mount file entry header file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(
self._template_directory, 'mount_file_entry')
mount_tool_file_entry_type = (
project_configuration.mount_tool_file_entry_type or '')
template_mappings['mount_tool_file_entry_type'] = mount_tool_file_entry_type
template_mappings['mount_tool_file_entry_type_description'] = (
mount_tool_file_entry_type.replace('_', ' '))
template_mappings['mount_tool_file_entry_type_name'] = '{0:s}_{1:s}'.format(
project_configuration.library_name_suffix, mount_tool_file_entry_type)
template_filename = os.path.join(template_directory, 'mount_file_entry.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
del template_mappings['mount_tool_file_entry_type']
del template_mappings['mount_tool_file_entry_type_description']
del template_mappings['mount_tool_file_entry_type_name']
self._CorrectDescriptionSpelling(
project_configuration.mount_tool_file_entry_type, output_filename)
self._SortIncludeHeaders(project_configuration, output_filename)
  def _GenerateMountFileEntrySourceFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates a mount file entry source file.

    The templates selected, and hence the generated functions, depend on
    whether the project configuration defines a mount tool file system
    type and on the configured file entry timestamp types.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(
        self._template_directory, 'mount_file_entry')

    # Templates are rendered in list order, which determines the order of
    # the functions in the generated source file.
    template_names = ['header.c', 'includes.c', 'initialize.c']

    if not project_configuration.mount_tool_file_system_type:
      template_names.extend(['free.c', 'get_parent_file_entry.c'])
    else:
      template_names.extend([
          'free-file_system_type.c',
          'get_parent_file_entry-file_system_type.c'])

    # For each timestamp getter: no configured type falls back to the
    # mounted timestamp; with a type but no file system type the template
    # combines the typed value with the mounted timestamp.
    file_entry_creation_time_type = (
        project_configuration.mount_tool_file_entry_creation_time_type)
    if not file_entry_creation_time_type:
      template_name = 'get_creation_time-mounted_timestamp.c'
    elif not project_configuration.mount_tool_file_system_type:
      template_name = 'get_creation_time-{0:s}_and_mounted_timestamp.c'.format(
          file_entry_creation_time_type)
    else:
      template_name = 'get_creation_time-{0:s}.c'.format(
          file_entry_creation_time_type)

    template_names.append(template_name)

    file_entry_access_time_type = (
        project_configuration.mount_tool_file_entry_access_time_type)
    if not file_entry_access_time_type:
      template_name = 'get_access_time-mounted_timestamp.c'
    elif not project_configuration.mount_tool_file_system_type:
      template_name = 'get_access_time-{0:s}_and_mounted_timestamp.c'.format(
          file_entry_access_time_type)
    else:
      template_name = 'get_access_time-{0:s}.c'.format(
          file_entry_access_time_type)

    template_names.append(template_name)

    file_entry_modification_time_type = (
        project_configuration.mount_tool_file_entry_modification_time_type)
    if not file_entry_modification_time_type:
      template_name = 'get_modification_time-mounted_timestamp.c'
    elif not project_configuration.mount_tool_file_system_type:
      template_name = (
          'get_modification_time-{0:s}_and_mounted_timestamp.c'.format(
              file_entry_modification_time_type))
    else:
      template_name = 'get_modification_time-{0:s}.c'.format(
          file_entry_modification_time_type)

    template_names.append(template_name)

    file_entry_inode_change_time_type = (
        project_configuration.mount_tool_file_entry_inode_change_time_type)
    if not file_entry_inode_change_time_type:
      template_name = 'get_inode_change_time-mounted_timestamp.c'
    elif not project_configuration.mount_tool_file_system_type:
      template_name = (
          'get_inode_change_time-{0:s}_and_mounted_timestamp.c'.format(
              file_entry_inode_change_time_type))
    else:
      template_name = 'get_inode_change_time-{0:s}.c'.format(
          file_entry_inode_change_time_type)

    template_names.append(template_name)

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('get_file_mode.c')
    else:
      template_names.append('get_file_mode-file_system_type.c')

    template_names.append('get_name.c')

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('get_sub_file_entries.c')
    else:
      template_names.append('get_sub_file_entries-file_system_type.c')

    template_names.append('read_buffer_at_offset.c')

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('get_size.c')
    else:
      template_names.append('get_size-file_system_type.c')

    template_filenames = [
        os.path.join(template_directory, template_name)
        for template_name in template_names]

    # NOTE(review): unlike mount_tool_file_entry_type below, the
    # *_time_value settings are used without an "or ''" guard; presumably
    # the project configuration always defines them — confirm.
    file_entry_access_time_value = (
        project_configuration.mount_tool_file_entry_access_time_value)
    template_mappings['mount_tool_file_entry_access_time_value'] = (
        file_entry_access_time_value)
    template_mappings['mount_tool_file_entry_access_time_value_description'] = (
        file_entry_access_time_value.replace('_', ' '))

    file_entry_creation_time_value = (
        project_configuration.mount_tool_file_entry_creation_time_value)
    template_mappings['mount_tool_file_entry_creation_time_value'] = (
        file_entry_creation_time_value)
    template_mappings[
        'mount_tool_file_entry_creation_time_value_description'] = (
            file_entry_creation_time_value.replace('_', ' '))

    file_entry_inode_change_time_value = (
        project_configuration.mount_tool_file_entry_inode_change_time_value)
    template_mappings['mount_tool_file_entry_inode_change_time_value'] = (
        file_entry_inode_change_time_value)
    template_mappings[
        'mount_tool_file_entry_inode_change_time_value_description'] = (
            file_entry_inode_change_time_value.replace('_', ' '))

    file_entry_modification_time_value = (
        project_configuration.mount_tool_file_entry_modification_time_value)
    template_mappings['mount_tool_file_entry_modification_time_value'] = (
        file_entry_modification_time_value)
    template_mappings[
        'mount_tool_file_entry_modification_time_value_description'] = (
            file_entry_modification_time_value.replace('_', ' '))

    mount_tool_file_entry_type = (
        project_configuration.mount_tool_file_entry_type or '')

    template_mappings['mount_tool_file_entry_type'] = mount_tool_file_entry_type
    template_mappings['mount_tool_file_entry_type_description'] = (
        mount_tool_file_entry_type.replace('_', ' '))
    template_mappings['mount_tool_file_entry_type_name'] = '{0:s}_{1:s}'.format(
        project_configuration.library_name_suffix, mount_tool_file_entry_type)

    mount_tool_file_entry_type_size_value = (
        project_configuration.mount_tool_file_entry_type_size_value or '')
    template_mappings['mount_tool_file_entry_type_size_value'] = (
        mount_tool_file_entry_type_size_value)
    template_mappings['mount_tool_file_entry_type_size_value_description'] = (
        mount_tool_file_entry_type_size_value.replace('_', ' '))

    self._GenerateSections(
        template_filenames, template_mappings, output_writer, output_filename)

    # Remove the mappings again so they do not leak into other generators
    # sharing the same template_mappings dictionary.
    del template_mappings['mount_tool_file_entry_access_time_value']
    del template_mappings['mount_tool_file_entry_access_time_value_description']
    del template_mappings['mount_tool_file_entry_creation_time_value']
    del template_mappings[
        'mount_tool_file_entry_creation_time_value_description']
    del template_mappings['mount_tool_file_entry_inode_change_time_value']
    del template_mappings[
        'mount_tool_file_entry_inode_change_time_value_description']
    del template_mappings['mount_tool_file_entry_modification_time_value']
    del template_mappings[
        'mount_tool_file_entry_modification_time_value_description']
    del template_mappings['mount_tool_file_entry_type']
    del template_mappings['mount_tool_file_entry_type_description']
    del template_mappings['mount_tool_file_entry_type_name']
    del template_mappings['mount_tool_file_entry_type_size_value']
    del template_mappings['mount_tool_file_entry_type_size_value_description']

    self._CorrectDescriptionSpelling(
        project_configuration.mount_tool_file_entry_type, output_filename)
    self._SortIncludeHeaders(project_configuration, output_filename)
    self._SortVariableDeclarations(output_filename)
  def _GenerateMountFileSystemHeaderFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates a mount file system header file.

    The declarations generated depend on whether the project
    configuration defines a mount tool file system type; without one the
    mount file system is declared around a file entry type array.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(
        self._template_directory, 'mount_file_system')

    # Templates are rendered in list order, which determines the order of
    # the declarations in the generated header.
    template_names = ['header.h', 'includes-start.h']

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('includes-file_entry_type_array.h')
    else:
      template_names.append('includes-file_system_type.h')

    template_names.extend([
        'includes-end.h', 'struct-start.h', 'struct-mounted_timestamp.h'])

    if not project_configuration.mount_tool_file_system_type:
      template_names.extend([
          'struct-path_prefix.h', 'struct-file_entry_type_array.h'])
    else:
      template_names.append('struct-file_system_type.h')

    template_names.extend([
        'struct-end.h', 'initialize.h', 'free.h', 'signal_abort.h'])

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('set_path_prefix.h')
    else:
      template_names.extend([
          'set_file_system_type.h', 'get_file_system_type.h'])

    template_names.append('get_mounted_timestamp.h')

    if not project_configuration.mount_tool_file_system_type:
      template_names.extend([
          'get_number_of_file_entry_types.h', 'get_file_entry_type_by_index.h'])
    else:
      template_names.append('get_file_entry_type_path_from_path.h')

    template_names.append('get_file_entry_type_by_path.h')

    if not project_configuration.mount_tool_file_system_type:
      template_names.extend([
          'append_file_entry_type.h', 'get_path_from_file_entry_index.h'])
    else:
      template_names.extend([
          'get_filename_from_name.h', 'get_filename_from_file_entry_type.h'])

    template_names.append('footer.h')

    template_filenames = [
        os.path.join(template_directory, template_name)
        for template_name in template_names]

    mount_tool_file_entry_type = (
        project_configuration.mount_tool_file_entry_type or '')

    template_mappings['mount_tool_file_entry_type'] = mount_tool_file_entry_type
    template_mappings['mount_tool_file_entry_type_description'] = (
        mount_tool_file_entry_type.replace('_', ' '))
    template_mappings['mount_tool_file_entry_type_name'] = '{0:s}_{1:s}'.format(
        project_configuration.library_name_suffix, mount_tool_file_entry_type)

    # Fall back to the file entry type when no file system type is
    # configured.
    file_system_type = project_configuration.mount_tool_file_system_type
    if not file_system_type:
      file_system_type = mount_tool_file_entry_type

    template_mappings['mount_tool_file_system_type'] = file_system_type
    template_mappings['mount_tool_file_system_type_description'] = (
        file_system_type.replace('_', ' '))
    template_mappings['mount_tool_file_system_type_name'] = (
        '{0:s}_{1:s}'.format(
            project_configuration.library_name_suffix, file_system_type))

    self._GenerateSections(
        template_filenames, template_mappings, output_writer, output_filename)

    del template_mappings['mount_tool_file_entry_type']
    del template_mappings['mount_tool_file_entry_type_description']
    del template_mappings['mount_tool_file_entry_type_name']
    del template_mappings['mount_tool_file_system_type']
    del template_mappings['mount_tool_file_system_type_description']
    del template_mappings['mount_tool_file_system_type_name']

    self._CorrectDescriptionSpelling(
        project_configuration.mount_tool_file_entry_type, output_filename)
    self._SortIncludeHeaders(project_configuration, output_filename)
  def _GenerateMountFileSystemSourceFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates a mount file system source file.

    The functions generated depend on whether the project configuration
    defines a mount tool file system type; without one the mount file
    system is implemented around a file entry type array.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(
        self._template_directory, 'mount_file_system')

    # Templates are rendered in list order, which determines the order of
    # the functions in the generated source file.
    template_names = ['header.c', 'includes-start.c']

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('includes-file_entry_type_array.c')
    else:
      template_names.append('includes-file_system_type.c')

    template_names.extend(['includes-end.c', 'initialize-start.c'])

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('initialize-file_entry_type_array.c')

    template_names.extend(['initialize-end.c', 'free-start.c'])

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('free-path_prefix.c')
      template_names.append('free-file_entry_type_array.c')

    template_names.append('free-end.c')

    # TODO: add support for signal abort base type for libvslvm.
    if not project_configuration.mount_tool_file_system_type:
      template_names.extend(['signal_abort.c', 'set_path_prefix.c'])
    else:
      template_names.extend([
          'signal_abort-file_system_type.c', 'set_file_system_type.c',
          'get_file_system_type.c'])

    template_names.append('get_mounted_timestamp.c')

    if not project_configuration.mount_tool_file_system_type:
      template_names.extend([
          'get_number_of_file_entry_types.c', 'get_file_entry_type_by_index.c',
          'get_file_entry_type_by_path.c', 'append_file_entry_type.c',
          'get_path_from_file_entry_index.c'])
    else:
      template_names.extend([
          'get_file_entry_type_path_from_path.c',
          'get_file_entry_type_by_path-file_system_type.c',
          'get_filename_from_name.c', 'get_filename_from_file_entry_type.c'])

    template_filenames = [
        os.path.join(template_directory, template_name)
        for template_name in template_names]

    mount_tool_file_entry_type = (
        project_configuration.mount_tool_file_entry_type or '')

    template_mappings['mount_tool_file_entry_type'] = mount_tool_file_entry_type
    template_mappings['mount_tool_file_entry_type_description'] = (
        mount_tool_file_entry_type.replace('_', ' '))
    template_mappings['mount_tool_file_entry_type_name'] = '{0:s}_{1:s}'.format(
        project_configuration.library_name_suffix, mount_tool_file_entry_type)

    # Fall back to the file entry type when no file system type is
    # configured.
    file_system_type = project_configuration.mount_tool_file_system_type
    if not file_system_type:
      file_system_type = mount_tool_file_entry_type

    template_mappings['mount_tool_file_system_type'] = file_system_type
    template_mappings['mount_tool_file_system_type_description'] = (
        file_system_type.replace('_', ' '))
    template_mappings['mount_tool_file_system_type_name'] = (
        '{0:s}_{1:s}'.format(
            project_configuration.library_name_suffix, file_system_type))

    self._GenerateSections(
        template_filenames, template_mappings, output_writer, output_filename)

    del template_mappings['mount_tool_file_entry_type']
    del template_mappings['mount_tool_file_entry_type_description']
    del template_mappings['mount_tool_file_entry_type_name']
    del template_mappings['mount_tool_file_system_type']
    del template_mappings['mount_tool_file_system_type_description']
    del template_mappings['mount_tool_file_system_type_name']

    self._CorrectDescriptionSpelling(
        project_configuration.mount_tool_file_entry_type, output_filename)
    self._SortIncludeHeaders(project_configuration, output_filename)
    self._SortVariableDeclarations(output_filename)
    self._VerticalAlignFunctionArguments(output_filename)
def _GenerateMountFuseHeaderFile(
self, project_configuration, template_mappings, output_writer,
output_filename):
"""Generates a mount fuse header file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'mount_fuse')
template_filename = os.path.join(template_directory, 'mount_fuse.h')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
self._SortIncludeHeaders(project_configuration, output_filename)
def _GenerateMountFuseSourceFile(
self, project_configuration, template_mappings, mount_tool_name,
output_writer, output_filename):
"""Generates a mount fuse source file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
mount_tool_name (str): name of the mount tool.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'mount_fuse')
template_mappings['mount_tool_name'] = mount_tool_name
template_filename = os.path.join(template_directory, 'mount_fuse.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
del template_mappings['mount_tool_name']
self._SortIncludeHeaders(project_configuration, output_filename)
  def _GenerateMountHandleHeaderFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates a mount handle header file.

    The struct members and setter declarations generated are driven by
    the mount tools feature flags of the project configuration, with a
    few hard-coded per-library exceptions.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'mount_handle')

    # Templates are rendered in list order, which determines the order of
    # the declarations in the generated header.
    template_names = ['header.h', 'includes-start.h']

    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('includes-file_io_handle.h')

    template_names.append('includes-end.h')

    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('definitions-format.h')

    # Struct members, one template per feature-dependent member.
    template_names.append('struct-start.h')

    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('struct-basename.h')

    template_names.append('struct-file_system.h')

    if project_configuration.HasMountToolsFeatureCodepage():
      template_names.append('struct-codepage.h')

    if project_configuration.HasMountToolsFeatureEncryptedRootPlist():
      template_names.append('struct-encrypted_root_plist.h')

    # TODO: set option via configuration
    if project_configuration.library_name == 'libfsapfs':
      template_names.append('struct-file_system_index.h')

    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('struct-format.h')

    if project_configuration.HasMountToolsFeatureKeys():
      # libbde uses a dedicated keys layout.
      if project_configuration.library_name == 'libbde':
        template_names.append('struct-keys-libbde.h')
      else:
        template_names.append('struct-keys.h')

    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('struct-offset.h')

    if project_configuration.HasMountToolsFeaturePassword():
      template_names.append('struct-password.h')

    if project_configuration.HasMountToolsFeatureRecoveryPassword():
      template_names.append('struct-recovery_password.h')

    if project_configuration.HasMountToolsFeatureStartupKey():
      template_names.append('struct-startup_key.h')

    if project_configuration.HasMountToolsFeatureUnlock():
      template_names.append('struct-is_locked.h')

    template_names.append('struct-end.h')

    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('system_string_copy_from_64_bit_in_decimal.h')

    # Function declarations, mirroring the feature-dependent members.
    template_names.extend(['initialize.h', 'free.h', 'signal_abort.h'])

    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('set_basename.h')

    if project_configuration.HasMountToolsFeatureCodepage():
      template_names.append('set_codepage.h')

    if project_configuration.HasMountToolsFeatureEncryptedRootPlist():
      template_names.append('set_encrypted_root_plist.h')

    # TODO: set option via configuration
    if project_configuration.library_name == 'libfsapfs':
      template_names.append('set_file_system_index.h')

    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('set_format.h')

    if project_configuration.HasMountToolsFeatureKeys():
      template_names.append('set_keys.h')

    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('set_offset.h')

    if project_configuration.HasMountToolsFeaturePassword():
      template_names.append('set_password.h')

    if project_configuration.HasMountToolsFeatureRecoveryPassword():
      template_names.append('set_recovery_password.h')

    if project_configuration.HasMountToolsFeatureStartupKey():
      template_names.append('set_startup_key.h')

    if not project_configuration.mount_tool_file_system_type:
      template_names.append('set_path_prefix.h')

    if project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('open-multi_source.h')
    else:
      template_names.append('open.h')

    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('open_parent.h')

    template_names.append('close.h')

    if project_configuration.HasMountToolsFeatureUnlock():
      template_names.append('is_locked.h')

    template_names.extend(['get_file_entry_by_path.h', 'footer.h'])

    template_filenames = [
        os.path.join(template_directory, template_name)
        for template_name in template_names]

    mount_tool_file_entry_type = (
        project_configuration.mount_tool_file_entry_type or '')

    template_mappings['mount_tool_file_entry_type'] = mount_tool_file_entry_type
    template_mappings['mount_tool_file_entry_type_name'] = '{0:s}_{1:s}'.format(
        project_configuration.library_name_suffix, mount_tool_file_entry_type)
    template_mappings['mount_tool_source_type'] = (
        project_configuration.mount_tool_source_type)

    self._GenerateSections(
        template_filenames, template_mappings, output_writer, output_filename)

    del template_mappings['mount_tool_file_entry_type']
    del template_mappings['mount_tool_file_entry_type_name']
    del template_mappings['mount_tool_source_type']

    self._SortIncludeHeaders(project_configuration, output_filename)
  def _GenerateMountHandleSourceFile(
      self, project_configuration, template_mappings, output_writer,
      output_filename):
    """Generates a mount handle source file.

    Selects the mount_handle templates that apply to the project
    configuration, fills in the type related template mappings, writes the
    output file and post-processes it (spell correction, include and
    variable declaration sorting, argument alignment).

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'mount_handle')

    # Include section.
    template_names = ['header.c', 'includes-start.c']
    if project_configuration.HasMountToolsFeatureCodepage():
      template_names.append('includes-codepage.c')
    if project_configuration.HasMountToolsFeatureKeys():
      if project_configuration.library_name == 'libbde':
        template_names.append('includes-keys-libbde.c')
      else:
        template_names.append('includes-keys.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('includes-file_io_handle.c')
    template_names.append('includes-end.c')

    # Helper functions that are only needed when an offset is supported.
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('file_io_handle.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('system_string_copy_from_64_bit_in_decimal.c')

    # Initialize, free and signal abort functions.
    template_names.append('initialize-start.c')
    if project_configuration.HasMountToolsFeatureCodepage():
      template_names.append('initialize-codepage.c')
    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('initialize-format.c')
    template_names.extend(['initialize-end.c', 'free-start.c'])
    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('free-basename.c')
    template_names.append('free-file_system.c')
    if project_configuration.HasMountToolsFeatureKeys():
      template_names.append('free-keys.c')
    template_names.extend(['free-end.c', 'signal_abort.c'])

    # Option setter functions.
    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('set_basename.c')
    if project_configuration.HasMountToolsFeatureCodepage():
      template_names.append('set_codepage.c')
    if project_configuration.HasMountToolsFeatureEncryptedRootPlist():
      template_names.append('set_encrypted_root_plist.c')
    # TODO: set option via configuration
    if project_configuration.library_name == 'libfsapfs':
      template_names.append('set_file_system_index.c')
    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('set_format.c')
    if project_configuration.HasMountToolsFeatureKeys():
      if project_configuration.library_name == 'libbde':
        template_names.append('set_keys-libbde.c')
      else:
        template_names.append('set_keys.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('set_offset.c')
    if not project_configuration.mount_tool_base_type:
      if project_configuration.HasMountToolsFeaturePassword():
        template_names.append('set_password.c')
      if project_configuration.HasMountToolsFeatureRecoveryPassword():
        template_names.append('set_recovery_password.c')
      if project_configuration.HasMountToolsFeatureStartupKey():
        template_names.append('set_startup_key.c')
    if not project_configuration.mount_tool_file_system_type:
      template_names.append('set_path_prefix.c')

    # Open function.
    if project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('open-start-multi_source.c')
    else:
      template_names.append('open-start.c')
    template_names.append('open-variables-start.c')
    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('open-variables-basename.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('open-variables-glob.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('open-variables-file_io_handle.c')
    if project_configuration.mount_tool_base_type:
      if project_configuration.library_name == 'libfsapfs':
        template_names.append('open-variables-file_system_index.c')
    template_names.append('open-variables-end.c')
    if project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('open-check_arguments-multi_source.c')
    else:
      template_names.append('open-check_arguments.c')
    if project_configuration.HasMountToolsFeatureParent():
      if project_configuration.HasMountToolsFeatureMultiSource():
        template_names.append('open-basename-multi_source.c')
      else:
        template_names.append('open-basename.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('open-glob.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('open-offset.c')
    template_names.append('open-initialize.c')
    if project_configuration.HasMountToolsFeatureEncryptedRootPlist():
      template_names.append('open-encrypted_root_plist.c')
    if project_configuration.HasMountToolsFeatureKeys():
      if project_configuration.library_name == 'libbde':
        template_names.append('open-keys-libbde.c')
      else:
        template_names.append('open-keys.c')
    if project_configuration.HasMountToolsFeaturePassword():
      template_names.append('open-password.c')
    if project_configuration.HasMountToolsFeatureRecoveryPassword():
      template_names.append('open-recovery_password.c')
    if project_configuration.HasMountToolsFeatureStartupKey():
      template_names.append('open-startup_key.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('open-open-file_io_handle.c')
    elif project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('open-open-multi_source.c')
    else:
      template_names.append('open-open.c')
    if project_configuration.library_name == 'libfsapfs':
      template_names.append('open-file_system_index.c')
    if project_configuration.HasMountToolsFeatureUnlock():
      template_names.append('open-is_locked.c')
    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('open-format.c')
    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('open-open_parent.c')
    if not project_configuration.mount_tool_file_system_type:
      template_names.append('open-append_file_system_type.c')
    else:
      template_names.append('open-set_file_system_type.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('open-free-glob.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('open-set-file_io_handle.c')
    template_names.append('open-on_error.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('open-on_error-glob.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('open-on_error-file_io_handle.c')
    template_names.append('open-end.c')
    if project_configuration.HasMountToolsFeatureParent():
      template_names.append('open_parent.c')

    # Close function.
    template_names.extend(['close-start.c', 'close-variables-start.c'])
    if not project_configuration.mount_tool_file_system_type:
      template_names.append('close-variables-no_file_system_type.c')
    template_names.extend(['close-variables-end.c', 'close-check_arguments.c'])
    if not project_configuration.mount_tool_file_system_type:
      template_names.append('close-close.c')
    else:
      template_names.append('close-close-file_system_type.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('close-file_io_handle.c')
    if not project_configuration.mount_tool_file_system_type:
      template_names.append('close-end.c')
    else:
      template_names.append('close-end-file_system_type.c')

    if project_configuration.HasMountToolsFeatureUnlock():
      template_names.append('is_locked.c')

    # File entry retrieval function.
    template_names.extend([
        'get_file_entry_by_path-start.c',
        'get_file_entry_by_path-variables.c',
        'get_file_entry_by_path-body.c',
        'get_file_entry_by_path-file_entry_initialize.c'])
    if not project_configuration.mount_tool_file_system_type:
      template_names.append('get_file_entry_by_path-end.c')
    else:
      template_names.append('get_file_entry_by_path-end-file_system_type.c')

    template_filenames = [
        os.path.join(template_directory, template_name)
        for template_name in template_names]

    mount_tool_file_entry_type = (
        project_configuration.mount_tool_file_entry_type or '')

    # The base type is the library type the mount handle wraps; fall back to
    # the file system type and then the file entry type when it is not set.
    base_type = project_configuration.mount_tool_base_type
    if not base_type:
      base_type = project_configuration.mount_tool_file_system_type
    if not base_type:
      base_type = mount_tool_file_entry_type
    template_mappings['mount_tool_base_type'] = base_type
    template_mappings['mount_tool_base_type_description'] = (
        base_type.replace('_', ' '))
    template_mappings['mount_tool_base_type_name'] = '{0:s}_{1:s}'.format(
        project_configuration.library_name_suffix, base_type)
    template_mappings['mount_tool_file_entry_type'] = mount_tool_file_entry_type
    template_mappings['mount_tool_file_entry_type_description'] = (
        mount_tool_file_entry_type.replace('_', ' '))
    template_mappings['mount_tool_file_entry_type_name'] = '{0:s}_{1:s}'.format(
        project_configuration.library_name_suffix, mount_tool_file_entry_type)

    # Fall back to the file entry type when no file system type is set.
    file_system_type = project_configuration.mount_tool_file_system_type
    if not file_system_type:
      file_system_type = mount_tool_file_entry_type
    template_mappings['mount_tool_file_system_type'] = file_system_type
    template_mappings['mount_tool_file_system_type_description'] = (
        file_system_type.replace('_', ' '))
    template_mappings['mount_tool_file_system_type_name'] = (
        '{0:s}_{1:s}'.format(
            project_configuration.library_name_suffix, file_system_type))
    template_mappings['mount_tool_source_type'] = (
        project_configuration.mount_tool_source_type)
    template_mappings['mount_tool_source_type_description'] = (
        project_configuration.mount_tool_source_type.replace('_', ' '))

    self._GenerateSections(
        template_filenames, template_mappings, output_writer, output_filename)

    # Remove the mappings set by this method so template_mappings can be
    # reused by other generator methods.
    del template_mappings['mount_tool_base_type']
    del template_mappings['mount_tool_base_type_description']
    del template_mappings['mount_tool_base_type_name']
    del template_mappings['mount_tool_file_entry_type']
    del template_mappings['mount_tool_file_entry_type_description']
    del template_mappings['mount_tool_file_entry_type_name']
    del template_mappings['mount_tool_file_system_type']
    del template_mappings['mount_tool_file_system_type_description']
    del template_mappings['mount_tool_file_system_type_name']
    del template_mappings['mount_tool_source_type']
    del template_mappings['mount_tool_source_type_description']

    if base_type:
      self._CorrectDescriptionSpelling(base_type, output_filename)
    self._CorrectDescriptionSpelling(
        project_configuration.mount_tool_file_entry_type, output_filename)
    if file_system_type:
      self._CorrectDescriptionSpelling(file_system_type, output_filename)
    self._SortIncludeHeaders(project_configuration, output_filename)
    self._SortVariableDeclarations(output_filename)
    self._VerticalAlignFunctionArguments(output_filename)
def _GenerateMountTool(
self, project_configuration, template_mappings, output_writer):
"""Generates a mount tool.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
output_writer (OutputWriter): output writer.
"""
mount_tool_name = '{0:s}mount'.format(
project_configuration.library_name_suffix)
mount_tool_filename = '{0:s}.c'.format(mount_tool_name)
mount_tool_filename = os.path.join(
project_configuration.tools_directory, mount_tool_filename)
if os.path.exists(mount_tool_filename):
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_file_entry.h')
self._GenerateMountFileEntryHeaderFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_file_entry.c')
self._GenerateMountFileEntrySourceFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_file_system.h')
self._GenerateMountFileSystemHeaderFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_file_system.c')
self._GenerateMountFileSystemSourceFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_handle.h')
self._GenerateMountHandleHeaderFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_handle.c')
self._GenerateMountHandleSourceFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_dokan.h')
self._GenerateMountDokanHeaderFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_dokan.c')
self._GenerateMountDokanSourceFile(
project_configuration, template_mappings, mount_tool_name,
output_writer, output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_fuse.h')
self._GenerateMountFuseHeaderFile(
project_configuration, template_mappings, output_writer,
output_filename)
output_filename = os.path.join(
project_configuration.tools_directory, 'mount_fuse.c')
self._GenerateMountFuseSourceFile(
project_configuration, template_mappings, mount_tool_name,
output_writer, output_filename)
self._GenerateMountToolSourceFile(
project_configuration, template_mappings, mount_tool_name,
output_writer, mount_tool_filename)
def _GenerateMountToolSourceFile(
self, project_configuration, template_mappings, mount_tool_name,
output_writer, output_filename):
"""Generates a mount tool source file.
Args:
project_configuration (ProjectConfiguration): project configuration.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
mount_tool_name (str): name of the mount tool.
output_writer (OutputWriter): output writer.
output_filename (str): path of the output file.
"""
template_directory = os.path.join(self._template_directory, 'yalmount')
template_names = ['header.c', 'includes-start.c']
# TODO: set option via configuation
if project_configuration.library_name == 'libewf':
template_names.append('includes-rlimit.c')
template_names.append('includes-yaltools.c')
if project_configuration.HasMountToolsFeatureGlob():
template_names.append('includes-glob.c')
template_names.append('includes-end.c')
template_filenames = [
os.path.join(template_directory, template_name)
for template_name in template_names]
mount_tool_options = self._GetMountToolOptions(
project_configuration, mount_tool_name)
template_mappings['mount_tool_name'] = mount_tool_name
template_mappings['mount_tool_path_prefix'] = (
project_configuration.mount_tool_path_prefix)
template_mappings['mount_tool_path_prefix_upper_case'] = (
project_configuration.mount_tool_path_prefix.upper())
template_mappings['mount_tool_source_description'] = (
project_configuration.mount_tool_source_description)
template_mappings['mount_tool_source_description_long'] = (
project_configuration.mount_tool_source_description_long)
template_mappings['mount_tool_source_type'] = (
project_configuration.mount_tool_source_type)
self._GenerateSections(
template_filenames, template_mappings, output_writer, output_filename)
self._GenerateMountToolSourceUsageFunction(
project_configuration, template_mappings, mount_tool_name,
mount_tool_options, output_writer, output_filename)
template_filename = os.path.join(template_directory, 'signal_handler.c')
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename,
access_mode='a')
self._GenerateMountToolSourceMainFunction(
project_configuration, template_mappings, mount_tool_name,
mount_tool_options, output_writer, output_filename)
del template_mappings['mount_tool_name']
del template_mappings['mount_tool_path_prefix']
del template_mappings['mount_tool_path_prefix_upper_case']
del template_mappings['mount_tool_source_description']
del template_mappings['mount_tool_source_description_long']
del template_mappings['mount_tool_source_type']
self._SortIncludeHeaders(project_configuration, output_filename)
self._SortVariableDeclarations(output_filename)
  def _GenerateMountToolSourceMainFunction(
      self, project_configuration, template_mappings, mount_tool_name,
      mount_tool_options, output_writer, output_filename):
    """Generates a mount tool source main function.

    Appends the main function to the output file started by the calling
    method.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      mount_tool_name (str): name of the mount tool.
      mount_tool_options (list[tuple[str, str, str]]): mount tool options.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'yalmount')

    file_system_type = project_configuration.mount_tool_file_system_type

    # Generate the C code fragments that are spliced into the main function
    # templates via the template mappings below.
    variable_declarations = self._GenerateMainFunctionVariableDeclarations(
        mount_tool_options)
    getopt_string = self._GenerateGetoptString(mount_tool_options)
    getopt_switch = self._GenerateGetoptSwitch(
        project_configuration, mount_tool_options)

    # Select the main function templates.
    template_names = ['main-start.c']
    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('main-variables-rlimit.c')
    if project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('main-variables-multi_source.c')
    else:
      template_names.append('main-variables.c')
    if not file_system_type:
      template_names.append('main-variables-path_prefix.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('main-variables-glob.c')
    template_names.append('main-locale.c')
    if project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('main-getopt-multi_source.c')
    else:
      template_names.append('main-getopt.c')
    template_names.append('main-verbose.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('main-initialize-glob.c')
    template_names.append('main-initialize.c')
    if project_configuration.HasMountToolsFeatureCodepage():
      template_names.append('main-option_codepage.c')
    if project_configuration.HasMountToolsFeatureEncryptedRootPlist():
      template_names.append('main-option_encrypted_root_plist.c')
    if project_configuration.library_name == 'libfsapfs':
      template_names.append('main-option_file_system_index.c')
    if project_configuration.HasMountToolsFeatureKeys():
      template_names.append('main-option_keys.c')
    if project_configuration.HasMountToolsFeatureOffset():
      template_names.append('main-option_offset.c')
    if project_configuration.HasMountToolsFeaturePassword():
      template_names.append('main-option_password.c')
    if project_configuration.HasMountToolsFeatureRecoveryPassword():
      template_names.append('main-option_recovery_password.c')
    if project_configuration.HasMountToolsFeatureStartupKey():
      template_names.append('main-option_startup_key.c')
    # TODO: set option via configuration
    if project_configuration.library_name == 'libewf':
      template_names.append('main-set_maximum_number_of_open_handles.c')
    if not file_system_type:
      template_names.append('main-set_path_prefix.c')
    if project_configuration.HasMountToolsFeatureMultiSource():
      template_names.append('main-open-multi_source.c')
    else:
      template_names.append('main-open.c')
    if project_configuration.HasMountToolsFeatureUnlock():
      template_names.append('main-is_locked.c')
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('main-glob_free.c')
    template_names.extend([
        'main-fuse.c', 'main-dokan.c', 'main-on_error.c'])
    if project_configuration.HasMountToolsFeatureGlob():
      template_names.append('main-on_error-glob.c')
    template_names.append('main-end.c')

    template_filenames = [
        os.path.join(template_directory, template_name)
        for template_name in template_names]

    template_mappings['mount_tool_getopt_string'] = getopt_string
    template_mappings['mount_tool_options_switch'] = getopt_switch
    template_mappings['mount_tool_options_variable_declarations'] = (
        variable_declarations)

    # Append to the output file started by the calling method.
    self._GenerateSections(
        template_filenames, template_mappings, output_writer, output_filename,
        access_mode='a')

    # Remove the mappings set by this method so template_mappings can be
    # reused by other generator methods.
    del template_mappings['mount_tool_getopt_string']
    del template_mappings['mount_tool_options_switch']
    del template_mappings['mount_tool_options_variable_declarations']

    self._SortIncludeHeaders(project_configuration, output_filename)
    self._SortVariableDeclarations(output_filename)
    self._VerticalAlignFunctionArguments(output_filename)
  def _GenerateMountToolSourceUsageFunction(
      self, project_configuration, template_mappings, mount_tool_name,
      mount_tool_options, output_writer, output_filename):
    """Generates a mount tool source usage function.

    Appends the usage function to the output file started by the calling
    method. The option descriptions and the usage line are wrapped at 80
    columns and emitted as multi-line C fprintf statements.

    Args:
      project_configuration (ProjectConfiguration): project configuration.
      template_mappings (dict[str, str]): template mappings, where the key
          maps to the name of a template variable.
      mount_tool_name (str): name of the mount tool.
      mount_tool_options (list[tuple[str, str, str]]): mount tool options.
      output_writer (OutputWriter): output writer.
      output_filename (str): path of the output file.
    """
    template_directory = os.path.join(self._template_directory, 'yalmount')

    alignment_padding = ' '
    width = 80 - len(alignment_padding)
    text_wrapper = textwrap.TextWrapper(width=width)

    options_details = []
    options_usage = []
    options_without_arguments = []
    for option, argument, description in mount_tool_options:
      description_lines = text_wrapper.wrap(description)

      # The first wrapped line carries the fprintf call and the option
      # letter; every following line becomes a C string continuation line.
      description_line = description_lines.pop(0)
      details = '\tfprintf( stream, "\\t-{0:s}:{1:s}{2:s}\\n"'.format(
          option, alignment_padding, description_line)
      for description_line in description_lines:
        options_details.append(details)
        details = '\t "\\t {0:s}{1:s}\\n"'.format(
            alignment_padding, description_line)
      # Close the fprintf statement on the last line.
      details = '{0:s} );'.format(details)
      options_details.append(details)

      if not argument:
        options_without_arguments.append(option)
      else:
        usage = '[ -{0:s} {1:s} ]'.format(option, argument)
        options_usage.append(usage)

    # Options without an argument are folded into a single "[ -hvV ]" style
    # entry at the end of the usage line.
    usage = '[ -{0:s} ]'.format(''.join(options_without_arguments))
    options_usage.append(usage)

    options_usage.extend([
        project_configuration.mount_tool_source_type, 'mount_point'])

    # Padding used by the usage.c template to align the source type with the
    # longer "mount_point" label; empty when the source type is longer.
    mount_tool_source_alignment = ' ' *(
        len('mount_point') - len(project_configuration.mount_tool_source_type))

    usage = 'Usage: {0:s} '.format(mount_tool_name)
    usage_length = len(usage)
    alignment_padding = ' ' * usage_length
    options_usage = ' '.join(options_usage)

    # Wrap the combined usage line to the width remaining after the
    # "Usage: <name> " prefix.
    width = 80 - usage_length
    text_wrapper = textwrap.TextWrapper(width=width)
    usage_lines = text_wrapper.wrap(options_usage)

    mount_tool_usage = []
    usage_line = usage_lines.pop(0)
    usage = '\tfprintf( stream, "{0:s}{1:s}\\n"'.format(usage, usage_line)
    for usage_line in usage_lines:
      mount_tool_usage.append(usage)
      usage = '\t "{0:s}{1:s}\\n"'.format(
          alignment_padding, usage_line)
    # Strip the closing quote of the last line and terminate the fprintf
    # statement with a trailing blank output line.
    usage = '{0:s}\\n" );'.format(usage[:-1])
    mount_tool_usage.append(usage)

    template_mappings['mount_tool_options'] = '\n'.join(options_details)
    template_mappings['mount_tool_source_alignment'] = (
        mount_tool_source_alignment)
    template_mappings['mount_tool_usage'] = '\n'.join(mount_tool_usage)

    template_filename = os.path.join(template_directory, 'usage.c')
    self._GenerateSection(
        template_filename, template_mappings, output_writer, output_filename,
        access_mode='a')

    # Remove the mappings set by this method so template_mappings can be
    # reused by other generator methods.
    del template_mappings['mount_tool_options']
    del template_mappings['mount_tool_source_alignment']
    del template_mappings['mount_tool_usage']
def _GetInfoToolOptions(self, project_configuration, info_tool_name):
"""Retrieves the info tool options.
Args:
project_configuration (ProjectConfiguration): project configuration.
info_tool_name (str): name of the info tool.
Returns:
list[tuple[str, str, str]]: info tool options.
"""
# TODO: sort options with lower case before upper case.
info_tool_options = []
# TODO: add condition
info_tool_options.append(
('c', 'codepage', (
'codepage of ASCII strings, options: ascii, windows-874, '
'windows-932, windows-936, windows-949, windows-950, '
'windows-1250, windows-1251, windows-1252 (default), '
'windows-1253, windows-1254, windows-1255, windows-1256, '
'windows-1257 or windows-1258 ')))
info_tool_options.extend([
('h', '', 'shows this help'),
('v', '', 'verbose output to stderr'),
('V', '', 'print version')])
return info_tool_options
def _GetMountToolOptions(self, project_configuration, mount_tool_name):
"""Retrieves the mount tool options.
Args:
project_configuration (ProjectConfiguration): project configuration.
mount_tool_name (str): name of the mount tool.
Returns:
list[tuple[str, str, str]]: mount tool options.
"""
# TODO: sort options with lower case before upper case.
mount_tool_options = []
if project_configuration.HasMountToolsFeatureCodepage():
option = ('c', 'codepage', (
'codepage of ASCII strings, options: ascii, windows-874, '
'windows-932, windows-936, windows-949, windows-950, windows-1250, '
'windows-1251, windows-1252 (default), windows-1253, windows-1254, '
'windows-1255, windows-1256, windows-1257 or windows-1258'))
mount_tool_options.append(option)
if project_configuration.HasMountToolsFeatureEncryptedRootPlist():
option = ('e', 'plist_path', (
'specify the path of the EncryptedRoot.plist.wipekey file'))
mount_tool_options.append(option)
# TODO: set option via configuation
if project_configuration.library_name == 'libfsapfs':
option = ('f', 'file_system_index', (
'specify a specific file system or \\"all\\"'))
mount_tool_options.append(option)
# TODO: set option via configuation
if project_configuration.library_name == 'libewf':
option = ('f', 'format', (
'specify the input format, options: raw (default), files (restricted '
'to logical volume files)'))
mount_tool_options.append(option)
mount_tool_options.append(('h', '', 'shows this help'))
if project_configuration.HasMountToolsFeatureKeys():
# TODO: set keys option description via configuation
if project_configuration.library_name == 'libbde':
option = ('k', 'keys', (
'specify the full volume encryption key and tweak key formatted in '
'base16 and separated by a : character e.g. FVEK:TWEAK'))
elif project_configuration.library_name == 'libfvde':
option = ('k', 'keys', (
'specify the volume master key formatted in base16'))
elif project_configuration.library_name in ('libluksde', 'libqcow'):
option = ('k', 'keys', 'specify the key formatted in base16')
mount_tool_options.append(option)
if project_configuration.HasMountToolsFeatureOffset():
option = ('o', 'offset', 'specify the {0:s} offset in bytes'.format(
project_configuration.mount_tool_source_type))
mount_tool_options.append(option)
if project_configuration.HasMountToolsFeaturePassword():
option = ('p', 'password', 'specify the password/passphrase')
mount_tool_options.append(option)
if project_configuration.HasMountToolsFeatureRecoveryPassword():
option = (
'r', 'recovery_password', 'specify the recovery password/passphrase')
mount_tool_options.append(option)
if project_configuration.HasMountToolsFeatureStartupKey():
option = ('s', 'startup_key_path', (
'specify the path of the file containing the startup key. Typically '
'this file has the extension .BEK'))
mount_tool_options.append(option)
mount_tool_options.extend([
('v', '', ('verbose output to stderr, while {0:s} will remain '
'running in the foreground').format(mount_tool_name)),
('V', '', 'print version'),
('X', 'extended_options', 'extended options to pass to sub system')])
return mount_tool_options
def Generate(self, project_configuration, output_writer):
"""Generates tools source files.
Args:
project_configuration (ProjectConfiguration): project configuration.
output_writer (OutputWriter): output writer.
"""
tools_path = os.path.join(
self._projects_directory, project_configuration.library_name,
project_configuration.tools_directory)
library_header = 'yaltools_{0:s}.h'.format(
project_configuration.library_name)
if not os.path.exists(tools_path):
return
template_mappings = self._GetTemplateMappings(
project_configuration,
authors_separator=',\n * ')
# TODO: add support for ouput.[ch]
for directory_entry in os.listdir(self._template_directory):
# Ignore yaltools_library.h in favor of yaltools_libyal.h
if directory_entry == library_header:
continue
template_filename = os.path.join(
self._template_directory, directory_entry)
if not os.path.isfile(template_filename):
continue
if directory_entry == 'yaltools_libyal.h':
output_filename = '{0:s}tools_{1:s}.h'.format(
project_configuration.library_name_suffix,
project_configuration.library_name)
else:
output_filename = '{0:s}_{1:s}'.format(
project_configuration.tools_directory, directory_entry[9:])
output_filename = os.path.join(
project_configuration.tools_directory, output_filename)
if not os.path.exists(output_filename):
continue
self._GenerateSection(
template_filename, template_mappings, output_writer, output_filename)
self._GenerateInfoTool(
project_configuration, template_mappings, output_writer)
self._GenerateMountTool(
project_configuration, template_mappings, output_writer)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
#   Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class clusterinstance(base_resource) :
""" Configuration for cluster instance resource. """
	def __init__(self) :
		"""Initializes a clusterinstance resource with default values."""
		# Configurable settings.
		self._clid = 0
		self._deadinterval = 0
		self._hellointerval = 0
		self._preemption = ""
		self._quorumtype = ""
		# Read-only state attributes (no setters are defined for these).
		self._adminstate = ""
		self._propstate = ""
		self._operationalstate = ""
		self._status = ""
		self._rsskeymismatch = False
		self._licensemismatch = False
		self._jumbonotsupported = False
		self._operationalpropstate = ""
		# Resource count; presumably filled in by count operations — TODO confirm.
		self.___count = 0
@property
def clid(self) :
ur"""Unique number that identifies the cluster.<br/>Minimum length = 1<br/>Maximum length = 16.
"""
try :
return self._clid
except Exception as e:
raise e
@clid.setter
def clid(self, clid) :
ur"""Unique number that identifies the cluster.<br/>Minimum length = 1<br/>Maximum length = 16
"""
try :
self._clid = clid
except Exception as e:
raise e
@property
def deadinterval(self) :
ur"""Amount of time, in seconds, after which nodes that do not respond to the heartbeats are assumed to be down.<br/>Default value: 3<br/>Minimum length = 3<br/>Maximum length = 60.
"""
try :
return self._deadinterval
except Exception as e:
raise e
@deadinterval.setter
def deadinterval(self, deadinterval) :
ur"""Amount of time, in seconds, after which nodes that do not respond to the heartbeats are assumed to be down.<br/>Default value: 3<br/>Minimum length = 3<br/>Maximum length = 60
"""
try :
self._deadinterval = deadinterval
except Exception as e:
raise e
@property
def hellointerval(self) :
ur"""Interval, in milliseconds, at which heartbeats are sent to each cluster node to check the health status.<br/>Default value: 200<br/>Minimum length = 200<br/>Maximum length = 1000.
"""
try :
return self._hellointerval
except Exception as e:
raise e
@hellointerval.setter
def hellointerval(self, hellointerval) :
ur"""Interval, in milliseconds, at which heartbeats are sent to each cluster node to check the health status.<br/>Default value: 200<br/>Minimum length = 200<br/>Maximum length = 1000
"""
try :
self._hellointerval = hellointerval
except Exception as e:
raise e
@property
def preemption(self) :
ur"""Preempt a cluster node that is configured as a SPARE if an ACTIVE node becomes available.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._preemption
except Exception as e:
raise e
@preemption.setter
def preemption(self, preemption) :
ur"""Preempt a cluster node that is configured as a SPARE if an ACTIVE node becomes available.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._preemption = preemption
except Exception as e:
raise e
@property
def quorumtype(self) :
ur"""Quorum Configuration Choices - "Majority" (recommended) requires majority of nodes to be online for the cluster to be UP. "None" relaxes this requirement.<br/>Default value: MAJORITY<br/>Possible values = MAJORITY, NONE.
"""
try :
return self._quorumtype
except Exception as e:
raise e
@quorumtype.setter
def quorumtype(self, quorumtype) :
ur"""Quorum Configuration Choices - "Majority" (recommended) requires majority of nodes to be online for the cluster to be UP. "None" relaxes this requirement.<br/>Default value: MAJORITY<br/>Possible values = MAJORITY, NONE
"""
try :
self._quorumtype = quorumtype
except Exception as e:
raise e
    @property
    def adminstate(self) :
        ur"""Cluster Admin State.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED.
        """
        # Appliance-reported attribute; no setter is defined in this section of the class.
        try :
            return self._adminstate
        except Exception as e:
            raise e
    @property
    def propstate(self) :
        ur"""Enable/Disable the execution of commands on the cluster. This will not impact the execution of commands on individual cluster nodes by using the NSIP.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
        """
        # Appliance-reported attribute; no setter is defined in this section of the class.
        try :
            return self._propstate
        except Exception as e:
            raise e
    @property
    def operationalstate(self) :
        ur"""Cluster Operational State.<br/>Possible values = ENABLED, DISABLED.
        """
        # Read-only status reported by the appliance in NITRO responses.
        try :
            return self._operationalstate
        except Exception as e:
            raise e
    @property
    def status(self) :
        ur"""Cluster Operational State.<br/>Possible values = DOWN, UP, PARTIAL-UP, UNKNOWN.
        """
        # Read-only status reported by the appliance; see the Status enum class.
        try :
            return self._status
        except Exception as e:
            raise e
    @property
    def rsskeymismatch(self) :
        ur"""This argument is used to determine if there is a RSS key mismatch at cluster instance level.
        """
        # Read-only diagnostic flag reported by the appliance.
        try :
            return self._rsskeymismatch
        except Exception as e:
            raise e
    @property
    def licensemismatch(self) :
        ur"""This argument is used to determine if there is a License mismatch at cluster instance level.
        """
        # Read-only diagnostic flag reported by the appliance.
        try :
            return self._licensemismatch
        except Exception as e:
            raise e
    @property
    def jumbonotsupported(self) :
        ur"""This argument is used to determine if Jumbo framework is not supported at cluster instance level.
        """
        # Read-only diagnostic flag reported by the appliance.
        try :
            return self._jumbonotsupported
        except Exception as e:
            raise e
    @property
    def operationalpropstate(self) :
        ur"""Cluster Operational Propagation State.<br/>Default value: ENABLED<br/>Possible values = UNKNOWN, ENABLED, DISABLED, AUTO DISABLED.
        """
        # Read-only status reported by the appliance; see the Operationalpropstate enum class.
        try :
            return self._operationalpropstate
        except Exception as e:
            raise e
    def _get_nitro_response(self, service, response) :
        ur""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            # Deserialize the raw payload into a typed clusterinstance_response.
            result = service.payload_formatter.string_to_resource(clusterinstance_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 invalidates the current session, so drop it
                # before deciding whether to raise.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                # NOTE(review): a non-empty severity other than "ERROR" falls
                # through without raising; only a missing severity raises here.
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.clusterinstance
        except Exception as e :
            raise e
    def _get_object_name(self) :
        ur""" Returns the value of object identifier argument
        """
        # clid is the resource's primary identifier; returned as a string,
        # or None when the instance has no identifier yet.
        try :
            if self.clid is not None :
                return str(self.clid)
            return None
        except Exception as e :
            raise e
    @classmethod
    def add(cls, client, resource) :
        ur""" Use this API to add clusterinstance.
        """
        try :
            if type(resource) is not list :
                # Copy only the writable attributes onto a fresh request object
                # so read-only fields are not sent to the appliance.
                addresource = clusterinstance()
                addresource.clid = resource.clid
                addresource.deadinterval = resource.deadinterval
                addresource.hellointerval = resource.hellointerval
                addresource.preemption = resource.preemption
                addresource.quorumtype = resource.quorumtype
                return addresource.add_resource(client)
            else :
                # Bulk form: build one request object per input resource.
                if (resource and len(resource) > 0) :
                    addresources = [ clusterinstance() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].clid = resource[i].clid
                        addresources[i].deadinterval = resource[i].deadinterval
                        addresources[i].hellointerval = resource[i].hellointerval
                        addresources[i].preemption = resource[i].preemption
                        addresources[i].quorumtype = resource[i].quorumtype
                # NOTE(review): an empty list leaves addresources unassigned and
                # this call raises NameError; kept as generated.
                result = cls.add_bulk_request(client, addresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def delete(cls, client, resource) :
        ur""" Use this API to delete clusterinstance.
        """
        try :
            if type(resource) is not list :
                deleteresource = clusterinstance()
                # resource may be a bare clid value or a clusterinstance object.
                if type(resource) != type(deleteresource):
                    deleteresource.clid = resource
                else :
                    deleteresource.clid = resource.clid
                return deleteresource.delete_resource(client)
            else :
                # Bulk form: the list holds either bare clids or objects;
                # dispatch on the type of the first element.
                # NOTE(review): an empty list raises IndexError at resource[0].
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].clid = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].clid = resource[i].clid
                result = cls.delete_bulk_request(client, deleteresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def update(cls, client, resource) :
        ur""" Use this API to update clusterinstance.
        """
        try :
            if type(resource) is not list :
                # Copy only the writable attributes onto a fresh request object.
                updateresource = clusterinstance()
                updateresource.clid = resource.clid
                updateresource.deadinterval = resource.deadinterval
                updateresource.hellointerval = resource.hellointerval
                updateresource.preemption = resource.preemption
                updateresource.quorumtype = resource.quorumtype
                return updateresource.update_resource(client)
            else :
                # Bulk form: build one request object per input resource.
                if (resource and len(resource) > 0) :
                    updateresources = [ clusterinstance() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].clid = resource[i].clid
                        updateresources[i].deadinterval = resource[i].deadinterval
                        updateresources[i].hellointerval = resource[i].hellointerval
                        updateresources[i].preemption = resource[i].preemption
                        updateresources[i].quorumtype = resource[i].quorumtype
                # NOTE(review): an empty list leaves updateresources unassigned and
                # this call raises NameError; kept as generated.
                result = cls.update_bulk_request(client, updateresources)
                return result
        except Exception as e :
            raise e
    @classmethod
    def unset(cls, client, resource, args) :
        ur""" Use this API to unset the properties of clusterinstance resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            if type(resource) is not list :
                unsetresource = clusterinstance()
                # resource may be a bare clid value or a clusterinstance object.
                if type(resource) != type(unsetresource):
                    unsetresource.clid = resource
                else :
                    unsetresource.clid = resource.clid
                return unsetresource.unset_resource(client, args)
            else :
                # Bulk form: dispatch on the type of the first element.
                # NOTE(review): an empty list raises IndexError at resource[0].
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        unsetresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].clid = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        unsetresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            unsetresources[i].clid = resource[i].clid
                result = cls.unset_bulk_request(client, unsetresources, args)
                return result
        except Exception as e :
            raise e
    @classmethod
    def enable(cls, client, resource) :
        ur""" Use this API to enable clusterinstance.
        """
        try :
            if type(resource) is not list :
                enableresource = clusterinstance()
                # resource may be a bare clid value or a clusterinstance object.
                if type(resource) != type(enableresource):
                    enableresource.clid = resource
                else :
                    enableresource.clid = resource.clid
                return enableresource.perform_operation(client,"enable")
            else :
                # Bulk form: dispatch on the type of the first element.
                # NOTE(review): an empty list raises IndexError at resource[0].
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        enableresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            enableresources[i].clid = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        enableresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            enableresources[i].clid = resource[i].clid
                result = cls.perform_operation_bulk_request(client, enableresources,"enable")
                return result
        except Exception as e :
            raise e
    @classmethod
    def disable(cls, client, resource) :
        ur""" Use this API to disable clusterinstance.
        """
        try :
            if type(resource) is not list :
                disableresource = clusterinstance()
                # resource may be a bare clid value or a clusterinstance object.
                if type(resource) != type(disableresource):
                    disableresource.clid = resource
                else :
                    disableresource.clid = resource.clid
                return disableresource.perform_operation(client,"disable")
            else :
                # Bulk form: dispatch on the type of the first element.
                # NOTE(review): an empty list raises IndexError at resource[0].
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        disableresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            disableresources[i].clid = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        disableresources = [ clusterinstance() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            disableresources[i].clid = resource[i].clid
                result = cls.perform_operation_bulk_request(client, disableresources,"disable")
                return result
        except Exception as e :
            raise e
    @classmethod
    def get(cls, client, name="", option_="") :
        ur""" Use this API to fetch all the clusterinstance resources that are configured on netscaler.
        """
        try :
            if not name :
                # No name: fetch every configured clusterinstance.
                obj = clusterinstance()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        # Single clid: fetch one resource.
                        obj = clusterinstance()
                        obj.clid = name
                        response = obj.get_resource(client, option_)
                    else :
                        # List of clids: fetch each resource individually.
                        if name and len(name) > 0 :
                            response = [clusterinstance() for _ in range(len(name))]
                            obj = [clusterinstance() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = clusterinstance()
                                obj[i].clid = name[i]
                                response[i] = obj[i].get_resource(client, option_)
                # NOTE(review): when name is already a clusterinstance instance
                # no branch assigns response, so the return below raises
                # UnboundLocalError; kept as generated.
            return response
        except Exception as e :
            raise e
    @classmethod
    def get_filtered(cls, client, filter_) :
        ur""" Use this API to fetch filtered set of clusterinstance resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            # Wrap the filter string in an options object understood by the
            # NITRO transport layer.
            obj = clusterinstance()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e
    @classmethod
    def count(cls, client) :
        ur""" Use this API to count the clusterinstance resources configured on NetScaler.
        """
        try :
            obj = clusterinstance()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                # The payload formatter stores the count under the literal
                # attribute name '___count' (three underscores).
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
    @classmethod
    def count_filtered(cls, client, filter_) :
        ur""" Use this API to count filtered the set of clusterinstance resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = clusterinstance()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                # The payload formatter stores the count under the literal
                # attribute name '___count' (three underscores).
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
    class Operationalpropstate:
        # Legal string values for the operationalpropstate attribute.
        UNKNOWN = "UNKNOWN"
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"
        AUTO_DISABLED = "AUTO DISABLED"

    class Propstate:
        # Legal string values for the propstate attribute.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"

    class Status:
        # Legal string values for the status attribute.
        DOWN = "DOWN"
        UP = "UP"
        PARTIAL_UP = "PARTIAL-UP"
        UNKNOWN = "UNKNOWN"

    class Adminstate:
        # Legal string values for the adminstate attribute.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"

    class Preemption:
        # Legal string values for the preemption attribute.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"

    class Operationalstate:
        # Legal string values for the operationalstate attribute.
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"

    class Quorumtype:
        # Legal string values for the quorumtype attribute.
        MAJORITY = "MAJORITY"
        NONE = "NONE"
class clusterinstance_response(base_response) :
    ur""" Typed NITRO response envelope holding deserialized clusterinstance objects. """
    def __init__(self, length=1) :
        # NOTE(review): the first assignment is immediately replaced by the
        # sized list built on the last line; kept as generated.
        self.clusterinstance = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.clusterinstance = [clusterinstance() for _ in range(length)]
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Pascal VOC database
This class loads ground truth annotations from the standard Pascal VOC XML data format
and transforms them into IMDB format. Selective search is used for proposals; see the roidb
function. Results are written in the Pascal VOC format. Evaluation is based on the mAP
criterion.
"""
import cPickle
import cv2
import os
import numpy as np
from ..logger import logger
from imdb import IMDB
from pascal_voc_eval import voc_eval
from ds_utils import unique_boxes, filter_small_boxes
class PascalVOC(IMDB):
    """Pascal VOC dataset wrapper exposing the generic IMDB interface."""

    def __init__(self, image_set, root_path, devkit_path):
        """
        fill basic information to initialize imdb
        :param image_set: 2007_trainval, 2007_test, etc
        :param root_path: 'selective_search_data' and 'cache'
        :param devkit_path: data and results
        :return: imdb object
        """
        # image_set arrives as '<year>_<set>', e.g. '2007_trainval'.
        year, image_set = image_set.split('_')
        super(PascalVOC, self).__init__('voc_' + year, image_set, root_path, devkit_path)  # set self.name
        self.year = year
        self.root_path = root_path
        self.devkit_path = devkit_path
        self.data_path = os.path.join(devkit_path, 'VOC' + year)

        self.classes = ['__background__',  # always index 0
                        'aeroplane', 'bicycle', 'bird', 'boat',
                        'bottle', 'bus', 'car', 'cat', 'chair',
                        'cow', 'diningtable', 'dog', 'horse',
                        'motorbike', 'person', 'pottedplant',
                        'sheep', 'sofa', 'train', 'tvmonitor']
        self.num_classes = len(self.classes)
        self.image_set_index = self.load_image_set_index()
        self.num_images = len(self.image_set_index)
        logger.info('%s num_images %d' % (self.name, self.num_images))

        # comp_id prefixes result files; use_diff includes 'difficult' objects;
        # min_size filters tiny selective-search proposals.
        self.config = {'comp_id': 'comp4',
                       'use_diff': False,
                       'min_size': 2}

    def load_image_set_index(self):
        """
        find out which indexes correspond to given image set (train or val)
        :return: list of image indices read from ImageSets/Main/<set>.txt
        """
        image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
        assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
        with open(image_set_index_file) as f:
            image_set_index = [x.strip() for x in f.readlines()]
        return image_set_index

    def image_path_from_index(self, index):
        """
        given image index, find out full path
        :param index: index of a specific image
        :return: full path of this image
        """
        image_file = os.path.join(self.data_path, 'JPEGImages', index + '.jpg')
        assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
        return image_file

    def gt_roidb(self):
        """
        return ground truth image regions database
        :return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        # Annotations are parsed once and cached as a pickle; delete the cache
        # file to force a re-parse after changing the dataset.
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            logger.info('%s gt roidb loaded from %s' % (self.name, cache_file))
            return roidb

        gt_roidb = [self.load_pascal_annotation(index) for index in self.image_set_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        logger.info('%s wrote gt roidb to %s' % (self.name, cache_file))

        return gt_roidb

    def load_pascal_annotation(self, index):
        """
        for a given index, load image and bounding boxes info from XML file
        :param index: index of a specific image
        :return: record['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        import xml.etree.ElementTree as ET
        roi_rec = dict()
        roi_rec['image'] = self.image_path_from_index(index)
        # Image is decoded only to recover its height/width.
        size = cv2.imread(roi_rec['image']).shape
        roi_rec['height'] = size[0]
        roi_rec['width'] = size[1]

        filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        if not self.config['use_diff']:
            # Drop objects flagged 'difficult' unless explicitly requested.
            non_diff_objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
            objs = non_diff_objs
        num_objs = len(objs)

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)

        class_to_index = dict(zip(self.classes, range(self.num_classes)))
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1
            cls = class_to_index[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0

        roi_rec.update({'boxes': boxes,
                        'gt_classes': gt_classes,
                        'gt_overlaps': overlaps,
                        'max_classes': overlaps.argmax(axis=1),
                        'max_overlaps': overlaps.max(axis=1),
                        'flipped': False})
        return roi_rec

    def load_selective_search_roidb(self, gt_roidb):
        """
        turn selective search proposals into selective search roidb
        :param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        :return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
        """
        import scipy.io
        matfile = os.path.join(self.root_path, 'selective_search_data', self.name + '.mat')
        assert os.path.exists(matfile), 'selective search data does not exist: {}'.format(matfile)
        raw_data = scipy.io.loadmat(matfile)['boxes'].ravel()  # original was dict ['images', 'boxes']

        box_list = []
        for i in range(raw_data.shape[0]):
            # MATLAB boxes are (y1, x1, y2, x2) and 1-based; reorder to
            # (x1, y1, x2, y2) and shift to 0-based.
            boxes = raw_data[i][:, (1, 0, 3, 2)] - 1  # pascal voc dataset starts from 1.
            keep = unique_boxes(boxes)
            boxes = boxes[keep, :]
            keep = filter_small_boxes(boxes, self.config['min_size'])
            boxes = boxes[keep, :]
            box_list.append(boxes)

        return self.create_roidb_from_box_list(box_list, gt_roidb)

    def selective_search_roidb(self, gt_roidb, append_gt=False):
        """
        get selective search roidb and ground truth roidb
        :param gt_roidb: ground truth roidb
        :param append_gt: append ground truth
        :return: roidb of selective search
        """
        # Cached like gt_roidb; the cache does not encode append_gt, so delete
        # the file when toggling that flag.
        cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            logger.info('%s ss roidb loaded from %s' % (self.name, cache_file))
            return roidb

        if append_gt:
            logger.info('%s appending ground truth annotations' % self.name)
            ss_roidb = self.load_selective_search_roidb(gt_roidb)
            roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb)
        else:
            roidb = self.load_selective_search_roidb(gt_roidb)
        with open(cache_file, 'wb') as fid:
            cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
        logger.info('%s wrote ss roidb to %s' % (self.name, cache_file))

        return roidb

    def evaluate_detections(self, detections):
        """
        top level evaluations
        :param detections: result matrix, [bbox, confidence]
        :return: None
        """
        # make all these folders for results
        result_dir = os.path.join(self.devkit_path, 'results')
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
        year_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year)
        if not os.path.exists(year_folder):
            os.mkdir(year_folder)
        res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
        if not os.path.exists(res_file_folder):
            os.mkdir(res_file_folder)

        self.write_pascal_results(detections)
        self.do_python_eval()

    def get_result_file_template(self):
        """
        this is a template
        VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
        :return: a string template
        """
        res_file_folder = os.path.join(self.devkit_path, 'results', 'VOC' + self.year, 'Main')
        comp_id = self.config['comp_id']
        filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
        path = os.path.join(res_file_folder, filename)
        return path

    def write_pascal_results(self, all_boxes):
        """
        write results files in pascal devkit path
        :param all_boxes: boxes to be processed [bbox, confidence]
        :return: None
        """
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            logger.info('Writing %s VOC results file' % cls)
            filename = self.get_result_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_set_index):
                    dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))

    def do_python_eval(self):
        """
        python evaluation wrapper
        :return: None
        """
        annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
        imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
        annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
        aps = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = True if int(self.year) < 2010 else False
        logger.info('VOC07 metric? ' + ('Y' if use_07_metric else 'No'))
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            filename = self.get_result_file_template().format(cls)
            rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
                                     ovthresh=0.5, use_07_metric=use_07_metric)
            aps += [ap]
            logger.info('AP for {} = {:.4f}'.format(cls, ap))
        logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))
| |
"""Unit tests for layout functions."""
import pytest
numpy = pytest.importorskip('numpy')
test_smoke_empty_graphscipy = pytest.importorskip('scipy')
import pytest
import networkx as nx
from networkx.testing import almost_equal
class TestLayout(object):
    """Smoke and behaviour tests for the networkx node-layout functions."""

    @classmethod
    def setup_class(cls):
        # Shared fixtures: integer-keyed grid, string-keyed path, and a
        # large grid to exercise the sparse (scipy) code paths.
        cls.Gi = nx.grid_2d_graph(5, 5)
        cls.Gs = nx.Graph()
        nx.add_path(cls.Gs, 'abcdef')
        cls.bigG = nx.grid_2d_graph(25, 25)  # > 500 nodes for sparse

    @staticmethod
    def collect_node_distances(positions):
        # Euclidean distances between consecutive positions, in dict order.
        distances = []
        prev_val = None
        for k in positions:
            if prev_val is not None:
                diff = positions[k] - prev_val
                distances.append(numpy.dot(diff, diff) ** 0.5)
            prev_val = positions[k]
        return distances

    def test_spring_fixed_without_pos(self):
        G = nx.path_graph(4)
        pytest.raises(ValueError, nx.spring_layout, G, fixed=[0])
        pos = {0: (1, 1), 2: (0, 0)}
        # Fixing a node that has no initial position must be rejected.
        pytest.raises(ValueError, nx.spring_layout, G, fixed=[0, 1], pos=pos)
        nx.spring_layout(G, fixed=[0, 2], pos=pos)  # No ValueError

    def test_spring_init_pos(self):
        # Tests GH #2448
        import math
        G = nx.Graph()
        G.add_edges_from([(0, 1), (1, 2), (2, 0), (2, 3)])
        init_pos = {0: (0.0, 0.0)}
        fixed_pos = [0]
        pos = nx.fruchterman_reingold_layout(G, pos=init_pos, fixed=fixed_pos)
        has_nan = any(math.isnan(c) for coords in pos.values() for c in coords)
        assert not has_nan, 'values should not be nan'

    def test_smoke_empty_graph(self):
        # A bare empty list is accepted wherever a graph is expected.
        G = []
        vpos = nx.random_layout(G)
        vpos = nx.circular_layout(G)
        vpos = nx.planar_layout(G)
        vpos = nx.spring_layout(G)
        vpos = nx.fruchterman_reingold_layout(G)
        vpos = nx.spectral_layout(G)
        vpos = nx.shell_layout(G)
        vpos = nx.bipartite_layout(G, G)
        vpos = nx.spiral_layout(G)
        # FIXME vpos = nx.kamada_kawai_layout(G)

    def test_smoke_int(self):
        G = self.Gi
        vpos = nx.random_layout(G)
        vpos = nx.circular_layout(G)
        vpos = nx.planar_layout(G)
        vpos = nx.spring_layout(G)
        vpos = nx.fruchterman_reingold_layout(G)
        vpos = nx.fruchterman_reingold_layout(self.bigG)
        vpos = nx.spectral_layout(G)
        vpos = nx.spectral_layout(G.to_directed())
        vpos = nx.spectral_layout(self.bigG)
        vpos = nx.spectral_layout(self.bigG.to_directed())
        vpos = nx.shell_layout(G)
        vpos = nx.spiral_layout(G)
        vpos = nx.kamada_kawai_layout(G)
        vpos = nx.kamada_kawai_layout(G, dim=1)

    def test_smoke_string(self):
        G = self.Gs
        vpos = nx.random_layout(G)
        vpos = nx.circular_layout(G)
        vpos = nx.planar_layout(G)
        vpos = nx.spring_layout(G)
        vpos = nx.fruchterman_reingold_layout(G)
        vpos = nx.spectral_layout(G)
        vpos = nx.shell_layout(G)
        vpos = nx.spiral_layout(G)
        vpos = nx.kamada_kawai_layout(G)
        vpos = nx.kamada_kawai_layout(G, dim=1)

    def check_scale_and_center(self, pos, scale, center):
        # All coordinates must fall inside the box center +/- scale, and the
        # bounding box of the layout must be at most 2*scale on each axis.
        center = numpy.array(center)
        low = center - scale
        hi = center + scale
        vpos = numpy.array(list(pos.values()))
        length = vpos.max(0) - vpos.min(0)
        assert (length <= 2 * scale).all()
        assert (vpos >= low).all()
        assert (vpos <= hi).all()

    def test_scale_and_center_arg(self):
        sc = self.check_scale_and_center
        c = (4, 5)
        G = nx.complete_graph(9)
        G.add_node(9)
        sc(nx.random_layout(G, center=c), scale=0.5, center=(4.5, 5.5))
        # rest can have 2*scale length: [-scale, scale]
        sc(nx.spring_layout(G, scale=2, center=c), scale=2, center=c)
        sc(nx.spectral_layout(G, scale=2, center=c), scale=2, center=c)
        sc(nx.circular_layout(G, scale=2, center=c), scale=2, center=c)
        sc(nx.shell_layout(G, scale=2, center=c), scale=2, center=c)
        sc(nx.spiral_layout(G, scale=2, center=c), scale=2, center=c)
        sc(nx.kamada_kawai_layout(G, scale=2, center=c), scale=2, center=c)

    def test_planar_layout_non_planar_input(self):
        G = nx.complete_graph(9)
        pytest.raises(nx.NetworkXException, nx.planar_layout, G)

    def test_smoke_planar_layout_embedding_input(self):
        embedding = nx.PlanarEmbedding()
        embedding.set_data({0: [1, 2], 1: [0, 2], 2: [0, 1]})
        nx.planar_layout(embedding)

    def test_default_scale_and_center(self):
        sc = self.check_scale_and_center
        c = (0, 0)
        G = nx.complete_graph(9)
        G.add_node(9)
        sc(nx.random_layout(G), scale=0.5, center=(0.5, 0.5))
        sc(nx.spring_layout(G), scale=1, center=c)
        sc(nx.spectral_layout(G), scale=1, center=c)
        sc(nx.circular_layout(G), scale=1, center=c)
        sc(nx.shell_layout(G), scale=1, center=c)
        sc(nx.spiral_layout(G), scale=1, center=c)
        sc(nx.kamada_kawai_layout(G), scale=1, center=c)

    def test_circular_planar_and_shell_dim_error(self):
        # These layouts only support dim=2.
        G = nx.path_graph(4)
        pytest.raises(ValueError, nx.circular_layout, G, dim=1)
        pytest.raises(ValueError, nx.shell_layout, G, dim=1)
        pytest.raises(ValueError, nx.shell_layout, G, dim=3)
        pytest.raises(ValueError, nx.planar_layout, G, dim=1)
        pytest.raises(ValueError, nx.planar_layout, G, dim=3)

    def test_adjacency_interface_numpy(self):
        # Internal helpers accept a dense adjacency matrix directly.
        A = nx.to_numpy_array(self.Gs)
        pos = nx.drawing.layout._fruchterman_reingold(A)
        assert pos.shape == (6, 2)
        pos = nx.drawing.layout._fruchterman_reingold(A, dim=3)
        assert pos.shape == (6, 3)

    def test_adjacency_interface_scipy(self):
        # Internal helpers accept a sparse adjacency matrix directly.
        A = nx.to_scipy_sparse_matrix(self.Gs, dtype='d')
        pos = nx.drawing.layout._sparse_fruchterman_reingold(A)
        assert pos.shape == (6, 2)
        pos = nx.drawing.layout._sparse_spectral(A)
        assert pos.shape == (6, 2)
        pos = nx.drawing.layout._sparse_fruchterman_reingold(A, dim=3)
        assert pos.shape == (6, 3)

    def test_single_nodes(self):
        G = nx.path_graph(1)
        vpos = nx.shell_layout(G)
        assert not vpos[0].any()
        G = nx.path_graph(4)
        vpos = nx.shell_layout(G, [[0], [1, 2], [3]])
        assert not vpos[0].any()
        assert vpos[3].any()  # ensure node 3 not at origin (#3188)

    def test_smoke_initial_pos_fruchterman_reingold(self):
        pos = nx.circular_layout(self.Gi)
        npos = nx.fruchterman_reingold_layout(self.Gi, pos=pos)

    def test_fixed_node_fruchterman_reingold(self):
        # Dense version (numpy based)
        pos = nx.circular_layout(self.Gi)
        npos = nx.spring_layout(self.Gi, pos=pos, fixed=[(0, 0)])
        assert tuple(pos[(0, 0)]) == tuple(npos[(0, 0)])
        # Sparse version (scipy based)
        pos = nx.circular_layout(self.bigG)
        npos = nx.spring_layout(self.bigG, pos=pos, fixed=[(0, 0)])
        for axis in range(2):
            assert almost_equal(pos[(0, 0)][axis], npos[(0, 0)][axis])

    def test_center_parameter(self):
        # A single node must land exactly on the requested center.
        G = nx.path_graph(1)
        vpos = nx.random_layout(G, center=(1, 1))
        vpos = nx.circular_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)
        vpos = nx.planar_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)
        vpos = nx.spring_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)
        vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)
        vpos = nx.spectral_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)
        vpos = nx.shell_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)
        vpos = nx.spiral_layout(G, center=(1, 1))
        assert tuple(vpos[0]) == (1, 1)

    def test_center_wrong_dimensions(self):
        G = nx.path_graph(1)
        # spring_layout is an alias of fruchterman_reingold_layout; guard
        # against the two names drifting apart.
        assert id(nx.spring_layout) == id(nx.fruchterman_reingold_layout)
        pytest.raises(ValueError, nx.random_layout, G, center=(1, 1, 1))
        pytest.raises(ValueError, nx.circular_layout, G, center=(1, 1, 1))
        pytest.raises(ValueError, nx.planar_layout, G, center=(1, 1, 1))
        pytest.raises(ValueError, nx.spring_layout, G, center=(1, 1, 1))
        pytest.raises(ValueError, nx.spring_layout, G, dim=3, center=(1, 1))
        pytest.raises(ValueError, nx.spectral_layout, G, center=(1, 1, 1))
        pytest.raises(ValueError, nx.spectral_layout, G, dim=3, center=(1, 1))
        pytest.raises(ValueError, nx.shell_layout, G, center=(1, 1, 1))
        pytest.raises(ValueError, nx.spiral_layout, G, center=(1, 1, 1))

    def test_empty_graph(self):
        # Every layout returns an empty dict for an empty graph.
        G = nx.empty_graph()
        vpos = nx.random_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.circular_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.planar_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.bipartite_layout(G, G)
        assert vpos == {}
        vpos = nx.spring_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.fruchterman_reingold_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.spectral_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.shell_layout(G, center=(1, 1))
        assert vpos == {}
        vpos = nx.spiral_layout(G, center=(1, 1))
        assert vpos == {}

    def test_bipartite_layout(self):
        G = nx.complete_bipartite_graph(3, 5)
        top, bottom = nx.bipartite.sets(G)

        # Default (vertical) alignment: each partition shares an x coordinate.
        vpos = nx.bipartite_layout(G, top)
        assert len(vpos) == len(G)

        top_x = vpos[list(top)[0]][0]
        bottom_x = vpos[list(bottom)[0]][0]
        for node in top:
            assert vpos[node][0] == top_x
        for node in bottom:
            assert vpos[node][0] == bottom_x

        # Horizontal alignment: each partition shares a y coordinate.
        vpos = nx.bipartite_layout(G, top,
                                   align='horizontal',
                                   center=(2, 2),
                                   scale=2,
                                   aspect_ratio=1)
        assert len(vpos) == len(G)

        top_y = vpos[list(top)[0]][1]
        bottom_y = vpos[list(bottom)[0]][1]
        for node in top:
            assert vpos[node][1] == top_y
        for node in bottom:
            assert vpos[node][1] == bottom_y

        pytest.raises(ValueError, nx.bipartite_layout, G, top, align='foo')

    def test_kamada_kawai_costfn_1d(self):
        costfn = nx.drawing.layout._kamada_kawai_costfn
        pos = numpy.array([4.0, 7.0])
        invdist = 1 / numpy.array([[0.1, 2.0], [2.0, 0.3]])

        cost, grad = costfn(pos, numpy, invdist, meanweight=0, dim=1)

        assert almost_equal(cost, ((3 / 2.0 - 1) ** 2))
        assert almost_equal(grad[0], -0.5)
        assert almost_equal(grad[1], 0.5)

    def test_kamada_kawai_costfn_2d(self):
        costfn = nx.drawing.layout._kamada_kawai_costfn
        pos = numpy.array([[1.3, -3.2],
                           [2.7, -0.3],
                           [5.1, 2.5]])
        invdist = 1 / numpy.array([[0.1, 2.1, 1.7],
                                   [2.1, 0.2, 0.6],
                                   [1.7, 0.6, 0.3]])
        meanwt = 0.3

        cost, grad = costfn(pos.ravel(), numpy, invdist,
                            meanweight=meanwt, dim=2)

        # Recompute the expected cost independently from the definition.
        expected_cost = 0.5 * meanwt * numpy.sum(numpy.sum(pos, axis=0) ** 2)
        for i in range(pos.shape[0]):
            for j in range(i + 1, pos.shape[0]):
                diff = numpy.linalg.norm(pos[i] - pos[j])
                expected_cost += (diff * invdist[i][j] - 1.0) ** 2

        assert almost_equal(cost, expected_cost)

        # Check the analytic gradient against central finite differences.
        dx = 1e-4
        for nd in range(pos.shape[0]):
            for dm in range(pos.shape[1]):
                idx = nd * pos.shape[1] + dm
                pos0 = pos.flatten()

                pos0[idx] += dx
                cplus = costfn(pos0, numpy, invdist,
                               meanweight=meanwt, dim=pos.shape[1])[0]

                pos0[idx] -= 2 * dx
                cminus = costfn(pos0, numpy, invdist,
                                meanweight=meanwt, dim=pos.shape[1])[0]

                assert almost_equal(grad[idx], (cplus - cminus) / (2 * dx),
                                    places=5)

    def test_spiral_layout(self):
        G = self.Gs

        # a lower value of resolution should result in a more compact layout
        # intuitively, the total distance from the start and end nodes
        # via each node in between (transiting through each) will be less,
        # assuming rescaling does not occur on the computed node positions
        pos_standard = nx.spiral_layout(G, resolution=0.35)
        pos_tighter = nx.spiral_layout(G, resolution=0.34)
        distances = self.collect_node_distances(pos_standard)
        distances_tighter = self.collect_node_distances(pos_tighter)
        assert sum(distances) > sum(distances_tighter)

        # return near-equidistant points after the first value if set to true
        pos_equidistant = nx.spiral_layout(G, equidistant=True)
        distances_equidistant = self.collect_node_distances(pos_equidistant)
        for d in range(1, len(distances_equidistant) - 1):
            # test similarity to two decimal places
            assert almost_equal(
                distances_equidistant[d],
                distances_equidistant[d+1],
                2
            )
| |
import os
import os.path
import tempfile
import shutil
from nose.tools import eq_
from build_pack_utils import utils
from dingus import Dingus
from common.integration import ErrorHelper
from common.components import BuildPackAssertHelper
from common.components import HttpdAssertHelper
from common.components import PhpAssertHelper
from common.components import DownloadAssertHelper
from common.components import CodizyAssertHelper
from common.base import BaseCompileApp
# Load the codizy build-pack extension module under test from its source path.
codizy_extn = utils.load_extension('extensions/codizy')
class TestCodizyInstaller(object):
    def setUp(self):
        # Fresh scratch build directory per test, seeded with the stock
        # PHP 5.4 configuration files the extension expects to find.
        self.build_dir = tempfile.mkdtemp(prefix='build-')
        phpCfgDir = os.path.join(self.build_dir, 'php', 'etc')
        os.makedirs(phpCfgDir)
        shutil.copy('defaults/config/php/5.4.x/php.ini',
                    phpCfgDir)
        shutil.copy('defaults/config/php/5.4.x/php-fpm.conf',
                    phpCfgDir)
    def tearDown(self):
        # Remove the scratch build directory created by setUp().
        if os.path.exists(self.build_dir):
            shutil.rmtree(self.build_dir)
def test_configure(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'PHP_EXTENSIONS': []
})
codizy = codizy_extn.CodizyExtension(ctx)
codizy._configure()
eq_(True, 'xhprof' in ctx['PHP_EXTENSIONS'])
eq_(True, 'codizy' in ctx['PHP_EXTENSIONS'])
eq_(True, 'curl' in ctx['PHP_EXTENSIONS'])
eq_(True, 'gettext' in ctx['PHP_EXTENSIONS'])
eq_(True, 'mbstring' in ctx['PHP_EXTENSIONS'])
eq_(True, 'openssl' in ctx['PHP_EXTENSIONS'])
eq_(True, 'zlib' in ctx['PHP_EXTENSIONS'])
eq_(7, len(ctx['PHP_EXTENSIONS']))
eq_(True, 'ioncube' in ctx['ZEND_EXTENSIONS'])
eq_(1, len(ctx['ZEND_EXTENSIONS']))
def test_configure_doesnt_override_extns(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'PHP_EXTENSIONS': ['a', 'b'],
'ZEND_EXTENSIONS': ['opcache', 'xdebug']
})
codizy = codizy_extn.CodizyExtension(ctx)
codizy._configure()
eq_(True, 'xhprof' in ctx['PHP_EXTENSIONS'])
eq_(True, 'codizy' in ctx['PHP_EXTENSIONS'])
eq_(True, 'curl' in ctx['PHP_EXTENSIONS'])
eq_(True, 'gettext' in ctx['PHP_EXTENSIONS'])
eq_(True, 'mbstring' in ctx['PHP_EXTENSIONS'])
eq_(True, 'openssl' in ctx['PHP_EXTENSIONS'])
eq_(True, 'zlib' in ctx['PHP_EXTENSIONS'])
eq_(True, 'a' in ctx['PHP_EXTENSIONS'])
eq_(True, 'b' in ctx['PHP_EXTENSIONS'])
eq_(9, len(ctx['PHP_EXTENSIONS']))
eq_(True, 'ioncube' in ctx['ZEND_EXTENSIONS'])
eq_('ioncube', ctx['ZEND_EXTENSIONS'][0])
eq_(3, len(ctx['ZEND_EXTENSIONS']))
def test_should_not_compile_php56(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.6.0'
})
codizy = codizy_extn.CodizyExtension(ctx)
eq_(False, codizy._should_compile())
def test_should_compile_not_set(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33'
})
codizy = codizy_extn.CodizyExtension(ctx)
eq_(False, codizy._should_compile())
def test_should_compile_enabled(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'CODIZY_INSTALL': True
})
codizy = codizy_extn.CodizyExtension(ctx)
eq_(True, codizy._should_compile())
def test_should_compile_enabled_but_not_supported(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.6.0',
'CODIZY_INSTALL': True
})
codizy = codizy_extn.CodizyExtension(ctx)
eq_(False, codizy._should_compile())
def test_should_compile_service(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'VCAP_SERVICES': {
"rediscloud": [{"credentials": {}, "label": "rediscloud"}],
"codizy": [{"credentials": {}, "label": "codizy"}]
}
})
codizy = codizy_extn.CodizyExtension(ctx)
eq_(True, codizy._should_compile())
def test_should_compile_service_but_not_supported(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.6.0',
'VCAP_SERVICES': {
"rediscloud": [{"credentials": {}, "label": "rediscloud"}],
"codizy": [{"credentials": {}, "label": "codizy"}]
}
})
codizy = codizy_extn.CodizyExtension(ctx)
eq_(False, codizy._should_compile())
def test_compile(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'VCAP_SERVICES': {
"rediscloud": [{"credentials": {}, "label": "rediscloud"}],
"codizy": [{"credentials": {}, "label": "codizy"}]
}
})
codizy = codizy_extn.CodizyExtension(ctx)
codizy._setup_codizy = Dingus()
inst = Dingus()
codizy.compile(inst)
eq_(1, len(inst.package.calls()))
eq_('CODIZY', inst.package.calls()[0].args[0])
eq_(1, len(codizy._setup_codizy.calls()))
def test_setup_codizy_no_auto_prepend_file(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'CODIZY_INSTALL': True
})
codizy = codizy_extn.CodizyExtension(ctx)
codizy.load_config()
codizy._php_ini._lines = []
eq_(0, len([line for line in codizy._php_ini._lines
if line.startswith('auto_prepend_file')]))
codizy._setup_codizy()
eq_(1, len([line for line in codizy._php_ini._lines
if line.startswith('auto_prepend_file')]))
eq_('auto_prepend_file = @{HOME}/codizy/client/application/setup.php',
codizy._php_ini._lines[0])
def test_setup_codizy_empty_auto_prepend_file(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'CODIZY_INSTALL': True
})
codizy = codizy_extn.CodizyExtension(ctx)
codizy.load_config()
codizy._php_ini._lines = ['auto_prepend_file =']
eq_(1, len([line for line in codizy._php_ini._lines
if line.startswith('auto_prepend_file')]))
codizy._setup_codizy()
eq_(1, len([line for line in codizy._php_ini._lines
if line.startswith('auto_prepend_file')]))
eq_('auto_prepend_file = @{HOME}/codizy/client/application/setup.php',
codizy._php_ini._lines[0])
def test_setup_codizy_existing_auto_prepend_file(self):
ctx = utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'PHP_VERSION': '5.4.33',
'CODIZY_INSTALL': True
})
codizy = codizy_extn.CodizyExtension(ctx)
codizy.load_config()
codizy._php_ini._lines = ['auto_prepend_file = @{HOME}/php/file.php']
eq_(1, len([line for line in codizy._php_ini._lines
if line.startswith('auto_prepend_file')]))
codizy._setup_codizy()
eq_(1, len([line for line in codizy._php_ini._lines
if line.startswith('auto_prepend_file')]))
eq_('auto_prepend_file = @{HOME}/php/auto_prepend_file.php',
codizy._php_ini._lines[0])
path = os.path.join(ctx['BUILD_DIR'], 'php', 'auto_prepend_file.php')
with open(path, 'rt') as fp:
lines = fp.readlines()
eq_(4, len(lines))
eq_(' require("@{HOME}/php/file.php");\n', lines[1])
eq_(' require("@{HOME}/codizy/client/application/setup.php");\n',
lines[2])
class TestCompileCodizyWithPHP(BaseCompileApp):
    """Full compile run of 'app-1' with the codizy extension enabled."""
    def __init__(self):
        self.app_name = 'app-1'
    def setUp(self):
        BaseCompileApp.setUp(self)
        self.opts.set_codizy_download_url(
            '{DOWNLOAD_URL}/codizy/{CODIZY_VERSION}/{CODIZY_PACKAGE}')
        os.environ['CODIZY_INSTALL'] = 'True'
    def test_compile_php_with_codizy(self):
        # Helpers that verify each staged component of the environment.
        buildpack = BuildPackAssertHelper()
        codizy_helper = CodizyAssertHelper()
        web_server = HttpdAssertHelper()
        php_helper = PhpAssertHelper()
        # this test expects httpd as the web server
        self.opts.set_web_server('httpd')
        # run the compile step of the build pack
        output = ErrorHelper().compile(self.bp)
        # confirm downloads
        DownloadAssertHelper(28, 2).assert_downloads_from_output(output)
        # the start script must reference every component
        for helper in (buildpack, web_server, php_helper):
            helper.assert_start_script_is_correct(self.build_dir)
        # confirm bp utils installed & config options applied
        buildpack.assert_scripts_are_installed(self.build_dir)
        buildpack.assert_config_options(self.build_dir)
        # check env & proc files
        web_server.assert_contents_of_procs_file(self.build_dir)
        web_server.assert_contents_of_env_file(self.build_dir)
        php_helper.assert_contents_of_procs_file(self.build_dir)
        php_helper.assert_contents_of_env_file(self.build_dir)
        # webdir exists
        web_server.assert_web_dir_exists(self.build_dir,
                                         self.opts.get_webdir())
        # php, httpd and codizy files should all be installed
        web_server.assert_files_installed(self.build_dir)
        php_helper.assert_files_installed(self.build_dir)
        codizy_helper.assert_files_installed(self.build_dir)
| |
# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from .errors import StreamNotFoundError
from .time_utils import json_serial
from datetime import datetime
import logging
import sys
from bidict import bidict, ValueDuplicationError
from future.utils import python_2_unicode_compatible
from treelib.tree import Tree, NodePropertyAbsentError, NodeIDAbsentError
import json
# Work around Python 2's default 'ascii' codec ("UnicodeDecodeError: 'ascii'
# codec can't decode byte") by reloading sys and switching the default
# encoding to utf-8. Under Python 3 reload() is not a builtin, so the
# NameError branch makes this a no-op there.
# http://stackoverflow.com/questions/21129020/how-to-fix-unicodedecodeerror-ascii-codec-cant-decode-byte
# reload(sys) resets sys.stdout / sys.stderr (breaking e.g. IPython's
# capture — see https://github.com/ipython/ipython/issues/8354), so back up
# the current streams first and restore them afterwards.
default_stdout = sys.stdout
default_stderr = sys.stderr
try:
    reload(sys)  # Python 2.7 only; raises NameError on Python 3
    # noinspection PyUnresolvedReferences
    sys.setdefaultencoding('utf8')
except NameError:
    pass
# Restore the previously captured stdout and stderr channels.
sys.stdout = default_stdout
sys.stderr = default_stderr
@python_2_unicode_compatible
class MetaDataTree(Tree):
    """A treelib ``Tree`` subclass with unicode pretty-printing support."""
    # Traversal-mode constants (values mirror treelib's own constants).
    (ROOT, DEPTH, WIDTH, ZIGZAG) = list(range(4))
    def __str__(self):
        """Return the tree rendered as a multi-line unicode drawing."""
        self.reader = u"\n"
        def write(line):
            # The print backend hands us utf-8 encoded bytes; decode & append.
            self.reader += line.decode('utf-8') + "\n"
        self._Tree__print_backend(func=write, idhidden=False)
        return self.reader
    def __repr__(self):
        # Compact summary: class name, node count and tree depth.
        return "{} with {} nodes and depth {}".format(self.__class__.__name__, len(self.nodes), self.tree_depth)
    @property
    def tree_depth(self):
        # Maximum node depth over the whole tree (root has depth 0).
        return max(self.depth(n) for n in self.nodes)
    # noinspection PyPep8Naming,SpellCheckingInspection
    def _Tree__print_backend(self, nid=None, level=ROOT, idhidden=True, queue_filter=None,
                             key=None, reverse=False, line_type='ascii-emv',
                             data_property=None, func=print, iflast=None):
        """
        Another implementation of printing tree using Stack
        Print tree structure in hierarchy style.
        For example:
            Root
            |___ C01
            |    |___ C11
            |         |___ C111
            |         |___ C112
            |___ C02
            |___ C03
            |    |___ C31
        A more elegant way to achieve this function using Stack
        structure, for constructing the Nodes Stack push and pop nodes
        with additional level info.
        UPDATE: the @key @reverse is present to sort node at each
        level.
        NOTE(review): this overrides treelib's private name-mangled
        ``_Tree__print_backend``, so it is tightly coupled to the treelib
        version in use — confirm against the pinned treelib release.
        """
        # Glyph triples: (vertical rule, mid-branch connector, last-branch
        # connector) for each supported rendering style.
        line_types = \
            {'ascii': ('|', '|-- ', '+-- '),
             'ascii-ex': ('\u2502', '\u251c\u2500\u2500 ', '\u2514\u2500\u2500 '),
             'ascii-exr': ('\u2502', '\u251c\u2500\u2500 ', '\u2570\u2500\u2500 '),
             'ascii-em': ('\u2551', '\u2560\u2550\u2550 ', '\u255a\u2550\u2550 '),
             'ascii-emv': ('\u2551', '\u255f\u2500\u2500 ', '\u2559\u2500\u2500 '),
             'ascii-emh': ('\u2502', '\u255e\u2550\u2550 ', '\u2558\u2550\u2550 ')}
        DT_VLINE, DT_LINE_BOX, DT_LINE_COR = line_types[line_type]
        nid = self.root if (nid is None) else nid
        if not self.contains(nid):
            raise NodeIDAbsentError("Node '%s' is not in the tree" % nid)
        # Choose what to display: a named data property if requested and
        # present, otherwise the node's tag.
        if data_property is not None and hasattr(self[nid].data, data_property):
            display_value = getattr(self[nid].data, data_property)
        elif data_property is None:
            display_value = self[nid].tag
        else:
            raise NodePropertyAbsentError("Node '%s' does not have data property '%s'" % (nid, data_property))
        label = ('{0}'.format(display_value)) \
            if idhidden \
            else ('{0}[{1}:{2}]'.format(display_value, self[nid].identifier, str(self[nid].data)))
        queue_filter = self._Tree__real_true if (queue_filter is None) else queue_filter
        if not iflast:
            iflast = []
        if level == self.ROOT:
            func(label.encode('utf8'))
        else:
            # Build leading rules/spaces from the ancestors' "is last child"
            # flags, then pick the branch glyph for this node.
            leading = ''.join(map(lambda x: DT_VLINE + ' ' * 3 if not x else ' ' * 4, iflast[0:-1]))
            lasting = DT_LINE_COR if iflast[-1] else DT_LINE_BOX
            func('{0}{1}{2}'.format(leading, lasting, label).encode('utf-8'))
        if queue_filter(self[nid]) and self[nid].expanded:
            queue = [self[i] for i in self[nid].fpointer if queue_filter(self[i])]
            key = (lambda x: x) if (key is None) else key
            queue.sort(key=key, reverse=reverse)
            level += 1
            for element in queue:
                # Record whether this child is the last one so the recursive
                # call can pick the correct connector glyph.
                iflast.append(queue.index(element) == len(queue) - 1)
                self._Tree__print_backend(element.identifier, level, idhidden,
                                          queue_filter, key, reverse, line_type, data_property, func, iflast)
                iflast.pop()
@python_2_unicode_compatible
class Printable(object):
    """
    A base class providing a default repr-style string representation
    built from the instance's public attributes.
    """
    def __str__(self):
        return repr(self)
    def __repr__(self):
        """Render as ``ClassName(attr=value, ...)``, skipping private attrs."""
        name = self.__class__.__name__
        values = ", ".join("{}={}".format(k, self.format(v))
                           for k, v in sorted(self.__dict__.items())
                           if not k.startswith("_"))
        return "{}({})".format(name, values)
    @staticmethod
    def format(v):
        """Format a single attribute value for inclusion in the repr.

        UTC-aware datetimes are expanded field-by-field; everything else
        falls back to ``repr``.
        """
        if isinstance(v, datetime):
            if v.tzname() == 'UTC':
                # BUG FIX: the closing parenthesis was missing from the
                # format string, producing an unbalanced repr.
                return 'datetime.datetime({}, {}, {}, {}, {}, {}, {}, tzinfo={})'.format(
                    v.year, v.month, v.day, v.hour, v.minute, v.second, v.microsecond, 'UTC'
                )
            else:
                return repr(v)
        else:
            return repr(v)
class Hashable(object):
    """
    Base class whose hash is derived from ``__dict__``.
    Requires keys to be strings to work properly. Hashing first tries
    ``json.dumps``; if that fails because a value is not JSON serializable
    (e.g. datetime.datetime) it falls back on ``repr``.
    """
    _name = None
    @property
    def name(self):
        # Default to the class name when no explicit name has been set.
        if self._name is None:
            return self.__class__.__name__
        return self._name
    @name.setter
    def name(self, name):
        self._name = str(name)
    def __hash__(self):
        try:
            serialized = json.dumps(self.__dict__, sort_keys=True,
                                    default=json_serial)
        except TypeError:
            serialized = repr(sorted(self.__dict__.items()))
        return hash((self.name, serialized))
class TypedBiDict(Printable):
    """
    Custom strongly typed bi-directional dictionary where keys and values must be a specific type.
    Raises ValueDuplicationError if the same value is added again
    """
    def __init__(self, key_type, value_type, *args, **kwargs):
        """Create the store.

        :param key_type: required type for every key
        :param value_type: required type for every value
        :raises ValueError: if key_type or value_type is not a type
        """
        # BUG FIX: the error messages were previously passed as two
        # arguments, ValueError("expected type, got {}", ...), so the
        # placeholder was never formatted.
        if not isinstance(key_type, type):
            raise ValueError("expected type, got {}".format(type(key_type)))
        if not isinstance(value_type, type):
            raise ValueError("expected type, got {}".format(type(value_type)))
        self._store = bidict(*args, **kwargs)
        self.key_type = key_type
        self.value_type = value_type
    def __repr__(self):
        return "{}(key_type={}, value_type={})".format(
            self.__class__.__name__,
            repr(self.key_type),
            repr(self.value_type))
    def __iter__(self):
        return iter(self._store)
    def __len__(self):
        return len(self._store)
    def __getitem__(self, key):
        if not isinstance(key, self.key_type):
            raise TypeError("expected {}, got {}".format(self.key_type, type(key)))
        try:
            return self._store[key]
        except KeyError:
            # Re-raise with the key's repr for easier debugging
            raise StreamNotFoundError(repr(key))
    def __setitem__(self, key, value):
        if not isinstance(key, self.key_type):
            raise TypeError("expected {}, got {}".format(self.key_type, type(key)))
        # NOTE: value-type mismatch deliberately raises ValueError (not
        # TypeError) for backwards compatibility with existing callers.
        if not isinstance(value, self.value_type):
            raise ValueError("expected {}, got {}".format(self.value_type, type(value)))
        try:
            self._store[key] = value
        except ValueDuplicationError as e:
            # TODO: debugging
            raise e
    def __delitem__(self, key):
        self._store.__delitem__(key)
    def __contains__(self, item):
        return item in self._store
    def keys(self):
        return self._store.keys()
    def values(self):
        return self._store.values()
    def items(self):
        return self._store.items()
    def iterkeys(self):
        return self._store.iterkeys()
    def itervalues(self):
        return self._store.itervalues()
    def iteritems(self):
        return self._store.iteritems()
class FrozenKeyDict(dict):
    """A dict where a key, once set, may only be re-set to an equal value.

    Dict-valued entries are reconciled key-by-key: matching sub-keys must
    compare equal; new sub-keys are merged in. A conflicting re-set raises
    KeyError.
    """
    def __setitem__(self, key, value):
        if key in self:
            # Try to reconcile the new value with the old
            old = self[key]
            if isinstance(value, dict) and isinstance(old, dict):
                for k in value:
                    if k in old:
                        try:
                            if value[k] != old[k]:
                                raise KeyError(
                                    "Key {} has already been set with value {}, new value {}"
                                    .format(key, self[key], value))
                        except ValueError:
                            # != produced a non-boolean (e.g. array-like
                            # values); compare element-wise instead.
                            try:
                                if not all(map(lambda x: x[0] == x[1], zip(value[k], old[k]))):
                                    raise KeyError(
                                        "Key {} has already been set with value {}, new value {}"
                                        .format(key, self[key], value))
                            except ValueError as e:
                                # Still not comparable: log and overwrite.
                                logging.error('Unable to compare values for key {}:'
                                              ' old {}, new {}, error {}, overwriting'
                                              .format(key, self[key], value, e))
                                self[key][k] = value[k]
                                continue
                    # Equal (or new) sub-key: store the incoming value.
                    self[key][k] = value[k]
            else:
                if self[key] != value:
                    raise KeyError("Key {} has already been set with value {}, new value {}".format(
                        key, self[key], value))
            return
        super(FrozenKeyDict, self).__setitem__(key, value)
class TypedFrozenKeyDict(FrozenKeyDict):
    """FrozenKeyDict whose keys must all be instances of ``key_type``."""
    def __init__(self, key_type, *args, **kwargs):
        if not isinstance(key_type, type):
            raise ValueError("Expected type, got {}".format(type(key_type)))
        self.key_type = key_type
        super(TypedFrozenKeyDict, self).__init__(*args, **kwargs)
    def __setitem__(self, key, value):
        if isinstance(key, self.key_type):
            super(TypedFrozenKeyDict, self).__setitem__(key, value)
        else:
            raise KeyError("Expected type {}, got {}".format(self.key_type, type(key)))
class _Singleton(type):
"""
Singleton class, py2k and p3k compatible
See:
https://stackoverflow.com/a/33201/1038264
https://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
"""
# noinspection PyInitNewSignature
def __init__(cls, name, bases, dictionary):
super(_Singleton, cls).__init__(name, bases, dictionary)
cls.instance = None
def __call__(cls, *args, **kwargs):
if cls.instance is None:
cls.instance = super(_Singleton, cls).__call__(*args, **kwargs)
return cls.instance
class Singleton(type):
    """Singleton metaclass using a private ``_instance`` attribute."""
    # noinspection PyInitNewSignature
    def __init__(cls, name, bases, dictionary):
        super(Singleton, cls).__init__(name, bases, dictionary)
        cls._instance = None
    def __call__(cls, *args, **kwargs):
        existing = cls._instance
        if existing is None:
            existing = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instance = existing
        return existing
class ToolContainer(Printable):
    """Dummy namespace object holding tool instances for attribute access."""
class FactorContainer(Printable):
    """Dummy namespace object holding factor creation functions."""
class PluginContainer(Printable):
    """Dummy namespace object holding plugins."""
class PluginWrapper(Printable):
    """Dummy class for a plugin, bundling tool and factor containers."""
    def __init__(self):
        self.tools = ToolContainer()
        self.factors = FactorContainer()
| |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A learned optimizer which has a per tensor LSTM predicting Adam hparams.
Each iteration, this optimizer applies the learned LSTM on various features
computed for each tensor. These are quantities like average weight value,
average gradient value, and so on. This LSTM then produces 4 values per tensor
-- one for each of the Adam hparams (lr, beta1, beta2, epsilon). We then use
these values to move the Adam hparams relative to the previous values. These new
values are then used with a fixed form update equation similar to Adam.
Sometimes when picking Adam hparams, both LR and epsilon can increasingly grow.
This has the effect of turning off the second moment based updates which
can increase optimization performance, but comes at the cost of needing large
values of both lr and epsilon which balance each other out. This makes
meta-optimization difficult. To fix this, we make use a modified update below
so to make it easier to turn off the second moment updates without requiring
the learning rate to grow.
Adam updates:
step = lr * 1.0 / (eps + sqrt(rms + 1e-10)) * mom
Our modified update:
step = (lr * (1.0 + eps)) * 1.0 / (eps + sqrt(rms + 1e-10)) * mom
"""
import collections
import functools
from typing import Any, Optional, Callable, Mapping, Sequence
import flax
import gin
import haiku as hk
import jax
from jax import lax
import jax.numpy as jnp
from learned_optimization import summary
from learned_optimization import tree_utils
from learned_optimization.learned_optimizers import base as lopt_base
from learned_optimization.optimizers import base as opt_base
# Type alias for JAX PRNG keys (arrays produced by jax.random.PRNGKey).
PRNGKey = jnp.ndarray
@flax.struct.dataclass
class MeanAndMeanSquareAccumulator:
  # First-moment (momentum-like) gradient accumulator; pytree matching params.
  m: Any
  # Second-moment (mean of squared gradients) accumulator; same structure.
  rms: Any
  # Scalar count of accumulator updates applied so far.
  t: jnp.ndarray
@flax.struct.dataclass  # pylint: disable=g-classes-have-attributes
class NNAdamState:
  """State of the NN adam optimizer.
  Args:
    params: Parameter pytree of the problem being optimized
    state: The state / non-learnable params of the problem being optimized
    iteration: Number of inner-steps applied.
    rolling_features: Momentum, and second moment gradient accumulators.
    per_layer_lr: PyTree of scalars matching params containing current lr value
    per_layer_beta1: PyTree of scalars matching params containing current beta1
      value
    per_layer_beta2: PyTree of scalars matching params containing current beta2
      value
    per_layer_epsilon: PyTree of scalars matching params containing current
      epsilon value
    lstm_hidden_state: The LSTM hidden state.
  """
  params: Any
  state: Any
  iteration: int
  rolling_features: MeanAndMeanSquareAccumulator
  # NOTE: the per-layer hparams below are stored in the transformed
  # (_Invertable "forward") space, not as raw Adam hyperparameters.
  per_layer_lr: Any
  per_layer_beta1: Any
  per_layer_beta2: Any
  per_layer_epsilon: Any
  lstm_hidden_state: Any
_InitUpdate = collections.namedtuple("_InitUpdate", ["init", "update"])
def _first_second_rolling() -> _InitUpdate:
  """Helper to compute first and second moment accumulators of gradients.
  Unlike the ones in common, this is designed to operate on a pytree
  of beta1, and beta2.
  Returns:
    A pair of functions to initialize, and update the accumulators.
  """
  def _zero_state(params):
    # Fresh accumulators shaped like the params, with step counter at 0.
    return MeanAndMeanSquareAccumulator(
        m=jax.tree_util.tree_map(jnp.zeros_like, params),
        rms=jax.tree_util.tree_map(jnp.zeros_like, params),
        t=jnp.asarray(0))
  def _step(state, grad, beta1, beta2):
    # Exponential moving averages; beta1/beta2 are themselves pytrees.
    ema = lambda acc, g, b: b * acc + (1 - b) * g
    ema_sq = lambda acc, g, b: b * acc + (1 - b) * (g * g)
    new_m = jax.tree_util.tree_map(ema, state.m, grad, beta1)
    new_rms = jax.tree_util.tree_map(ema_sq, state.rms, grad, beta2)
    return MeanAndMeanSquareAccumulator(m=new_m, rms=new_rms, t=state.t + 1)
  return _InitUpdate(_zero_state, _step)
class _Invertable:
"""Base class to help manage hparam transformations."""
def __init__(self, forward: Callable[[jnp.ndarray], jnp.ndarray],
inverse: Callable[[jnp.ndarray], jnp.ndarray]):
self.forward = jax.jit(forward)
self.inverse = jax.jit(inverse)
@functools.partial(jax.jit, static_argnums=0)
def tree_inverse_forward(self, val):
f = lambda v: self.forward(self.inverse(v))
return jax.tree_map(f, val)
# Transforms mapping raw hparams to an unconstrained space the LSTM can nudge
# additively ("forward") and back ("inverse"); the inverse also clips to a
# safe numeric range, so inverse∘forward acts as range clipping.
_scaled_lr = _Invertable(
    forward=lambda x: 0.1 * jnp.log(x),
    inverse=lambda x: jnp.clip(jnp.exp(10. * x), 1e-8, 1e3))
_scaled_epsilon = _Invertable(
    forward=lambda x: 0.1 * jnp.log(x),
    inverse=lambda x: jnp.clip(jnp.exp(10. * x), 1e-11, 1e4))
# For beta1/beta2: operates on log(1 - beta) so the inverse stays in [0, 1).
_scaled_one_minus_log = _Invertable(
    forward=lambda x: 0.1 * jnp.log(1 - x),
    inverse=lambda x: (1 - jnp.exp(jnp.clip(10 * x, -10, 0))))
def _clip_log_abs(value: jnp.ndarray) -> jnp.ndarray:
mag = jnp.log(1e-8 + jnp.abs(value))
return jnp.clip(mag, -5, 5) * 0.5
def _sorted_values(
value_dict: Mapping[str, jnp.ndarray]) -> Sequence[jnp.ndarray]:
return list(zip(*sorted(value_dict.items(), key=lambda x: x[0])))[1]
@gin.configurable
class NNAdam(lopt_base.LearnedOptimizer):
  """Adam with different, hyper parameters per layer controlled by an LSTM.
  See module level docstring for more info.
  """
  def __init__(self,
               output_scale: float = 0.01,
               initial_learning_rate: float = 1e-4,
               initial_beta1: float = 0.9,
               initial_beta2: float = 0.999,
               initial_epsilon: float = 1e-8):
    """Initializer.
    Args:
      output_scale: Multiplier which controls the rate of change on adam hparams
      initial_learning_rate: Initialization of learning rate in meta-params.
      initial_beta1: Initialization of beta1 in meta-params.
      initial_beta2: Initialization of beta2 in meta-params.
      initial_epsilon: Initialization of epsilon in meta-params.
    """
    super().__init__()
    self.initial_learning_rate = initial_learning_rate
    self.initial_beta1 = initial_beta1
    self.initial_beta2 = initial_beta2
    self.initial_epsilon = initial_epsilon
    self.output_scale = output_scale
    self.lstm_hidden_size = 32
    # This hardcoded value matches the number of features created by the
    # lstm_features_for_tensor function.
    self.rnn_input_features = 19
    # The LSTM module is constructed lazily inside haiku transforms.
    self.lstm_fn = lambda: hk.LSTM(self.lstm_hidden_size, name="rnn")
    self.rnn_init, self.rnn_apply = hk.without_apply_rng(
        hk.transform(lambda x, state: self.lstm_fn()(x, state)))  # pylint: disable=unnecessary-lambda
    # Map from lstm output to the values which control hyper parameters.
    @hk.without_apply_rng
    @hk.transform
    def _rnn_to_controls(hidden):
      # Zero-initialized so hparams do not move at meta-initialization.
      mod = hk.Linear(
          4, name="rnn_to_controls", w_init=hk.initializers.Constant(0.))
      return mod(hidden)
    self.rnn_to_controls = _rnn_to_controls
  def init(self, key: PRNGKey) -> lopt_base.MetaParams:
    """Samples initial meta-parameters: LSTM weights plus initial hparams."""
    key1, key2, key3 = jax.random.split(key, 3)
    lstm_inital_state = hk.transform(
        lambda: self.lstm_fn().initial_state(1))[1](None, key1)
    return flax.core.FrozenDict({
        "lstm_init_state":
            lstm_inital_state,
        "rnn_params":
            self.rnn_init(key2, jnp.zeros([1, self.rnn_input_features]),
                          lstm_inital_state),
        "rnn_to_controls_params":
            self.rnn_to_controls.init(key3,
                                      jnp.zeros([0, self.lstm_hidden_size])),
        "per_layer_lr":
            _scaled_lr.forward(self.initial_learning_rate),
        "per_layer_beta1":
            _scaled_one_minus_log.forward(self.initial_beta1),
        "per_layer_beta2":
            _scaled_one_minus_log.forward(self.initial_beta2),
        "per_layer_epsilon":
            _scaled_epsilon.forward(self.initial_epsilon),
    })
  def opt_fn(self,
             theta: lopt_base.MetaParams,
             is_training: bool = True) -> opt_base.Optimizer:
    """Builds an inner Optimizer instance closing over meta-params `theta`."""
    rolling = _first_second_rolling()
    parent = self
    class _Opt(opt_base.Optimizer):
      """Optimizer capturing the meta params."""
      def __init__(self, theta: lopt_base.MetaParams):
        self.theta = theta
      def init(
          self,
          params: opt_base.Params,
          model_state: Optional[opt_base.ModelState] = None,
          num_steps: Optional[int] = None,
          key: Optional[PRNGKey] = None,
      ) -> NNAdamState:
        """Creates the initial inner-optimization state for `params`."""
        theta = self.theta
        if num_steps is None:
          raise ValueError("Must specify number of steps for this lopt!")
        # One LSTM hidden state per tensor: tile the meta-learned initial
        # state along a new leading axis of size n_states.
        n_states = len(jax.tree_leaves(params))
        lstm_hidden_state = jax.tree_map(
            lambda x: jnp.tile(x, [n_states] + [1] * len(x.shape[1:])),
            theta["lstm_init_state"])
        return NNAdamState(
            params=params,
            rolling_features=rolling.init(params),
            iteration=jnp.asarray(0, dtype=jnp.int32),
            state=model_state,
            lstm_hidden_state=lstm_hidden_state,
            per_layer_lr=jax.tree_map(lambda x: theta["per_layer_lr"], params),
            per_layer_beta1=jax.tree_map(lambda x: theta["per_layer_beta1"],
                                         params),
            per_layer_beta2=jax.tree_map(lambda x: theta["per_layer_beta2"],
                                         params),
            per_layer_epsilon=jax.tree_map(lambda x: theta["per_layer_epsilon"],
                                           params),
        )
      def lstm_features_for_tensor(self, p: jnp.ndarray, g: jnp.ndarray,
                                   m: jnp.ndarray, rms: jnp.ndarray,
                                   lr: jnp.ndarray, beta1: jnp.ndarray,
                                   beta2: jnp.ndarray,
                                   epsilon: jnp.ndarray) -> jnp.ndarray:
        """Compute features from a tensor which are passed into the RNN."""
        inputs = {}
        # Summary statistics (log-magnitude, sign, variance) of momentum,
        # second moment, parameters and gradients.
        mean_m = jnp.mean(m)
        inputs["mean_m_mag"] = _clip_log_abs(mean_m)
        inputs["mean_m_sign"] = jnp.sign(mean_m)
        var_m = jnp.mean(jnp.square(m - mean_m))
        inputs["var_m"] = _clip_log_abs(var_m)
        mean_rms = jnp.mean(rms)
        inputs["mean_rms"] = _clip_log_abs(mean_rms)
        inputs["mean_sign"] = jnp.sign(mean_rms)
        var_rms = jnp.mean(jnp.square(rms - mean_rms))
        inputs["var_rms"] = _clip_log_abs(var_rms)
        mean_p = jnp.mean(p)
        inputs["mean_p_mag"] = _clip_log_abs(mean_p)
        inputs["mean_p_sign"] = jnp.sign(mean_p)
        var_p = jnp.mean(jnp.square(p - mean_p))
        inputs["var_p"] = _clip_log_abs(var_p)
        mean_g = jnp.mean(g)
        inputs["mean_g_mag"] = _clip_log_abs(mean_g)
        inputs["mean_g_sign"] = jnp.sign(mean_g)
        var_g = jnp.mean(jnp.square(g - mean_g))
        inputs["var_g"] = _clip_log_abs(var_g)
        mean_g_abs = jnp.mean(jnp.abs(g))
        inputs["mean_gabs_mag"] = _clip_log_abs(mean_g_abs)
        # Tensor-rank indicator features (+1 / -1 flags).
        inputs["is_scalar"] = jnp.asarray(1.0 if len(p.shape) == 0 else -1.0)  # pylint: disable=g-explicit-length-test
        inputs["is_bias"] = jnp.asarray(1.0 if len(p.shape) == 1 else -1.0)
        # Current (transformed-space) hparams, squashed to roughly [-1, 1].
        inputs["lr"] = jnp.clip(lr, -5, 5) * 0.2
        inputs["beta1"] = jnp.clip(beta1, -5, 5) * 0.2
        inputs["beta2"] = jnp.clip(beta2, -5, 5) * 0.2
        inputs["epsilon"] = jnp.clip(epsilon, -5, 5) * 0.2
        # We must sort values here to ensure that the dictionary order is
        # consistent.
        values = _sorted_values(inputs)
        return jnp.asarray(values)
      @functools.partial(jax.jit, static_argnums=(0,))
      def update(self,
                 opt_state: NNAdamState,
                 grads: Any,
                 loss: jnp.ndarray,
                 model_state: Optional[opt_base.ModelState] = None,
                 **kwargs) -> NNAdamState:
        """Applies one parameter update and one hparam-controller step."""
        theta = self.theta
        # Clip exploding gradients before any accumulation.
        grads = jax.tree_map(lambda x: jnp.clip(x, -1000., 1000.), grads)
        summary.tree_scalar_mean("beta1_pre", opt_state.per_layer_beta1)
        summary.tree_scalar_mean("beta2_pre", opt_state.per_layer_beta2)
        summary.tree_scalar_mean("epsilon_pre", opt_state.per_layer_epsilon)
        summary.tree_scalar_mean("lr_pre", opt_state.per_layer_lr)
        # Map the current hparams to the correct space so that we can apply
        # an update.
        b1 = jax.tree_map(_scaled_one_minus_log.inverse,
                          opt_state.per_layer_beta1)
        b2 = jax.tree_map(_scaled_one_minus_log.inverse,
                          opt_state.per_layer_beta2)
        epsilon = jax.tree_map(_scaled_epsilon.inverse,
                               opt_state.per_layer_epsilon)
        lr = jax.tree_map(_scaled_lr.inverse, opt_state.per_layer_lr)
        summary.tree_scalar_mean("beta1_post", b1)
        summary.tree_scalar_mean("beta2_post", b2)
        summary.tree_scalar_mean("epsilon_post", epsilon)
        summary.tree_scalar_mean("lr_post", lr)
        # Update the accumulators with the current b1 and b2 values (different
        # per tensor).
        next_rolling_features = rolling.update(opt_state.rolling_features,
                                               grads, b1, b2)
        m = next_rolling_features.m
        rms = next_rolling_features.rms
        def compute_step_for_tensor(m, rms, lr, epsilon):
          # When epsilon is small, this update recovers Adam.
          # When epsilon is large, this resembles SGDM more.
          step = (lr *
                  (1.0 + epsilon)) * 1.0 / (epsilon + lax.sqrt(rms + 1e-10)) * m
          return step
        # Compute a update step.
        update = jax.tree_map(compute_step_for_tensor, m, rms, lr, epsilon)
        summary.tree_step("nn_adam_step", update)
        # apply the step to current parameters
        new_params = jax.tree_map(lambda a, b: a - b, opt_state.params, update)
        # run the LSTM on transformed features (one feature vector per tensor)
        rnn_inputs = jax.tree_map(self.lstm_features_for_tensor,
                                  opt_state.params, grads, m, rms,
                                  opt_state.per_layer_lr,
                                  opt_state.per_layer_beta1,
                                  opt_state.per_layer_beta2,
                                  opt_state.per_layer_epsilon)
        stack = jnp.asarray(jax.tree_leaves(rnn_inputs))
        lstm_out, next_lstm_hidden_state = parent.rnn_apply(
            theta["rnn_params"], stack, opt_state.lstm_hidden_state)
        deltas = parent.rnn_to_controls.apply(theta["rnn_to_controls_params"],
                                              lstm_out)
        treedef = jax.tree_structure(opt_state.params)
        deltas = jax.tree_unflatten(treedef, list(deltas))
        deltas = jax.tree_map(lambda x: x * parent.output_scale, deltas)
        # Extract out the values which we use to update the hparams
        assert jax.tree_leaves(deltas)[0].shape[0] == 4
        lr = jax.tree_map(lambda x: x[0], deltas)
        beta1 = jax.tree_map(lambda x: x[1], deltas)
        beta2 = jax.tree_map(lambda x: x[2], deltas)
        epsilon = jax.tree_map(lambda x: x[3], deltas)
        summary.tree_scalar_mean("lr_step", lr)
        summary.tree_scalar_mean("beta1_step", beta1)
        summary.tree_scalar_mean("beta2_step", beta2)
        summary.tree_scalar_mean("epsilon_step", epsilon)
        # Update the current hparams by adding the prediction to the previous
        # value.
        tree_add = lambda a, b: jax.tree_map(lambda x, y: x + y, a, b)
        new_lr = tree_add(opt_state.per_layer_lr, lr)
        new_b1 = tree_add(opt_state.per_layer_beta1, beta1)
        new_b2 = tree_add(opt_state.per_layer_beta2, beta2)
        new_epsilon = tree_add(opt_state.per_layer_epsilon, epsilon)
        # Clip the hparams by running a inverse and forward of transforms.
        # This ensures only valid values are used.
        new_lr = _scaled_lr.tree_inverse_forward(new_lr)
        new_b1 = _scaled_one_minus_log.tree_inverse_forward(new_b1)
        new_b2 = _scaled_one_minus_log.tree_inverse_forward(new_b2)
        new_epsilon = _scaled_epsilon.tree_inverse_forward(new_epsilon)
        next_opt_state = NNAdamState(
            params=new_params,
            rolling_features=next_rolling_features,
            iteration=opt_state.iteration + 1,
            state=model_state,
            lstm_hidden_state=next_lstm_hidden_state,
            per_layer_lr=new_lr,
            per_layer_beta1=new_b1,
            per_layer_beta2=new_b2,
            per_layer_epsilon=new_epsilon)
        # Cast dtypes/weak-types back to match the incoming state exactly.
        return tree_utils.match_type(next_opt_state, opt_state)
    return _Opt(theta)
| |
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities shared by dxpy modules.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import os, json, collections, concurrent.futures, traceback, sys, time, gc
from multiprocessing import cpu_count
import dateutil.parser
from .. import logger
from ..compat import basestring, THREAD_TIMEOUT_MAX
def _force_quit(signum, frame):
    """Signal handler that terminates the process immediately.

    Uses os._exit() so that no atexit handlers or worker-thread joins can
    delay the exit.
    """
    # traceback.print_stack(frame)
    os._exit(os.EX_IOERR)
def get_futures_threadpool(max_workers):
    """Construct a thread pool executor with *max_workers* worker threads."""
    return concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
def wait_for_a_future(futures, print_traceback=False):
    """
    Return the next future that completes. If a KeyboardInterrupt is
    received, then the entire process is exited immediately. See
    wait_for_all_futures for more notes.
    """
    while True:
        try:
            # as_completed yields futures as they finish; take the first one.
            return next(concurrent.futures.as_completed(futures,
                                                        timeout=THREAD_TIMEOUT_MAX))
        except concurrent.futures.TimeoutError:
            # Nothing finished within the window; keep waiting.
            continue
        except KeyboardInterrupt:
            if print_traceback:
                traceback.print_stack()
            else:
                print('')
            # Hard exit: avoids the blocking atexit join of worker threads.
            os._exit(os.EX_IOERR)
def wait_for_all_futures(futures, print_traceback=False):
    """
    Wait indefinitely for all futures in the input iterable to complete.
    Uses a finite timeout per wait so that interrupts are handled promptly.
    Calls os._exit() in case of KeyboardInterrupt. Otherwise, the atexit registered
    handler in concurrent.futures.thread will run, and issue blocking join() on all
    worker threads, requiring us to listen to events in worker threads in order to
    enable timely exit in response to Ctrl-C.

    Note: This still doesn't handle situations where Ctrl-C is pressed elsewhere in
    the code and there are worker threads with long-running tasks.

    Note: os._exit() doesn't work well with interactive mode (e.g. ipython). This may help:
    import __main__ as main; if hasattr(main, '__file__'): os._exit() else: os.exit()
    """
    try:
        # Loop until no futures remain pending; the 60s timeout keeps the
        # main thread interruptible.
        while concurrent.futures.wait(futures, timeout=60).not_done:
            pass
    except KeyboardInterrupt:
        if print_traceback:
            traceback.print_stack()
        else:
            print('')
        os._exit(os.EX_IOERR)
def response_iterator(request_iterator, thread_pool, max_active_tasks=None):
    """
    :param request_iterator: An iterator producing inputs for consumption by the worker pool.
    :type request_iterator: iterator of callable, args, kwargs
    :param thread_pool: thread pool to submit the requests to
    :type thread_pool: concurrent.futures.thread.ThreadPoolExecutor
    :param max_active_tasks:
        The maximum number of tasks that may be either running or waiting for consumption of their result.
        If not given, defaults to the number of CPU cores on the machine.
    :type max_active_tasks: int
    Rate-limited asynchronous multithreaded task runner.
    Consumes tasks from *request_iterator*. Yields their results in order, while allowing up to *max_active_tasks* to run
    simultaneously. Unlike concurrent.futures.Executor.map, prevents new tasks from starting while there are
    *max_active_tasks* or more unconsumed results.
    """
    # Queue of futures for tasks submitted but whose results are not yet consumed.
    tasks_in_progress = collections.deque()
    if max_active_tasks is None:
        max_active_tasks = cpu_count()
    # The following two functions facilitate GC by not adding extra variables to the enclosing scope.
    def submit_task(task_iterator, executor, futures_queue):
        # Pulls one (callable, args, kwargs) triple and submits it; lets
        # StopIteration from next() propagate to the caller when exhausted.
        task_callable, task_args, task_kwargs = next(task_iterator)
        task_future = executor.submit(task_callable, *task_args, **task_kwargs)
        futures_queue.append(task_future)
    def next_result(tasks_in_progress):
        # Blocks on the OLDEST outstanding future, which preserves the
        # submission order of yielded results.
        future = tasks_in_progress.popleft()
        try:
            result = future.result(timeout=THREAD_TIMEOUT_MAX)
        except KeyboardInterrupt:
            # Hard exit; see wait_for_all_futures for the rationale.
            print('')
            os._exit(os.EX_IOERR)
        return result
    # Prime the pipeline with up to max_active_tasks submissions.
    for _i in range(max_active_tasks):
        try:
            submit_task(request_iterator, thread_pool, tasks_in_progress)
        except StopIteration:
            break
    while len(tasks_in_progress) > 0:
        result = next_result(tasks_in_progress)
        try:
            # Top up the pipeline before yielding, so a fresh task runs while
            # the consumer processes this result.
            submit_task(request_iterator, thread_pool, tasks_in_progress)
        except StopIteration:
            pass
        yield result
        # Drop our reference so the result can be garbage-collected while the
        # generator is suspended.
        del result
def string_buffer_length(buf):
    """Return the total length of the seekable buffer *buf*.

    The buffer's current position is restored before returning.
    """
    saved_position = buf.tell()
    buf.seek(0, os.SEEK_END)
    total_length = buf.tell()
    buf.seek(saved_position)
    return total_length
def normalize_time_input(t, future=False, default_unit='ms'):
    """
    :param t: an int, digit string, suffixed offset string (e.g. "-5d"), or
        date string to normalize.
    :param future: if True, values in the past are treated as offsets that
        are added to the current time.
    :type future: bool
    :param default_unit: units of the input time *t*; must be one of "s" or
        "ms". This param is only respected if *t* looks like an int (e.g.
        "12345", 12345).
    :type default_unit: string
    Converts inputs such as:
       "2012-05-01"
       "-5d"
       1352863174
       "1352863174"
    to milliseconds since epoch. See http://labix.org/python-dateutil and :meth:`normalize_timedelta`.
    """
    error_msg = 'Error: Expected an int timestamp, a date format (e.g. YYYY-MM-DD), or an int with a single-letter suffix (s=seconds, m=minutes, h=hours, d=days, w=weeks, M=months, y=years; e.g. "-10d" indicates 10 days ago); but got {t}'
    # A string of pure digits is treated as an integer timestamp.
    if isinstance(t, basestring) and t.isdigit():
        t = int(t)
    if isinstance(t, basestring):
        # First try a suffixed offset ("-5d"); on failure, fall back to
        # parsing as a calendar date.
        try:
            t = normalize_timedelta(t)
        except ValueError:
            try:
                # Convert the parsed date to epoch milliseconds.
                t = int(time.mktime(dateutil.parser.parse(t).timetuple())*1000)
                assert t > 0
            except (ValueError, OverflowError, AssertionError):
                raise ValueError(error_msg.format(t=t))
    elif isinstance(t, int):
        # A bare int is scaled from default_unit into milliseconds.
        units_multipliers = {'ms': 1, 's': 1000}
        if default_unit not in units_multipliers:
            raise ValueError("Expected default_unit to be one of 's' or 'ms'")
        t = t * units_multipliers[default_unit]
    else:
        raise ValueError(error_msg.format(t=t))
    now = int(time.time()*1000)
    # Negative values are relative offsets from now; with future=True any
    # past value is likewise shifted relative to now.
    if t < 0 or (future and t < now):
        t += now
    return t
def normalize_timedelta(timedelta):
    """
    Given a string like "1w" or "-5d", convert it to an integer in milliseconds.
    Integers without a suffix are interpreted as seconds.
    Note: not related to the datetime timedelta class.
    """
    # Milliseconds per unit for each recognized single-letter suffix.
    suffix_multipliers = {'s': 1000, 'm': 1000*60, 'h': 1000*60*60,
                          'd': 1000*60*60*24, 'w': 1000*60*60*24*7,
                          'M': 1000*60*60*24*30, 'y': 1000*60*60*24*365}
    try:
        # No suffix: interpret the whole value as seconds.
        return int(timedelta) * 1000
    except ValueError:
        magnitude, suffix = timedelta[:-1], timedelta[-1:]
        if suffix not in suffix_multipliers:
            raise ValueError()
        return int(magnitude) * suffix_multipliers[suffix]
# See http://stackoverflow.com/questions/4126348
class OrderedDefaultdict(collections.OrderedDict):
    """An OrderedDict that also behaves like collections.defaultdict.

    The first positional argument, if given, is the default factory (a
    callable or None); remaining arguments are forwarded to OrderedDict.
    """
    def __init__(self, *args, **kwargs):
        default_factory = None
        remaining_args = ()
        if args:
            default_factory = args[0]
            if not (default_factory is None or callable(default_factory)):
                raise TypeError('first argument must be callable or None')
            remaining_args = args[1:]
        self.default_factory = default_factory
        # Name the class explicitly: super(self.__class__, ...) would recurse
        # infinitely if this class were ever subclassed.
        super(OrderedDefaultdict, self).__init__(*remaining_args, **kwargs)
    def __missing__(self, key):
        # Mirror defaultdict: no factory means a plain KeyError.
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value
    def __reduce__(self):
        # The constructor-args element of the reduce tuple must itself be a
        # tuple; passing the factory bare broke pickling. The items element
        # must be an iterator per the __reduce__ protocol.
        args = (self.default_factory,) if self.default_factory else tuple()
        return type(self), args, None, None, iter(self.items())
def group_array_by_field(array, field='group'):
    """Group the mappings in *array* by their value for *field*.

    Returns an OrderedDefaultdict of lists, in first-encounter order.
    Items lacking *field* are collected under the None key.
    """
    grouped = OrderedDefaultdict(list)
    for entry in array:
        # Explicitly create the catch-all None bucket the first time an
        # entry without the field is seen.
        if field not in entry and None not in grouped:
            grouped[None] = []
        grouped[entry.get(field)].append(entry)
    return grouped
def merge(d, u):
    """
    Recursively updates a dictionary.
    Example: merge({"a": {"b": 1, "c": 2}}, {"a": {"b": 3}}) = {"a": {"b": 3, "c": 2}}

    Mapping values in *u* are merged recursively into the corresponding
    entries of *d*; all other values overwrite. *d* is modified in place
    and returned.
    """
    # collections.Mapping was removed in Python 3.10; the ABCs have lived in
    # collections.abc since Python 3.3. Fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = merge(d.get(k, {}), v)
        else:
            d[k] = u[k]
    return d
def _dict_raise_on_duplicates(ordered_pairs):
"""
Reject duplicate keys.
"""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
def json_load_raise_on_duplicates(*args, **kwargs):
    """Identical to json.load(), except that duplicate object keys raise ValueError."""
    # Force our strict pairs hook, overriding any caller-supplied one.
    return json.load(*args, **dict(kwargs, object_pairs_hook=_dict_raise_on_duplicates))
def json_loads_raise_on_duplicates(*args, **kwargs):
    """Identical to json.loads(), except that duplicate object keys raise ValueError."""
    # Force our strict pairs hook, overriding any caller-supplied one.
    return json.loads(*args, **dict(kwargs, object_pairs_hook=_dict_raise_on_duplicates))
def warn(*args, **kwargs):
    """Like print(), but writes to standard error."""
    print(*args, file=sys.stderr, **kwargs)
# Moved to the bottom due to circular imports
from .exec_utils import run, convert_handlers_to_dxlinks, parse_args_as_job_input, entry_point, DXJSONEncoder
| |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from perfkitbenchmarker import virtual_machine, linux_virtual_machine
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import providers
from perfkitbenchmarker.providers.openstack import os_disk
from perfkitbenchmarker.providers.openstack import os_network
from perfkitbenchmarker.providers.openstack import utils as os_utils
# Default OS image names; subclasses pick one via DEFAULT_IMAGE below.
RHEL_IMAGE = 'rhel-7.2'
UBUNTU_IMAGE = 'ubuntu-14.04'
# String sentinel compared against FLAGS.openstack_scheduler_policy to mean
# "no scheduler policy".
NONE = 'None'
FLAGS = flags.FLAGS
class OpenStackVirtualMachine(virtual_machine.BaseVirtualMachine):
  """Object representing an OpenStack Virtual Machine"""

  CLOUD = providers.OPENSTACK
  # Subclasses should override the default image.
  DEFAULT_IMAGE = None
  # Class-level lock serializing floating-IP allocation across VM objects.
  _floating_ip_lock = threading.Lock()

  def __init__(self, vm_spec):
    """Initialize an OpenStack virtual machine.

    Args:
      vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
    """
    super(OpenStackVirtualMachine, self).__init__(vm_spec)
    self.firewall = os_network.OpenStackFirewall.GetFirewall()
    # Open ICMP plus the full port range for benchmark traffic.
    self.firewall.AllowICMP(self)
    self.firewall.AllowPort(self, 1, os_network.MAX_PORT)
    self.name = 'perfkit_vm_%d_%s' % (self.instance_number, FLAGS.run_uri)
    self.key_name = 'perfkit_key_%d_%s' % (self.instance_number,
                                           FLAGS.run_uri)
    self.client = os_utils.NovaClient()
    # FIXME(meteorfox): Remove --openstack_public_network and
    # --openstack_private_network once depreciation time has expired
    self.network_name = (FLAGS.openstack_network or
                         FLAGS.openstack_private_network)
    self.floating_ip_pool_name = (FLAGS.openstack_floating_ip_pool or
                                  FLAGS.openstack_public_network)
    # NOTE(review): unlike floating_ip_pool_name above, this does NOT fall
    # back to the deprecated --openstack_public_network flag -- confirm the
    # asymmetry is intentional.
    self.public_network = os_network.OpenStackPublicNetwork(
        FLAGS.openstack_floating_ip_pool)
    self.id = None    # Nova server id, assigned in _Create().
    self.pk = None    # Nova keypair object, assigned in ImportKeyfile().
    self.user_name = FLAGS.openstack_image_username
    self.boot_wait_time = None  # Managed by WaitForBootCompletion().
    self.image = self.image or self.DEFAULT_IMAGE
    self.public_net = None   # Floating-IP pool network, set in _Create().
    self.private_net = None  # Internal network, set in _Create().
    self.floating_ip = None  # Set in _PostCreate() when a pool is in use.

  def _Create(self):
    """Boot the Nova server (from image, or from volume when requested)."""
    image = self.client.images.findall(name=self.image)[0]
    flavor = self.client.flavors.findall(name=self.machine_type)[0]
    self.private_net = self.client.networks.find(label=self.network_name)
    if self.floating_ip_pool_name:
      self.public_net = self.client.networks.find(
          label=self.floating_ip_pool_name)
    if not self.private_net:
      if self.public_net:
        # NOTE(review): the '%s' placeholder below is never substituted;
        # the message prints with a literal '%s'.
        raise errors.Error(
            'Cannot associate floating-ip address from pool %s without '
            'an internally routable network. Make sure '
            '--openstack_network flag is set.')
      else:
        raise errors.Error(
            'Cannot build instance without a network. Make sure to set '
            'either just --openstack_network or both '
            '--openstack_network and --openstack_floating_ip_pool '
            'flags.')
    nics = [{'net-id': self.private_net.id}]
    image_id = image.id
    boot_from_vol = []
    scheduler_hints = self._GetSchedulerHints()
    if FLAGS.openstack_boot_from_volume:
      # Boot from a new volume built from the image instead of the image
      # itself; volume size defaults to the flavor's disk size.
      volume_size = FLAGS.openstack_volume_size or flavor.disk
      image_id = None
      boot_from_vol = [{'boot_index': 0,
                        'uuid': image.id,
                        'volume_size': volume_size,
                        'source_type': 'image',
                        'destination_type': 'volume',
                        'delete_on_termination': True}]
    vm = self.client.servers.create(
        name=self.name,
        image=image_id,
        flavor=flavor.id,
        key_name=self.key_name,
        security_groups=['perfkit_sc_group'],
        nics=nics,
        availability_zone=self.zone,
        block_device_mapping_v2=boot_from_vol,
        scheduler_hints=scheduler_hints,
        config_drive=FLAGS.openstack_config_drive)
    self.id = vm.id

  def _GetSchedulerHints(self):
    """Return Nova scheduler hints, or None when no policy is configured.

    Reuses (or creates) one server group per PKB run implementing the
    policy given by --openstack_scheduler_policy.
    """
    scheduler_hints = None
    if FLAGS.openstack_scheduler_policy != NONE:
      group_name = 'perfkit_%s' % FLAGS.run_uri
      try:
        group = self.client.server_groups.findall(name=group_name)[0]
      except IndexError:
        # No group exists for this run yet; create it.
        group = self.client.server_groups.create(
            policies=[FLAGS.openstack_scheduler_policy],
            name=group_name)
      scheduler_hints = {'group': group.id}
    return scheduler_hints

  @vm_util.Retry(max_retries=4, poll_interval=2)
  def _PostCreate(self):
    """Wait for the server to leave BUILD, then record its IP addresses."""
    status = 'BUILD'
    instance = None
    while status == 'BUILD':
      time.sleep(5)
      instance = self.client.servers.get(self.id)
      status = instance.status
    # Unlikely to be false, previously checked to be true in self._Create()
    assert self.private_net is not None, '--openstack_network must be set.'
    self.internal_ip = instance.networks[self.network_name][0]
    self.ip_address = self.internal_ip
    if self.public_net:
      # Prefer the floating (public) address when a pool was configured.
      self.floating_ip = self._AllocateFloatingIP(instance)
      self.ip_address = self.floating_ip.ip

  def _AllocateFloatingIP(self, instance):
    """Attach a floating IP from the pool to *instance* and return it."""
    # Pool allocation is shared process-wide, hence the class-level lock.
    with self._floating_ip_lock:
      floating_ip = self.public_network.get_or_create()
      instance.add_floating_ip(floating_ip)
      logging.info(
          'floating-ip associated: {}'.format(floating_ip.ip))
      # Poll until the network layer reports the address attached.
      while not self.public_network.is_attached(floating_ip):
        time.sleep(1)
    return floating_ip

  def _Delete(self):
    """Delete the server and release its floating IP; missing VMs are OK."""
    from novaclient.exceptions import NotFound
    try:
      self.client.servers.delete(self.id)
      self._WaitForDeleteCompletion()
    except NotFound:
      logging.info('Instance not found, may have been already deleted')
    if self.floating_ip:
      self.public_network.release(self.floating_ip)

  def _Exists(self):
    """Return True if Nova still knows about this server."""
    from novaclient.exceptions import NotFound
    try:
      return self.client.servers.get(self.id) is not None
    except NotFound:
      return False

  def WaitForBootCompletion(self):
    """Sleep, then probe the guest over SSH until 'hostname' succeeds."""
    # Do one longer sleep, then check at shorter intervals.
    if self.boot_wait_time is None:
      self.boot_wait_time = 15
    time.sleep(self.boot_wait_time)
    self.boot_wait_time = 5
    resp, _ = self.RemoteCommand('hostname', retries=1)
    if self.bootable_time is None:
      self.bootable_time = time.time()
    if self.hostname is None:
      self.hostname = resp[:-1]  # Strip the trailing newline.

  @vm_util.Retry(poll_interval=5, max_retries=-1, timeout=300,
                 log_errors=False,
                 retryable_exceptions=(
                     errors.Resource.RetryableDeletionError,))
  def _WaitForDeleteCompletion(self):
    """Raise a retryable error while the server still reports ACTIVE."""
    instance = self.client.servers.get(self.id)
    if instance and instance.status == 'ACTIVE':
      raise errors.Resource.RetryableDeletionError(
          'VM: %s has not been deleted. Retrying to check status.'
          % self.name)

  def CreateScratchDisk(self, disk_spec):
    """Create and attach the scratch volume(s) described by *disk_spec*."""
    disks_names = ('%s-data-%d-%d'
                   % (self.name, len(self.scratch_disks), i)
                   for i in range(disk_spec.num_striped_disks))
    disks = [os_disk.OpenStackDisk(disk_spec, name, self.zone)
             for name in disks_names]
    self._CreateScratchDiskFromDisks(disk_spec, disks)

  def _CreateDependencies(self):
    """Upload the SSH key and open remote-access ports before creation."""
    self.ImportKeyfile()
    self.AllowRemoteAccessPorts()

  def _DeleteDependencies(self):
    """Remove the uploaded SSH key after the VM is gone."""
    self.DeleteKeyfile()

  def ImportKeyfile(self):
    """Register the local public SSH key with Nova (idempotent)."""
    if not (self.client.keypairs.findall(name=self.key_name)):
      cat_cmd = ['cat',
                 vm_util.GetPublicKeyPath()]
      key_file, _ = vm_util.IssueRetryableCommand(cat_cmd)
      pk = self.client.keypairs.create(self.key_name,
                                       public_key=key_file)
    else:
      pk = self.client.keypairs.findall(name=self.key_name)[0]
    self.pk = pk

  def DeleteKeyfile(self):
    """Delete the Nova keypair; a missing key is not an error."""
    from novaclient.exceptions import NotFound
    try:
      self.client.keypairs.delete(self.pk)
    except NotFound:
      logging.info("Deleting key doesn't exists")
class DebianBasedOpenStackVirtualMachine(OpenStackVirtualMachine,
                                         linux_virtual_machine.DebianMixin):
  """OpenStack VM combined with the Debian/Ubuntu provisioning mixin."""
  DEFAULT_IMAGE = UBUNTU_IMAGE
class RhelBasedOpenStackVirtualMachine(OpenStackVirtualMachine,
                                       linux_virtual_machine.RhelMixin):
  """OpenStack VM combined with the RHEL provisioning mixin."""
  DEFAULT_IMAGE = RHEL_IMAGE
| |
## @package sbol
# A Python wrapper for libSBOLc, a module for reading, writing, and constructing
# genetic designs according to the standardized specifications of the Synthetic Biology Open Language
#
# @file sbol.py
# Implements a high-level, Pythonic interface for the SWIG-Python classes in libsbol
#
# @namespace sbol.sbol
# High level wrappers for libSBOLc
#
# @namespace sbol.libsbol
# Low level SWIG-Python wrappers for libSBOLc
#
# @namespace sbol.sbol_test
# Unit tests
import libsbol
import sys
from cStringIO import StringIO
# SO terms: Sequence Ontology URIs used to classify DNAComponents.
PROMOTER = "http://purl.obolibrary.org/obo/SO_0000167"
RBS = "http://purl.obolibrary.org/obo/SO_0000552"
CDS = "http://purl.obolibrary.org/obo/SO_0000316"
TERMINATOR = "http://purl.obolibrary.org/obo/SO_0000141"
USER_DEFINED = "http://purl.obolibrary.org/obo/SO_0000001"
DESIGN = "http://purl.obolibrary.org/obo/SO_0000546"
SCAR = "http://purl.obolibrary.org/obo/SO_0001953"
## Exception hierarchy for the sbol wrapper; all errors derive from SBOLError.
class SBOLError(Exception):
    'Problem with SBOL'

class InternalError(SBOLError):
    'Encountered a bug'

class URIError(SBOLError):
    'Invalid URI'

class PositionError(SBOLError):
    'Invalid position'

class StrandError(SBOLError):
    'Invalid strand polarity'
# Public API: names exported by 'from sbol import *'.
__all__ = (
    'SBOLError',
    'InternalError',
    'URIError',
    'PositionError',
    'StrandError',
    'Document',
    'DNASequence',
    'SequenceAnnotation',
    'DNAComponent',
    'Collection' )
## The SBOL print functions use printf() to print directly
# to stdout; this captures that output and returns it for
# use in Python's __str__ methods.
## The SBOL print functions use printf() to print directly
# to stdout; this captures that output and returns it for
# use in Python's __str__ methods.
def capture_stdout(fn, *args, **kwargs):
    """Call fn(*args, **kwargs) and return everything it printed to stdout."""
    real_stdout = sys.stdout
    sys.stdout = StringIO()
    fn(*args, **kwargs)
    captured = sys.stdout.getvalue()
    sys.stdout.close()
    sys.stdout = real_stdout
    return captured
## An array that partly supports Python list-like operations including
# slice indexing. A proxy for libSBOLc PointerArrays.
# Members of the array may be one type of SBOL core objects (eg, DNAComponents)
# Members of the array may be referenced by numerical index (eg,
# sbol.Document.components[0]). In addition, the array behaves like a dictionary
# indexed by an object's URI
# (eg, sbol.Document.components['http://examples.com/0001']).<br>
# The sbol.Document class contains 4 instances of SBOLObjectArrays that
# each register a core type of SBOL object: DNAComponents, DNASequences,
# SequenceAnnotations, and Collections. Each instance of a SBOLObjectArray
# is initialized to contain one of these type of objects. The accessor methods
# specific to the type of object are passed as callback function arguments to
# the SBOLObjectArray constructor.<br>
# Accessor methods used by the SBOLObjectArray conform to the following pattern:
# |Accessor | Return value | Example callback |
# |----------|------------------------------------------------|----------------------------|
# |get_uri_fn| Returns the object's URI | libsbol.getDNAComponentURI |
# |remove_fn | Remove the object from the array | libsbol.removeDNAComponent |
# |get_num_fn| Get the num of objects in the array | libsbol.getNumDNAComponents|
# |get_nth_fn| Get the array element whose index is specified | libsbol.getNthDNAComponent |
# <br>
# Instances of SBOL objects are registered in a Document automatically when
# created and cannot exist independently of a parent Document. Consequently,
# SBOLObjectArrays do not require an "add child to parent" accessor function
# (see SBOLObjectExtendableArray)
class SBOLObjectArray(object):
    ## Construct an SBOLObjectArray
    # @param obj A pointer to the PySBOL object that will own this array.
    # Typically the obj will be the parent Document object.
    # @param get_uri Callback function for retrieving an SBOL object's URI
    # @param remove Callback function that will remove an object from this array
    # @param num Callback function that returns the number of objects in the array
    # @param nth Callback function that returns an object with the specified index
    def __init__(self, obj, get_uri, remove, num, nth):
        ## A pointer to the PySBOL object owns this array.
        self.ptr = obj.ptr
        # A Document owns itself; any other owner carries a .doc reference.
        if isinstance(obj, Document):
            self.doc = obj
        else:
            self.doc = obj.doc
        # Each type of SBOL object has its own array functions,
        # which need to be set for the wrapper to work.
        ## Accessor method used by SBOLObjectArray to retrieve an element object's URI
        # (NOTE(review): named get_uri_fun, unlike the '_fn'-suffixed siblings;
        # kept as-is since it is an instance attribute readers may rely on.)
        self.get_uri_fun = get_uri
        ## Accessor method used by SBOLObjectArray to remove an element object from the array
        self.remove_fn = remove
        ## Accessor method used by SBOLObjectArray to get the number of objects in the array
        self.get_num_fn = num
        ## Accessor method used by SBOLObjectArray to retrieve an object of specified index
        # from array
        self.get_nth_fn = nth
    ## implements 'len(array)'
    def __len__(self):
        return self.get_num_fn(self.ptr)
    ## Checks if key is indices or a URI
    # distinguishes 'array[index]' from 'array[start:end:step]'
    def __getitem__(self, key):
        if isinstance(key, slice):
            indices = key.indices( len(self) )
            return [self._getsingle_(n) for n in range(*indices)]
        elif isinstance(key, str):
            # Make a list of URIs for the objects in this array
            # (linear scan over the C array on every string lookup)
            uris = [ self.get_uri_fun(self.get_nth_fn(self.ptr, n)) for n in range(self.get_num_fn(self.ptr))]
            # Then search the list for an element matching the key;
            # raises ValueError if the URI is not present.
            ind = uris.index(key)
            ptr = self.get_nth_fn(self.ptr, ind)
            obj = self.doc._proxy(ptr)
            return obj
        else: # assume int-like object
            if key < 0: # indexed from end
                key += len(self)
            return self._getsingle_(key)
    ## implements 'array[index]'
    def _getsingle_(self, index):
        num = self.get_num_fn(self.ptr)
        if index >= num:
            raise IndexError
        # Map the C pointer back to its registered Python proxy.
        ptr = self.get_nth_fn(self.ptr, index)
        obj = self.doc._proxy(ptr)
        return obj
    ## implements 'for obj in array:'
    def __iter__(self):
        num = self.get_num_fn(self.ptr)
        for n in range(num):
            ptr = self.get_nth_fn(self.ptr, n)
            obj = self.doc._proxy(ptr)
            yield obj
    ## implements 'obj in array'
    def __contains__(self, obj):
        for candidate_obj in self:
            if candidate_obj == obj:
                return True
        return False
    ## implements 'print array'
    def __str__(self):
        if len(self) == 0:
            return '[]'
        # Render like a Python list of reprs: [repr, repr, ..., repr]
        output = []
        output.append('[')
        for obj in self[:-1]:
            output.append(obj.__repr__())
            output.append(', ')
        output.append(self[-1].__repr__())
        output.append(']')
        return ''.join(output)
    ## implements 'array' (print in the interpreter)
    def __repr__(self):
        return self.__str__()
    ## implements array.remove(obj)
    def remove(self, obj):
        self.remove_fn(self.ptr, obj.ptr)
## The ExtendableSBOLObjectArray contains a list of SBOL objects
# that CAN exist independently of a parent, in contrast to the SBOLObjectArray
# in which a parent-child relationship is mandated. As a result the
# ExtendableSBOLObjectArray has one extra accessor function compared to
# an SBOLObjectArray, the add_fn, that allows the client user to manually
# add a child object to a parent.
# <br>
# Accessor methods used by the ExtendableSBOLObjectArray conform to the
# following pattern:
# |Accessor | Return value | Example callback |
# |----------|------------------------------------------|--------------------------------------------------|
# |get_uri_fn| Returns the object's URI | libsbol.getSequenceAnnotationURI |
# |add_fn | Add this child object to a parent | libsbol.addSequenceAnnotation |
# |remove_fn | Remove the object from the array | libsbol.removeSequenceAnnotationFromDNAComponent |
# |get_num_fn| Get the num of objects in the array | libsbol.getNumSequenceAnnotationsFor |
# |get_nth_fn| Get the array element of specified index | libsbol.getNthSequenceAnnotationFor |
class ExtendableSBOLObjectArray(SBOLObjectArray):
    ## Construct an ExtendableSBOLObjectArray
    # @param obj A pointer to the PySBOL object that owns this array
    # @param get_uri Callback function for retrieving an SBOL object's URI
    # @param add Callback function that will register this object in a parent's
    # ExtendableSBOLObjectArray
    # @param remove Callback function that will remove an object from this array
    # @param num Callback function that returns the number of objects in the array
    # @param nth Callback function that returns an object with the specified index
    def __init__(self, obj, get_uri, add, remove, num, nth):
        SBOLObjectArray.__init__(self, obj, get_uri, remove, num, nth)
        ## Accessor method used by ExtendableSBOLObjectArray to add a child object
        # to a parent
        self.add_fn = add
    ## implements 'array += obj'
    def __iadd__(self, obj):
        # Membership test is O(n) over the underlying C array.
        if obj in self:
            raise SBOLError('Duplicate obj %s' % obj)
        self.add_fn(self.ptr, obj.ptr)
        return self
    ## implements 'array.append(obj)'
    def append(self, obj):
        self.__iadd__(obj)
    ## implements 'array += obj_list'
    # NOTE(review): __extend__ is not a recognized Python protocol method and
    # is never invoked implicitly ('array += list' calls __iadd__ instead);
    # callers must invoke it explicitly. Consider exposing it as extend().
    def __extend__(self, obj_list):
        for obj in obj_list:
            self += obj
## URIToSBOLObjectAssociativeArray
#
#class Precedes(ExtendableSBOLObjectArray):
# #def __init__(self, doc, uri, get_uri, add, remove, num, nth):
# # obj = doc._getSBOLObjectByURI(doc, uri)
# # ExtendableSBOLObjectArray.__init__(self, obj, get_uri, remove, num, nth)
#
# def _getSBOLObjectByURI(doc, uri):
# obj = None
# for ann in doc.annotations:
# if ann.uri == uri:
# obj = ann
# return obj
## Represents an SBOL document that can be read from or written to a file.
# It also holds a registry of all the SBOL objects in the document,
# so it can be used for iterating through all the objects of a certain kind,
# retrieving the object with a certain URI, checking the type of an object, etc.
# Deleting a Document also deletes the SBOL objects it contains.
# Each SBOL object must be associated with a document, for two main reasons:
# to ensure that its URI is unique, and to make memory management simpler.
# @todo Objects should be able to belong to multiple documents
class Document(object):
    ## Construct a Document
    def __init__(self):
        # create document
        ## Pointer to the encapsulated libSBOLc Document object
        self.ptr = libsbol.createDocument()
        # Each SBOLObjectArray below is wired to the libSBOLc accessor
        # functions for one of the four core SBOL types.
        # create sequences array
        fns = (libsbol.getDNASequenceURI,
               libsbol.removeDNASequence,
               libsbol.getNumDNASequences,
               libsbol.getNthDNASequence)
        ## Registers all DNASequence objects in Document
        self.sequences = SBOLObjectArray(self, *fns)
        # create annotations array
        fns = (libsbol.getSequenceAnnotationURI,
               libsbol.removeSequenceAnnotation,
               libsbol.getNumSequenceAnnotations,
               libsbol.getNthSequenceAnnotation)
        ## Registers all SequenceAnnotation objects in Document
        self.annotations = SBOLObjectArray(self, *fns)
        # create components array
        fns = (libsbol.getDNAComponentURI,
               libsbol.removeDNAComponent,
               libsbol.getNumDNAComponents,
               libsbol.getNthDNAComponent)
        ## Registers all DNAComponent objects in Document
        self.components = SBOLObjectArray(self, *fns)
        # create collections array
        fns = (libsbol.getCollectionURI,
               libsbol.removeCollection,
               libsbol.getNumCollections,
               libsbol.getNthCollection)
        ## Registers all Collection objects in Document
        self.collections = SBOLObjectArray(self, *fns)
        # create lists of Python proxy objects to keep them
        # from being garbage collected, and for looking up
        # objects from SWIG pointers
        ## Registers all PySBOL DNASequence wrapper objects in Document
        self._sequences = []
        ## Registers all PySBOL SequenceAnnotation wrapper objects in Document
        self._annotations = []
        ## Registers all PySBOL DNAComponent wrapper objects in Document
        self._components = []
        ## Registers all PySBOL Collection wrapper objects in Document
        self._collections = []
    ## Delete this Document and all associated objects, freeing libSBOL memory.
    # Safe to call more than once; only the first call frees the C document.
    def close(self):
        if self.ptr:
            libsbol.deleteDocument(self.ptr)
            # Clear the pointer so a second close() (or any later use)
            # cannot double-free the C document.
            self.ptr = None
    ## Print summary of Document
    def __str__(self):
        return capture_stdout(libsbol.printDocument, self.ptr)
    ## Read an SBOL file
    # @param filename A string containing the full file name
    def read(self, filename):
        libsbol.readDocument(self.ptr, filename)
        # Instantiate python proxy objects for each C object in file; the
        # proxy constructors register themselves with this Document, so the
        # instances need not be kept here.
        for i in range(0, libsbol.getNumDNASequences(self.ptr)):
            ptr = libsbol.getNthDNASequence(self.ptr, i)
            uri = libsbol.getDNASequenceURI(ptr)
            seq = DNASequence(self, uri, ptr)
        for i in range(0, libsbol.getNumSequenceAnnotations(self.ptr)):
            ptr = libsbol.getNthSequenceAnnotation(self.ptr, i)
            uri = libsbol.getSequenceAnnotationURI(ptr)
            seq_annotation = SequenceAnnotation(self, uri, ptr)
        for i in range(0, libsbol.getNumDNAComponents(self.ptr)):
            ptr = libsbol.getNthDNAComponent(self.ptr, i)
            uri = libsbol.getDNAComponentURI(ptr)
            component = DNAComponent(self, uri, ptr)
        for i in range(0, libsbol.getNumCollections(self.ptr)):
            ptr = libsbol.getNthCollection(self.ptr, i)
            uri = libsbol.getCollectionURI(ptr)
            # Bug fix: this loop previously wrapped each Collection in a
            # SequenceAnnotation proxy (copy-paste error).
            collection = Collection(self, uri, ptr)
    ## Write an SBOL file
    # @param filename A string containing the full file name
    def write(self, filename):
        libsbol.writeDocumentToFile(self.ptr, filename)
    ## Total number of objects owned by this Document
    @property
    def num_sbol_objects(self):
        return len(self.sequences) \
            + len(self.annotations) \
            + len(self.components) \
            + len(self.collections)
    ## URIs of all object instances owned by this Document
    @property
    def uris(self):
        output = []
        for array in (self._sequences,
                      self._annotations,
                      self._components,
                      self._collections):
            for obj in array:
                output.append(obj.uri)
        return output
    ## Find the Python proxy for an unknown pointer.
    # Returns None if the pointer is not registered with this Document.
    def _proxy(self, ptr):
        for array in (self._sequences,
                      self._annotations,
                      self._components,
                      self._collections):
            for obj in array:
                if obj.ptr == ptr:
                    return obj
        return None
## Instances of the DNASequence class contain the actual DNA sequence string.
# This specifies the sequence of nucleotides that comprise the DNAComponent
# being described.
class DNASequence(object):
    ## Constructor for a PySBOL DNASequence. A PySBOL DNASequence wraps a
    # libSBOLc DNASequence. By default the constructor will instantiate both
    # a libSBOLc object and its wrapper Python object. However, if a libSBOLc
    # DNASequence already exists, it can be wrapped by specifying the optional
    # argument ptr
    # @param doc The Document to which this sequence will belong
    # @param uri A unique string identifier
    # @param ptr Optional. A SWIGPython-libSBOLc object to be wrapped with
    # this DNASequence
    def __init__(self, doc, uri, ptr=None):
        ## The SWIGPython-libSBOLc object wrapped by this DNASequence object
        self.ptr = None
        if not ptr:
            # create the C object if it doesn't exist already
            self.ptr = libsbol.createDNASequence(doc.ptr, uri)
        else:
            # wrap a C object if it already exists, necessary for input from file
            self.ptr = ptr
        # createDNASequence yields a null pointer when the URI is already
        # taken in this document.
        if self.ptr == None:
            raise URIError("Duplicate URI '%s'" % uri)
        # register the Python proxy
        ## the Document to which this sequence object belongs
        self.doc = doc
        self.doc._sequences.append(self)
    ## Clean-up this wrapper and its object.
    # Frees the underlying C object and unregisters this proxy from the
    # owning Document.
    def __del__(self):
        if self.ptr:
            libsbol.deleteDNASequence(self.ptr)
            self.doc._sequences.remove(self)
    ## Print summary of this DNASequence object
    def __str__(self):
        return capture_stdout(libsbol.printDNASequence, self.ptr, 0)
    ## Print the URI of this DNASequence object
    def __repr__(self):
        return "<%s uri='%s'>" % (self.__class__.__name__, self.uri)
    ## Copy the properties of this DNASequence object to a new DNASequence
    # object. The new object is automatically assigned its URI based on the
    # self object's URI augmented with the id_modifier
    # @param id_modifier A string of characters added to an object's URI to
    # designate the new object's URI.
    def deepcopy(self, id_modifier):
        copy_ptr = libsbol.copyDNASequence(self.ptr, id_modifier)
        return DNASequence(self.doc, self.uri + id_modifier, copy_ptr)
    ## A string that uniquely identifies a DNASequence instance
    @property
    def uri(self):
        return libsbol.getDNASequenceURI(self.ptr)
    ## This property specifies the sequence of nucleotides that comprise the
    # parent DNAComponent. The base pairs MUST be represented by a sequence of
    # lowercase characters corresponding to the 5' to 3' order of nucleotides in
    # the DNA segment described, eg. "actg". The string value MUST conform to the
    # restrictions listed below:
    # a. The DNA sequence MUST use the Nomenclature for incompletely specified
    # bases in nucleic acid sequences (Cornish-Bowden 1985). Rules adopted by IUPAC.
    # | Symbol | Meaning            |
    # |--------|--------------------|
    # | a      | a; adenine         |
    # | c      | c; cytosine        |
    # | g      | g; guanine         |
    # | t      | t; thymine         |
    # | m      | a or c             |
    # | r      | a or g             |
    # | w      | a or t             |
    # | s      | c or g             |
    # | y      | c or t             |
    # | k      | g or t             |
    # | v      | a or c or g; not t |
    # | h      | a or c or t; not g |
    # | d      | a or g or t; not c |
    # | b      | c or g or t; not a |
    # | n      | a or c or g or t   |
    # b. Blank lines, spaces, or other symbols must not be included in the sequence text.
    # c. The sequence text must be in ASCII or UTF-8 encoding. For the alphabets used,
    # the two are identical.<br>
    @property
    def nucleotides(self):
        return libsbol.getDNASequenceNucleotides(self.ptr)
    @nucleotides.setter
    def nucleotides(self, value):
        libsbol.setDNASequenceNucleotides(self.ptr, value)
## Analogous to 'a feature' in other systems, the SequenceAnnotation
# indicates information about the parent DNAComponent at the position
# specified by the SequenceAnnotation's location data properties. The
# SequenceAnnotation location CAN be specified by the start and end
# positions of the subComponent, along with the DNA sequence. Alternatively,
# the partial order of SequenceAnnotations along a DNAComponent can be
# specified by indicating the precedes relationship to other SequenceAnnotations.
class SequenceAnnotation(object):
    ## Constructor for a PySBOL SequenceAnnotation. A PySBOL SequenceAnnotation
    # wraps a libSBOLc SequenceAnnotation. By default the constructor will instantiate
    # both a libSBOLc object and its wrapper Python object. However, if a libSBOLc
    # SequenceAnnotation already exists, it can be wrapped by specifying the optional
    # argument ptr
    # @param doc The Document to which this annotation will belong
    # @param uri A unique string identifier
    # @param ptr Optional. A SWIGPython-libSBOLc object to be wrapped with
    # this SequenceAnnotation
    def __init__(self, doc, uri, ptr=None):
        ## The SWIGPython-libSBOLc object wrapped by this SequenceAnnotation
        self.ptr = None
        if not ptr:
            # create the C object if it doesn't exist already
            self.ptr = libsbol.createSequenceAnnotation(doc.ptr, uri)
        else:
            # wrap a libSBOLc object if it already exists (most likely due to file import)
            self.ptr = ptr
        if self.ptr == None:
            raise URIError("Duplicate URI '%s'" % uri)
        ## the Document to which this annotation belongs
        self.doc = doc
        # Register the Python proxy with its Document exactly once.
        # (This append used to be duplicated, leaving a second entry in
        # doc._annotations that __del__'s single remove() never cleared.)
        self.doc._annotations.append(self)
        # accessor functions handed to the array proxy below
        fns = (libsbol.getSequenceAnnotationURI,
               libsbol.addPrecedesRelationship,
               libsbol.removePrecedesRelationship,
               libsbol.getNumPrecedes,
               libsbol.getNthPrecedes)
        ## This SequenceAnnotation object precedes all the SequenceAnnotation objects
        # whose object references are included in this array property. Thus, the
        # precedes property specifies this annotation's position relative to others
        # that belong to the same parent DNAComponent. This property can be operated
        # on with Python list operators and slice indexing.
        self.precedes = ExtendableSBOLObjectArray(self, *fns)
    ## Clean-up this wrapper and its object
    def __del__(self):
        if self.ptr:
            libsbol.deleteSequenceAnnotation(self.ptr)
            self.doc._annotations.remove(self)
    ## Print summary of this SequenceAnnotation object
    def __str__(self):
        return capture_stdout(libsbol.printSequenceAnnotation, self.ptr, 0)
    ## Print the URI of this SequenceAnnotation object
    def __repr__(self):
        return "<%s uri='%s'>" % (self.__class__.__name__, self.uri)
    ## Determines if this SequenceAnnotation is upstream (precedes) the object
    # annotation
    # @param object A SequenceAnnotation whose position relative to self is unknown
    # @return True if this SequenceAnnotation is upstream of the object annotation,
    # or False if it is downstream
    def isUpstream(self, object):
        return bool(libsbol.precedes(self.ptr, object.ptr))
    ## A SequenceAnnotation is downstream of the object annotation
    # if the object annotation precedes it
    # @param object A SequenceAnnotation whose position relative to self is unknown
    # @return True if this SequenceAnnotation is downstream of the
    # object annotation, or False if it is upstream
    def isDownstream(self, object):
        return bool(libsbol.precedes(object.ptr, self.ptr))
    ## A string that uniquely identifies a SequenceAnnotation instance
    @property
    def uri(self):
        return libsbol.getSequenceAnnotationURI(self.ptr)
    ## Positive integer coordinate of the position of the first base of the
    # subcomponent on the DNAComponent. As a convention, numerical coordinates in
    # this class use position 1 (not 0) to indicate the initial base pair of a
    # DNA sequence, a convention often used in molecular biology. The start
    # coordinate is relative to the parent sequence. None means "not set".
    @property
    def start(self):
        start = libsbol.getSequenceAnnotationStart(self.ptr)
        if start == -1:
            # -1 is libSBOLc's "unset" sentinel; surface it as None
            return None
        else:
            return start
    @start.setter
    def start(self, index):
        if index == None:
            index = -1  # translate None back to libSBOLc's "unset" sentinel
        elif index < 0:
            raise PositionError('Negative position %i' % index)
        libsbol.setSequenceAnnotationStart(self.ptr, index)
    ## Positive integer coordinate of the position of the last base of the
    # subcomponent on the DNAComponent. The end coordinate is relative to the
    # parent sequence. None means "not set".
    @property
    def end(self):
        end = libsbol.getSequenceAnnotationEnd(self.ptr)
        if end == -1:
            return None
        else:
            return end
    @end.setter
    def end(self, index):
        if index == None:
            index = -1
        elif index < 0:
            raise PositionError('Negative position %i' % index)
        libsbol.setSequenceAnnotationEnd(self.ptr, index)
    ## The strand orientation, or direction, of the subComponent's sequence
    # relative to the parent DnaComponent is specified by the strand [+/-].
    # For strand: '+' the sequence of the subComponent is the exact sub-sequence,
    # and for '-' it is the reverse-complement of the parent DnaComponent's sequence
    # in that region. '*' indicates a bidirectional annotation.
    @property
    def strand(self):
        polarity = libsbol.getSequenceAnnotationStrand(self.ptr)
        if polarity == libsbol.STRAND_FORWARD:
            return '+'
        elif polarity == libsbol.STRAND_BIDIRECTIONAL:
            return '*'
        elif polarity == libsbol.STRAND_REVERSE:
            return '-'
        else:
            raise InternalError('Got invalid strand polarity %i' % polarity)
    @strand.setter
    def strand(self, polarity):
        if polarity == '+':
            polarity = libsbol.STRAND_FORWARD
        elif polarity == '*':
            polarity = libsbol.STRAND_BIDIRECTIONAL
        elif polarity == '-':
            polarity = libsbol.STRAND_REVERSE
        else:
            raise StrandError('Invalid polarity %s' % polarity)
        libsbol.setSequenceAnnotationStrand(self.ptr, polarity)
    ## This property specifies a child DNAComponent which is nested inside a
    # parent DNAComponent's sequence. The location data properties contained
    # in a SequenceAnnotation object describe the position of the subcomponent
    # within its parent's sequence. The sequence of the subcomponent SHOULD be
    # logically consistent with its parent's sequence and with the strand
    # value, though the API does not enforce these restrictions.
    @property
    def subcomponent(self):
        ptr = libsbol.getSequenceAnnotationSubComponent(self.ptr)
        return self.doc._proxy(ptr)
    @subcomponent.setter
    def subcomponent(self, com):
        # NOTE: this class used to define two setters for this property; only
        # this later one (which accepts None to clear the link) ever took
        # effect, so the dead earlier one was removed.
        if com:
            libsbol.setSequenceAnnotationSubComponent(self.ptr, com.ptr)
        else:
            libsbol.setSequenceAnnotationSubComponent(self.ptr, None)
## Instances of the DNAComponent class represent segments of DNA. A component's
# DNA sequence can be annotated using SequenceAnnotation instances, positionally
# defined descriptors of the sequence which specify additional DnaComponent
# instances as subComponents. A DNAComponent MAY specify one DnaSequence
# instance it abstracts. DNAComponent instances MAY be grouped into Collections.
class DNAComponent(object):
    ## Constructor for a PySBOL DNAComponent. A PySBOL DNAComponent wraps a
    # libSBOLc DNAComponent. By default the constructor will instantiate both
    # a libSBOLc object and its wrapper Python object. However, if a libSBOLc
    # DNAComponent already exists, it can be wrapped by specifying the optional
    # argument ptr
    # @param doc The Document to which this component will belong
    # @param uri A unique string identifier
    # @param ptr Optional. A SWIGPython-libSBOLc object to be wrapped with
    # this DNAComponent
    def __init__(self, doc, uri, ptr=None):
        ## The SWIGPython-libSBOLc object wrapped by this DNAComponent
        self.ptr = None
        if not ptr:
            # create the C object if it doesn't exist already
            self.ptr = libsbol.createDNAComponent(doc.ptr, uri)
        else:
            # wrap a C object if it already exists, necessary for input from file
            self.ptr = ptr
        if self.ptr == None:
            raise URIError("Duplicate URI '%s'" % uri)
        # register the Python proxy
        ## the Document to which this component belongs
        self.doc = doc
        self.doc._components.append(self)
        # accessor functions handed to the array proxy below
        fns = (libsbol.getSequenceAnnotationURI,
               libsbol.addSequenceAnnotation,
               libsbol.removeSequenceAnnotationFromDNAComponent,
               libsbol.getNumSequenceAnnotationsFor,
               libsbol.getNthSequenceAnnotationFor)
        ## Zero or more values of type SequenceAnnotation. This property links
        # to SequenceAnnotation instances, each of which specifies the position
        # and strand orientation of a DnaComponent that describes a subComponent
        # of this DNA component.
        self.annotations = ExtendableSBOLObjectArray(self, *fns)
    ## Clean-up this wrapper and its object
    def __del__(self):
        if self.ptr:
            libsbol.deleteDNAComponent(self.ptr)
            self.doc._components.remove(self)
    ## Print summary of this DNAComponent object
    def __str__(self):
        return capture_stdout(libsbol.printDNAComponent, self.ptr, 0)
    ## Print the URI of this DNAComponent object
    def __repr__(self):
        return "<%s uri='%s'>" % (self.__class__.__name__, self.uri)
    ## Copy a "genetic design". This DNAComponent and all its children objects
    # are recursively copied. Not fully implemented, does not copy references
    # @param id_modifier A string appended to each copied object's URI
    # @return The copied DNAComponent wrapper
    def deepcopy(self, id_modifier):
        copy_ptr = libsbol.copyDNAComponent(self.ptr, id_modifier)
        copy_uri = self.uri + id_modifier
        copy = DNAComponent(self.doc, copy_uri, copy_ptr)
        # Wrapping the copied sequence/annotations below registers Python
        # proxies for them with self.doc (a constructor side effect); the
        # proxy objects themselves are not needed here.
        seq_copy_ptr = libsbol.getDNAComponentSequence(copy_ptr)
        if seq_copy_ptr:
            seq_copy_uri = libsbol.getDNASequenceURI(seq_copy_ptr)
            DNASequence(self.doc, seq_copy_uri, ptr=seq_copy_ptr)
        for i_ann in range(libsbol.getNumSequenceAnnotationsFor(copy.ptr)):
            SA_copy_ptr = libsbol.getNthSequenceAnnotationFor(copy.ptr, i_ann)
            SA_copy_uri = libsbol.getSequenceAnnotationURI(SA_copy_ptr)
            SequenceAnnotation(self.doc, SA_copy_uri, ptr=SA_copy_ptr)
        return copy
    ## This property uniquely identifies the instance, and is intended to be
    # used whenever a reference to the instance is needed, such as when
    # referring to a DNAComponent stored on a server from another location.
    @property
    def uri(self):
        return libsbol.getDNAComponentURI(self.ptr)
    ## The displayId is a human readable identifier for display to users.
    @property
    def display_id(self):
        return libsbol.getDNAComponentDisplayID(self.ptr)
    @display_id.setter
    def display_id(self, displayid):
        libsbol.setDNAComponentDisplayID(self.ptr, displayid)
    ## The name of the DNA component is a human-readable string providing
    # the most recognizable identifier used to refer to this DnaComponent.
    # A name may be ambiguous, in that multiple, distinct DnaComponents may
    # share the same name (eg. the acronym pLac-O1 may have more than one
    # instantiation in terms of exact DNA sequence composition).
    @property
    def name(self):
        return libsbol.getDNAComponentName(self.ptr)
    @name.setter
    def name(self, name):
        libsbol.setDNAComponentName(self.ptr, name)
    ## The description is a free-text field that contains text such as a title
    # or longer free-text-based description for users (eg. engineered Lac
    # promoter, repressible by LacI).
    @property
    def description(self):
        return libsbol.getDNAComponentDescription(self.ptr)
    @description.setter
    def description(self, descr):
        libsbol.setDNAComponentDescription(self.ptr, descr)
    ## Zero or one DNASequence object. This property specifies the DNA sequence
    # which this DnaComponent object represents.
    @property
    def sequence(self):
        ptr = libsbol.getDNAComponentSequence(self.ptr)
        return self.doc._proxy(ptr)
    @sequence.setter
    def sequence(self, seq):
        libsbol.setDNAComponentSequence(self.ptr, seq.ptr)
    ## A URI referencing the Sequence Ontology
    @property
    def type(self):
        return libsbol.getDNAComponentType(self.ptr)
    @type.setter
    def type(self, typ):
        # setters have no meaningful return value
        libsbol.setDNAComponentType(self.ptr, typ)
    ## Move this component (and its sequence proxy, if any) to another
    # Document.
    # TODO: does not copy sublevels contained in SequenceAnnotations
    # @param new_doc The destination Document
    def move(self, new_doc):
        seq = self.sequence
        if seq:
            # transfer the sequence proxy's registration to the new Document
            # (previously it stayed registered with the old one as well)
            if seq in self.doc._sequences:
                self.doc._sequences.remove(seq)
            seq.doc = new_doc
            new_doc._sequences.append(seq)
        libsbol.moveDNAComponent(self.ptr, new_doc.ptr)
        # drop the stale registration in the old Document; leaving it there
        # kept this wrapper alive forever and confused __del__'s bookkeeping
        if self in self.doc._components:
            self.doc._components.remove(self)
        self.doc = new_doc
        new_doc._components.append(self)
## Individual instances of the Collection class represent an organizational
# container which helps users and developers conceptualize a set of
# DNAComponents as a group. Any combination of these instances CAN be added
# to a Collection instance, annotated with a displayID, name, and description
# and be published or exchanged directly.<br>
# For example, a set of restriction enzyme recognition sites, such as the
# components commonly used for BBF RFC 10 BioBricks could be placed into a
# single Collection. A Collection might contain DNA components used in a
# specific project, lab, or custom grouping specified by the user
class Collection(object):
    ## Build a PySBOL Collection, which wraps a libSBOLc Collection.
    # When ptr is omitted a fresh libSBOLc Collection is created; otherwise
    # the already-existing libSBOLc object referenced by ptr is adopted (as
    # happens when a document is read from file).
    # @param doc The Document to which this collection will belong
    # @param uri A unique string identifier
    # @param ptr Optional. A SWIGPython-libSBOLc object to wrap
    def __init__(self, doc, uri, ptr=None):
        ## The SWIGPython-libSBOLc object wrapped by this Collection
        self.ptr = None
        if ptr:
            # adopt the existing C object
            self.ptr = ptr
        else:
            # no C object yet; create one now
            self.ptr = libsbol.createCollection(doc.ptr, uri)
        if self.ptr == None:
            raise URIError("Duplicate URI '%s'" % uri)
        ## the Document to which this Collection object belongs
        self.doc = doc
        self.doc._collections.append(self)
        # accessor functions used by the array proxy below
        accessors = (libsbol.getDNAComponentURI,
                     libsbol.addDNAComponentToCollection,
                     libsbol.removeDNAComponentFromCollection,
                     libsbol.getNumDNAComponentsIn,
                     libsbol.getNthDNAComponentIn)
        ## An array of zero or more DNAComponent members of this Collection,
        # representing DNA segments for engineering biological systems (eg.
        # standard biological parts, BioBricks, pBAD, B0015, BioBrick Scar,
        # Insertion Element). Supports Python list-like operations, slice
        # indexing, as well as indexing by URI.
        self.components = ExtendableSBOLObjectArray(self, *accessors)
    ## Release the wrapped C object and deregister this proxy
    def __del__(self):
        if not self.ptr:
            return
        libsbol.deleteCollection(self.ptr)
        self.doc._collections.remove(self)
    ## Print summary of this Collection object
    def __str__(self):
        return capture_stdout(libsbol.printCollection, self.ptr, 0)
    ## Print the URI of this Collection object
    def __repr__(self):
        return "<%s uri='%s'>" % (self.__class__.__name__, self.uri)
    ## This property uniquely identifies the instance, and is intended to be
    # used whenever a reference to it is needed, such as when referring to a
    # Collection stored on a server from another location.
    @property
    def uri(self):
        return libsbol.getCollectionURI(self.ptr)
    ## The displayID is a human-readable identifier for display to users
    @property
    def display_id(self):
        return libsbol.getCollectionDisplayID(self.ptr)
    @display_id.setter
    def display_id(self, value):
        libsbol.setCollectionDisplayID(self.ptr, value)
    ## The common name of the Collection is the most recognizable identifier
    # used to refer to it. It SHOULD convey what is contained in the
    # Collection and is often ambiguous (eg, My Bookmarked Parts).
    @property
    def name(self):
        return libsbol.getCollectionName(self.ptr)
    @name.setter
    def name(self, value):
        libsbol.setCollectionName(self.ptr, value)
    ## The description is a human-readable, free-text field, that SHOULD
    # provide a hint about why the member DNAComponents are grouped together.
    @property
    def description(self):
        return libsbol.getCollectionDescription(self.ptr)
    @description.setter
    def description(self, value):
        libsbol.setCollectionDescription(self.ptr, value)
| |
"""
A :class:`~glue.core.subset_group.SubsetGroup` unites a group of
:class:`~glue.core.subset.Subset` instances together with a consistent state,
label, and style.
While subsets are internally associated with particular datasets, it's
confusing for the user to juggle multiple similar or identical
subsets, applied to different datasets. Because of this, the GUI
manages SubsetGroups, and presents each group to the user as a single
entity. The individual subsets are held in-sync by the SubsetGroup.
Client code should *only* create Subset Groups via
DataCollection.new_subset_group. It should *not* call Data.add_subset
or Data.new_subset directly
"""
from warnings import warn
from . import Subset
from .subset import SubsetState
from .util import Pointer
from .hub import HubListener
from .visual import VisualAttributes, RED
from .message import (DataCollectionAddMessage,
DataCollectionDeleteMessage
)
from .contracts import contract
from ..external import six
__all__ = ['GroupedSubset', 'SubsetGroup']
class GroupedSubset(Subset):
    """
    A member of a SubsetGroup, whose internal representation
    is shared with other group members
    """
    # Pointer('group.X') presumably proxies attribute access through to
    # self.group, keeping all members of a group in sync — see
    # glue.core.util.Pointer for the exact semantics.
    subset_state = Pointer('group.subset_state')
    label = Pointer('group.label')
    def __init__(self, data, group):
        """
        :param data: :class:`~glue.core.data.Data` instance to bind to
        :param group: :class:`~glue.core.subset_group.SubsetGroup`
        """
        # group must be bound before super().__init__, which receives the
        # group's label/style values as constructor arguments
        self.group = group
        super(GroupedSubset, self).__init__(data, label=group.label,
                                            color=group.style.color,
                                            alpha=group.style.alpha)
    def _setup(self, color, alpha, label):
        # NOTE(review): appears to be invoked from Subset.__init__ — confirm
        self.color = color
        self.label = label  # trigger disambiguation
        self.style = VisualAttributes(parent=self)
        self.style.markersize *= 2.5
        self.style.color = color
        self.style.alpha = alpha
        # skip state setting here
    @property
    def verbose_label(self):
        # label qualified with the dataset's label, e.g. "subset 1 (my data)"
        return "%s (%s)" % (self.label, self.data.label)
    def sync_style(self, other):
        # copy visual attributes from another style object onto this subset
        self.style.set(other)
    def __eq__(self, other):
        # identity comparison: equal only to itself
        return other is self
    # In Python 3, if __eq__ is defined, then __hash__ has to be re-defined
    if six.PY3:
        __hash__ = object.__hash__
    def __gluestate__(self, context):
        # serialize only the group reference and the style; state and label
        # live on the group itself (see the Pointer descriptors above)
        return dict(group=context.id(self.group),
                    style=context.do(self.style))
    @classmethod
    def __setgluestate__(cls, rec, context):
        # generator-style deserializer: yield a placeholder object first so
        # circular references to it can resolve, then fill in the real
        # group and style
        dummy_grp = SubsetGroup()  # __init__ needs group.label
        self = cls(None, dummy_grp)
        yield self
        self.group = context.object(rec['group'])
        self.style = context.object(rec['style'])
class SubsetGroup(HubListener):
    """A group of :class:`GroupedSubset` instances (one per dataset) that
    share a single subset state, label, and visual style."""
    def __init__(self, color=RED, alpha=0.5, label=None, subset_state=None):
        """
        Create a new empty SubsetGroup
        Note: By convention, SubsetGroups should be created via
        DataCollection.new_subset_group.
        """
        self.subsets = []
        if subset_state is None:
            subset_state = SubsetState()
        self.subset_state = subset_state
        self.label = label
        # _style backs the `style` property below; it must exist before the
        # property setter runs on the next line
        self._style = None
        self.style = VisualAttributes(parent=self)
        self.style.markersize *= 2.5
        self.style.color = color
        self.style.alpha = alpha
    @contract(data='isinstance(DataCollection)')
    def register(self, data):
        """
        Register to a :class:`~glue.core.data_collection.DataCollection`
        This is called automatically by
        :meth:`glue.core.data_collection.DataCollection.new_subset_group`
        """
        self.register_to_hub(data.hub)
        # add to self, then register, so fully populated by first
        # broadcast
        for d in data:
            s = GroupedSubset(d, self)
            self.subsets.append(s)
        for d, s in zip(data, self.subsets):
            d.add_subset(s)
    def paste(self, other_subset):
        """Paste the subset state from other_subset onto self."""
        state = other_subset.subset_state.copy()
        self.subset_state = state
    def _add_data(self, data):
        # add a new data object to group
        s = GroupedSubset(data, self)
        data.add_subset(s)
        self.subsets.append(s)
    def _remove_data(self, data):
        # remove a data object from group; iterate over a copy, since the
        # loop mutates self.subsets
        for s in list(self.subsets):
            if s.data is data:
                self.subsets.remove(s)
    def register_to_hub(self, hub):
        # keep group membership in sync with datasets added to / removed
        # from the collection
        hub.subscribe(self, DataCollectionAddMessage,
                      lambda x: self._add_data(x.data))
        hub.subscribe(self, DataCollectionDeleteMessage,
                      lambda x: self._remove_data(x.data))
    @property
    def style(self):
        # the shared visual style, pushed to members by _sync_style
        return self._style
    @style.setter
    def style(self, value):
        self._style = value
        self._sync_style()
    def _sync_style(self):
        # push the group's style onto every member subset
        for s in self.subsets:
            s.sync_style(self.style)
    @contract(item='string')
    def broadcast(self, item):
        # used by __setattr__ and VisualAttributes.__setattr__
        if item == 'style':
            self._sync_style()
            return
        for s in self.subsets:
            s.broadcast(item)
    def __setattr__(self, attr, value):
        object.__setattr__(self, attr, value)
        # re-broadcast changes to the shared attributes so member subsets
        # and hub listeners stay current
        if attr in ['subset_state', 'label', 'style']:
            self.broadcast(attr)
    def __gluestate__(self, context):
        # serialization hook for glue's save/restore machinery
        return dict(label=self.label,
                    state=context.id(self.subset_state),
                    style=context.do(self.style),
                    subsets=list(map(context.id, self.subsets)))
    @classmethod
    def __setgluestate__(cls, rec, context):
        # generator-style deserializer: yield the bare object first so
        # circular references can resolve, then populate it
        result = cls()
        yield result
        result.subset_state = context.object(rec['state'])
        result.label = rec['label']
        result.style = context.object(rec['style'])
        result.style.parent = result
        result.subsets = list(map(context.object, rec['subsets']))
    def __and__(self, other):
        # logical operators combine the groups' subset states
        return self.subset_state & other.subset_state
    def __or__(self, other):
        return self.subset_state | other.subset_state
    def __xor__(self, other):
        return self.subset_state ^ other.subset_state
    def __invert__(self):
        return ~self.subset_state
def coerce_subset_groups(collect):
    """
    If necessary, reassign non-grouped subsets in a DataCollection
    into SubsetGroups.
    This is used to support DataCollections saved with
    version 1 of glue.core.state.save_data_collection
    """
    for data in collect:
        for subset in data.subsets:
            if isinstance(subset, GroupedSubset):
                continue
            # legacy stand-alone subset: delete it and rebuild it as a
            # subset group, carrying over its state, style, and label
            warn("DataCollection has subsets outside of "
                 "subset groups, which are no longer supported. "
                 "Moving to subset groups")
            subset.delete()
            group = collect.new_subset_group()
            group.subset_state = subset.subset_state
            group.style = subset.style
            group.label = subset.label
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def _flatten(list_of_lists):
return [x for y in list_of_lists for x in y]
class Plus1RNNCell(tf.nn.rnn_cell.RNNCell):
  """RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
  @property
  def output_size(self):
    # fixed width; matches the input_size (5) used throughout these tests
    return 5
  @property
  def state_size(self):
    # the state has the same width as the output
    return 5
  def __call__(self, input_, state, scope=None):
    # stateless elementwise update — no variables are created and the
    # `scope` argument is unused
    return (input_ + 1, state + 1)
class TestStateSaver(object):
  """Minimal state-saver stub for exercising tf.nn.state_saving_rnn."""
  def __init__(self, batch_size, state_size):
    # batch_size / state_size determine the shape of the initial state
    self._batch_size = batch_size
    self._state_size = state_size
  def state(self, _):
    # initial state: zeros of shape [batch_size, state_size]; the state
    # name argument is ignored
    return tf.zeros(tf.pack([self._batch_size, self._state_size]))
  def save_state(self, _, state):
    # remember the tensor so tests can fetch it, and return an identity op
    # so the "save" participates in the graph
    self.saved_state = state
    return tf.identity(state)
class RNNTest(tf.test.TestCase):
  """Tests for tf.nn.rnn using the stateless Plus1RNNCell defined above."""
  def setUp(self):
    # fixed seed so the random input values are reproducible
    self._seed = 23489
    np.random.seed(self._seed)
  def testRNN(self):
    cell = Plus1RNNCell()
    batch_size = 2
    input_size = 5
    max_length = 8  # unrolled up to this length
    # note: list multiplication makes `inputs` max_length references to ONE
    # placeholder, so feeding inputs[0] drives every time step
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(batch_size, input_size))]
    outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape(), inp.get_shape())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=False) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state],
                        feed_dict={inputs[0]: input_value})
      # Outputs: each step's output is its (identical) input plus 1
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      # Final state: starts at zero and is incremented once per step
      self.assertAllClose(
          values[-1],
          max_length * np.ones((batch_size, input_size), dtype=np.float32))
  def testDropout(self):
    # input_keep_prob=1e-12 effectively drops every input, so the wrapped
    # cell sees zeros and every output is 0 + 1 = 1
    cell = Plus1RNNCell()
    full_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
        cell, input_keep_prob=1e-12, seed=0)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(batch_size, input_size))]
    with tf.variable_scope("share_scope"):
      outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
    with tf.variable_scope("drop_scope"):
      dropped_outputs, _ = tf.nn.rnn(
          full_dropout_cell, inputs, dtype=tf.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=False) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state],
                        feed_dict={inputs[0]: input_value})
      full_dropout_values = sess.run(dropped_outputs,
                                     feed_dict={inputs[0]: input_value})
      for v in values[:-1]:  # undropped path: input + 1.0
        self.assertAllClose(v, input_value + 1.0)
      for d_v in full_dropout_values[:-1]:  # Add 1.0 to dropped_out (all zeros)
        self.assertAllClose(d_v, np.ones_like(input_value))
  def testDynamicCalculation(self):
    # with sequence_length set, steps past each sequence's length must
    # produce zero outputs and the final state must be frozen at the last
    # valid step
    cell = Plus1RNNCell()
    sequence_length = tf.placeholder(tf.int64)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(batch_size, input_size))]
    with tf.variable_scope("drop_scope"):
      dynamic_outputs, dynamic_state = tf.nn.rnn(
          cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
    self.assertEqual(len(dynamic_outputs), len(inputs))
    with self.test_session(use_gpu=False) as sess:
      input_value = np.random.randn(batch_size, input_size)
      dynamic_values = sess.run(dynamic_outputs,
                                feed_dict={inputs[0]: input_value,
                                           sequence_length: [2, 3]})
      dynamic_state_value = sess.run([dynamic_state],
                                     feed_dict={inputs[0]: input_value,
                                                sequence_length: [2, 3]})
      # outputs are fully calculated for t = 0, 1
      for v in dynamic_values[:2]:
        self.assertAllClose(v, input_value + 1.0)
      # outputs at t = 2 are zero for entry 0, calculated for entry 1
      self.assertAllClose(
          dynamic_values[2],
          np.vstack((
              np.zeros((input_size)),
              1.0 + input_value[1, :])))
      # outputs at t = 3+ are zero
      for v in dynamic_values[3:]:
        self.assertAllEqual(v, np.zeros_like(input_value))
      # the final states are:
      # entry 0: the values from the calculation at t=1
      # entry 1: the values from the calculation at t=2
      self.assertAllEqual(
          dynamic_state_value[0],
          np.vstack((
              1.0 * (1 + 1) * np.ones((input_size)),
              1.0 * (2 + 1) * np.ones((input_size)))))
class GRUTest(tf.test.TestCase):
  """Smoke tests for tf.nn.dynamic_rnn with a GRUCell."""
  def setUp(self):
    # fixed seed so random inputs and sequence lengths are reproducible
    self._seed = 23489
    np.random.seed(self._seed)
  def _testDynamic(self, use_gpu):
    # build a time-major dynamic_rnn over random data and check it runs;
    # no output values are asserted
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    input_values = np.random.randn(time_steps, batch_size, input_size)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      concat_inputs = tf.placeholder(
          tf.float32, shape=(time_steps, batch_size, input_size))
      cell = tf.nn.rnn_cell.GRUCell(num_units=num_units, input_size=input_size)
      with tf.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
            cell, inputs=concat_inputs, sequence_length=sequence_length,
            time_major=True, dtype=tf.float32)
      feeds = {concat_inputs: input_values}
      # Initialize
      tf.initialize_all_variables().run(feed_dict=feeds)
      sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)
  def testDynamic(self):
    # exercise both the CPU and GPU code paths
    self._testDynamic(use_gpu=False)
    self._testDynamic(use_gpu=True)
class LSTMTest(tf.test.TestCase):
  def setUp(self):
    # fixed seed so each test's random inputs are reproducible
    self._seed = 23489
    np.random.seed(self._seed)
  def _testNoProjNoSharding(self, use_gpu):
    """Smoke test: LSTM without projection or sharding builds and runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      cell = tf.nn.rnn_cell.LSTMCell(
          num_units, input_size, initializer=initializer)
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(batch_size, input_size))]
      outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
      self.assertEqual(len(outputs), len(inputs))
      # each per-step output should be [batch_size, num_units]
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def _testCellClipping(self, use_gpu):
    """With cell_clip=0.0 the cell state is clipped to zero, so every
    output must be exactly zero."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      cell = tf.nn.rnn_cell.LSTMCell(
          num_units, input_size, use_peepholes=True,
          cell_clip=0.0, initializer=initializer)
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(batch_size, input_size))]
      outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs, feed_dict={inputs[0]: input_value})
      for value in values:
        # if cell c is clipped to 0, tanh(c) = 0 => m==0
        self.assertAllEqual(value, np.zeros((batch_size, num_units)))
  def _testNoProjNoShardingSimpleStateSaver(self, use_gpu):
    """state_saving_rnn must hand its final state to the state saver: the
    saved state and the returned final state must be equal."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      # LSTM state is (c, m) concatenated, hence 2 * num_units wide
      state_saver = TestStateSaver(batch_size, 2 * num_units)
      cell = tf.nn.rnn_cell.LSTMCell(
          num_units, input_size, use_peepholes=False, initializer=initializer)
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(batch_size, input_size))]
      with tf.variable_scope("share_scope"):
        outputs, state = tf.nn.state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name="save_lstm")
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      (last_state_value, saved_state_value) = sess.run(
          [state, state_saver.saved_state],
          feed_dict={inputs[0]: input_value})
      self.assertAllEqual(last_state_value, saved_state_value)
  def _testProjNoSharding(self, use_gpu):
    """Smoke test: LSTM with a projection layer (num_proj) builds and runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      # batch dimension left unspecified (None) in the placeholders
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(None, input_size))]
      cell = tf.nn.rnn_cell.LSTMCell(
          num_units, input_size, use_peepholes=True,
          num_proj=num_proj, initializer=initializer)
      outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
      self.assertEqual(len(outputs), len(inputs))
      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def _testProjSharding(self, use_gpu):
    """Smoke test: LSTM with sharded unit and projection weights builds
    and runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    num_proj_shards = 3
    num_unit_shards = 2
    max_length = 8
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          tf.placeholder(tf.float32, shape=(None, input_size))]
      cell = tf.nn.rnn_cell.LSTMCell(
          num_units,
          input_size=input_size,
          use_peepholes=True,
          num_proj=num_proj,
          num_unit_shards=num_unit_shards,
          num_proj_shards=num_proj_shards,
          initializer=initializer)
      outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
      self.assertEqual(len(outputs), len(inputs))
      tf.initialize_all_variables().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
def _testTooManyShards(self, use_gpu):
  """Checks that an invalid shard configuration raises ValueError.

  NOTE(review): presumably num_proj_shards=4 is incompatible with the
  projection dimensionality here -- confirm against the LSTMCell
  sharding rules; this test only pins that construction fails.
  """
  num_units = 3
  input_size = 5
  num_proj = 4
  num_proj_shards = 4
  num_unit_shards = 2
  max_length = 8
  with self.test_session(use_gpu=use_gpu, graph=tf.Graph()):
    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(None, input_size))]
    cell = tf.nn.rnn_cell.LSTMCell(
        num_units,
        input_size=input_size,
        use_peepholes=True,
        num_proj=num_proj,
        num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards,
        initializer=initializer)
    # Building the unrolled graph is what triggers the error.
    with self.assertRaises(ValueError):
      tf.nn.rnn(cell, inputs, dtype=tf.float32)
def _testDoubleInput(self, use_gpu):
  """Checks that the sharded LSTM runs with float64 inputs and that the
  output dtype matches the input dtype."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  num_proj_shards = 3
  num_unit_shards = 2
  max_length = 8
  with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
    initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
    inputs = max_length * [
        tf.placeholder(tf.float64, shape=(None, input_size))]
    cell = tf.nn.rnn_cell.LSTMCell(
        num_units,
        input_size=input_size,
        use_peepholes=True,
        num_proj=num_proj,
        num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards,
        initializer=initializer)
    # The dtype is inferred from the float64 initial state instead of
    # passing a dtype argument.
    outputs, _ = tf.nn.rnn(
        cell, inputs, initial_state=cell.zero_state(batch_size, tf.float64))
    self.assertEqual(len(outputs), len(inputs))
    tf.initialize_all_variables().run()
    input_value = np.asarray(np.random.randn(batch_size, input_size),
                             dtype=np.float64)
    values = sess.run(outputs, feed_dict={inputs[0]: input_value})
    self.assertEqual(values[0].dtype, input_value.dtype)
def _testShardNoShardEquivalentOutput(self, use_gpu):
  """Checks sharded and unsharded LSTMs produce (nearly) equal results.

  Both cells use the same constant initializer, so with identical inputs
  their outputs and final states should agree to within atol=1e-3.

  NOTE(review): the local names look swapped -- cell_noshard is built
  *with* shard counts and cell_shard without. Harmless, since the test is
  a symmetric comparison, but the naming is misleading.
  """
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  num_proj_shards = 3
  num_unit_shards = 2
  max_length = 8
  with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(None, input_size))]
    initializer = tf.constant_initializer(0.001)
    cell_noshard = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size,
        num_proj=num_proj,
        use_peepholes=True,
        initializer=initializer,
        num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards)
    cell_shard = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size, use_peepholes=True,
        initializer=initializer, num_proj=num_proj)
    # Separate variable scopes so the two cells do not share variables.
    with tf.variable_scope("noshard_scope"):
      outputs_noshard, state_noshard = tf.nn.rnn(
          cell_noshard, inputs, dtype=tf.float32)
    with tf.variable_scope("shard_scope"):
      outputs_shard, state_shard = tf.nn.rnn(
          cell_shard, inputs, dtype=tf.float32)
    self.assertEqual(len(outputs_noshard), len(inputs))
    self.assertEqual(len(outputs_noshard), len(outputs_shard))
    tf.initialize_all_variables().run()
    input_value = np.random.randn(batch_size, input_size)
    # Feed every (distinct? see placeholder-repetition note elsewhere)
    # input placeholder with the same value.
    feeds = dict((x, input_value) for x in inputs)
    values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
    values_shard = sess.run(outputs_shard, feed_dict=feeds)
    state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
    state_values_shard = sess.run([state_shard], feed_dict=feeds)
    self.assertEqual(len(values_noshard), len(values_shard))
    self.assertEqual(len(state_values_noshard), len(state_values_shard))
    for (v_noshard, v_shard) in zip(values_noshard, values_shard):
      self.assertAllClose(v_noshard, v_shard, atol=1e-3)
    for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
      self.assertAllClose(s_noshard, s_shard, atol=1e-3)
def _testDoubleInputWithDropoutAndDynamicCalculation(
    self, use_gpu):
  """Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  num_proj_shards = 3
  num_unit_shards = 2
  max_length = 8
  with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
    # sequence_length is fed at run time ([2, 3]) to exercise the
    # early-stop (dynamic calculation) path of tf.nn.rnn.
    sequence_length = tf.placeholder(tf.int64)
    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
    inputs = max_length * [
        tf.placeholder(tf.float64, shape=(None, input_size))]
    cell = tf.nn.rnn_cell.LSTMCell(
        num_units,
        input_size=input_size,
        use_peepholes=True,
        num_proj=num_proj,
        num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards,
        initializer=initializer)
    # Fixed seed keeps the dropout mask deterministic for this test.
    dropout_cell = tf.nn.rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
    outputs, state = tf.nn.rnn(
        dropout_cell, inputs, sequence_length=sequence_length,
        initial_state=cell.zero_state(batch_size, tf.float64))
    self.assertEqual(len(outputs), len(inputs))
    tf.initialize_all_variables().run(feed_dict={sequence_length: [2, 3]})
    input_value = np.asarray(np.random.randn(batch_size, input_size),
                             dtype=np.float64)
    values = sess.run(outputs, feed_dict={inputs[0]: input_value,
                                          sequence_length: [2, 3]})
    state_value = sess.run([state], feed_dict={inputs[0]: input_value,
                                               sequence_length: [2, 3]})
    # float64 must be preserved end-to-end through dropout and the cell.
    self.assertEqual(values[0].dtype, input_value.dtype)
    self.assertEqual(state_value[0].dtype, input_value.dtype)
def testSharingWeightsWithReuse(self):
  """Checks variable sharing: two rnn() calls in the same variable scope
  (the second with reuse=True) yield identical outputs, while a distinct
  scope with a differently seeded initializer yields different outputs."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  max_length = 8
  with self.test_session(graph=tf.Graph()) as sess:
    initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
    initializer_d = tf.random_uniform_initializer(-1, 1, seed=self._seed+1)
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(None, input_size))]
    cell = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size, use_peepholes=True,
        num_proj=num_proj, initializer=initializer)
    cell_d = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size, use_peepholes=True,
        num_proj=num_proj, initializer=initializer_d)
    with tf.variable_scope("share_scope"):
      outputs0, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
    with tf.variable_scope("share_scope", reuse=True):
      outputs1, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
    with tf.variable_scope("diff_scope"):
      outputs2, _ = tf.nn.rnn(cell_d, inputs, dtype=tf.float32)
    tf.initialize_all_variables().run()
    input_value = np.random.randn(batch_size, input_size)
    # Fetch all three unrolled output lists in one run, then slice them
    # back apart by max_length.
    output_values = sess.run(
        outputs0 + outputs1 + outputs2, feed_dict={inputs[0]: input_value})
    outputs0_values = output_values[:max_length]
    outputs1_values = output_values[max_length:2*max_length]
    outputs2_values = output_values[2*max_length:]
    self.assertEqual(len(outputs0_values), len(outputs1_values))
    self.assertEqual(len(outputs0_values), len(outputs2_values))
    for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
      # Same weights used by both RNNs so outputs should be the same.
      self.assertAllEqual(o1, o2)
      # Different weights used so outputs should be different.
      self.assertTrue(np.linalg.norm(o1-o3) > 1e-6)
def testSharingWeightsWithDifferentNamescope(self):
  """Checks that name_scope does not interfere with variable sharing:
  the same variable scope reused under two different name scopes
  produces identical outputs."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  max_length = 8
  with self.test_session(graph=tf.Graph()) as sess:
    initializer = tf.random_uniform_initializer(-1, 1, seed=self._seed)
    inputs = max_length * [
        tf.placeholder(tf.float32, shape=(None, input_size))]
    cell = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size, use_peepholes=True,
        num_proj=num_proj, initializer=initializer)
    with tf.name_scope("scope0"):
      with tf.variable_scope("share_scope"):
        outputs0, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
    with tf.name_scope("scope1"):
      with tf.variable_scope("share_scope", reuse=True):
        outputs1, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
    tf.initialize_all_variables().run()
    input_value = np.random.randn(batch_size, input_size)
    # One run fetches both unrolled lists; slice them back apart.
    output_values = sess.run(
        outputs0 + outputs1, feed_dict={inputs[0]: input_value})
    outputs0_values = output_values[:max_length]
    outputs1_values = output_values[max_length:]
    self.assertEqual(len(outputs0_values), len(outputs1_values))
    for out0, out1 in zip(outputs0_values, outputs1_values):
      self.assertAllEqual(out0, out1)
def _testDynamicEquivalentToStaticRNN(self, use_gpu, use_sequence_length):
  """Asserts tf.nn.dynamic_rnn is numerically identical to tf.nn.rnn.

  The same LSTM (same seed, same initializer, same variable scope name)
  is built twice in two fresh graphs -- once statically unrolled, once
  with dynamic_rnn -- and the forward outputs, final states, and
  gradients w.r.t. both the inputs and all trainable variables are
  compared for exact equality.

  Fix: corrected the typo "iteraiton" -> "iteration" in the second
  comparison log message.

  Args:
    use_gpu: whether the test session may place ops on GPU.
    use_sequence_length: if True, use random per-batch sequence lengths;
      otherwise every batch entry runs all time steps.
  """
  time_steps = 8
  num_units = 3
  num_proj = 4
  input_size = 5
  batch_size = 2

  input_values = np.random.randn(time_steps, batch_size, input_size)

  if use_sequence_length:
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
  else:
    sequence_length = None

  ########### Step 1: Run static graph and generate readouts
  with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
    concat_inputs = tf.placeholder(tf.float32,
                                   shape=(time_steps, batch_size, input_size))
    inputs = tf.unpack(concat_inputs)
    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)

    cell = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size, use_peepholes=True,
        initializer=initializer, num_proj=num_proj)

    # Same scope name as Step 2 so both graphs create identically named
    # and identically initialized variables.
    with tf.variable_scope("dynamic_scope"):
      outputs_static, state_static = tf.nn.rnn(
          cell, inputs, sequence_length=sequence_length, dtype=tf.float32)

    feeds = {concat_inputs: input_values}

    # Initialize
    tf.initialize_all_variables().run(feed_dict=feeds)

    # Generate gradients of sum of outputs w.r.t. inputs
    static_gradients = tf.gradients(
        outputs_static + [state_static], [concat_inputs])

    # Generate gradients of individual outputs w.r.t. inputs
    static_individual_gradients = _flatten([
        tf.gradients(y, [concat_inputs])
        for y in [outputs_static[0],
                  outputs_static[-1],
                  state_static]])

    # Generate gradients of individual variables w.r.t. inputs
    trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    assert len(trainable_variables) > 1, (
        "Count of trainable variables: %d" % len(trainable_variables))
    # pylint: disable=bad-builtin
    static_individual_variable_gradients = _flatten([
        tf.gradients(y, trainable_variables)
        for y in [outputs_static[0],
                  outputs_static[-1],
                  state_static]])

    # Test forward pass
    values_static = sess.run(outputs_static, feed_dict=feeds)
    (state_value_static,) = sess.run((state_static,), feed_dict=feeds)

    # Test gradients to inputs and variables w.r.t. outputs & final state
    static_grad_values = sess.run(static_gradients, feed_dict=feeds)

    static_individual_grad_values = sess.run(
        static_individual_gradients, feed_dict=feeds)

    static_individual_var_grad_values = sess.run(
        static_individual_variable_gradients, feed_dict=feeds)

  ########## Step 2: Run dynamic graph and generate readouts
  with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
    concat_inputs = tf.placeholder(tf.float32,
                                   shape=(time_steps, batch_size, input_size))
    inputs = tf.unpack(concat_inputs)
    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)

    cell = tf.nn.rnn_cell.LSTMCell(
        num_units, input_size, use_peepholes=True,
        initializer=initializer, num_proj=num_proj)

    with tf.variable_scope("dynamic_scope"):
      outputs_dynamic, state_dynamic = tf.nn.dynamic_rnn(
          cell, inputs=concat_inputs, sequence_length=sequence_length,
          time_major=True, dtype=tf.float32)
      # Unpack along time so the per-step comparison below mirrors the
      # static graph's list-of-outputs structure.
      split_outputs_dynamic = tf.unpack(outputs_dynamic, time_steps)

    feeds = {concat_inputs: input_values}

    # Initialize
    tf.initialize_all_variables().run(feed_dict=feeds)

    # Generate gradients of sum of outputs w.r.t. inputs
    dynamic_gradients = tf.gradients(
        split_outputs_dynamic + [state_dynamic], [concat_inputs])

    # Generate gradients of several individual outputs w.r.t. inputs
    dynamic_individual_gradients = _flatten([
        tf.gradients(y, [concat_inputs])
        for y in [split_outputs_dynamic[0],
                  split_outputs_dynamic[-1],
                  state_dynamic]])

    # Generate gradients of individual variables w.r.t. inputs
    trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    assert len(trainable_variables) > 1, (
        "Count of trainable variables: %d" % len(trainable_variables))
    dynamic_individual_variable_gradients = _flatten([
        tf.gradients(y, trainable_variables)
        for y in [split_outputs_dynamic[0],
                  split_outputs_dynamic[-1],
                  state_dynamic]])

    # Test forward pass
    values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
    (state_value_dynamic,) = sess.run(
        (state_dynamic,), feed_dict=feeds)

    # Test gradients to inputs and variables w.r.t. outputs & final state
    dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)

    dynamic_individual_grad_values = sess.run(
        dynamic_individual_gradients, feed_dict=feeds)

    dynamic_individual_var_grad_values = sess.run(
        dynamic_individual_variable_gradients, feed_dict=feeds)

  ######### Step 3: Comparisons
  self.assertEqual(len(values_static), len(values_dynamic))
  for (value_static, value_dynamic) in zip(values_static, values_dynamic):
    self.assertAllEqual(value_static, value_dynamic)
  self.assertAllEqual(state_value_static, state_value_dynamic)

  self.assertAllEqual(static_grad_values, dynamic_grad_values)

  self.assertEqual(len(static_individual_grad_values),
                   len(dynamic_individual_grad_values))
  self.assertEqual(len(static_individual_var_grad_values),
                   len(dynamic_individual_var_grad_values))

  for i, (a, b) in enumerate(zip(static_individual_grad_values,
                                 dynamic_individual_grad_values)):
    tf.logging.info("Comparing individual gradients iteration %d" % i)
    self.assertAllEqual(a, b)

  for i, (a, b) in enumerate(zip(static_individual_var_grad_values,
                                 dynamic_individual_var_grad_values)):
    tf.logging.info(
        "Comparing individual variable gradients iteration %d" % i)
    self.assertAllEqual(a, b)
def testNoProjNoShardingSimpleStateSaver(self):
  """Runs the state-saver variant on CPU first, then GPU."""
  for use_gpu in (False, True):
    self._testNoProjNoShardingSimpleStateSaver(use_gpu=use_gpu)
def testNoProjNoSharding(self):
  """Runs the no-projection, no-sharding variant on CPU then GPU."""
  for use_gpu in (False, True):
    self._testNoProjNoSharding(use_gpu=use_gpu)
def testCellClipping(self):
  """Runs the cell-clipping variant on CPU then GPU."""
  for use_gpu in (False, True):
    self._testCellClipping(use_gpu=use_gpu)
def testProjNoSharding(self):
  """Runs the projection, no-sharding variant on CPU then GPU."""
  for use_gpu in (False, True):
    self._testProjNoSharding(use_gpu=use_gpu)
def testProjSharding(self):
  """Runs the projection-with-sharding variant on CPU then GPU."""
  for use_gpu in (False, True):
    self._testProjSharding(use_gpu=use_gpu)
def testTooManyShards(self):
  """Runs the invalid-shard-count variant on CPU then GPU."""
  for use_gpu in (False, True):
    self._testTooManyShards(use_gpu=use_gpu)
def testShardNoShardEquivalentOutput(self):
  """Runs the shard/no-shard equivalence check on CPU then GPU."""
  for use_gpu in (False, True):
    self._testShardNoShardEquivalentOutput(use_gpu=use_gpu)
def testDoubleInput(self):
  """Runs the float64-input variant on CPU then GPU."""
  for use_gpu in (False, True):
    self._testDoubleInput(use_gpu=use_gpu)
def testDoubleInputWithDropoutAndDynamicCalculation(self):
  """Runs the float64 + dropout + dynamic-calculation variant on CPU
  then GPU."""
  for use_gpu in (False, True):
    self._testDoubleInputWithDropoutAndDynamicCalculation(use_gpu=use_gpu)
def testDynamicEquivalentToStaticRNN(self):
  """Runs the static/dynamic equivalence check for every combination of
  device placement and sequence-length usage, preserving the original
  order: (cpu, no-len), (gpu, no-len), (cpu, len), (gpu, len)."""
  for use_sequence_length in (False, True):
    for use_gpu in (False, True):
      self._testDynamicEquivalentToStaticRNN(
          use_gpu=use_gpu, use_sequence_length=use_sequence_length)
class BidirectionalRNNTest(tf.test.TestCase):
  """Tests for tf.nn.bidirectional_rnn, with and without sequence lengths."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _createBidirectionalRNN(self, use_gpu, use_shape, use_sequence_length):
    """Builds a bidirectional LSTM over 8 steps of placeholder input.

    Both directions use the same initializer (same seed), which the tests
    below exploit: forward and backward outputs must mirror each other
    in time.

    Returns:
      (input_value, inputs, packed_outputs, state_fw, state_bw,
      sequence_length); sequence_length is None unless
      use_sequence_length is True.
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8

    initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=self._seed)
    sequence_length = tf.placeholder(tf.int64) if use_sequence_length else None
    cell_fw = tf.nn.rnn_cell.LSTMCell(num_units,
                                      input_size,
                                      initializer=initializer)
    cell_bw = tf.nn.rnn_cell.LSTMCell(num_units,
                                      input_size,
                                      initializer=initializer)
    # The same placeholder object is repeated for every time step, so a
    # single feed of inputs[0] covers the whole sequence.
    inputs = max_length * [
        tf.placeholder(
            tf.float32,
            shape=(batch_size, input_size) if use_shape else (None, input_size))
    ]
    outputs, state_fw, state_bw = tf.nn.bidirectional_rnn(cell_fw,
                                                          cell_bw,
                                                          inputs,
                                                          dtype=tf.float32,
                                                          sequence_length=sequence_length)
    self.assertEqual(len(outputs), len(inputs))
    for out in outputs:
      # Each output concatenates forward and backward units along depth.
      self.assertEqual(
          out.get_shape().as_list(),
          [batch_size if use_shape else None, 2 * num_units])

    input_value = np.random.randn(batch_size, input_size)
    outputs = tf.pack(outputs)

    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalRNN(self, use_gpu, use_shape):
    """Checks forward/backward symmetry with per-batch sequence lengths."""
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalRNN(use_gpu, use_shape, True))
      tf.initialize_all_variables().run()
      # Run with pre-specified sequence length of 2, 3
      out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
                                 feed_dict={inputs[0]: input_value,
                                            sequence_length: [2, 3]})

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      # First sequence in batch is length=2
      # Check that the time=0 forward output is equal to time=1 backward output
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      # Check that the time=1 forward output is equal to time=0 backward output
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])

      # Second sequence in batch is length=3
      # Check that the time=0 forward output is equal to time=2 backward output
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      # Check that the time=1 forward output is equal to time=1 backward output
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      # Check that the time=2 forward output is equal to time=0 backward output
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def _testBidirectionalRNNWithoutSequenceLength(self, use_gpu, use_shape):
    """Checks forward/backward symmetry when no sequence lengths are given
    (all sequences run the full 8 steps)."""
    with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, _ = self._createBidirectionalRNN(
          use_gpu, use_shape, False)
      tf.initialize_all_variables().run()
      out, s_fw, s_bw = sess.run([outputs, state_fw, state_bw],
                                 feed_dict={inputs[0]: input_value})

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output:  out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      # Both sequences in batch are length=8. Check that the time=i
      # forward output is equal to time=8-1-i backward output
      for i in xrange(8):
        self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
        self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
        self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
      for i in xrange(8):
        self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
        self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
        self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def testBidirectionalRNN(self):
    self._testBidirectionalRNN(use_gpu=False, use_shape=False)
    self._testBidirectionalRNN(use_gpu=True, use_shape=False)
    self._testBidirectionalRNN(use_gpu=False, use_shape=True)
    self._testBidirectionalRNN(use_gpu=True, use_shape=True)

  def testBidirectionalRNNWithoutSequenceLength(self):
    self._testBidirectionalRNNWithoutSequenceLength(use_gpu=False,
                                                    use_shape=False)
    self._testBidirectionalRNNWithoutSequenceLength(use_gpu=True,
                                                    use_shape=False)
    self._testBidirectionalRNNWithoutSequenceLength(use_gpu=False,
                                                    use_shape=True)
    self._testBidirectionalRNNWithoutSequenceLength(use_gpu=True,
                                                    use_shape=True)
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
  """Builds the statically unrolled LSTM benchmark graph.

  Args:
    inputs_list_t: list of (batch, input_size) tensors, one per time step.
    sequence_length: per-batch sequence lengths passed to tf.nn.rnn.

  Returns:
    A single grouped op that runs the forward pass plus the gradients of
    (outputs + final state) w.r.t. all trainable variables.
  """
  (_, input_size) = inputs_list_t[0].get_shape().as_list()
  initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = tf.nn.rnn_cell.LSTMCell(
      num_units=input_size, input_size=input_size, use_peepholes=True,
      initializer=initializer)
  outputs, final_state = tf.nn.rnn(
      cell, inputs_list_t, sequence_length=sequence_length, dtype=tf.float32)

  trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  gradients = tf.gradients(outputs + [final_state], trainable_variables)

  return tf.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
  """Builds the dynamic_rnn LSTM benchmark graph.

  Args:
    inputs_t: a single (time, batch, input_size) tensor.
    sequence_length: per-batch sequence lengths for dynamic_rnn.

  Returns:
    A single grouped op that runs the forward pass plus the gradients of
    (outputs, final state) w.r.t. all trainable variables.
  """
  (unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
  initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = tf.nn.rnn_cell.LSTMCell(
      num_units=input_size, input_size=input_size, use_peepholes=True,
      initializer=initializer)
  outputs, final_state = tf.nn.dynamic_rnn(
      cell, inputs_t, sequence_length=sequence_length, dtype=tf.float32)

  trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  gradients = tf.gradients([outputs, final_state], trainable_variables)

  return tf.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
  """Times *graph construction* (not execution) of static vs dynamic RNN.

  Each builder is invoked 5 times via timeit; prints one result row and
  returns (delta_static, delta_dynamic) in seconds.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # These parameters don't matter
  batch_size = 512
  num_units = 512

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  def _create_static_rnn():
    with tf.Session(config=config, graph=tf.Graph()) as sess:
      inputs_list_t = [tf.constant(x) for x in inputs_list]
      # Return value intentionally unused: only construction is timed.
      ops = _static_vs_dynamic_rnn_benchmark_static(
          inputs_list_t, sequence_length)

  def _create_dynamic_rnn():
    with tf.Session(config=config, graph=tf.Graph()) as sess:
      inputs_t = tf.constant(inputs)
      # Return value intentionally unused: only construction is timed.
      ops = _static_vs_dynamic_rnn_benchmark_dynamic(
          inputs_t, sequence_length)

  delta_static = timeit.timeit(_create_static_rnn, number=5)
  delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)

  print("%d \t %f \t %f \t %f" %
        (max_time, delta_static, delta_dynamic, delta_dynamic/delta_static))
  return delta_static, delta_dynamic
def _timer(sess, ops):
  """Returns the average wall-clock seconds per sess.run(ops).

  Two untimed warm-up runs are executed first so one-time setup costs do
  not skew the measurement; the reported value averages 10 timed runs.
  """
  warmup_runs = 2
  timed_runs = 10
  for _ in range(warmup_runs):
    sess.run(ops)
  started = time.time()
  for _ in range(timed_runs):
    sess.run(ops)
  elapsed = time.time() - started
  return elapsed / float(timed_runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
  """Times *execution* of the static vs dynamic RNN benchmark graphs.

  Prints one result row and returns (delta_static, delta_dynamic):
  average seconds per run as measured by _timer().
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  # Using rnn()
  with tf.Session(config=config, graph=tf.Graph()) as sess:
    if not use_gpu:
      # Pin the graph to CPU; otherwise rely on default placement (soft
      # placement lets TF fall back to CPU when no GPU is available).
      with tf.device("/cpu:0"):
        inputs_list_t = [tf.constant(x) for x in inputs_list]
        ops = _static_vs_dynamic_rnn_benchmark_static(
            inputs_list_t, sequence_length)
    else:
      inputs_list_t = [tf.constant(x) for x in inputs_list]
      ops = _static_vs_dynamic_rnn_benchmark_static(
          inputs_list_t, sequence_length)
    tf.initialize_all_variables().run()
    delta_static = _timer(sess, ops)

  # Using dynamic_rnn()
  with tf.Session(config=config, graph=tf.Graph()) as sess:
    if not use_gpu:
      with tf.device("/cpu:0"):
        inputs_t = tf.constant(inputs)
        ops = _static_vs_dynamic_rnn_benchmark_dynamic(
            inputs_t, sequence_length)
    else:
      inputs_t = tf.constant(inputs)
      ops = _static_vs_dynamic_rnn_benchmark_dynamic(
          inputs_t, sequence_length)
    tf.initialize_all_variables().run()
    delta_dynamic = _timer(sess, ops)

  print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, use_gpu, delta_static,
         delta_dynamic, delta_dynamic/delta_static))

  return delta_static, delta_dynamic
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length,
                                       swap_memory):
  """Builds a dynamic_rnn benchmark graph with optional memory swapping.

  Args:
    inputs_t: a (time, batch, input_size) tensor.
    sequence_length: per-batch sequence lengths.
    swap_memory: forwarded to tf.nn.dynamic_rnn.

  Returns:
    A grouped op running the forward pass plus gradients of
    (outputs, final state) w.r.t. all trainable variables.
  """
  (unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
  initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=127)
  cell = tf.nn.rnn_cell.LSTMCell(
      num_units=input_size, input_size=input_size, use_peepholes=True,
      initializer=initializer)
  outputs, final_state = tf.nn.dynamic_rnn(
      cell, inputs_t, sequence_length=sequence_length,
      swap_memory=swap_memory, dtype=tf.float32)

  trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
  gradients = tf.gradients([outputs, final_state], trainable_variables)

  return tf.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
  """Times dynamic_rnn with swap_memory off vs. on.

  Prints one result row and returns (no_swap, swap): average seconds per
  run for each configuration.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  sequence_length = np.random.randint(0, max_time, size=batch_size)
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(max_time)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  # No memory swap
  with tf.Session(config=config, graph=tf.Graph()) as sess:
    inputs_t = tf.constant(inputs)
    ops = _dynamic_rnn_swap_memory_benchmark(
        inputs_t, sequence_length, swap_memory=False)
    tf.initialize_all_variables().run()
    no_swap = _timer(sess, ops)

  # Memory swap
  with tf.Session(config=config, graph=tf.Graph()) as sess:
    inputs_t = tf.constant(inputs)
    ops = _dynamic_rnn_swap_memory_benchmark(
        inputs_t, sequence_length, swap_memory=True)
    tf.initialize_all_variables().run()
    swap = _timer(sess, ops)

  print("%d \t %d \t %d \t %f \t %f \t %f" %
        (batch_size, max_time, num_units, no_swap, swap, swap/no_swap))
  return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
                                dynamic, swap_memory):
  """Times a full-length (unpadded) sequence, static or dynamic unroll.

  Rebuilds the graph from scratch and prints one result row on each of 5
  iterations; the sequence length is uniform across the batch. Note the
  result is printed only -- nothing is returned.
  """
  config = tf.ConfigProto()
  config.allow_soft_placement = True

  # Set up sequence lengths
  np.random.seed([127])
  # Every batch entry runs the full seqlen steps (a plain Python list).
  sequence_length = [seqlen for _ in range(batch_size)]
  inputs_list = [
      np.random.randn(batch_size, num_units).astype(np.float32)
      for _ in range(seqlen)]
  inputs = np.dstack(inputs_list).transpose([0, 2, 1])  # batch x time x depth

  for _ in range(5):
    if dynamic:
      with tf.Session(config=config, graph=tf.Graph()) as sess:
        inputs_t = tf.constant(inputs)
        ops = _dynamic_rnn_swap_memory_benchmark(
            inputs_t, sequence_length, swap_memory=swap_memory)
        tf.initialize_all_variables().run()
        elapsed = _timer(sess, ops)
    else:
      # swap_memory is not applicable to the static unroll path.
      with tf.Session(config=config, graph=tf.Graph()) as sess:
        inputs_list_t = [tf.constant(x) for x in inputs_list]
        ops = _static_vs_dynamic_rnn_benchmark_static(
            inputs_list_t, sequence_length)
        tf.initialize_all_variables().run()
        elapsed = _timer(sess, ops)

    print("%d \t %d \t %d \t %s \t %f \t %f" %
          (batch_size, seqlen, num_units, dynamic, elapsed,
           elapsed/seqlen))
class BenchmarkRNN(tf.test.Benchmark):
  """Benchmarks: static vs. dynamic unrolling, and dynamic_rnn memory swap."""

  def benchmarkGraphCreationStaticVsDynamicLSTM(self):
    """Reports graph-construction time for static vs. dynamic LSTM."""
    print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
    print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
    for max_time in (1, 25, 50):
      s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
      self.report_benchmark(name="graph_creation_time_static_T%02d" % max_time,
                            iters=5, wall_time=s_dt)
      self.report_benchmark(name="graph_creation_time_dynamic_T%02d" % max_time,
                            iters=5, wall_time=d_dt)

  def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
    """Reports execution time over a grid of units x device placement."""
    print("Calculation: Static Unroll with Dynamic Flow LSTM "
          "vs. Dynamic Unroll LSTM")
    print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
          "\t dt(dynamic)/dt(static)")
    for batch_size in (256,):
      for max_time in (50,):
        for num_units in (512, 256, 128):
          for use_gpu in (False, True):
            s_dt, d_dt = static_vs_dynamic_rnn_benchmark(
                batch_size, max_time, num_units, use_gpu)
            self.report_benchmark(
                name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=10, wall_time=s_dt)
            self.report_benchmark(
                name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s"
                % (max_time, batch_size, num_units, use_gpu),
                iters=10, wall_time=d_dt)

  def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
    """Reports dynamic_rnn execution time with swap_memory off vs. on."""
    print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
    print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
    for batch_size in (256, 512):
      for max_time in (100,):
        for num_units in (512, 256, 128):
          no_swap, swap = dynamic_rnn_swap_memory_benchmark(
              batch_size, max_time, num_units)
          self.report_benchmark(
              name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d"
              % (max_time, batch_size, num_units),
              iters=10, wall_time=no_swap)
          self.report_benchmark(
              name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d"
              % (max_time, batch_size, num_units),
              iters=10, wall_time=swap)
if __name__ == "__main__":
  # Entry point: discover and run the test cases defined above.
  tf.test.main()
| |
# 10.07.2007, c
# last revision: 25.03.2008
from sfepy import data_dir
# Candidate meshes; each entry is paired element-wise with the
# approximation order in all_your_bases below.
filename_meshes = ['/meshes/3d/cube_medium_tetra.mesh',
                   '/meshes/3d/cube_medium_tetra.mesh',
                   '/meshes/3d/cube_medium_hexa.mesh']
filename_meshes = [data_dir + name for name in filename_meshes]
# Approximation (base) order used with the corresponding mesh.
all_your_bases = [1, 2, 1]

filename_mesh = None  # Set per case in Test.test_get_solution().

field_1 = {
    'name' : '3_displacement',
    'dtype' : 'real',
    'shape' : (3,),
    'region' : 'Omega',
    'approx_order' : None,  # Filled in per case by Test.test_get_solution().
}
def get_pars( dim, full = False ):
    """Return isotropic linear elastic parameters for a `dim`-dimensional
    problem.

    Parameters
    ----------
    dim : int
        Spatial dimension (2 or 3).
    full : bool
        If True, return the full stiffness matrix
        ``lam * outer(o, o) + mu * diag(o + 1)`` in symmetric-tensor
        notation (shape ``(sym, sym)``); otherwise return the Lame
        parameters ``(lam, mu)``.
    """
    import numpy as nm
    # Number of independent components of a symmetric dim x dim tensor.
    # Use floor division: under Python 3, '/' would yield a float and
    # break the list-repetition '[0.] * (sym - dim)' below; '//' is
    # exact for these positive integers and works on Python 2 as well.
    sym = (dim + 1) * dim // 2
    lam = 1e1
    mu = 1e0
    # "Identity" in symmetric-tensor (Voigt-like) notation.
    o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
    oot = nm.outer( o, o )
    if full:
        return lam * oot + mu * nm.diag( o + 1.0 )
    else:
        return lam, mu
material_1 = {
'name' : 'solid',
'values' : {
'lam' : get_pars( 3 )[0],
'mu' : get_pars( 3 )[1],
'Dijkl' : get_pars( 3, True ),
}
}
material_2 = {
'name' : 'spring',
'values' : {
'.pars' : {'stiffness' : 1e0, 'projection' : None},
}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Bottom',
'select' : 'nodes in (z < -0.499)',
}
region_2 = {
'name' : 'Top',
'select' : 'nodes in (z > 0.499)',
}
ebc_1 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.2' : 0.1},
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d3',
}
equations_iso = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i1.Omega( solid.lam, solid.mu, v, u )
= dw_point_lspring.i1.Bottom( spring.pars, v, u )""",
}
equations_general = {
'balance_of_forces' :
"""dw_lin_elastic.i1.Omega( solid.Dijkl, v, u )
= dw_point_lspring.i1.Bottom( spring.pars, v, u )""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
'eps_r' : 1.0,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 1000
}
from sfepy.base.testing import TestCommon
##
# 10.07.2007, c
class Test( TestCommon ):
    """Small-strain elasticity: checks that the isotropic and general
    stiffness formulations give the same solution on several meshes and
    approximation orders."""
    # Order matters: test_get_solution populates self.solutions, which
    # test_linear_terms then compares.
    tests = ['test_get_solution', 'test_linear_terms']

    ##
    # 10.07.2007, c
    def from_conf( conf, options ):
        """Factory hook used by the sfepy test runner."""
        return Test( conf = conf, options = options )
    from_conf = staticmethod( from_conf )

    ##
    # c: 25.03.2008, r: 25.03.2008
    def test_linear_terms( self ):
        """Compares the isotropic vs. general solutions stored by
        test_get_solution; returns True iff all pairs match."""
        ok = True
        for sols in self.solutions:
            ok = ok and self.compare_vectors( sols[0], sols[1],
                                              label1 = 'isotropic',
                                              label2 = 'general' )
        return ok

    ##
    # c: 10.07.2007, r: 25.03.2008
    def test_get_solution( self ):
        """Solves each mesh/order case with both equation sets.

        Stores (vec_iso, vec_general) pairs in self.solutions, saves the
        isotropic state to a .vtk file per case, and returns True iff all
        Newton solves converged.
        """
        from sfepy.solvers.generic import solve_stationary
        from sfepy.base.base import IndexedStruct
        import os.path as op

        ok = True
        self.solutions = []
        for ii, approx_order in enumerate(all_your_bases):
            fname = filename_meshes[ii]

            # Point the shared configuration at this case's mesh/order.
            self.conf.filename_mesh = fname
            fields = {'field_1' : {
                'name' : '3_displacement',
                'dtype' : 'real',
                'shape' : (3,),
                'region' : 'Omega',
                'approx_order' : approx_order,
            }
            }
            self.conf.edit('fields', fields)
            self.report( 'mesh: %s, base: %s' % (fname, approx_order) )
            status = IndexedStruct()

            self.report( 'isotropic' )
            self.conf.equations = self.conf.equations_iso
            problem, vec1 = solve_stationary(self.conf, nls_status=status)
            converged = status.condition == 0
            ok = ok and converged
            self.report( 'converged: %s' % converged )

            self.report( 'general' )
            self.conf.equations = self.conf.equations_general
            problem, vec2 = solve_stationary(self.conf, nls_status=status)
            converged = status.condition == 0
            ok = ok and converged
            self.report( 'converged: %s' % converged )

            self.solutions.append( (vec1, vec2) )

            name = op.join(self.options.out_dir,
                           '_'.join(('test_elasticity_small_strain',
                                     op.splitext(op.basename(fname))[0],
                                     '%d' % approx_order))
                           + '.vtk')
            problem.save_state( name, vec1 )

            ## trunk = op.join( self.options.out_dir,
            ##                  op.splitext( op.basename( fname ) )[0] )
            ## problem.save_field_meshes( trunk )
            ## problem.save_regions( trunk )

        return ok
| |
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.stub_fiptables
~~~~~~~~~~~~
Stub version of the fiptables module.
"""
from calico.felix.futils import IPV4, IPV6
import difflib
from collections import namedtuple
#*****************************************************************************#
#* The following is so that rule.target.name can be used to identify rules; *#
#* this is the subset of the Target object from iptc that is actually *#
#* required by calling code. *#
#*****************************************************************************#
RuleTarget = namedtuple('RuleTarget', ['name'])
# Special value to mean "put this rule at the end" (see insert_rule below).
RULE_POSN_LAST = -1
#*****************************************************************************#
#* The range of definitions below mimic fiptables. *#
#*****************************************************************************#
class Rule(object):
    """
    Fake rule object.

    Mimics the subset of the python-iptables (iptc) Rule API used by the
    code under test: a target (with optional arguments), at most one match
    (with arguments), and basic protocol / source / interface selectors.
    """
    def __init__(self, type, target_name=None):
        # 'type' is kept with this builtin-shadowing name to mirror the
        # interface of the module being stubbed.
        self.type = type
        self.target_name = target_name
        self.target = RuleTarget(target_name)
        self.target_args = {}
        self.match_name = None
        self.match_args = {}
        self.protocol = None
        self.src = None
        self.in_interface = None
        self.out_interface = None

    def create_target(self, name, parameters=None):
        """Set the rule target, optionally with target arguments."""
        self.target = RuleTarget(name)
        self.target_name = name
        if parameters is not None:
            # dict.update replaces the original key-by-key copy loop.
            self.target_args.update(parameters)

    def create_tcp_match(self, dport):
        """Match TCP traffic to the given destination port."""
        self.match_name = "tcp"
        self.match_args["dport"] = dport

    def create_icmp6_match(self, icmp_type):
        """Match ICMPv6 packets of the given type."""
        self.match_name = "icmp6"
        self.match_args["icmpv6_type"] = icmp_type

    def create_conntrack_match(self, state):
        """Match on connection-tracking state."""
        self.match_name = "conntrack"
        self.match_args["ctstate"] = state

    def create_mark_match(self, mark):
        """Match on the packet mark."""
        self.match_name = "mark"
        self.match_args["mark"] = mark

    def create_mac_match(self, mac_source):
        """Match on the source MAC address."""
        self.match_name = "mac"
        self.match_args["mac_source"] = mac_source

    def create_set_match(self, match_set):
        """Match against an ipset."""
        self.match_name = "set"
        self.match_args["match_set"] = match_set

    def create_udp_match(self, sport, dport):
        """Match UDP traffic with the given source and destination ports."""
        self.match_name = "udp"
        self.match_args["sport"] = sport
        self.match_args["dport"] = dport

    def __eq__(self, other):
        # NOTE: self.type is deliberately not compared (rules are compared
        # within a chain that already fixes the type), matching the
        # original behaviour.
        if (self.protocol != other.protocol or
                self.src != other.src or
                self.in_interface != other.in_interface or
                self.out_interface != other.out_interface or
                self.target_name != other.target_name or
                self.match_name != other.match_name):
            return False
        # Dict equality already implies equal lengths, so the explicit
        # length comparisons in the original were redundant.
        return (self.match_args == other.match_args and
                self.target_args == other.target_args)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        output = self.target_name
        if self.target_args:
            output += " " + str(self.target_args)
        output += " " + (self.protocol if self.protocol else "all")
        output += " " + (self.src if self.src else "anywhere")
        output += " " + (self.in_interface if self.in_interface else "any_in")
        output += " " + (self.out_interface if self.out_interface else "any_out")
        if self.match_name:
            output += " " + self.match_name
            output += ((" " + str(self.match_args)) if self.match_args else "")
        return output
class Chain(object):
    """
    Mimic of an IPTC chain. Rules must be a list (not a set).
    """
    def __init__(self, name):
        self.name = name
        self.rules = []
        self.type = None  # IPV4 / IPV6; not known until put in a table.

    def flush(self):
        """Remove all rules from the chain, in place."""
        del self.rules[:]

    def delete_rule(self, rule):
        """Delete a rule; the rule must exist or ValueError is raised."""
        self.rules.remove(rule)

    def __eq__(self, other):
        # Equality deliberately only cares about name.
        return self.name == other.name

    def __ne__(self, other):
        # BUG FIX: the original read `not self.__eq__(self, other)` --
        # passing two positional arguments to a bound method, which raises
        # TypeError on any inequality comparison.
        return not self.__eq__(other)
class Table(object):
    """
    Mimic of an IPTC table: a named, typed collection of chains.
    """
    def __init__(self, type, name):
        self.type = type  # IPV4 or IPV6
        self.name = name
        self._chains_dict = {}

    def is_chain(self, name):
        """Return True if a chain of the given name exists in this table."""
        return name in self._chains_dict

    def delete_chain(self, name):
        """Delete the named chain (KeyError if absent).

        BUG FIX: the original additionally looped over self.chains calling
        .remove() on the freshly-built values list, which had no effect on
        the table (and would raise on Python 3, where dict.values() is a
        view without .remove). The dict deletion alone is sufficient.
        """
        del self._chains_dict[name]

    @property
    def chains(self):
        # The python-iptables code exposes the list of chains directly;
        # wrap in list() so callers get a real list on Python 3 as well.
        return list(self._chains_dict.values())
def get_table(type, name):
    """
    Return the named table from the global current_state, selecting the
    IPv4 or IPv6 family according to *type*.
    """
    if type == IPV4:
        return current_state.tables_v4[name]
    if type == IPV6:
        return current_state.tables_v6[name]
    raise ValueError("Invalid type %s for table" % type)
def get_chain(table, name):
    """
    Return the chain called *name* from *table*, creating it on demand.
    The chain's type is (re)stamped from the table on every call.
    """
    chain = table._chains_dict.get(name)
    if chain is None:
        chain = Chain(name)
        table._chains_dict[name] = chain
    chain.type = table.type
    return chain
def insert_rule(rule, chain, position=0, force_position=True):
    """
    Add an iptables rule to a chain if it does not already exist. Position is
    the position for the insert as an offset; if set to RULE_POSN_LAST then the
    rule is appended.
    If force_position is True, then the rule is added at the specified point
    unless it already exists there. If force_position is False, then the rule
    is added only if it does not exist anywhere in the list of rules.
    """
    # Check the type - python iptables would do this for us.
    if rule.type != chain.type:
        raise ValueError("Type of rule (%s) does not match chain (%s)" %
                         (rule.type, chain.type))
    if position == RULE_POSN_LAST:
        position = len(chain.rules)
    # CLEANUP: dropped the original's unused `found` flag and dead trailing
    # `return` statement; behaviour is unchanged.
    if force_position:
        # Insert unless an identical rule already sits at that offset (this
        # branch also covers appending past the end of the existing rules).
        if position >= len(chain.rules) or chain.rules[position] != rule:
            chain.rules.insert(position, rule)
    elif rule not in chain.rules:
        # Rule.__eq__ compares all the relevant rule parameters (target,
        # match, etc.) but not the offset, so `in` finds whether an
        # equivalent rule exists anywhere in the chain.
        chain.rules.insert(position, rule)
#*****************************************************************************#
#* The next few definitions are not exposed to production code. *#
#*****************************************************************************#
def reset_current_state():
    """Reset the module-global iptables state back to its default tables."""
    current_state.reset()
class UnexpectedStateException(Exception):
    """
    Raised when the actual iptables state does not match the expected one;
    carries the two states and a unified diff between them.
    """
    def __init__(self, actual, expected):
        message = "iptables state does not match"
        super(UnexpectedStateException, self).__init__(message)
        # BUG FIX: __str__ reads self.message, but the implicit
        # Exception.message attribute was removed in Python 3 (and
        # deprecated in 2.6) -- store it explicitly.
        self.message = message
        self.diff = "\n".join(difflib.unified_diff(
            expected.split("\n"),
            actual.split("\n")))
        self.actual = actual
        self.expected = expected

    def __str__(self):
        return ("%s\nDIFF:\n%s\nACTUAL:\n%s\nEXPECTED\n%s" %
                (self.message, self.diff, self.actual, self.expected))
def check_state(expected_state):
    """
    Compare the current global table state against *expected_state*,
    raising UnexpectedStateException (with a diff) on any mismatch.
    """
    actual, expected = str(current_state), str(expected_state)
    if actual == expected:
        return
    raise UnexpectedStateException(actual, expected)
class TableState(object):
    """
    Defines the current state of iptables - which rules exist in which
    tables. Normally there will be two - the state that the test generates,
    and the state that the test expects to have at the end. At the end of
    the test, these can be compared.
    """
    def __init__(self):
        self.tables_v4 = {}
        self.tables_v6 = {}
        self.reset()

    def reset(self):
        """
        Clear the state of the tables, getting them back to being empty:
        a v4 filter table, a v4 nat table and a v6 filter table, each with
        its standard built-in chains.
        """
        self.tables_v4.clear()
        self.tables_v6.clear()
        self.tables = []
        # (family, table name, family dict, built-in chains) -- the order of
        # this list fixes the order of self.tables and hence of __str__.
        layout = [
            (IPV4, "filter", self.tables_v4,
             ["INPUT", "OUTPUT", "FORWARD"]),
            (IPV4, "nat", self.tables_v4,
             ["PREROUTING", "POSTROUTING", "INPUT", "OUTPUT"]),
            (IPV6, "filter", self.tables_v6,
             ["INPUT", "OUTPUT", "FORWARD"]),
        ]
        for family, name, family_dict, chain_names in layout:
            table = Table(family, name)
            for chain_name in chain_names:
                get_chain(table, chain_name)
            family_dict[name] = table
            self.tables.append(table)

    def __str__(self):
        """
        Convert a full state to a readable string to use in matches and
        compare for final testing.
        """
        parts = []
        for table in self.tables:
            parts.append("TABLE %s (%s)\n" % (table.name, table.type))
            for chain_name in sorted(table._chains_dict.keys()):
                parts.append("  Chain %s\n" % chain_name)
                for rule in table._chains_dict[chain_name].rules:
                    parts.append("    %s\n" % rule)
                parts.append("\n")
            parts.append("\n")
        return "".join(parts)
# Current state - store in a global; tests reset it via reset_current_state().
current_state = TableState()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Register flops statistics for various TensorFlow operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
# List of all ops which have implemented flops statistics.
IMPLEMENTED_OPS = {
    # Unary ops
    "Reciprocal", "Square", "Rsqrt", "Log", "Neg", "AssignSub", "AssignAdd",
    "L2Loss", "Softmax",
    # Binary ops
    "Add", "Sub", "Mul", "RealDiv", "Maximum", "Minimum", "Pow", "RsqrtGrad",
    "GreaterEqual", "Greater", "LessEqual", "Less", "Equal", "NotEqual",
    "SquaredDifference",
    # Reduction ops
    "Mean", "Sum", "ArgMax", "ArgMin", "BiasAddGrad",
    # Convolution and pooling
    "AvgPool", "MaxPool", "AvgPoolGrad", "MaxPoolGrad", "Conv2DBackpropInput",
    "Conv2DBackpropFilter",
    # Other ops
    "AddN",
    # Ops implemented in core tensorflow:
    "MatMul", "Conv2D", "DepthwiseConv2dNative", "BiasAdd", "Dilation2D",
}
def _zero_flops(graph, node):
  """Statistics callback that always reports zero flops.

  The (graph, node) pair is required by the registration callback
  signature but is irrelevant to a constant result.
  """
  del graph, node
  return ops.OpStats("flops", 0)
def _list_product(lst):
"""Computes product of element of the list."""
result = 1
for item in lst:
result *= item
return result
################################################################################
# Unary operations
################################################################################
def _unary_op_flops(graph, node, ops_per_element=1):
  """Flops for an elementwise unary op: ops_per_element per input element."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  return ops.OpStats("flops", ops_per_element * in_shape.num_elements())
@ops.RegisterStatistics("Reciprocal", "flops")
def _reciprocal_flops(graph, node):
"""Compute flops for Reciprocal operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Square", "flops")
def _square_flops(graph, node):
"""Compute flops for Square operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Rsqrt", "flops")
def _rsqrt_flops(graph, node):
"""Compute flops for Rsqrt operation."""
# Rsqrt(x) = 1 / sqrt(x)
return _unary_op_flops(graph, node, ops_per_element=2)
@ops.RegisterStatistics("Log", "flops")
def _log_flops(graph, node):
"""Compute flops for Log operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("Neg", "flops")
def _neg_flops(graph, node):
"""Compute flops for Neg operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignSub", "flops")
def _assign_sub_flops(graph, node):
"""Compute flops for AssignSub operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("AssignAdd", "flops")
def _assign_add_flops(graph, node):
"""Compute flops for AssignAdd operation."""
return _unary_op_flops(graph, node)
@ops.RegisterStatistics("L2Loss", "flops")
def _l2_loss_flops(graph, node):
"""Compute flops for L2Loss operation."""
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
# Tensorflow uses inefficient implementation, with (3*N-1) flops:
# Optimal implementation is 2*N flops
return ops.OpStats("flops", in_shape.num_elements() * 3 - 1)
@ops.RegisterStatistics("Softmax", "flops")
def _softmax_flops(graph, node):
"""Compute flops for Softmax operation."""
# Softmax implenetation:
#
# Approximate flops breakdown:
# 2*n -- compute shifted logits
# n -- exp of shifted logits
# 2*n -- compute softmax from exp of shifted logits
return _unary_op_flops(graph, node, ops_per_element=5)
################################################################################
# Binary operations
################################################################################
def _binary_per_element_op_flops(graph, node, ops_per_element=1):
  """Flops for an elementwise binary op: ops_per_element per output element.

  Using the output shape makes this correct under broadcasting.
  """
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  return ops.OpStats("flops", ops_per_element * out_shape.num_elements())
@ops.RegisterStatistics("Add", "flops")
def _add_flops(graph, node):
"""Compute flops for Add operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Sub", "flops")
def _sub_flops(graph, node):
"""Compute flops for Sub operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Mul", "flops")
def _mul_flops(graph, node):
"""Compute flops for Mul operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RealDiv", "flops")
def _real_div_flops(graph, node):
"""Compute flops for RealDiv operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Maximum", "flops")
def _maximum_flops(graph, node):
"""Compute flops for Maximum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Minimum", "flops")
def _minimum_flops(graph, node):
"""Compute flops for Minimum operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Pow", "flops")
def _pow_flops(graph, node):
"""Compute flops for Pow operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("RsqrtGrad", "flops")
def _rsqrt_grad_flops(graph, node):
"""Compute flops for RsqrtGrad operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=4)
@ops.RegisterStatistics("GreaterEqual", "flops")
def _greater_equal_flops(graph, node):
"""Compute flops for GreaterEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Greater", "flops")
def _greater_flops(graph, node):
"""Compute flops for Greater operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("LessEqual", "flops")
def _less_equal_flops(graph, node):
"""Compute flops for LessEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Less", "flops")
def _less_flops(graph, node):
"""Compute flops for Less operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("Equal", "flops")
def _equal_flops(graph, node):
"""Compute flops for Equal operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("NotEqual", "flops")
def _not_equal_flops(graph, node):
"""Compute flops for NotEqual operation."""
return _binary_per_element_op_flops(graph, node)
@ops.RegisterStatistics("SquaredDifference", "flops")
def _squared_difference_flops(graph, node):
"""Compute flops for SquaredDifference operation."""
return _binary_per_element_op_flops(graph, node, ops_per_element=2)
################################################################################
# Reduction ops
################################################################################
def _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0):
  """Common code which compute flops for reduction operations.

  Args:
    graph: graph containing the node.
    node: NodeDef of the reduction op.
    reduce_flops: flops per input element consumed by the reduction step.
    finalize_flops: flops per output element for the finalization step.

  Returns:
    ops.OpStats with the total flop count.
  """
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  # Reducing k elements to one takes (k - 1) reduce steps, so the total is
  # in * reduce - out * reduce + out * finalize, regrouped below.
  num_flops = (in_shape.num_elements() * reduce_flops
               + out_shape.num_elements() * (finalize_flops - reduce_flops))
  return ops.OpStats("flops", num_flops)
@ops.RegisterStatistics("Mean", "flops")
def _mean_flops(graph, node):
"""Compute flops for Mean operation."""
# reduction - sum, finalization - divide
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=1)
@ops.RegisterStatistics("Sum", "flops")
def _sum_flops(graph, node):
"""Compute flops for Sum operation."""
# reduction - sum, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMax", "flops")
def _arg_max_flops(graph, node):
"""Compute flops for ArgMax operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("ArgMin", "flops")
def _arg_min_flops(graph, node):
"""Compute flops for ArgMin operation."""
# reduction - comparison, no finalization
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
@ops.RegisterStatistics("BiasAddGrad", "flops")
def _bias_add_grad_flops(graph, node):
"""Compute flops for BiasAddGrad operation."""
# Implementation of BiasAddGrad, essentially it's a reduce sum and reshaping:
# So computing flops same way as for "Sum"
return _reduction_op_flops(graph, node, reduce_flops=1, finalize_flops=0)
################################################################################
# Convolution and pooling
# Note: all flops statistics are implemented only for NHWC data format
################################################################################
def _verify_conv_data_format(node):
"""Verifies data format for pooling and convolutional operations."""
# TODO(xpan): P1: Support NCHW
if node.attr["data_format"].s != b"NHWC":
raise ValueError("Only NHWC format is supported in flops computations")
def _pool_flops(graph, node):
  """Common code which compute flops for pooling operations.

  Cost model: one op (accumulate or max) per kernel element for every
  output element, i.e. kernel_area * num_output_elements.
  """
  # compute flops for average and max pooling
  _verify_conv_data_format(node)
  #
  # Pooling declaration:
  #   Inputs:
  #     - value
  #   Outputs:
  #     - output
  #   Attributes:
  #     - ksize
  #     - strides
  #     - padding
  #     - data_format
  #
  # Pooling implementation:
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_shape = list(node.attr["ksize"].list.i)
  kernel_area = _list_product(kernel_shape)
  return ops.OpStats("flops", kernel_area * out_shape.num_elements())
@ops.RegisterStatistics("AvgPool", "flops")
def _avg_pool_flops(graph, node):
"""Compute flops for AvgPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("MaxPool", "flops")
def _max_pool_flops(graph, node):
"""Compute flops for MaxPool operation."""
return _pool_flops(graph, node)
@ops.RegisterStatistics("AvgPoolGrad", "flops")
def _avg_pool_grad_flops(graph, node):
"""Compute flops for AvgPoolGrad operation."""
_verify_conv_data_format(node)
# Pooling gradient implementation:
out_backprop_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
out_backprop_shape.assert_is_fully_defined()
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
# TensorFlow multiply each element of pooling window by coefficient,
# then sum up all of them, thus we have 2 flops per element:
# More optimal implementation - if division is done after.
return ops.OpStats("flops",
kernel_area * out_backprop_shape.num_elements() * 2)
@ops.RegisterStatistics("MaxPoolGrad", "flops")
def _max_pool_grad_flops(graph, node):
"""Compute flops for MaxPoolGrad operation."""
_verify_conv_data_format(node)
#
# MaxPoolGrad declaration:
# Inputs:
# - orig_input -- original input tensor (of max_pool)
# - orig_output -- original output tensor (of max_pool)
# - grad -- gradient with respect to output of max_pool
# Outputs:
# - output -- gradient with respect to input of max_pool
# Attributes:
# - ksize
# - strides
# - padding
# - data_format
# It computes MaxPool first, then one flop per each element of original output
#
kernel_shape = list(node.attr["ksize"].list.i)
kernel_area = _list_product(kernel_shape)
orig_out_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
orig_out_shape.assert_is_fully_defined()
max_pool_ops = kernel_area * orig_out_shape.num_elements()
return ops.OpStats("flops", max_pool_ops + orig_out_shape.num_elements())
@ops.RegisterStatistics("Conv2DBackpropInput", "flops")
def _conv_2d_backprop_input_flops(graph, node):
"""Compute flops for Conv2DBackpropInput operation."""
# Formula:
# batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
# * input_depth * output_depth * 2 / (image_x_stride * image_x_stride)
#
# Where:
# image_x_dim, image_y_dim and input_depth --- size of input to source (no
# backprop) convolution, in other words they are sizes of backprop output.
# output_depth --- number of filters in the original convolution, thus
# depth of backprop input.
# kernel_x_dim and kernel_y_dim --- sizes of filter in spatial dimension
# image_x_stride and image_x_stride --- strides of the convolution
#
_verify_conv_data_format(node)
# out_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
out_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * out_shape.num_elements()
* kernel_shape.num_elements()
/ (out_shape.dims[-1].value * strides_product)))
@ops.RegisterStatistics("Conv2DBackpropFilter", "flops")
def _conv_2d_backprop_filter_flops(graph, node):
"""Compute flops for Conv2DBackpropFilter operation."""
# Formula same as for Conv2DBackpropInput:
# batch_size * image_x_dim * image_y_dim * kernel_x_dim * kernel_y_dim
# * input_depth * output_depth * 2 / (image_x_stride * image_x_stride)
#
_verify_conv_data_format(node)
# image_shape = [batch_size, image_y_dim, image_x_dim, input_depth]
image_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
image_shape.assert_is_fully_defined()
# kernel_shape = [kernel_y_dim, kernel_x_dim, input_depth, output_depth]
kernel_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
kernel_shape.assert_is_fully_defined()
# strides
strides_shape = list(node.attr["strides"].list.i)
strides_product = strides_shape[1] * strides_shape[2]
return ops.OpStats("flops",
(2 * image_shape.num_elements()
* kernel_shape.num_elements()
/ (image_shape.dims[-1].value * strides_product)))
################################################################################
# Other ops
################################################################################
@ops.RegisterStatistics("AddN", "flops")
def _add_n_flops(graph, node):
"""Compute flops for AddN operation."""
if not node.input:
return _zero_flops(graph, node)
in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
in_shape.assert_is_fully_defined()
return ops.OpStats("flops", in_shape.num_elements() * (len(node.input) - 1))
| |
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
from twisted.trial import unittest
#import unittest
from twisted.internet.defer import Deferred, inlineCallbacks
from autobahn import wamp
from autobahn.wamp import message
from autobahn.wamp import serializer
from autobahn.wamp import protocol
from autobahn.wamp import role
from autobahn import util
from autobahn.wamp.exception import ApplicationError, NotAuthorized, InvalidTopic
from autobahn.wamp import types
class MockTransport:
   """
   Fake WAMP transport wired directly to a session handler.

   Instead of talking to a real router, send() inspects each outgoing WAMP
   message and synthesizes the broker/dealer reply inline (Published,
   Result, Subscribed, Registered, Error, ...), feeding it back to the
   handler via onMessage(). This lets ApplicationSession be exercised
   without any networking.
   """

   def __init__(self, handler):
      self._log = False
      self._handler = handler
      self._serializer = serializer.JsonSerializer()
      self._registrations = {}
      self._invocations = {}
      # Open the session, then immediately deliver a Welcome with broker
      # and dealer roles, as a router would.
      self._handler.onOpen(self)
      self._my_session_id = util.id()
      roles = [
         role.RoleBrokerFeatures(),
         role.RoleDealerFeatures()
      ]
      msg = message.Welcome(self._my_session_id, roles)
      self._handler.onMessage(msg)

   def send(self, msg):
      """Fake out the router: answer *msg* immediately and in-process."""
      if self._log:
         # Renamed from `bytes` to avoid shadowing the builtin.
         payload, isbinary = self._serializer.serialize(msg)
         print("Send: {}".format(payload))
      reply = None
      if isinstance(msg, message.Publish):
         if msg.topic.startswith('com.myapp'):
            # Only acknowledged publishes get a reply.
            if msg.acknowledge:
               reply = message.Published(msg.request, util.id())
         elif len(msg.topic) == 0:
            reply = message.Error(message.Publish.MESSAGE_TYPE, msg.request, 'wamp.error.invalid_topic')
         else:
            reply = message.Error(message.Publish.MESSAGE_TYPE, msg.request, 'wamp.error.not_authorized')
      elif isinstance(msg, message.Call):
         if msg.procedure == 'com.myapp.procedure1':
            reply = message.Result(msg.request, args = [100])
         elif msg.procedure == 'com.myapp.procedure2':
            reply = message.Result(msg.request, args = [1, 2, 3])
         elif msg.procedure == 'com.myapp.procedure3':
            reply = message.Result(msg.request, args = [1, 2, 3], kwargs = {'foo':'bar', 'baz': 23})
         elif msg.procedure.startswith('com.myapp.myproc'):
            # Locally-registered procedure: turn the call into an invocation
            # and remember the outstanding request for the Yield below.
            registration = self._registrations[msg.procedure]
            request = util.id()
            self._invocations[request] = msg.request
            reply = message.Invocation(request, registration, args = msg.args, kwargs = msg.kwargs)
         else:
            reply = message.Error(message.Call.MESSAGE_TYPE, msg.request, 'wamp.error.no_such_procedure')
      elif isinstance(msg, message.Yield):
         # BUG FIX: dict.has_key() was removed in Python 3; the `in`
         # operator is equivalent and also works on Python 2.
         if msg.request in self._invocations:
            request = self._invocations[msg.request]
            reply = message.Result(request, args = msg.args, kwargs = msg.kwargs)
      elif isinstance(msg, message.Subscribe):
         reply = message.Subscribed(msg.request, util.id())
      elif isinstance(msg, message.Unsubscribe):
         reply = message.Unsubscribed(msg.request)
      elif isinstance(msg, message.Register):
         registration = util.id()
         self._registrations[msg.procedure] = registration
         reply = message.Registered(msg.request, registration)
      elif isinstance(msg, message.Unregister):
         reply = message.Unregistered(msg.request)
      if reply:
         if self._log:
            payload, isbinary = self._serializer.serialize(reply)
            print("Receive: {}".format(payload))
         self._handler.onMessage(reply)

   def isOpen(self):
      return True

   def close(self):
      pass

   def abort(self):
      pass
class TestPublisher(unittest.TestCase):
   @inlineCallbacks
   def test_publish(self):
      """Unacknowledged publishes (all argument forms) resolve to None."""
      handler = protocol.ApplicationSession()
      transport = MockTransport(handler)
      publication = yield handler.publish('com.myapp.topic1')
      self.assertEqual(publication, None)
      publication = yield handler.publish('com.myapp.topic1', 1, 2, 3)
      self.assertEqual(publication, None)
      publication = yield handler.publish('com.myapp.topic1', 1, 2, 3, foo = 23, bar = 'hello')
      self.assertEqual(publication, None)
      publication = yield handler.publish('com.myapp.topic1', options = types.PublishOptions(excludeMe = False))
      self.assertEqual(publication, None)
      publication = yield handler.publish('com.myapp.topic1', 1, 2, 3, foo = 23, bar = 'hello', options = types.PublishOptions(excludeMe = False, exclude = [100, 200, 300]))
      self.assertEqual(publication, None)
   @inlineCallbacks
   def test_publish_acknowledged(self):
      """Acknowledged publishes resolve to a Publication with an integer id
      (int or long on Python 2)."""
      handler = protocol.ApplicationSession()
      transport = MockTransport(handler)
      publication = yield handler.publish('com.myapp.topic1', options = types.PublishOptions(acknowledge = True))
      self.assertTrue(type(publication.id) in (int, long))
      publication = yield handler.publish('com.myapp.topic1', 1, 2, 3, options = types.PublishOptions(acknowledge = True))
      self.assertTrue(type(publication.id) in (int, long))
      publication = yield handler.publish('com.myapp.topic1', 1, 2, 3, foo = 23, bar = 'hello', options = types.PublishOptions(acknowledge = True))
      self.assertTrue(type(publication.id) in (int, long))
      publication = yield handler.publish('com.myapp.topic1', options = types.PublishOptions(excludeMe = False, acknowledge = True))
      self.assertTrue(type(publication.id) in (int, long))
      publication = yield handler.publish('com.myapp.topic1', 1, 2, 3, foo = 23, bar = 'hello', options = types.PublishOptions(excludeMe = False, exclude = [100, 200, 300], acknowledge = True))
      self.assertTrue(type(publication.id) in (int, long))
   @inlineCallbacks
   def test_publish_undefined_exception(self):
      """Router error replies (not-authorized topic, empty topic) surface as
      the generic ApplicationError when no local mapping is defined."""
      handler = protocol.ApplicationSession()
      transport = MockTransport(handler)
      options = types.PublishOptions(acknowledge = True)
      yield self.assertFailure(handler.publish('de.myapp.topic1', options = options), ApplicationError)
      yield self.assertFailure(handler.publish('', options = options), ApplicationError)
   @inlineCallbacks
   def test_publish_defined_exception(self):
      """After define(), router errors map to the locally-defined exception
      classes instead of the generic ApplicationError."""
      handler = protocol.ApplicationSession()
      transport = MockTransport(handler)
      options = types.PublishOptions(acknowledge = True)
      handler.define(NotAuthorized)
      yield self.assertFailure(handler.publish('de.myapp.topic1', options = options), NotAuthorized)
      handler.define(InvalidTopic)
      yield self.assertFailure(handler.publish('', options = options), InvalidTopic)
@inlineCallbacks
def test_call(self):
    """Calling a remote procedure yields its (plain) result of 100."""
    session = protocol.ApplicationSession()
    MockTransport(session)
    # Same five call signatures as before: args, kwargs and call options.
    invocations = [
        ((), {}),
        ((1, 2, 3), {}),
        ((1, 2, 3), {'foo': 23, 'bar': 'hello'}),
        ((), {'options': types.CallOptions(timeout=10000)}),
        ((1, 2, 3), {'foo': 23, 'bar': 'hello',
                     'options': types.CallOptions(timeout=10000)}),
    ]
    for args, kwargs in invocations:
        result = yield session.call('com.myapp.procedure1', *args, **kwargs)
        self.assertEqual(result, 100)
@inlineCallbacks
def test_call_with_complex_result(self):
    """Procedures with multi-valued results yield a CallResult wrapper."""
    session = protocol.ApplicationSession()
    MockTransport(session)
    # procedure2 returns only positional results; procedure3 adds kwresults.
    expectations = [
        ('com.myapp.procedure2', {}),
        ('com.myapp.procedure3', {'foo': 'bar', 'baz': 23}),
    ]
    for procedure, expected_kwresults in expectations:
        res = yield session.call(procedure)
        self.assertIsInstance(res, types.CallResult)
        self.assertEqual(res.results, (1, 2, 3))
        self.assertEqual(res.kwresults, expected_kwresults)
@inlineCallbacks
def test_subscribe(self):
    # Subscribing must yield a Subscription with a numeric id, both for a
    # plain topic subscription and for one using wildcard matching.
    handler = protocol.ApplicationSession()
    transport = MockTransport(handler)
    # Event handler; only its identity matters for this test.
    def on_event(*args, **kwargs):
        print "got event"
    subscription = yield handler.subscribe(on_event, 'com.myapp.topic1')
    self.assertTrue(type(subscription.id) in (int, long))
    subscription = yield handler.subscribe(on_event, 'com.myapp.topic1', options = types.SubscribeOptions(match = 'wildcard'))
    self.assertTrue(type(subscription.id) in (int, long))
@inlineCallbacks
def test_unsubscribe(self):
    # A Subscription returned by subscribe() must be revocable via
    # unsubscribe() without raising.
    handler = protocol.ApplicationSession()
    transport = MockTransport(handler)
    # Event handler; only its identity matters for this test.
    def on_event(*args, **kwargs):
        print "got event"
    subscription = yield handler.subscribe(on_event, 'com.myapp.topic1')
    yield subscription.unsubscribe()
@inlineCallbacks
def test_register(self):
    # Registering a procedure must yield a Registration with a numeric id,
    # both without and with RegisterOptions.
    handler = protocol.ApplicationSession()
    transport = MockTransport(handler)
    # Call endpoint; only its identity matters for this test.
    def on_call(*args, **kwargs):
        print "got call"
    registration = yield handler.register(on_call, 'com.myapp.procedure1')
    self.assertTrue(type(registration.id) in (int, long))
    registration = yield handler.register(on_call, 'com.myapp.procedure1', options = types.RegisterOptions(pkeys = [0, 1, 2]))
    self.assertTrue(type(registration.id) in (int, long))
@inlineCallbacks
def test_unregister(self):
    # A Registration returned by register() must be revocable via
    # unregister() without raising.
    handler = protocol.ApplicationSession()
    transport = MockTransport(handler)
    # Call endpoint; only its identity matters for this test.
    def on_call(*args, **kwargs):
        print "got call"
    registration = yield handler.register(on_call, 'com.myapp.procedure1')
    yield registration.unregister()
@inlineCallbacks
def test_invoke(self):
    """Round-trip: register a local procedure, call it, verify the result."""
    session = protocol.ApplicationSession()
    MockTransport(session)
    def procedure():
        # Fixed value we expect to receive back through call().
        return 23
    yield session.register(procedure, 'com.myapp.myproc1')
    result = yield session.call('com.myapp.myproc1')
    self.assertEqual(result, 23)
# The three variants below document earlier attempts at asserting failures
# from publish(); kept for reference.
# ## variant 1: works
# def test_publish1(self):
#     d = self.handler.publish('de.myapp.topic1')
#     self.assertFailure(d, ApplicationError)

# ## variant 2: works
# @inlineCallbacks
# def test_publish2(self):
#     yield self.assertFailure(self.handler.publish('de.myapp.topic1'), ApplicationError)

# ## variant 3: does NOT work
# @inlineCallbacks
# def test_publish3(self):
#     with self.assertRaises(ApplicationError):
#         yield self.handler.publish('de.myapp.topic1')

if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# Command-line flags controlling input/output locations, sharding and
# parallelism for the conversion job.
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
                           'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
                           'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
                           'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
                            'Number of threads to preprocess the images.')

# The labels file holds the list of valid labels, one per line, e.g.:
#   dog
#   cat
#   flower
# Each label is mapped to an integer corresponding to its line number,
# starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')

FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
  """Wrapper for inserting int64 features into Example proto.

  Args:
    value: a single int, or a list/tuple of ints.

  Returns:
    tf.train.Feature wrapping the value(s) in an Int64List.
  """
  # Also accept tuples: previously a tuple fell through the list check and
  # was wrapped as [tuple], which Int64List rejects.
  if not isinstance(value, (list, tuple)):
    value = [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=list(value)))
def _bytes_feature(value):
  """Wrapper for inserting bytes features into Example proto."""
  # Wrap the single byte string in a one-element BytesList.
  wrapped = tf.train.BytesList(value=[value])
  return tf.train.Feature(bytes_list=wrapped)
def _convert_to_example(filename, image_buffer, label, text, height, width):
  """Build an Example proto for an example.

  Args:
    filename: string, path to an image file, e.g., '/path/to/example.JPG'
    image_buffer: string, JPEG encoding of RGB image
    label: integer, identifier for the ground truth for the network
    text: string, unique human-readable, e.g. 'dog'
    height: integer, image height in pixels
    width: integer, image width in pixels
  Returns:
    Example proto
  """
  # Constant metadata: every stored image is a 3-channel RGB JPEG.
  colorspace = 'RGB'
  channels = 3
  image_format = 'JPEG'

  feature = {
      'image/height': _int64_feature(height),
      'image/width': _int64_feature(width),
      'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
      'image/channels': _int64_feature(channels),
      'image/class/label': _int64_feature(label),
      'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
      'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
      'image/filename': _bytes_feature(
          tf.compat.as_bytes(os.path.basename(filename))),
      'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer)),
  }
  return tf.train.Example(features=tf.train.Features(feature=feature))
class ImageCoder(object):
  """Helper class that provides TensorFlow image coding utilities."""

  def __init__(self):
    # Create a single Session to run all image coding calls; it lives for
    # the lifetime of this object.
    self._sess = tf.Session()

    # Initializes function that converts PNG to JPEG data.
    self._png_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_png(self._png_data, channels=3)
    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

  def png_to_jpeg(self, image_data):
    # Re-encode a PNG image string as RGB JPEG bytes (quality 100).
    return self._sess.run(self._png_to_jpeg,
                          feed_dict={self._png_data: image_data})

  def decode_jpeg(self, image_data):
    # Decode a JPEG string into an HxWx3 uint8 array.
    image = self._sess.run(self._decode_jpeg,
                           feed_dict={self._decode_jpeg_data: image_data})
    # Sanity-check that decoding produced a 3-channel image.
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
  """Process a single image file.

  Args:
    filename: string, path to an image file e.g., '/path/to/example.JPG'.
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
  Returns:
    image_buffer: string, JPEG encoding of RGB image.
    height: integer, image height in pixels.
    width: integer, image width in pixels.
  """
  # Read the image file (binary mode; contents may be PNG or JPEG).
  with tf.gfile.FastGFile(filename, 'rb') as f:
    image_data = f.read()

  # Convert any PNG to JPEG's for consistency, so the returned buffer is
  # always JPEG-encoded.
  if _is_png(filename):
    print('Converting PNG to JPEG for %s' % filename)
    image_data = coder.png_to_jpeg(image_data)

  # Decode the RGB JPEG (only to validate it and obtain the dimensions).
  image = coder.decode_jpeg(image_data)

  # Check that image converted to RGB
  assert len(image.shape) == 3
  height = image.shape[0]
  width = image.shape[1]
  assert image.shape[2] == 3

  return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               texts, labels, num_shards):
  """Processes and saves list of images as TFRecord in 1 thread.

  Args:
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run index is within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batches to
      analyze in parallel.
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    texts: list of strings; each string is human readable, e.g. 'dog'
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
  """
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128, and the num_threads = 2, then the first
  # thread would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  # Evenly subdivide this thread's file range into per-shard sub-ranges.
  shard_ranges = np.linspace(ranges[thread_index][0],
                             ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in range(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_directory, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)

    shard_counter = 0
    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in files_in_shard:
      filename = filenames[i]
      label = labels[i]
      text = texts[i]

      try:
        image_buffer, height, width = _process_image(filename, coder)
      except Exception as e:
        # Skip unreadable/corrupt images rather than aborting the shard.
        print(e)
        print('SKIPPED: Unexpected error while decoding %s.' % filename)
        continue

      example = _convert_to_example(filename, image_buffer, label,
                                    text, height, width)
      writer.write(example.SerializeToString())
      shard_counter += 1
      counter += 1

      # Periodic progress report for long-running conversions.
      if not counter % 1000:
        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
              (datetime.now(), thread_index, counter, num_files_in_thread))
        sys.stdout.flush()

    writer.close()
    print('%s [thread %d]: Wrote %d images to %s' %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_files_in_thread))
  sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
  """Process and save list of images as TFRecord of Example protos.

  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    texts: list of strings; each string is human readable, e.g. 'dog'
    labels: list of integer; each integer identifies the ground truth
    num_shards: integer number of shards for this data set.
  """
  assert len(filenames) == len(texts)
  assert len(filenames) == len(labels)

  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  # Use the builtin int: the np.int alias was deprecated in NumPy 1.20 and
  # removed in 1.24, so the original astype(np.int) now raises AttributeError.
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
  ranges = []
  for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a generic TensorFlow-based utility for converting all image codings.
  coder = ImageCoder()

  threads = []
  for thread_index in range(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames,
            texts, labels, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' %
        (datetime.now(), len(filenames)))
  sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
  """Build a list of all images files and labels in the data set.

  Args:
    data_dir: string, path to the root directory of images.
      Assumes that the image data set resides in JPEG files located in
      the following directory structure.
        data_dir/dog/another-image.JPEG
        data_dir/dog/my-image.jpg
      where 'dog' is the label associated with these images.
    labels_file: string, path to the labels file.
      The list of valid labels are held in this file. Assumes that the file
      contains entries as such:
        dog
        cat
        flower
      where each line corresponds to a label. We map each label contained in
      the file to an integer starting with the integer 0 corresponding to the
      label contained in the first line.

  Returns:
    filenames: list of strings; each string is a path to an image file.
    texts: list of strings; each string is the class, e.g. 'dog'
    labels: list of integer; each integer identifies the ground truth.
  """
  print('Determining list of input files and labels from %s.' % data_dir)
  unique_labels = [l.strip() for l in tf.gfile.FastGFile(
      labels_file, 'r').readlines()]

  labels = []
  filenames = []
  texts = []

  # Leave label index 0 empty as a background class.
  label_index = 1

  # Construct the list of JPEG files and labels.
  for text in unique_labels:
    jpeg_file_path = '%s/%s/*' % (data_dir, text)
    matching_files = tf.gfile.Glob(jpeg_file_path)

    labels.extend([label_index] * len(matching_files))
    texts.extend([text] * len(matching_files))
    filenames.extend(matching_files)

    if not label_index % 100:
      # Fix: report progress out of the number of classes; the original
      # printed len(labels) (the running *file* count) as the class total.
      print('Finished finding files in %d of %d classes.' % (
          label_index, len(unique_labels)))
    label_index += 1

  # Shuffle the ordering of all image files in order to guarantee
  # random ordering of the images with respect to label in the
  # saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
  random.seed(12345)
  random.shuffle(shuffled_index)

  filenames = [filenames[i] for i in shuffled_index]
  texts = [texts[i] for i in shuffled_index]
  labels = [labels[i] for i in shuffled_index]

  print('Found %d JPEG files across %d labels inside %s.' %
        (len(filenames), len(unique_labels), data_dir))
  return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
  """Process a complete data set and save it as a TFRecord.

  Args:
    name: string, unique identifier specifying the data set.
    directory: string, root path to the data set.
    num_shards: integer number of shards for this data set.
    labels_file: string, path to the labels file.
  """
  # Enumerate all inputs first, then fan the work out to the writer threads.
  image_paths, class_texts, class_labels = _find_image_files(
      directory, labels_file)
  _process_image_files(name, image_paths, class_texts, class_labels,
                       num_shards)
def main(unused_argv):
  """Entry point: convert the validation and training sets to TFRecords."""
  # Each thread writes a whole number of shards, so the shard counts must be
  # divisible by the thread count.
  assert not FLAGS.train_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
  assert not FLAGS.validation_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with '
      'FLAGS.validation_shards')
  print('Saving results to %s' % FLAGS.output_directory)

  # Run it!  Validation first, then training, exactly as before.
  datasets = (
      ('validation', FLAGS.validation_directory, FLAGS.validation_shards),
      ('train', FLAGS.train_directory, FLAGS.train_shards),
  )
  for name, directory, shards in datasets:
    _process_dataset(name, directory, shards, FLAGS.labels_file)
if __name__ == '__main__':
  # tf.app.run() parses the flags defined above and then invokes main().
  tf.app.run()
| |
### HiForest Configuration
# Collisions: pp
# Type: Data
# Input: AOD

import FWCore.ParameterSet.Config as cms

# Top-level CMSSW process that owns every module of the forest job.
process = cms.Process('HiForest')
process.options = cms.untracked.PSet()

#####################################################################################
# HiForest labelling info
#####################################################################################

process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
# Record the analyzer's git tag in the output tree so the forest is
# traceable; fall back to a placeholder when no git metadata is available.
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
    version = 'no git info'
process.HiForest.HiForestVersion = cms.string(version)

#####################################################################################
# Input source
#####################################################################################

process.source = cms.Source("PoolSource",
                            fileNames = cms.untracked.vstring(
                                '/store/data/Run2015E/HighPtJet80/AOD/PromptReco-v1/000/262/173/00000/3E8293B5-9894-E511-90E8-02163E011FA1.root'
                                #'/store/data/Run2015E/HighPtJet80/AOD/PromptReco-v1/000/262/272/00000/803A4255-7696-E511-B178-02163E0142DD.root'
                            )
)

# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################

process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')

from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
# Resolve the conditions global tag automatically for Run 2 data.
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
process.HiForest.GlobalTagLabel = process.GlobalTag.globaltag

from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import overrideJEC_pp5020
# Apply the 5.02 TeV pp jet-energy-correction override on top of the tag.
process = overrideJEC_pp5020(process)

#####################################################################################
# Define tree output
#####################################################################################

process.TFileService = cms.Service("TFileService",
                                   fileName=cms.string("MiniForestAOD.root"))

#####################################################################################
# Additional Reconstruction and Analysis: Main Body
#####################################################################################

####################################################################################

#############################
# Jets
#############################
### PP RECO does not include R=3 or R=5 jets.
### re-RECO is only possible for PF, RECO is missing calotowers
from RecoJets.JetProducers.ak5PFJets_cfi import ak5PFJets
ak5PFJets.doAreaFastjet = True
process.ak5PFJets = ak5PFJets
# R=0.3 PF jets are cloned from the R=0.5 configuration.
process.ak3PFJets = ak5PFJets.clone(rParam = 0.3)

process.load('HeavyIonsAnalysis.JetAnalysis.jets.ak4CaloJetSequence_pp_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.ak3PFJetSequence_pp_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.ak4PFJetSequence_pp_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.ak5PFJetSequence_pp_data_cff')

# Track collection restricted to high-purity quality for the jet analyzers.
process.highPurityTracks = cms.EDFilter("TrackSelector",
                                        src = cms.InputTag("generalTracks"),
                                        cut = cms.string('quality("highPurity")')
)

process.jetSequences = cms.Sequence(
    process.ak3PFJets +
    process.ak5PFJets +
    process.highPurityTracks +
    process.ak4CaloJetSequence +
    process.ak3PFJetSequence +
    process.ak4PFJetSequence +
    process.ak5PFJetSequence
)
#####################################################################################

############################
# Event Analysis
############################
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_data_cfi')
process.load('HeavyIonsAnalysis.EventAnalysis.hltobject_cfi')
process.hiEvtAnalyzer.Vertex = cms.InputTag("offlinePrimaryVertices")
# Centrality and event-plane reconstruction are heavy-ion specific;
# disabled for this pp configuration.
process.hiEvtAnalyzer.doCentrality = cms.bool(False)
process.hiEvtAnalyzer.doEvtPlane = cms.bool(False)

process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
from HeavyIonsAnalysis.EventAnalysis.dummybranches_cff import addHLTdummybranchesForPP
addHLTdummybranchesForPP(process)

# PF candidate analyzer kept available but switched off for this mini forest.
##process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_cfi")
##process.pfcandAnalyzer.skipCharged = False
##process.pfcandAnalyzer.pfPtMin = 0
##process.pfcandAnalyzer.pfCandidateLabel = cms.InputTag("particleFlow")
##process.pfcandAnalyzer.doVS = cms.untracked.bool(False)
##process.pfcandAnalyzer.doUEraw_ = cms.untracked.bool(False)
##process.pfcandAnalyzer.genLabel = cms.InputTag("genParticles")
process.load("HeavyIonsAnalysis.JetAnalysis.hcalNoise_cff")

#####################################################################################

#########################
# Track Analyzer
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_cff')

####################################################################################

#####################
# Photons
#####################
# Photon ntuplizer kept available but switched off for this mini forest.
##process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
##process.ggHiNtuplizer.gsfElectronLabel = cms.InputTag("gedGsfElectrons")
##process.ggHiNtuplizer.recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerpp')
##process.ggHiNtuplizer.VtxLabel = cms.InputTag("offlinePrimaryVertices")
##process.ggHiNtuplizer.particleFlowCollection = cms.InputTag("particleFlow")
##process.ggHiNtuplizer.doVsIso = cms.bool(False)
##process.ggHiNtuplizer.doGenParticles = False
##process.ggHiNtuplizer.doElectronVID = cms.bool(True)
##process.ggHiNtuplizerGED = process.ggHiNtuplizer.clone(recoPhotonSrc = cms.InputTag('gedPhotons'),
##                                                       recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerppGED'))

####################################################################################

#####################
# Electron ID
#####################
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
# turn on VID producer, indicate data format to be processed
# DataFormat.AOD or DataFormat.MiniAOD
dataFormat = DataFormat.AOD
switchOnVIDElectronIdProducer(process, dataFormat)
# define which IDs we want to produce. Check here https://twiki.cern.ch/twiki/bin/viewauth/CMS/CutBasedElectronIdentificationRun2#Recipe_for_regular_users_for_7_4
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Spring15_25ns_V1_cff']
#add them to the VID producer
for idmod in my_id_modules:
    setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)

#####################################################################################

#####################
# tupel and necessary PAT sequences
#####################
##process.load("HeavyIonsAnalysis.VectorBosonAnalysis.tupelSequence_pp_cff")
#####################################################################################
#########################
# Main analysis list
#########################
# Single path: trigger info, event info, jets, electron VID and the
# HiForest tree filler, followed by the pp track sequences.
process.ana_step = cms.Path(process.hltanalysis *
                            process.hltobject *
                            process.hiEvtAnalyzer *
                            process.jetSequences +
                            process.egmGsfElectronIDSequence + #Should be added in the path for VID module
##                          process.ggHiNtuplizer +
##                          process.ggHiNtuplizerGED +
##                          process.pfcandAnalyzer +
                            process.HiForest +
                            process.trackSequencesPP
##                          process.tupelPatSequence
)

#####################################################################################

#########################
# Event Selection
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.HBHENoiseFilterResult = cms.Path(process.fHBHENoiseFilterResult)
process.HBHENoiseFilterResultRun1 = cms.Path(process.fHBHENoiseFilterResultRun1)
process.HBHENoiseFilterResultRun2Loose = cms.Path(process.fHBHENoiseFilterResultRun2Loose)
process.HBHENoiseFilterResultRun2Tight = cms.Path(process.fHBHENoiseFilterResultRun2Tight)
process.HBHEIsoNoiseFilterResult = cms.Path(process.fHBHEIsoNoiseFilterResult)

# Offline primary-vertex selection: a non-fake vertex within 25 cm in z and
# 2 cm radially, built from at least two tracks.
process.PAprimaryVertexFilter = cms.EDFilter("VertexSelector",
    src = cms.InputTag("offlinePrimaryVertices"),
    cut = cms.string("!isFake && abs(z) <= 25 && position.Rho <= 2 && tracksSize >= 2"),
    filter = cms.bool(True), # otherwise it won't filter the events
)

# Reject beam-scraping events with too small a fraction of good tracks.
process.NoScraping = cms.EDFilter("FilterOutScraping",
    applyfilter = cms.untracked.bool(True),
    debugOn = cms.untracked.bool(False),
    numtrack = cms.untracked.uint32(10),
    thresh = cms.untracked.double(0.25)
)

process.pPAprimaryVertexFilter = cms.Path(process.PAprimaryVertexFilter)
process.pBeamScrapingFilter=cms.Path(process.NoScraping)

# Pile-up vertex filters; each path stores a separate skim decision bit.
process.load("HeavyIonsAnalysis.VertexAnalysis.PAPileUpVertexFilter_cff")
process.pVertexFilterCutG = cms.Path(process.pileupVertexFilterCutG)
process.pVertexFilterCutGloose = cms.Path(process.pileupVertexFilterCutGloose)
process.pVertexFilterCutGtight = cms.Path(process.pileupVertexFilterCutGtight)
process.pVertexFilterCutGplus = cms.Path(process.pileupVertexFilterCutGplus)
process.pVertexFilterCutE = cms.Path(process.pileupVertexFilterCutE)
process.pVertexFilterCutEandG = cms.Path(process.pileupVertexFilterCutEandG)

process.pAna = cms.EndPath(process.skimanalysis)

# Customization
| |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestAccessEntry(unittest.TestCase):
    """Unit tests for google.cloud.bigquery.dataset.AccessEntry."""

    @staticmethod
    def _get_target_class():
        # Imported lazily so an import error surfaces per-test rather than
        # at module collection time.
        from google.cloud.bigquery.dataset import AccessEntry

        return AccessEntry

    def _make_one(self, *args, **kw):
        # Convenience factory: instantiate the class under test.
        return self._get_target_class()(*args, **kw)

    def test_ctor_defaults(self):
        entry = self._make_one('OWNER', 'userByEmail', 'phred@example.com')
        self.assertEqual(entry.role, 'OWNER')
        self.assertEqual(entry.entity_type, 'userByEmail')
        self.assertEqual(entry.entity_id, 'phred@example.com')

    def test_ctor_bad_entity_type(self):
        # Unknown entity types must be rejected at construction time.
        with self.assertRaises(ValueError):
            self._make_one(None, 'unknown', None)

    def test_ctor_view_with_role(self):
        # 'view' entries may not carry a role.
        role = 'READER'
        entity_type = 'view'
        with self.assertRaises(ValueError):
            self._make_one(role, entity_type, None)

    def test_ctor_view_success(self):
        role = None
        entity_type = 'view'
        entity_id = object()
        entry = self._make_one(role, entity_type, entity_id)
        self.assertEqual(entry.role, role)
        self.assertEqual(entry.entity_type, entity_type)
        self.assertEqual(entry.entity_id, entity_id)

    def test_ctor_nonview_without_role(self):
        # Conversely, non-view entries require a role.
        role = None
        entity_type = 'userByEmail'
        with self.assertRaises(ValueError):
            self._make_one(role, entity_type, None)

    def test___eq___role_mismatch(self):
        entry = self._make_one('OWNER', 'userByEmail', 'phred@example.com')
        other = self._make_one('WRITER', 'userByEmail', 'phred@example.com')
        self.assertNotEqual(entry, other)

    def test___eq___entity_type_mismatch(self):
        entry = self._make_one('OWNER', 'userByEmail', 'phred@example.com')
        other = self._make_one('OWNER', 'groupByEmail', 'phred@example.com')
        self.assertNotEqual(entry, other)

    def test___eq___entity_id_mismatch(self):
        entry = self._make_one('OWNER', 'userByEmail', 'phred@example.com')
        other = self._make_one('OWNER', 'userByEmail', 'bharney@example.com')
        self.assertNotEqual(entry, other)

    def test___eq___hit(self):
        entry = self._make_one('OWNER', 'userByEmail', 'phred@example.com')
        other = self._make_one('OWNER', 'userByEmail', 'phred@example.com')
        self.assertEqual(entry, other)

    def test__eq___type_mismatch(self):
        entry = self._make_one('OWNER', 'userByEmail', 'silly@example.com')
        self.assertNotEqual(entry, object())
        # mock.ANY compares equal to anything, exercising the reflected path.
        self.assertEqual(entry, mock.ANY)

    def test_to_api_repr(self):
        entry = self._make_one('OWNER', 'userByEmail', 'salmon@example.com')
        resource = entry.to_api_repr()
        exp_resource = {'role': 'OWNER', 'userByEmail': 'salmon@example.com'}
        self.assertEqual(resource, exp_resource)

    def test_to_api_repr_view(self):
        view = {
            'projectId': 'my-project',
            'datasetId': 'my_dataset',
            'tableId': 'my_table'
        }
        entry = self._make_one(None, 'view', view)
        resource = entry.to_api_repr()
        exp_resource = {'view': view}
        self.assertEqual(resource, exp_resource)

    def test_from_api_repr(self):
        resource = {'role': 'OWNER', 'userByEmail': 'salmon@example.com'}
        entry = self._get_target_class().from_api_repr(resource)
        self.assertEqual(entry.role, 'OWNER')
        self.assertEqual(entry.entity_type, 'userByEmail')
        self.assertEqual(entry.entity_id, 'salmon@example.com')

    def test_from_api_repr_w_unknown_entity_type(self):
        resource = {'role': 'READER', 'unknown': 'UNKNOWN'}
        with self.assertRaises(ValueError):
            self._get_target_class().from_api_repr(resource)

    def test_from_api_repr_entries_w_extra_keys(self):
        # More than one entity key in a single resource is ambiguous.
        resource = {
            'role': 'READER',
            'specialGroup': 'projectReaders',
            'userByEmail': 'salmon@example.com',
        }
        with self.assertRaises(ValueError):
            self._get_target_class().from_api_repr(resource)
class TestDatasetReference(unittest.TestCase):
    """Unit tests for google.cloud.bigquery.dataset.DatasetReference."""

    @staticmethod
    def _get_target_class():
        # Imported lazily so an import error surfaces per-test rather than
        # at module collection time.
        from google.cloud.bigquery.dataset import DatasetReference

        return DatasetReference

    def _make_one(self, *args, **kw):
        # Convenience factory: instantiate the class under test.
        return self._get_target_class()(*args, **kw)

    def test_ctor_defaults(self):
        dataset_ref = self._make_one('some-project-1', 'dataset_1')
        self.assertEqual(dataset_ref.project, 'some-project-1')
        self.assertEqual(dataset_ref.dataset_id, 'dataset_1')

    def test_ctor_bad_args(self):
        # Both project and dataset_id must be strings.
        with self.assertRaises(ValueError):
            self._make_one(1, 'd')
        with self.assertRaises(ValueError):
            self._make_one('p', 2)

    def test_table(self):
        dataset_ref = self._make_one('some-project-1', 'dataset_1')
        table_ref = dataset_ref.table('table_1')
        self.assertEqual(table_ref.dataset_id, 'dataset_1')
        self.assertEqual(table_ref.project, 'some-project-1')
        self.assertEqual(table_ref.table_id, 'table_1')

    def test_to_api_repr(self):
        dataset = self._make_one('project_1', 'dataset_1')
        resource = dataset.to_api_repr()
        self.assertEqual(
            resource,
            {
                'projectId': 'project_1',
                'datasetId': 'dataset_1',
            })
def test_from_api_repr(self):
cls = self._get_target_class()
expected = self._make_one('project_1', 'dataset_1')
got = cls.from_api_repr(
{
'projectId': 'project_1',
'datasetId': 'dataset_1',
})
self.assertEqual(expected, got)
def test_from_string(self):
cls = self._get_target_class()
got = cls.from_string('string-project.string_dataset')
self.assertEqual(got.project, 'string-project')
self.assertEqual(got.dataset_id, 'string_dataset')
def test_from_string_legacy_string(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string('string-project:string_dataset')
def test_from_string_not_fully_qualified(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string('string_dataset')
with self.assertRaises(ValueError):
cls.from_string('a.b.c')
def test_from_string_with_default_project(self):
cls = self._get_target_class()
got = cls.from_string(
'string_dataset', default_project='default-project')
self.assertEqual(got.project, 'default-project')
self.assertEqual(got.dataset_id, 'string_dataset')
def test_from_string_ignores_default_project(self):
cls = self._get_target_class()
got = cls.from_string(
'string-project.string_dataset', default_project='default-project')
self.assertEqual(got.project, 'string-project')
self.assertEqual(got.dataset_id, 'string_dataset')
def test___eq___wrong_type(self):
dataset = self._make_one('project_1', 'dataset_1')
other = object()
self.assertNotEqual(dataset, other)
self.assertEqual(dataset, mock.ANY)
def test___eq___project_mismatch(self):
dataset = self._make_one('project_1', 'dataset_1')
other = self._make_one('project_2', 'dataset_1')
self.assertNotEqual(dataset, other)
def test___eq___dataset_mismatch(self):
dataset = self._make_one('project_1', 'dataset_1')
other = self._make_one('project_1', 'dataset_2')
self.assertNotEqual(dataset, other)
def test___eq___equality(self):
dataset = self._make_one('project_1', 'dataset_1')
other = self._make_one('project_1', 'dataset_1')
self.assertEqual(dataset, other)
def test___hash__set_equality(self):
dataset1 = self._make_one('project_1', 'dataset_1')
dataset2 = self._make_one('project_1', 'dataset_2')
set_one = {dataset1, dataset2}
set_two = {dataset1, dataset2}
self.assertEqual(set_one, set_two)
def test___hash__not_equals(self):
dataset1 = self._make_one('project_1', 'dataset_1')
dataset2 = self._make_one('project_1', 'dataset_2')
set_one = {dataset1}
set_two = {dataset2}
self.assertNotEqual(set_one, set_two)
def test___repr__(self):
dataset = self._make_one('project1', 'dataset1')
expected = "DatasetReference('project1', 'dataset1')"
self.assertEqual(repr(dataset), expected)
class TestDataset(unittest.TestCase):
    """Unit tests for ``google.cloud.bigquery.dataset.Dataset``."""

    from google.cloud.bigquery.dataset import DatasetReference

    # Shared fixture identifiers used by most tests below.
    PROJECT = 'project'
    DS_ID = 'dataset-id'
    DS_REF = DatasetReference(PROJECT, DS_ID)

    @staticmethod
    def _get_target_class():
        from google.cloud.bigquery.dataset import Dataset
        return Dataset

    def _make_one(self, *args, **kw):
        # Build an instance of the class under test.
        return self._get_target_class()(*args, **kw)

    def _setUpConstants(self):
        # Server-populated values expected when parsing API resources.
        import datetime
        from google.cloud._helpers import UTC
        self.WHEN_TS = 1437767599.006
        self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
            tzinfo=UTC)
        self.ETAG = 'ETAG'
        self.DS_FULL_ID = '%s:%s' % (self.PROJECT, self.DS_ID)
        self.RESOURCE_URL = 'http://example.com/path/to/resource'

    def _make_resource(self):
        # A representative fully-populated API resource dict.
        # Note: timestamps are in milliseconds on the wire.
        self._setUpConstants()
        USER_EMAIL = 'phred@example.com'
        GROUP_EMAIL = 'group-name@lists.example.com'
        return {
            'creationTime': self.WHEN_TS * 1000,
            'datasetReference':
                {'projectId': self.PROJECT, 'datasetId': self.DS_ID},
            'etag': self.ETAG,
            'id': self.DS_FULL_ID,
            'lastModifiedTime': self.WHEN_TS * 1000,
            'location': 'US',
            'selfLink': self.RESOURCE_URL,
            'defaultTableExpirationMs': 3600,
            'access': [
                {'role': 'OWNER', 'userByEmail': USER_EMAIL},
                {'role': 'OWNER', 'groupByEmail': GROUP_EMAIL},
                {'role': 'WRITER', 'specialGroup': 'projectWriters'},
                {'role': 'READER', 'specialGroup': 'projectReaders'}],
        }

    def _verify_access_entry(self, access_entries, resource):
        # Flatten the resource's 'access' list into comparable dicts and
        # check the parsed entries match element-for-element (in order).
        r_entries = []
        for r_entry in resource['access']:
            role = r_entry.pop('role')
            for entity_type, entity_id in sorted(r_entry.items()):
                r_entries.append({
                    'role': role,
                    'entity_type': entity_type,
                    'entity_id': entity_id})
        self.assertEqual(len(access_entries), len(r_entries))
        for a_entry, r_entry in zip(access_entries, r_entries):
            self.assertEqual(a_entry.role, r_entry['role'])
            self.assertEqual(a_entry.entity_type, r_entry['entity_type'])
            self.assertEqual(a_entry.entity_id, r_entry['entity_id'])

    def _verify_readonly_resource_properties(self, dataset, resource):
        # Read-only properties: present keys must parse to the expected
        # constants; absent keys must surface as None.
        self.assertEqual(dataset.project, self.PROJECT)
        self.assertEqual(dataset.dataset_id, self.DS_ID)
        self.assertEqual(dataset.reference.project, self.PROJECT)
        self.assertEqual(dataset.reference.dataset_id, self.DS_ID)
        if 'creationTime' in resource:
            self.assertEqual(dataset.created, self.WHEN)
        else:
            self.assertIsNone(dataset.created)
        if 'etag' in resource:
            self.assertEqual(dataset.etag, self.ETAG)
        else:
            self.assertIsNone(dataset.etag)
        if 'lastModifiedTime' in resource:
            self.assertEqual(dataset.modified, self.WHEN)
        else:
            self.assertIsNone(dataset.modified)
        if 'selfLink' in resource:
            self.assertEqual(dataset.self_link, self.RESOURCE_URL)
        else:
            self.assertIsNone(dataset.self_link)

    def _verify_resource_properties(self, dataset, resource):
        # Writable properties on top of the read-only checks above.
        self._verify_readonly_resource_properties(dataset, resource)
        if 'defaultTableExpirationMs' in resource:
            self.assertEqual(dataset.default_table_expiration_ms,
                             int(resource.get('defaultTableExpirationMs')))
        else:
            self.assertIsNone(dataset.default_table_expiration_ms)
        self.assertEqual(dataset.description, resource.get('description'))
        self.assertEqual(dataset.friendly_name, resource.get('friendlyName'))
        self.assertEqual(dataset.location, resource.get('location'))
        if 'access' in resource:
            self._verify_access_entry(dataset.access_entries, resource)
        else:
            self.assertEqual(dataset.access_entries, [])

    def test_ctor_defaults(self):
        # A freshly constructed Dataset has only reference-derived state.
        dataset = self._make_one(self.DS_REF)
        self.assertEqual(dataset.dataset_id, self.DS_ID)
        self.assertEqual(dataset.project, self.PROJECT)
        self.assertEqual(
            dataset.path,
            '/projects/%s/datasets/%s' % (self.PROJECT, self.DS_ID))
        self.assertEqual(dataset.access_entries, [])
        self.assertIsNone(dataset.created)
        self.assertIsNone(dataset.full_dataset_id)
        self.assertIsNone(dataset.etag)
        self.assertIsNone(dataset.modified)
        self.assertIsNone(dataset.self_link)
        self.assertIsNone(dataset.default_table_expiration_ms)
        self.assertIsNone(dataset.description)
        self.assertIsNone(dataset.friendly_name)
        self.assertIsNone(dataset.location)

    def test_ctor_explicit(self):
        from google.cloud.bigquery.dataset import DatasetReference, AccessEntry
        phred = AccessEntry('OWNER', 'userByEmail', 'phred@example.com')
        bharney = AccessEntry('OWNER', 'userByEmail', 'bharney@example.com')
        entries = [phred, bharney]
        OTHER_PROJECT = 'foo-bar-123'
        dataset = self._make_one(DatasetReference(OTHER_PROJECT, self.DS_ID))
        dataset.access_entries = entries
        self.assertEqual(dataset.dataset_id, self.DS_ID)
        self.assertEqual(dataset.project, OTHER_PROJECT)
        self.assertEqual(
            dataset.path,
            '/projects/%s/datasets/%s' % (OTHER_PROJECT, self.DS_ID))
        self.assertEqual(dataset.access_entries, entries)
        self.assertIsNone(dataset.created)
        self.assertIsNone(dataset.full_dataset_id)
        self.assertIsNone(dataset.etag)
        self.assertIsNone(dataset.modified)
        self.assertIsNone(dataset.self_link)
        self.assertIsNone(dataset.default_table_expiration_ms)
        self.assertIsNone(dataset.description)
        self.assertIsNone(dataset.friendly_name)
        self.assertIsNone(dataset.location)

    def test_access_entries_setter_non_list(self):
        dataset = self._make_one(self.DS_REF)
        with self.assertRaises(TypeError):
            dataset.access_entries = object()

    def test_access_entries_setter_invalid_field(self):
        # Every element of the list must be an AccessEntry.
        from google.cloud.bigquery.dataset import AccessEntry
        dataset = self._make_one(self.DS_REF)
        phred = AccessEntry('OWNER', 'userByEmail', 'phred@example.com')
        with self.assertRaises(ValueError):
            dataset.access_entries = [phred, object()]

    def test_access_entries_setter(self):
        from google.cloud.bigquery.dataset import AccessEntry
        dataset = self._make_one(self.DS_REF)
        phred = AccessEntry('OWNER', 'userByEmail', 'phred@example.com')
        bharney = AccessEntry('OWNER', 'userByEmail', 'bharney@example.com')
        dataset.access_entries = [phred, bharney]
        self.assertEqual(dataset.access_entries, [phred, bharney])

    def test_default_table_expiration_ms_setter_bad_value(self):
        dataset = self._make_one(self.DS_REF)
        with self.assertRaises(ValueError):
            dataset.default_table_expiration_ms = 'bogus'

    def test_default_table_expiration_ms_setter(self):
        dataset = self._make_one(self.DS_REF)
        dataset.default_table_expiration_ms = 12345
        self.assertEqual(dataset.default_table_expiration_ms, 12345)

    def test_description_setter_bad_value(self):
        dataset = self._make_one(self.DS_REF)
        with self.assertRaises(ValueError):
            dataset.description = 12345

    def test_description_setter(self):
        dataset = self._make_one(self.DS_REF)
        dataset.description = 'DESCRIPTION'
        self.assertEqual(dataset.description, 'DESCRIPTION')

    def test_friendly_name_setter_bad_value(self):
        dataset = self._make_one(self.DS_REF)
        with self.assertRaises(ValueError):
            dataset.friendly_name = 12345

    def test_friendly_name_setter(self):
        dataset = self._make_one(self.DS_REF)
        dataset.friendly_name = 'FRIENDLY'
        self.assertEqual(dataset.friendly_name, 'FRIENDLY')

    def test_location_setter_bad_value(self):
        dataset = self._make_one(self.DS_REF)
        with self.assertRaises(ValueError):
            dataset.location = 12345

    def test_location_setter(self):
        dataset = self._make_one(self.DS_REF)
        dataset.location = 'LOCATION'
        self.assertEqual(dataset.location, 'LOCATION')

    def test_labels_update_in_place(self):
        # The labels getter exposes a live dict backing _properties.
        dataset = self._make_one(self.DS_REF)
        del dataset._properties['labels']  # don't start w/ existing dict
        labels = dataset.labels
        labels['foo'] = 'bar'  # update in place
        self.assertEqual(dataset.labels, {'foo': 'bar'})

    def test_labels_setter(self):
        dataset = self._make_one(self.DS_REF)
        dataset.labels = {'color': 'green'}
        self.assertEqual(dataset.labels, {'color': 'green'})

    def test_labels_setter_bad_value(self):
        dataset = self._make_one(self.DS_REF)
        with self.assertRaises(ValueError):
            dataset.labels = None

    def test_labels_getter_missing_value(self):
        dataset = self._make_one(self.DS_REF)
        self.assertEqual(dataset.labels, {})

    def test_from_api_repr_missing_identity(self):
        # The resource must carry a datasetReference.
        self._setUpConstants()
        RESOURCE = {}
        klass = self._get_target_class()
        with self.assertRaises(KeyError):
            klass.from_api_repr(RESOURCE)

    def test_from_api_repr_bare(self):
        # Minimal resource: id + reference only, everything else None.
        self._setUpConstants()
        RESOURCE = {
            'id': '%s:%s' % (self.PROJECT, self.DS_ID),
            'datasetReference': {
                'projectId': self.PROJECT,
                'datasetId': self.DS_ID,
            }
        }
        klass = self._get_target_class()
        dataset = klass.from_api_repr(RESOURCE)
        self._verify_resource_properties(dataset, RESOURCE)

    def test_from_api_repr_w_properties(self):
        RESOURCE = self._make_resource()
        klass = self._get_target_class()
        dataset = klass.from_api_repr(RESOURCE)
        self._verify_resource_properties(dataset, RESOURCE)

    def test_to_api_repr_w_custom_field(self):
        # Unknown keys written directly to _properties survive
        # serialization (forward compatibility with new API fields).
        dataset = self._make_one(self.DS_REF)
        dataset._properties['newAlphaProperty'] = 'unreleased property'
        resource = dataset.to_api_repr()
        exp_resource = {
            'datasetReference': self.DS_REF.to_api_repr(),
            'labels': {},
            'newAlphaProperty': 'unreleased property',
        }
        self.assertEqual(resource, exp_resource)

    def test_from_string(self):
        cls = self._get_target_class()
        got = cls.from_string('string-project.string_dataset')
        self.assertEqual(got.project, 'string-project')
        self.assertEqual(got.dataset_id, 'string_dataset')

    def test_from_string_legacy_string(self):
        cls = self._get_target_class()
        with self.assertRaises(ValueError):
            cls.from_string('string-project:string_dataset')

    def test__build_resource_w_custom_field(self):
        # _build_resource limits the payload to the requested fields.
        dataset = self._make_one(self.DS_REF)
        dataset._properties['newAlphaProperty'] = 'unreleased property'
        resource = dataset._build_resource(['newAlphaProperty'])
        exp_resource = {
            'newAlphaProperty': 'unreleased property'
        }
        self.assertEqual(resource, exp_resource)

    def test__build_resource_w_custom_field_not_in__properties(self):
        # Fields not tracked in _properties cannot be serialized.
        dataset = self._make_one(self.DS_REF)
        dataset.bad = 'value'
        with self.assertRaises(ValueError):
            dataset._build_resource(['bad'])

    def test_table(self):
        from google.cloud.bigquery.table import TableReference
        dataset = self._make_one(self.DS_REF)
        table = dataset.table('table_id')
        self.assertIsInstance(table, TableReference)
        self.assertEqual(table.table_id, 'table_id')
        self.assertEqual(table.dataset_id, self.DS_ID)
        self.assertEqual(table.project, self.PROJECT)

    def test___repr__(self):
        from google.cloud.bigquery.dataset import DatasetReference
        dataset = self._make_one(DatasetReference('project1', 'dataset1'))
        expected = "Dataset(DatasetReference('project1', 'dataset1'))"
        self.assertEqual(repr(dataset), expected)
class TestDatasetListItem(unittest.TestCase):
    """Unit tests for ``google.cloud.bigquery.dataset.DatasetListItem``."""

    @staticmethod
    def _get_target_class():
        from google.cloud.bigquery.dataset import DatasetListItem
        return DatasetListItem

    def _make_one(self, *args, **kw):
        # Build an instance of the class under test.
        return self._get_target_class()(*args, **kw)

    def test_ctor(self):
        # A full list-item resource populates every exposed property.
        project = 'test-project'
        dataset_id = 'test_dataset'
        resource = {
            'kind': 'bigquery#dataset',
            'id': '{}:{}'.format(project, dataset_id),
            'datasetReference': {
                'projectId': project,
                'datasetId': dataset_id,
            },
            'friendlyName': 'Data of the Test',
            'labels': {
                'some-stuff': 'this-is-a-label',
            },
        }
        dataset = self._make_one(resource)
        self.assertEqual(dataset.project, project)
        self.assertEqual(dataset.dataset_id, dataset_id)
        self.assertEqual(
            dataset.full_dataset_id,
            '{}:{}'.format(project, dataset_id))
        self.assertEqual(dataset.reference.project, project)
        self.assertEqual(dataset.reference.dataset_id, dataset_id)
        self.assertEqual(dataset.friendly_name, 'Data of the Test')
        self.assertEqual(dataset.labels['some-stuff'], 'this-is-a-label')

    def test_ctor_missing_properties(self):
        # Optional keys default to None / empty dict.
        resource = {
            'datasetReference': {
                'projectId': 'testproject',
                'datasetId': 'testdataset',
            },
        }
        dataset = self._make_one(resource)
        self.assertEqual(dataset.project, 'testproject')
        self.assertEqual(dataset.dataset_id, 'testdataset')
        self.assertIsNone(dataset.full_dataset_id)
        self.assertIsNone(dataset.friendly_name)
        self.assertEqual(dataset.labels, {})

    def test_ctor_wo_project(self):
        # datasetReference.projectId is required.
        resource = {
            'datasetReference': {
                'datasetId': 'testdataset',
            },
        }
        with self.assertRaises(ValueError):
            self._make_one(resource)

    def test_ctor_wo_dataset(self):
        # datasetReference.datasetId is required.
        resource = {
            'datasetReference': {
                'projectId': 'testproject',
            },
        }
        with self.assertRaises(ValueError):
            self._make_one(resource)

    def test_ctor_wo_reference(self):
        # A resource with no datasetReference at all is rejected.
        with self.assertRaises(ValueError):
            self._make_one({})

    def test_labels_update_in_place(self):
        # The labels getter exposes a live, mutable dict.
        resource = {
            'datasetReference': {
                'projectId': 'testproject',
                'datasetId': 'testdataset',
            },
        }
        dataset = self._make_one(resource)
        labels = dataset.labels
        labels['foo'] = 'bar'  # update in place
        self.assertEqual(dataset.labels, {'foo': 'bar'})

    def test_table(self):
        # table() returns a TableReference scoped to this dataset.
        from google.cloud.bigquery.table import TableReference
        project = 'test-project'
        dataset_id = 'test_dataset'
        resource = {
            'datasetReference': {
                'projectId': project,
                'datasetId': dataset_id,
            },
        }
        dataset = self._make_one(resource)
        table = dataset.table('table_id')
        self.assertIsInstance(table, TableReference)
        self.assertEqual(table.table_id, 'table_id')
        self.assertEqual(table.dataset_id, dataset_id)
        self.assertEqual(table.project, project)
| |
from __future__ import division
# coding: utf-8
# In[ ]:
#Imports
import math
import numpy as np
from math import exp
from math import factorial
from math import log
from math import sqrt
from math import pi
from math import gamma
from math import ceil
from scipy.special import digamma
from scipy.stats import poisson
from scipy import stats
from statsmodels.base.model import GenericLikelihoodModel
from scipy.stats import norm
from scipy.stats import invgauss
import matplotlib.pyplot as plt
import time
import random
import tensorflow as tf
from edward.models import Poisson,Gamma
# In[ ]:
#- - - -Maximum Likelihood estimation code- - - -
#For Normal Distribution
# Default parameters for the normal density helper below.
mu_def = 0
sigma_def = 1


def normal_pdf(x, mu=mu_def, sigma=sigma_def):
    """Normal density at ``x`` with mean ``mu`` and std-dev ``sigma``."""
    return norm.pdf(x, loc=mu, scale=sigma)
class _normal(GenericLikelihoodModel):
    """Maximum-likelihood fitter for a univariate normal distribution.

    ``endog`` holds the observations; ``exog`` is unused and defaults to
    zeros so the statsmodels machinery has something to carry along.
    ``fit`` returns a results object whose ``params`` are [mu, sigma].
    """

    def __init__(self, endog, exog=None, **kwds):
        if exog is None:
            exog = np.zeros_like(endog)
        super(_normal, self).__init__(endog, exog, **kwds)

    def nloglikeobs(self, params):
        # params = [mu, sigma]; per-observation negative log-likelihood.
        mu = params[0]
        sigma = params[1]
        return -np.log(normal_pdf(self.endog, mu=mu, sigma=sigma))

    def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        # FIX: use `is None` instead of the original `== None` -- the
        # equality form broadcasts element-wise if an array is passed,
        # raising "truth value of an array is ambiguous".
        if start_params is None:
            # Standard-normal starting guess.
            start_params = [0, 1]
        return super(_normal, self).fit(start_params=start_params,
                                        maxiter=maxiter, maxfun=maxfun,
                                        **kwds)
# Defaults for the two-parameter inverse-Gaussian density below.
mu_def = 1
rate_def = 1


def inv_gaussian_pdf(x, mu=mu_def, rate=rate_def):
    """Inverse-Gaussian density in (mu, rate) form via scipy's invgauss.

    NOTE(review): ``x`` is rescaled by mu**2/rate before being passed to
    ``invgauss.pdf`` with no Jacobian factor -- presumably a deliberate
    re-parameterization; confirm against the model derivation.
    """
    scaled = (x * mu ** 2) / rate
    return invgauss.pdf(scaled, mu)
class _inv_gaussian(GenericLikelihoodModel):
    """Maximum-likelihood fitter for the inverse-Gaussian distribution.

    ``endog`` holds the observations; ``exog`` is unused and defaults to
    zeros. ``fit`` returns a results object whose ``params`` are
    [mu, rate] in the two-parameter form used by ``inv_gaussian_pdf``.
    """

    def __init__(self, endog, exog=None, **kwds):
        if exog is None:
            exog = np.zeros_like(endog)
        super(_inv_gaussian, self).__init__(endog, exog, **kwds)

    def nloglikeobs(self, params):
        # params = [mu, rate]; per-observation negative log-likelihood.
        mu = params[0]
        rate = params[1]
        return -np.log(inv_gaussian_pdf(self.endog, mu=mu, rate=rate))

    def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        # FIX: `is None` instead of the original `== None`, which would
        # broadcast element-wise if an array were passed.
        if start_params is None:
            start_params = [1, 1]
        return super(_inv_gaussian, self).fit(start_params=start_params,
                                              maxiter=maxiter, maxfun=maxfun,
                                              **kwds)
#Write and check for other distributions
#(at least Poisson, Gamma, Binomial)
#- - -- - - END- - - - - - - - - - - - - -
# In[ ]:
# Small constant used to dodge log(0)/div-by-zero in the math below.
epsilon=0.0000001; #to avoid math errors wherever applicable!
#HCPF
#load data [todo]
# X: full response matrix (rows = users, cols = items) from disk;
# train_mask flags the cells available for training (1) vs held out (0).
X = np.loadtxt('./pre_processed_data/bibtex/X_train.txt',delimiter=',');
train_mask = np.loadtxt('./pre_processed_data/bibtex/x_train_mask.txt');
#X_masked = X*train_mask;
# y: training view of X with held-out cells zeroed by the mask.
y = X*train_mask;
#random data
#actual data!
rows = len(X); # <- Anomaly causing!
cols = len(X[0]);
# C_u / C_i: number of users / items, used as matrix dims everywhere below.
C_u = rows;
C_i = cols;
#fake data y -> responses
#reproducable results
#np.random.seed(42)
#y = np.random.normal(loc=0, scale=2, size=[C_u, C_i]);
# In[ ]:
# Collect the (user, item) index pairs of every held-out (masked) cell;
# these form the test set for MAE / test-log-likelihood evaluation.
#y_nm = list(); #non-missing entries
#y_m = list(); #missing entries
#y_train_idx_pair = list(); #use all
#y_train_idx_nm_pair = list(); #use all nm
y_test_idx_pair = list(); #use all nm
#missing/non-missing entries
for i in range(C_u):
    for j in range(C_i):
        #y_train_idx_pair.append([i,j]);
        #if(y[i][j]!=0):
        #    y_nm.append([i,j]);
        #    y_train_idx_nm_pair.append([i,j]);
        #if(y[i][j]==0):
        #y_m.append([i,j]);
        # A zero in the mask means this cell was held out of training.
        if(train_mask[i][j]==0):
            y_test_idx_pair.append([i,j]);
#train.. val, test split - - - - - - - - - -
# Freeze the test index list so it cannot be mutated later.
y_test_idx_pair = tuple(y_test_idx_pair);
#- - - - - - - - - - -END- - - - - - - - - - -
# In[ ]:
# (Disabled) earlier validation/test splitting logic kept for reference.
#1% non missing entries as val, 20% for test
#num_val = int(np.floor(0.01 * len(y_nm)));
#num_test = int(np.floor(0.2 * len(y_nm)));
#y_val_idx_pair = list();
#y_test_idx_pair = list();
#assert(num_test>0);
#print num_test
#y_val_entry_idx = np.random.choice(range(0,len(y_nm)),num_val,replace=False); #random sampling without replacement
#for i in range(len(y_val_entry_idx)):
#    idx = y_val_entry_idx[i];
#    y_val_idx_pair.append(y_nm[idx]);
#remaining_idx = [idx_pair for idx_pair in y_nm if (idx_pair not in y_val_idx_pair)];
#np.random.seed(42);
#y_test_entry_idx = np.random.choice(range(0,len(y_nm)),num_test,replace=False); #random sampling without replacement
#np.random.seed(None);
#y_test_entry_idx = np.random.choice(range(0,len(remaining_idx)),num_test,replace=False);
#for i in range(len(y_test_entry_idx)):
#    idx = y_test_entry_idx[i];
#    y_test_idx_pair.append(y_nm[idx]);
#remaining_idx = [idx_pair for idx_pair in remaining_idx if (idx_pair not in y_test_idx_pair)];
#y_train_idx_nm_pair = remaining_idx;
#y_train_idx_pair = remaining_idx + y_m; #taining indices - remaining non-missing + missing
# In[ ]:
#Fixed Hyperparameters
# K: latent dimension; rho*/omega*/xi/tau: HCPF gamma-prior and
# learning-rate hyperparameters (held fixed throughout training).
K = 160;
rho2 = 0.1;
xi = 0.7;
rho1 = 0.01;
omega = 0.01;
omega_bar = 0.1;
tau = 10000;
zero_mat = np.zeros([C_u, C_i]);
# Sparsity statistics of the training matrix drive the initial E[n].
num_non_zero_entries = np.sum(y!=0);#len(y_train_idx_nm_pair); #in training.
num_tr_entries = C_i * C_u; #len(y_train_idx_pair);
num_missing_entries = num_tr_entries - num_non_zero_entries;
E_n_ui = num_non_zero_entries/num_tr_entries; #sparsity to begin with
print E_n_ui;
E_n = np.ones([C_u,C_i])*E_n_ui; #is this a valid initialization?
# eta/zeta: shape hyperparameters scaled by the observed sparsity.
eta = rho2 * math.sqrt(E_n_ui/K);
zeta = omega_bar * math.sqrt(E_n_ui/K);
# theta/kappa: EDM natural/dispersion parameters, overwritten by the
# MLE step inside svi_hcpf().
theta=0.0; #needs change [*] -> MLE
kappa=0.0; #needs change [*] -> MLE
# In[ ]:
#MLE driver/interface
def perform_mle(dist, data):
    """Fit ``dist`` to ``data`` by maximum likelihood.

    Supported values of ``dist`` are 'normal' and 'inv_gaussian'.
    Returns ``[theta, kappa]``: the natural and dispersion parameters of
    the matching exponential-dispersion family, derived from the fitted
    distribution parameters.

    Raises ValueError for an unsupported ``dist`` (previously the
    function silently returned None, which crashed later with a
    confusing unpacking error).
    """
    if dist == 'normal':
        model = _normal(data)
        results = model.fit()
        mu_mle, sigma_mle = results.params
        # Normal EDM: theta = mu / sigma^2, kappa = sigma^2.
        theta = mu_mle / sigma_mle ** 2
        kappa = sigma_mle ** 2
        return [theta, kappa]
    elif dist == 'inv_gaussian':
        model = _inv_gaussian(data)
        results = model.fit()
        mu_mle, rate_mle = results.params
        # Inverse-Gaussian EDM: theta = -rate / (2 mu^2), kappa = sqrt(rate).
        theta = -rate_mle / (2 * mu_mle ** 2)
        kappa = sqrt(rate_mle)
        return [theta, kappa]
    raise ValueError('unsupported distribution: %s' % dist)
#- - - -END - - - -
# In[ ]:
def dist_mean(dist, n_expected):
    """Mean response implied by the module-level (theta, kappa) and E[n].

    Reads the globals ``theta`` and ``kappa`` that the MLE step sets.
    Returns None for distributions other than the two handled here.
    """
    if dist == 'inv_gaussian':
        # The dispersion scales with the latent count; the IG mean is
        # the mu parameter recovered from (theta, rate).
        scaled_kappa = kappa * n_expected
        rate_params = scaled_kappa ** 2
        return np.sqrt(-0.5 * rate_params / theta)
    if dist == 'normal':
        # Normal EDM mean: theta * variance, variance = n * kappa.
        return theta * n_expected * kappa
def mae(result):
    """Mean absolute error of ``result`` versus the global ``X`` over the
    held-out cells listed in the global ``y_test_idx_pair``."""
    total = 0.0
    count = 0.0
    for [i, j] in y_test_idx_pair:
        total += abs(X[i][j] - result[i][j])
        count += 1
    return total / count
def check(dist):
    """Estimate the held-out MAE from the current variational posterior.

    Draws Monte-Carlo samples of the latent factors from the fitted
    Gamma variational distributions (globals a_s/b_s and a_v/b_v),
    averages their products to get E[n] per cell, maps that through
    ``dist_mean`` and scores against the test cells via ``mae``.
    """
    # Variational Gamma posteriors over user (s) and item (v) factors.
    q_s = Gamma(a_s,b_s)
    q_v = Gamma(a_v,b_v)
    sess = tf.InteractiveSession()
    init = tf.global_variables_initializer()
    init.run()
    no_sample = 100
    s_sample = q_s.sample(no_sample).eval()
    v_sample = q_v.sample(no_sample).eval()
    n = np.zeros([C_u,C_i]);
    result = np.zeros([C_u,C_i]);
    n_expected = np.zeros([C_u,C_i]);
    # Accumulate s_k * v_k over samples, then average.
    for i in range(0,no_sample):
        n = np.add(n,np.matmul(s_sample[i],np.transpose(v_sample[i])))
    n_expected = n/no_sample; #mean of poisson is rate param. So this is fine.
    #sample response
    #distribution specific
    result = dist_mean(dist,n_expected);
    return mae(result)
# In[ ]:
#- - - - - - - call svi on data - - - - - - - -
#globals
# Global state shared between svi_hcpf(), check() and the likelihood
# helpers below. q_n: per-cell variational count distribution;
# lambda_: per-cell Poisson rate; a_*/b_*: Gamma variational params
# (filled in by svi_hcpf).
N_tr = 10; # <- Should change
q_n = np.zeros([C_u, C_i, N_tr+1]); #need to keep these global; N_tr - 0->end
lambda_ = np.zeros([C_u, C_i]);
a_s=list();
b_s=list();
a_v=list();
b_v=list();
#----------------------------------------------
def log_partition_fn(dist, theta):
    """Log-partition (cumulant) function psi(theta) of the EDM family.

    ``theta`` is the natural parameter; each branch is the standard
    cumulant function of the named exponential-dispersion family.

    Raises ValueError for an unknown ``dist`` (previously an unknown
    name silently fell off the end and returned None).
    """
    if dist == 'normal':
        return (theta ** 2) / 2
    elif dist == 'gamma':
        return -log(-theta)
    elif dist == 'inv_gaussian':
        return -sqrt(-2 * theta)
    elif dist == 'poisson':
        return exp(theta)
    elif dist == 'binomial':
        return log(1 + exp(theta))
    elif dist == 'neg_binomial':
        return -log(1 - exp(theta))
    elif dist == 'ztp':
        # Zero-truncated Poisson.
        return log(exp(exp(theta)) - 1)
    raise ValueError('unknown distribution: %s' % dist)
def h_const(dist, x, kappa):
    """Base measure h(x, kappa) of the EDM family ``dist``.

    Together with ``log_partition_fn`` this defines the density
    f(x) = h(x, kappa) * exp(x*theta - kappa*psi(theta)).

    Raises ValueError for an unknown ``dist`` (previously fell through
    to ``return h`` with ``h`` unbound).
    """
    if dist == 'normal':
        h = (1 / sqrt(2 * pi * kappa)) * exp(-x ** 2 / (2 * kappa))
    elif dist == 'gamma':
        h = (x ** (kappa - 1)) / gamma(kappa)
    elif dist == 'inv_gaussian':
        # epsilon (module global) guards the x**3 term against x == 0;
        # NOTE(review): the exp term still divides by raw x, so x == 0
        # would still raise here.
        h = (kappa / sqrt(2 * pi * (x + epsilon) ** 3)) * exp(-kappa ** 2 / (2 * x))
    elif dist == 'poisson':
        h = (kappa ** x) / factorial(x)
    elif dist == 'binomial':
        # BUG FIX: the binomial coefficient C(kappa, x) is
        # kappa! / (x! * (kappa - x)!). The original divided by
        # factorial(x - kappa), whose argument is negative for x < kappa,
        # so this branch raised whenever it was exercised.
        h = factorial(kappa) / (factorial(x) * factorial(kappa - x))
    elif dist == 'neg_binomial':
        h = factorial(x + kappa - 1) / (factorial(x) * factorial(kappa - 1))
    elif dist == 'ztp':
        # Zero-truncated Poisson base measure via inclusion-exclusion.
        h = 0.0
        for j in range(kappa):
            h = h + (-1) ** j * (kappa - j) ** x * factorial(kappa) / (factorial(j) * factorial(kappa - j))
        h = h / factorial(x)
    else:
        raise ValueError('unknown distribution: %s' % dist)
    return h
def get_values_from_pair(data_y, index_pairs):
    """Gather ``data_y[i][j]`` for every (i, j) in ``index_pairs``."""
    gathered = []
    for i, j in index_pairs:
        gathered.append(data_y[i][j])
    return gathered
def eval_edm(x, dist, theta, kappa):
    """Density of the exponential-dispersion family ``dist`` at ``x``."""
    log_linear = x * theta - kappa * log_partition_fn(dist, theta)
    return exp(log_linear) * h_const(dist, x, kappa)
def calc_val_ll(theta, kappa, dist, y_val_idx_pair):
    """Validation log-likelihood over the given (user, item) pairs.

    Marginalizes the EDM likelihood of y[u][i] over the latent count n
    (1..N_tr) weighted by Poisson(n; lambda_[u][i]); reads the globals
    ``y``, ``lambda_``, ``N_tr`` and ``epsilon``.
    """
    #for all pairs
    ll_nm = 0;
    global lambda_;
    for idx in range(len(y_val_idx_pair)):
        u = y_val_idx_pair[idx][0];
        i = y_val_idx_pair[idx][1];
        marginal = 0;
        for n in range(1,N_tr+1): #0 -> N_tr (0 causes div by zero in h_const)
            marginal = marginal + eval_edm(y[u][i],dist,theta,n*kappa) * poisson.pmf(n,lambda_[u][i]);
        # epsilon keeps log() finite when the marginal underflows to 0.
        ll_nm = ll_nm + log(marginal+epsilon);
    return ll_nm;
#SVI for HCPF
def svi_hcpf(dist,y):
    """Stochastic variational inference for HCPF on response matrix ``y``.

    Repeatedly samples a random (user, item) cell, refreshes its local
    variational parameters (lambda_, q_n, phi) and takes a stochastic
    natural-gradient step on the global Gamma parameters (a_s/b_s for
    users, a_v/b_v for items). Mutates the module-level globals and
    periodically records the test MAE into ``mae_list``.
    """
    print 'hcpf alloc started'
    global val_ll_list;
    val_ll_list = list();
    #initialize any other Hyperparams:
    #MLE for theta, kappa
    y_vec = y.flatten();
    #y_vec = get_values_from_pair(y,y_train_idx_pair);
    global theta, kappa;
    # NOTE(review): MLE is disabled and replaced by hard-coded values,
    # presumably cached from an earlier perform_mle run -- confirm.
    #theta, kappa = perform_mle(dist,y_vec);
    theta = 1.02922697078;
    kappa = 0.0285410730752;
    #print theta,kappa;
    print 'mle done'
    #init parameters [FIXED]
    a_r = np.zeros(C_u) + rho1 + K*eta;
    a_w = np.zeros(C_i) + omega + K*zeta;
    #init parameters [Updates for these, present]
    t_u = tau;
    t_i = tau;
    b_r = np.zeros(C_u) + (rho1/rho2);
    global a_s, b_s, a_v, b_v;
    a_s = np.zeros([C_u,K]) + eta;
    b_s = np.zeros([C_u,K]) + rho2;
    b_w = np.zeros(C_i) + omega/omega_bar;
    a_v = np.zeros([C_i,K]) + zeta;
    b_v = np.zeros([C_i,K]) + omega_bar;
    #init local variational parameters
    #q_n, lambda_ declared above
    phi = np.zeros(K);
    print 'alloc done'
    #Repeat till convergence
    cnt = 0; #placeholder convergence condition
    curr_idx = 0;
    global q_n,lambda_; #you're accessing global values. - -> relocate to original
    while(True):
        #Pick a sample uniformly from training portion
        #start = time.time();
        #tr_idx = np.random.choice(range(1,len(y_train_idx_pair)+1),1,replace=False)[0];
        #tr_idx = int(ceil(random.uniform(0.1,len(y_train_idx_pair)))); #faster!
        #tr_idx_pair = y_train_idx_pair[tr_idx-1];
        #end = time.time()
        #print 'random_choice',end-start;
        # Draw a uniformly random (u, i) cell.
        u = int(np.ceil(np.random.uniform(0.01, C_u)))-1;
        i = int(np.ceil(np.random.uniform(0.01, C_i)))-1;
        #rem = r_idx % C_u;
        #div = int(r_idx / C_u)-1;
        #if(div<0):
        #    div=0;
        #[u,i] = [tr_idx_pair[0],tr_idx_pair[1]];
        #compute local variational parameters
        #old_lambda_ = np.zeros([C_u,C_i]); #debugging purpose
        #lambda_ = np.zeros([C_u, C_i]);
        #start = time.time()
        #update lambda_ui
        # Poisson rate for this cell: sum over factors of E[s_k] E[v_k].
        # NOTE(review): this accumulates onto the previous lambda_[u][i]
        # rather than recomputing from zero -- confirm intended.
        for k in range(K):
            lambda_[u][i] = lambda_[u][i] + (a_s[u][k]*a_v[i][k])/(b_s[u][k]*b_v[i][k]);
        #end = time.time()
        #print 'lambda_',end-start;
        #update q_n_ui
        #distribution specific
        #need this for calculating E_n_ui
        #q_n_ui = calc_q_n_ui(dist,N_tr,lambda_,kappa);
        #Distribution specific calculation
        #calculating q_n[u][i] dist
        q_n_ui = np.zeros(N_tr+1); #keep 0th empty for now, for convenience -- not required
        #start = time.time()
        # Unnormalized posterior over the latent count n for this cell.
        for n in range(1,N_tr+1): #value 'n' is being used
            q_n_ui[n] = exp(-kappa*n*log_partition_fn(dist,theta)) * h_const(dist, y[u][i], n*kappa) * (lambda_[u][i])**n/factorial(n);
        #end = time.time()
        #print 'q_n',end-start;
        #^Normalize
        #start = time.time()
        #q_n_sum = 0;
        #for n in range(N_tr+1):
        q_n[u][i] = q_n_ui/np.sum(q_n_ui);
        #q_n_sum = q_n_sum + q_n[u][i][n];
        #end = time.time()
        #print "q_n_norm",end-start;
        #Calculate E[n_ui] [check]
        #E_n_ui = q_n_sum/N_tr; #Not sure if this is correct way to do
        #start = time.time()
        # Expected latent count under q_n for this cell.
        E_n_ui=0.0;
        for n in range(1,N_tr+1):
            E_n_ui = E_n_ui + (n)*(q_n[u][i][n]);
        E_n[u][i] = E_n_ui;
        #end = time.time()
        #print "e_n", end-start;
        #update phi_uik
        #calculating phi[u][i] dist
        #start = time.time()
        #for k in range(K):
        # Multinomial factor responsibilities (geometric-mean form).
        phi[:] = np.exp(digamma(a_s[u]) - np.log(b_s[u]) + digamma(a_v[i]) - np.log(b_v[i]));
        #^doubtful on form of above [to check]
        #end = time.time()
        #print "phi", end-start;
        #^Normalize
        #check if all 0
        if sum(phi)==0: #set uniform
            phi[:] = 1/K;
        phi = phi/sum(phi);
        #what to do with prop? [--Resolved--]
        #start = time.time()
        #compute global variational parameters [error prone!]
        # Stochastic natural-gradient steps with step size t**(-xi).
        b_r[u] = (1 - t_u**(-xi)) * b_r[u] + t_u**(-xi) * (rho1/rho2 + np.sum(a_s[u]/b_s[u]));
        for k in range(K):
            a_s[u][k] = (1 - t_u**(-xi)) * a_s[u][k] + t_u**(-xi) * (eta + C_i * E_n_ui * phi[k]);
            b_s[u][k] = (1 - t_u**(-xi)) * b_s[u][k] + t_u**(-xi) * (a_r[u]/b_r[u] + C_i * (a_v[i][k]/b_v[i][k]));
        b_w[i] = (1 - t_i**(-xi)) * b_w[i] + t_i**(-xi) * (omega/omega_bar + np.sum(a_v[i]/b_v[i]));
        for k in range(K):
            a_v[i][k] = (1 - t_i**(-xi)) * a_v[i][k] + t_i**(-xi) * (zeta + C_u * E_n_ui * phi[k]);
            b_v[i][k] = (1 - t_i**(-xi)) * b_v[i][k] + t_i**(-xi) * (a_w[i]/b_w[i] + C_u * a_s[u][k]/b_s[u][k]);
        #end = time.time()
        #print "updates", end-start;
        #update learning rates
        t_u = t_u + 1;
        t_i = t_i + 1;
        #optional - update theta, kappa
        cnt = cnt + 1;
        #check for convergence - [Actual - validation log likelihood converges] todo
        #compute validation log likelihood
        #val_ll = calc_val_ll(theta, kappa, dist, y_val_idx_pair);
        #val_ll_list.append(val_ll);
        #mae
        #calculate MAE
        # Every 100k iterations, record the current test MAE.
        if cnt%100000==0:
            mae_val = check(dist);
            print mae_val;
            mae_list[curr_idx]=mae_val;
            curr_idx = curr_idx + 1;
        # Hard iteration cap in lieu of a real convergence test.
        if(cnt == max_iter):
            break;
# In[ ]:
def sample_from_edm(dist, theta, kappa):
    """Draw one sample from the EDM ``dist`` with natural parameter
    ``theta`` and dispersion ``kappa``.

    Only the normal family is implemented; other names raise
    NotImplementedError (previously they silently returned None).
    Returns a length-1 numpy array.
    """
    if dist == 'normal':
        # Normal EDM: variance = kappa, mean = theta * variance.
        sigma_2 = kappa
        sigma = sqrt(sigma_2)
        mu = theta * sigma_2
        return np.random.normal(loc=mu, scale=sigma, size=1)
    #todo for other distributions
    raise NotImplementedError('sampling not implemented for %s' % dist)
# In[ ]:
def calc_test_ll(theta, kappa, dist, y_test_idx_pair):
    """Test log-likelihood over the held-out pairs.

    Reads the globals ``X``, ``lambda_``, ``N_tr``, ``N`` and
    ``num_missing_entries``. Accumulates the count-marginalized EDM
    likelihood for each test cell plus a scaled log-probability of the
    zero count for missing cells.

    NOTE(review): unlike ``calc_val_ll``, the per-cell ``marginal`` is
    added raw rather than as ``log(marginal + epsilon)`` -- looks like a
    missing log; confirm against the intended objective.
    """
    #for all pairs
    ll_nm = 0;
    ll_m = 0;
    global lambda_;
    for idx in range(len(y_test_idx_pair)):
        u = y_test_idx_pair[idx][0];
        i = y_test_idx_pair[idx][1];
        marginal = 0;
        for n in range(1,N_tr+1): #0 -> N_tr (0 causes div by zero in h_const)
            marginal = marginal + eval_edm(X[u][i],dist,theta,n*kappa) * poisson.pmf(n,lambda_[u][i]);
        ll_nm = ll_nm + marginal;
        ll_m = ll_m + log(poisson.pmf(0,lambda_[u][i]));
    return (0.2*(num_missing_entries)*ll_m)/len(y_test_idx_pair) + ll_nm;
# In[ ]:
# - - - - - -test - - - - -
#test using test-log-likelihood --
# Driver: run SVI on the normal-response model, then report the test
# log-likelihood and plot the recorded MAE trajectory.
dist = 'normal';
#call svi
#Format: svi_hcpf(dist,y):
max_iter = C_u * C_i; #maximum iterations, to break early!
val_ll_list = list();
mae_list = np.zeros(max_iter);
svi_hcpf(dist,y);
print theta,kappa
#Reality Check
#sample and check
y_sampled = np.zeros([C_u, C_i]);
loss = 0;
#test log likelihood
print calc_test_ll(theta, kappa, dist, y_test_idx_pair);
# Disabled sanity check: sample reconstructions and compare their RMS
# error against a random baseline.
"""
for u in range(C_u):
    for i in range(C_i):
        #sample count -> use variational approx. q_n[u][i]
        #can treat above dist. as multinoulli, and draw from it'
        #random.multinomial returns frequency, 1 is in atmost one entry. So argmax gets that
        #n_sample = np.random.multinomial(1,q_n[u][i],size=1)[0].argmax();
        E_n_ui = E_n[u][i];
        y_sampled[u][i] = sample_from_edm(dist,theta, E_n_ui*kappa);
        loss = loss + (y_sampled[u][i] - y[u][i])**2;
print 'rms_obtained', sqrt(loss/(C_u*C_i));
#check significance
random_y = np.random.normal(loc=0, scale=2, size=[C_u, C_i]);
loss_r = 0;
for u in range(C_u):
    for i in range(C_i):
        loss_r = loss_r + (random_y[u][i] - y[u][i])**2;
print 'rms_random', sqrt(loss_r/(C_u*C_i));
"""
#plot val_ll
plt.plot(mae_list);
plt.show();
# In[ ]:
#check
| |
import os
import imp
import random
import json
from collections import OrderedDict
from emonitor.lib.pdf.pdf import getFormFields, getPDFInformation
from flask import render_template, request, current_app
from itertools import chain
from emonitor.extensions import db, scheduler, babel
from emonitor.modules.alarms.alarmutils import AlarmFaxChecker, processFile
from emonitor.modules.alarms.alarm import Alarm
from emonitor.modules.alarms.alarmfield import AlarmField, AFBasic
from emonitor.modules.alarms.alarmtype import AlarmType
from emonitor.modules.alarms.alarmsection import AlarmSection
from emonitor.modules.alarms.alarmreport import AlarmReport
from emonitor.modules.settings.settings import Settings
from emonitor.modules.settings.department import Department
from emonitor.modules.cars.car import Car
def getAdminContent(self, **params):
    """
    Deliver admin content of module alarms.

    Dispatches on the sub-module named in the request path
    (``types`` / ``report`` / ``config`` / ``test``) and, for POST
    requests, on the posted ``action`` form value.

    :param params: use given parameters of request
    :return: rendered template as string
    """
    module = request.view_args['module'].split('/')
    if len(module) > 1:
        if module[1] == 'types':
            impl = []  # load implementations of faxchecker
            for f in [f for f in os.listdir('%s/emonitor/modules/alarms/inc/' % current_app.config.get('PROJECT_ROOT')) if f.endswith('.py')]:
                if not f.startswith('__'):
                    cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % f)
                    if isinstance(getattr(cls, cls.__all__[0])(), AlarmFaxChecker):
                        impl.append((f, getattr(cls, cls.__all__[0])(), AlarmType.getAlarmTypeByClassname(f)))
            if request.method == 'POST':
                if request.form.get('action') == 'createtype':  # add type
                    params.update({'alarmtype': AlarmType('', ''), 'interpreter': impl})
                    return render_template('admin.alarms.type_actions.html', **params)
                elif request.form.get('action').startswith('deleteinterpreter_'):  # delete checker
                    # Bug fix: iterate over a copy of *impl* because matching
                    # entries are removed from it inside the loop; mutating a
                    # list while iterating it skips elements.
                    for cls in impl[:]:
                        if cls[0] == request.form.get('action')[18:]:
                            if os.path.exists('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), cls[0])):
                                os.remove('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), cls[0]))
                            # Also remove the compiled .pyc twin if present.
                            if os.path.exists('%s/emonitor/modules/alarms/inc/%sc' % (current_app.config.get('PROJECT_ROOT'), cls[0])):
                                os.remove('%s/emonitor/modules/alarms/inc/%sc' % (current_app.config.get('PROJECT_ROOT'), cls[0]))
                            impl.remove(cls)
                elif request.form.get('action').startswith('editalarmtype_'):  # edit type
                    params.update({'alarmtype': AlarmType.getAlarmTypes(id=int(request.form.get('action').split('_')[-1])), 'interpreter': impl})
                    return render_template('admin.alarms.type_actions.html', **params)
                elif request.form.get('action').startswith('deletetype_'):  # delete type
                    db.session.delete(AlarmType.getAlarmTypes(id=int(request.form.get('action').split('_')[-1])))
                    db.session.commit()
                elif request.form.get('action') == 'updatetype':  # update type
                    if request.form.get('type_id') == 'None':  # add type
                        atype = AlarmType('', '')
                        db.session.add(atype)
                    else:  # update
                        atype = AlarmType.getAlarmTypes(id=int(request.form.get('type_id')))
                    atype.name = request.form.get('edit_name')
                    atype.keywords = request.form.get('edit_keywords')
                    atype.interpreter = request.form.get('edit_interpreter')
                    # Pair each posted variable with its translation by index.
                    translations = dict()
                    _vars = request.form.getlist('alarmtypevariables')
                    _values = request.form.getlist('alarmtypetranslation')
                    for _var in _vars:
                        translations[_var] = _values[_vars.index(_var)]
                    atype.translations = translations
                    db.session.commit()
                    if request.form.get('type_id') == 'None':  # add predefined keywords and sections
                        # add pre-defined sections
                        for checker in [i for i in impl if i[0] == request.form.get('edit_interpreter')]:
                            if request.form.get('edit_keywords') == "":
                                atype.keywords = "\n".join(checker[1].getDefaultConfig()['keywords'])
                            sections = checker[1].getDefaultConfig()['sections']
                            i = 1
                            for key in sections:
                                db.session.add(AlarmSection(atype.id, key, sections[key][0], 1, sections[key][1], i))
                                i += 1
                            db.session.commit()
                elif request.form.get('action').startswith('createsection_'):  # add section
                    alarmtype = AlarmType.getAlarmTypes(id=int(request.form.get('action').split('_')[-1]))
                    params.update({'alarmtype': alarmtype, 'section': AlarmSection(alarmtype.id, '', '', 0, '', 0), 'functions': alarmtype.interpreterclass().getEvalMethods()})
                    return render_template('admin.alarms.sections_actions.html', **params)
                elif request.form.get('action') == 'updatesection':  # save section
                    if request.form.get('section_id') == 'None':  # add
                        section = AlarmSection('', '', '', '', '', '')
                        db.session.add(section)
                        section.orderpos = 1 + len(AlarmSection.getSections())
                    else:  # update
                        section = AlarmSection.getSections(id=int(request.form.get('section_id')))
                        section.orderpos = request.form.get('edit_orderpos')
                    section.tid = request.form.get('edit_tid')
                    section.name = request.form.get('edit_name')
                    section.key = request.form.get('edit_key')
                    section.method = request.form.get('edit_method')
                    section.active = request.form.get('edit_active')
                    db.session.commit()
                elif request.form.get('action').startswith('editalarmsection_'):  # edit section
                    section = AlarmSection.getSections(id=int(request.form.get('action').split('_')[-1]))
                    params.update({'section': section, 'functions': section.alarmtype.interpreterclass().getEvalMethods(), 'alarmtype': AlarmType.getAlarmTypes(section.tid)})
                    return render_template('admin.alarms.sections_actions.html', **params)
                elif request.form.get('action').startswith('deletealarmsection_'):  # delete section
                    section = AlarmSection.getSections(id=int(request.form.get('action').split('_')[-1]))
                    db.session.delete(section)
                    db.session.commit()
            params.update({'alarmtypes': AlarmType.getAlarmTypes(), 'interpreters': impl})
            return render_template('admin.alarms.type.html', **params)
        elif module[1] == 'report':
            if request.method == 'POST':
                if request.form.get('action') == 'createreport':  # add report
                    params.update({'report': AlarmReport('', '', '', 1, []), 'departments': Department.getDepartments(), 'reporttypes': AlarmReport.getReportTypes()})
                    return render_template('admin.alarms.report_action.html', **params)
                elif request.form.get('action') == 'updatereport':
                    if request.form.get('report_id') == 'None':  # add new report
                        report = AlarmReport('', '', '', '')
                        db.session.add(report)
                    else:
                        report = AlarmReport.getReports(request.form.get('report_id'))
                    report.name = request.form.get('edit_name')
                    # Templates under PATH_DATA are user-supplied ("external");
                    # everything else ships with the project ("internal").
                    if not request.form.get('template').startswith(current_app.config.get('PATH_DATA')):  # internal template
                        report._reporttype = 'internal'
                        report.filename = request.form.get('template').replace("{}/emonitor/modules/alarms/templates/".format(current_app.config.get('PROJECT_ROOT')).replace('\\', '/'), "")
                    else:
                        report._reporttype = 'external'
                        report.filename = request.form.get('template').replace("{}".format(current_app.config.get('PATH_DATA')), "")
                    report.fields = json.loads(request.form.get('fielddefinition'))
                    report.departments = request.form.getlist('edit_department')
                    db.session.commit()
                elif request.form.get('action').startswith('editreport_'):  # edit report
                    report = AlarmReport.getReports(request.form.get('action').split('_')[-1])
                    params.update({'report': report, 'departments': Department.getDepartments(), 'reporttypes': AlarmReport.getReportTypes(), 'alarmfields': AlarmField.getAlarmFields()})
                    return render_template('admin.alarms.report_action.html', **params)
                elif request.form.get('action').startswith('deletereport_'):  # delete report
                    report = AlarmReport.getReports(request.form.get('action').split('_')[-1])
                    # Only delete the template file when no other report uses it.
                    if AlarmReport.query.filter(AlarmReport.filename == report.filename).count() == 1 and os.path.exists(report.filename):
                        os.remove(report.filename)
                    db.session.delete(report)
                    db.session.commit()
            params.update({'reports': AlarmReport.getReports(), 'departments': Department.getDepartments()})
            return render_template('admin.alarms.report.html', **params)
        elif module[1] == 'config':
            if request.method == 'POST':
                if request.form.get('action') == 'alarmcarsprio':
                    for k in Alarm.ALARMSTATES.keys():
                        if 'used_cars{}'.format(k) in request.form.keys():
                            Settings.set('alarms.spc_cars.{}'.format(k), request.form.get('used_cars{}'.format(k)))
                    db.session.commit()
                elif request.form.get('action') == 'alarmsettings':
                    Settings.set('alarms.autoclose', request.form.get('settings.autoclose'))
                    for aalarm in [a for a in Alarm.getAlarms() if a.state == 1]:  # only active alarms
                        aalarm.updateSchedules(reference=1)  # use alarmtime as refernce
                elif request.form.get('action') == 'archivesettings':
                    Settings.set('alarms.autoarchive', request.form.get('settings.autoarchive'))
                    for aalarm in [a for a in Alarm.getAlarms() if a.state == 2]:  # only closed alarms
                        aalarm.updateSchedules(reference=1)  # use alarmtime as refernce
                elif request.form.get('action').startswith('save_'):
                    if request.form.get('fieldid') == 'None':
                        field = AlarmField.getAlarmFieldForType(request.form.get('action').split('_')[1], dept=request.form.get('action').split('_')[2])
                        db.session.add(field)
                    else:
                        field = AlarmField.getAlarmFields(id=request.form.get('fieldid'))
                    field.saveConfigForm(request)
                    db.session.commit()
                elif request.form.get('action').startswith('field_delete_'):  # delete definition of field
                    db.session.delete(AlarmField.getAlarmFields(id=request.form.get('action').split('_')[-1]))
                    db.session.commit()
                elif request.form.get('action').startswith('field_add_'):  # add field for department
                    field = AlarmField.getAlarmFieldForType(request.form.get('action').split('_')[-2], dept=request.form.get('action').split('_')[-1])
                    field.name = babel.gettext(field.name)
                    db.session.add(field)
                    db.session.commit()
            fields = {}
            for dept in Department.getDepartments():
                fields[dept.id] = AlarmField.getAlarmFieldsForDepartment(dept.id)
            params.update({'cars': Car.getCars(), 'alarmstates': Alarm.ALARMSTATES, 'settings': Settings, 'departments': Department.getDepartments(), 'fields': fields})
            return render_template('admin.alarms.config.html', **params)
        elif module[1] == 'test':
            params.update({'uploadfileformat': filter(None, sum([Settings.get('ocr.inputformat', []), Settings.get('ocr.inputtextformat', [])], []))})
            return render_template('admin.alarms.test.html', **params)
    else:
        params.update({'alarms': dict(Alarm.getAlarmCount()), 'alarmstates': Alarm.ALARMSTATES, 'help': self.hasHelp('admin')})
        return render_template('admin.alarms.html', **params)
def getAdminData(self):
    """
    Deliver admin content of module alarms (ajax).

    Dispatches on the ``action`` request argument (file uploads, checker
    management, keyword lookup, alarm listing/archiving, report helpers).

    :return: rendered template as string or json dict
    """
    if request.args.get('action') == 'upload':
        if request.files:
            ufile = request.files['uploadfile']
            fname = os.path.join(current_app.config.get('PATH_TMP'), ufile.filename)
            ufile.save(fname)
            scheduler.add_job(processFile, args=[current_app.config.get('PATH_TMP'), ufile.filename])  # schedule operation
        return ""
    elif request.args.get('action') == 'uploadchecker':
        if request.files:
            ufile = request.files['uploadfile']
            if not os.path.exists('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename)):
                ufile.save('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename))
                try:
                    cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % ufile.filename)
                    if isinstance(getattr(cls, cls.__all__[0])(), AlarmFaxChecker):
                        return "ok"
                # Bug fix: was a bare ``except:`` which also swallows
                # SystemExit/KeyboardInterrupt; any failure to import or
                # instantiate the checker is treated as "not valid".
                except Exception:
                    pass
                os.remove('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename))
                return babel.gettext(u'admin.alarms.checkernotvalid')
        return ""
    elif request.args.get('action') == 'getkeywords':
        # Return default keywords and translation variables of a checker.
        for f in [f for f in os.listdir('%s/emonitor/modules/alarms/inc/' % current_app.config.get('PROJECT_ROOT')) if f.endswith('.py')]:
            if f == request.args.get('checker'):
                cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % f)
                variables = getattr(cls, cls.__all__[0])().getDefaultConfig()[u'translations']
                return {u'keywords': "\n".join(getattr(cls, cls.__all__[0])().getDefaultConfig()[u'keywords']), u'variables': variables}
        return ""
    elif request.args.get('action') == 'alarmsforstate':
        alarms = Alarm.getAlarms(state=int(request.args.get('state')))
        return render_template('admin.alarms_alarm.html', alarms=alarms)
    elif request.args.get('action') == 'alarmsarchive':
        for id in request.args.get('alarmids').split(','):
            Alarm.changeState(int(id), 3)
        return ""
    elif request.args.get('action') == 'savefieldorder':  # change order of fields
        fields = []
        deptid = '0'
        for f in request.args.get('order').split(','):
            t, deptid, name = f.split('.')
            fields.append(name)
        if int(deptid):
            for dbfield in AlarmField.getAlarmFields(dept=deptid):
                dbfield.position = fields.index(dbfield.fieldtype)
            db.session.commit()
        return ""
    elif request.args.get('action') == 'addreport':
        f = request.files['template']
        # Store under a random name; fname[2:] strips the leading "0." of
        # the random float so the stored name is "<digits>.<ext>".
        fname = "{}.{}".format(random.random(), f.filename.split('.')[-1])
        fpath = '{}alarmreports/{}'.format(current_app.config.get('PATH_DATA'), fname[2:])
        f.save(fpath)
        if f.filename.endswith('pdf'):
            fields = getFormFields(fpath)
            content = render_template('admin.alarms.report_fields.html', filepath='{}alarmreports/{}'.format(current_app.config.get('PATH_DATA'), fname[2:]), fileinfo=getPDFInformation(fpath), fields=fields, multi=max(fields.values()) > 1)
        else:
            content = ""
            fields = []
        return {'filename': fname, 'content': content}
    elif request.args.get('action') == 'reportdetails':
        return render_template('admin.alarms.report_details.html', report=AlarmReport.getReports(id=request.args.get('reportid')), reporttype=AlarmReport.getReportTypes(request.args.get('template')), departments=request.args.get('departments'))
    elif request.args.get('action') == 'reportfieldlookup':
        ret = OrderedDict()
        ret['basic'] = []  # add basic information from AFBasic class
        for f in AFBasic().getFields():
            ret['basic'].append({'id': f.name, 'value': f.id})
        alarmfields = {}
        for alarmfield in AlarmField.getAlarmFields():
            if str(alarmfield.dept) not in request.args.get('departments').split(','):
                continue
            if alarmfield.fieldtype not in alarmfields:
                alarmfields[alarmfield.fieldtype] = []
            alarmfields[alarmfield.fieldtype].append(alarmfield)
        l = ""
        # Only offer field types that exist for every requested department.
        for alarmfield in list(chain.from_iterable([f for f in alarmfields.values() if len(f) == len(request.args.get('departments').split(','))])):
            if '%s' % alarmfield.name not in ret:
                ret['%s' % alarmfield.name] = [{'id': '%s-list' % alarmfield.fieldtype, 'value': '%s (%s)' % (alarmfield.name, babel.gettext('admin.alarms.list'))}]
            for f in alarmfield.getFields():
                if f.getLabel().strip() not in ["", '<leer>']:  # dismiss empty values
                    if f.name[0] != ' ':
                        value = '%s' % babel.gettext(f.getLabel())
                        l = value
                    else:  # add name of kategory
                        value = '%s > %s' % (l, babel.gettext(f.getLabel()))
                    ret['%s' % alarmfield.name].append({'id': '%s-%s' % (alarmfield.fieldtype, f.id), 'value': value})
        return ret
| |
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.plugins.openstack.scenarios.cinder import volumes
from tests.unit import test
CINDER_VOLUMES = ("rally.plugins.openstack.scenarios.cinder.volumes"
".CinderVolumes")
class fake_type(object):
    """Stub Cinder volume type exposing only a ``name`` attribute."""

    name = "fake"
@ddt.ddt
class CinderServersTestCase(test.ScenarioTestCase):
def _get_context(self):
context = test.get_test_context()
context.update({
"user": {"tenant_id": "fake",
"credential": mock.MagicMock()},
"tenant": {"id": "fake", "name": "fake",
"volumes": [{"id": "uuid", "size": 1}],
"servers": [1]}})
return context
    def test_create_and_list_volume(self):
        """Scenario creates a volume with given kwargs, then lists volumes."""
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock()
        scenario._list_volumes = mock.MagicMock()
        scenario.create_and_list_volume(1, True, fakearg="f")
        scenario._create_volume.assert_called_once_with(1, fakearg="f")
        scenario._list_volumes.assert_called_once_with(True)
    def test_list_volumes(self):
        """list_volumes forwards the detailed flag to _list_volumes."""
        scenario = volumes.CinderVolumes(self.context)
        scenario._list_volumes = mock.MagicMock()
        scenario.list_volumes(True)
        scenario._list_volumes.assert_called_once_with(True)
def test_create_and_update_volume(self):
volume_update_args = {"dispaly_name": "_updated"}
scenario = volumes.CinderVolumes()
fake_volume = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._update_volume = mock.MagicMock()
scenario.create_and_update_volume(
1, update_volume_kwargs=volume_update_args)
scenario._create_volume.assert_called_once_with(1)
scenario._update_volume.assert_called_once_with(fake_volume,
**volume_update_args)
    def test_create_and_delete_volume(self):
        """Create, sleep between min/max bounds, then delete the volume."""
        fake_volume = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario.sleep_between = mock.MagicMock()
        scenario._delete_volume = mock.MagicMock()
        scenario.create_and_delete_volume(size=1, min_sleep=10, max_sleep=20,
                                          fakearg="f")
        scenario._create_volume.assert_called_once_with(1, fakearg="f")
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._delete_volume.assert_called_once_with(fake_volume)
    def test_create_volume(self):
        """create_volume forwards size and extra kwargs to _create_volume."""
        fake_volume = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario.create_volume(1, fakearg="f")
        scenario._create_volume.assert_called_once_with(1, fakearg="f")
    def test_create_volume_and_modify_metadata(self):
        """Metadata is set on the context volume, then partially deleted."""
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._set_metadata = mock.Mock()
        scenario._delete_metadata = mock.Mock()
        scenario.modify_volume_metadata(sets=5, set_size=4,
                                        deletes=3, delete_size=2)
        scenario._set_metadata.assert_called_once_with("uuid", 5, 4)
        # _delete_metadata receives the keys returned by _set_metadata.
        scenario._delete_metadata.assert_called_once_with(
            "uuid",
            scenario._set_metadata.return_value, 3, 2)
    def test_create_and_extend_volume(self):
        """Create, extend, sleep, then delete the volume."""
        fake_volume = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._extend_volume = mock.MagicMock(return_value=fake_volume)
        scenario.sleep_between = mock.MagicMock()
        scenario._delete_volume = mock.MagicMock()
        scenario.create_and_extend_volume(1, 2, 10, 20, fakearg="f")
        scenario._create_volume.assert_called_once_with(1, fakearg="f")
        self.assertTrue(scenario._extend_volume.called)
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._delete_volume.assert_called_once_with(fake_volume)
    def test_create_from_image_and_delete_volume(self):
        """image= is translated into the imageRef kwarg of _create_volume."""
        fake_volume = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario.create_and_delete_volume(1, image="fake_image")
        scenario._create_volume.assert_called_once_with(1,
                                                        imageRef="fake_image")
        scenario._delete_volume.assert_called_once_with(fake_volume)
    def test_create_volume_from_image(self):
        """create_volume with image= passes imageRef to _create_volume."""
        fake_volume = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario.create_volume(1, image="fake_image")
        scenario._create_volume.assert_called_once_with(1,
                                                        imageRef="fake_image")
    def test_create_volume_from_image_and_list(self):
        """Image-backed volume is created, then volumes are listed."""
        fake_volume = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._list_volumes = mock.MagicMock()
        scenario.create_and_list_volume(1, True, "fake_image")
        scenario._create_volume.assert_called_once_with(1,
                                                        imageRef="fake_image")
        scenario._list_volumes.assert_called_once_with(True)
    def test_create_from_volume_and_delete_volume(self):
        """Source volume id comes from the context ("uuid")."""
        fake_volume = mock.MagicMock()
        vol_size = 1
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario.create_from_volume_and_delete_volume(vol_size)
        scenario._create_volume.assert_called_once_with(1, source_volid="uuid")
        scenario._delete_volume.assert_called_once_with(fake_volume)
    def test_create_and_delete_snapshot(self):
        """Snapshot of the context volume is created, then deleted after sleep."""
        fake_snapshot = mock.MagicMock()
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario.sleep_between = mock.MagicMock()
        scenario._delete_snapshot = mock.MagicMock()
        scenario.create_and_delete_snapshot(False, 10, 20, fakearg="f")
        scenario._create_snapshot.assert_called_once_with("uuid", force=False,
                                                          fakearg="f")
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
    def test_create_and_list_snapshots(self):
        """Snapshot is created from the context volume, then listed."""
        fake_snapshot = mock.MagicMock()
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._list_snapshots = mock.MagicMock()
        scenario.create_and_list_snapshots(False, True, fakearg="f")
        scenario._create_snapshot.assert_called_once_with("uuid", force=False,
                                                          fakearg="f")
        scenario._list_snapshots.assert_called_once_with(True)
    def test_create_and_attach_volume(self):
        """Volume and server are created, attached, detached, and cleaned up."""
        fake_volume = mock.MagicMock()
        fake_server = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        volume_args = {"some_key": "some_val"}
        vm_args = {"some_key": "some_val"}
        scenario.create_and_attach_volume(10, "img", "0",
                                          create_volume_params=volume_args,
                                          create_vm_params=vm_args)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        # Detach receives the attachment object returned by _attach_volume.
        scenario._detach_volume.assert_called_once_with(fake_server,
                                                        fake_volume,
                                                        fake_attach)
        scenario._delete_volume.assert_called_once_with(fake_volume)
        scenario._delete_server.assert_called_once_with(fake_server)
    def test_create_and_upload_volume_to_image(self):
        """Upload honors explicit container/disk formats and do_delete flag.

        First call: explicit formats, do_delete=False -> no deletions.
        Second call: defaults ("bare"/"raw"), do_delete=True -> volume and
        image are deleted.
        """
        fake_volume = mock.Mock()
        fake_image = mock.Mock()
        scenario = volumes.CinderVolumes(self.context)
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._upload_volume_to_image = mock.MagicMock(
            return_value=fake_image)
        scenario._delete_volume = mock.MagicMock()
        scenario._delete_image = mock.MagicMock()
        scenario.create_and_upload_volume_to_image(2, image="img",
                                                   container_format="fake",
                                                   disk_format="disk",
                                                   do_delete=False,
                                                   fakeargs="fakeargs")
        scenario._create_volume.assert_called_once_with(2, imageRef="img",
                                                        fakeargs="fakeargs")
        scenario._upload_volume_to_image.assert_called_once_with(fake_volume,
                                                                 False,
                                                                 "fake",
                                                                 "disk")
        scenario._create_volume.reset_mock()
        scenario._upload_volume_to_image.reset_mock()
        scenario.create_and_upload_volume_to_image(1, image=None,
                                                   do_delete=True,
                                                   fakeargs="fakeargs")
        scenario._create_volume.assert_called_once_with(1, fakeargs="fakeargs")
        scenario._upload_volume_to_image.assert_called_once_with(fake_volume,
                                                                 False,
                                                                 "bare",
                                                                 "raw")
        scenario._delete_volume.assert_called_once_with(fake_volume)
        scenario._delete_image.assert_called_once_with(fake_image)
    def test_create_snapshot_and_attach_volume(self):
        """Volume/snapshot lifecycle with attach/detach against a nova server."""
        fake_volume = mock.MagicMock()
        fake_snapshot = mock.MagicMock()
        fake_server = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        self.clients("nova").servers.get = mock.MagicMock(
            return_value=fake_server)
        scenario.create_snapshot_and_attach_volume()
        self.assertTrue(scenario._create_volume.called)
        scenario._create_snapshot.assert_called_once_with(fake_volume.id,
                                                          False)
        scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario._detach_volume.assert_called_once_with(fake_server,
                                                        fake_volume,
                                                        fake_attach)
        scenario._delete_volume.assert_called_once_with(fake_volume)
    def test_create_snapshot_and_attach_volume_use_volume_type(self):
        """With volume_type=True the volume type is picked from the client list."""
        fake_volume = mock.MagicMock()
        fake_snapshot = mock.MagicMock()
        fake_server = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        fake = fake_type()
        self.clients("cinder").volume_types.list = mock.MagicMock(
            return_value=[fake])
        self.clients("nova").servers.get = mock.MagicMock(
            return_value=fake_server)
        scenario.create_snapshot_and_attach_volume(volume_type=True)
        # Make sure create volume's second arg was the correct volume type.
        # fake or none (randomly selected)
        self.assertTrue(scenario._create_volume.called)
        vol_type = scenario._create_volume.call_args_list[0][1]["volume_type"]
        self.assertTrue(vol_type is fake.name or vol_type is None)
        scenario._create_snapshot.assert_called_once_with(fake_volume.id,
                                                          False)
        scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario._detach_volume.assert_called_once_with(fake_server,
                                                        fake_volume,
                                                        fake_attach)
        scenario._delete_volume.assert_called_once_with(fake_volume)
    def test_create_nested_snapshots_and_attach_volume(self):
        """Every created volume/snapshot/attachment is torn down again."""
        fake_volume = mock.MagicMock()
        fake_snapshot = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(context=self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        scenario.create_nested_snapshots_and_attach_volume()
        volume_count = scenario._create_volume.call_count
        snapshots_count = scenario._create_snapshot.call_count
        attached_count = scenario._attach_volume.call_count
        # Deletes/detaches must balance the creates/attaches exactly.
        self.assertEqual(scenario._delete_volume.call_count, volume_count)
        self.assertEqual(scenario._delete_snapshot.call_count, snapshots_count)
        self.assertEqual(scenario._detach_volume.call_count, attached_count)
    def test_create_nested_snapshots_and_attach_volume_kwargs(self):
        """create_volume_kwargs are forwarded to the first _create_volume call."""
        fake_volume = mock.MagicMock()
        fake_snapshot = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(context=self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        volume_kwargs = {"volume_type": "type1"}
        scenario.create_nested_snapshots_and_attach_volume(
            size={"min": 1, "max": 1},
            create_volume_kwargs=volume_kwargs)
        scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
        self.assertEqual(fake_volume, scenario._create_volume.return_value)
    def test_create_nested_snapshots_and_attach_volume_snapshot_kwargs(self):
        """create_snapshot_kwargs are forwarded to _create_snapshot."""
        fake_volume = mock.MagicMock()
        fake_volume.id = "FAKE_ID"
        fake_snapshot = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(context=self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        volume_kwargs = {"volume_type": "type1"}
        snapshot_kwargs = {"name": "snapshot1", "description": "snaphot one"}
        scenario.create_nested_snapshots_and_attach_volume(
            size={"min": 1, "max": 1},
            create_volume_kwargs=volume_kwargs,
            create_snapshot_kwargs=snapshot_kwargs
        )
        scenario._create_snapshot.assert_called_once_with(fake_volume.id,
                                                          False,
                                                          **snapshot_kwargs)
        self.assertEqual(fake_snapshot, scenario._create_snapshot.return_value)
    def test_create_nested_snapshots_and_attach_volume_deprecate_kwargs(self):
        """Legacy bare **kwargs still reach _create_snapshot (deprecated path)."""
        fake_volume = mock.MagicMock()
        fake_volume.id = "FAKE_ID"
        fake_snapshot = mock.MagicMock()
        fake_attach = mock.MagicMock()
        scenario = volumes.CinderVolumes(context=self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=fake_attach)
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        volume_kwargs = {"volume_type": "type1"}
        snapshot_kwargs = {"name": "snapshot1", "description": "snaphot one"}
        scenario.create_nested_snapshots_and_attach_volume(
            size={"min": 1, "max": 1},
            create_volume_kwargs=volume_kwargs,
            **snapshot_kwargs
        )
        scenario._create_snapshot.assert_called_once_with(fake_volume.id,
                                                          False,
                                                          **snapshot_kwargs)
        self.assertEqual(fake_snapshot, scenario._create_snapshot.return_value)
    def test_create_nested_snapshots_calls_order(self):
        """Nested resources are deleted in reverse order of creation."""
        fake_volume1 = mock.MagicMock()
        fake_volume2 = mock.MagicMock()
        fake_snapshot1 = mock.MagicMock()
        fake_snapshot2 = mock.MagicMock()
        scenario = volumes.CinderVolumes(self._get_context())
        scenario._attach_volume = mock.MagicMock(return_value=mock.MagicMock())
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(
            side_effect=[fake_volume1, fake_volume2])
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(
            side_effect=[fake_snapshot1, fake_snapshot2])
        scenario._delete_snapshot = mock.MagicMock()
        scenario.create_nested_snapshots_and_attach_volume(
            nested_level=2)
        # Second (innermost) resources must be deleted before the first.
        vol_delete_calls = [mock.call(fake_volume2), mock.call(fake_volume1)]
        snap_delete_calls = [mock.call(fake_snapshot2),
                             mock.call(fake_snapshot1)]
        scenario._delete_volume.assert_has_calls(vol_delete_calls)
        scenario._delete_snapshot.assert_has_calls(snap_delete_calls)
    @mock.patch("rally.plugins.openstack.scenarios.cinder.volumes.random")
    def test_create_nested_snapshots_check_resources_size(self, mock_random):
        """A single random size is drawn and reused for all nested volumes."""
        mock_random.randint.return_value = 3
        fake_volume = mock.MagicMock()
        fake_snapshot = mock.MagicMock()
        fake_server = mock.MagicMock()
        scenario = volumes.CinderVolumes(self._get_context())
        scenario.get_random_server = mock.MagicMock(return_value=fake_server)
        scenario._attach_volume = mock.MagicMock(return_value=mock.MagicMock())
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario._create_volume = mock.MagicMock(return_value=fake_volume)
        scenario._delete_volume = mock.MagicMock()
        scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
        scenario._delete_snapshot = mock.MagicMock()
        scenario.create_nested_snapshots_and_attach_volume(nested_level=2)
        # NOTE: One call for random size
        random_call_count = mock_random.randint.call_count
        self.assertEqual(1, random_call_count)
        calls = scenario._create_volume.mock_calls
        expected_calls = [mock.call(3)]
        expected_calls.extend(
            [mock.call(3, snapshot_id=fake_snapshot.id)])
        self.assertEqual(expected_calls, calls)
    def test_create_volume_backup(self):
        """With do_delete=True both the volume and its backup are deleted."""
        fake_volume = mock.MagicMock()
        fake_backup = mock.MagicMock()
        scenario = self._get_scenario(fake_volume, fake_backup)
        volume_kwargs = {"some_var": "zaq"}
        scenario.create_volume_backup(
            1, do_delete=True, create_volume_kwargs=volume_kwargs)
        scenario._create_volume.assert_called_once_with(1, **volume_kwargs)
        scenario._create_backup.assert_called_once_with(fake_volume.id)
        scenario._delete_volume.assert_called_once_with(fake_volume)
        scenario._delete_backup.assert_called_once_with(fake_backup)
def test_create_volume_backup_no_delete(self):
    """With do_delete=False no cleanup calls may be issued."""
    backup_stub = mock.MagicMock()
    volume_stub = mock.MagicMock()
    scenario = self._get_scenario(volume_stub, backup_stub)

    scenario.create_volume_backup(
        1, do_delete=False, create_volume_kwargs={"some_var": "zaq"})

    scenario._create_volume.assert_called_once_with(1, some_var="zaq")
    scenario._create_backup.assert_called_once_with(volume_stub.id)
    self.assertFalse(scenario._delete_volume.called)
    self.assertFalse(scenario._delete_backup.called)
def _get_scenario(self, fake_volume, fake_backup, fake_restore=None):
    """Return a CinderVolumes scenario with all backup helpers mocked.

    :param fake_volume: value returned by the mocked _create_volume
    :param fake_backup: value returned by the mocked _create_backup
    :param fake_restore: value returned by the mocked _restore_backup
    """
    scenario = volumes.CinderVolumes(self.context)
    scenario._create_volume = mock.MagicMock(return_value=fake_volume)
    scenario._create_backup = mock.MagicMock(return_value=fake_backup)
    scenario._restore_backup = mock.MagicMock(return_value=fake_restore)
    # The remaining helpers need no canned return value.
    for helper in ("_list_backups", "_delete_volume", "_delete_backup"):
        setattr(scenario, helper, mock.MagicMock())
    return scenario
def test_create_and_restore_volume_backup(self):
    """Backup-restore flow with do_delete=True cleans everything up."""
    volume_stub = mock.MagicMock()
    backup_stub = mock.MagicMock()
    restore_stub = mock.MagicMock()
    scenario = self._get_scenario(volume_stub, backup_stub, restore_stub)

    scenario.create_and_restore_volume_backup(
        1, do_delete=True, create_volume_kwargs={"some_var": "zaq"})

    scenario._create_volume.assert_called_once_with(1, some_var="zaq")
    scenario._create_backup.assert_called_once_with(volume_stub.id)
    scenario._restore_backup.assert_called_once_with(backup_stub.id)
    scenario._delete_volume.assert_called_once_with(volume_stub)
    scenario._delete_backup.assert_called_once_with(backup_stub)
def test_create_and_restore_volume_backup_no_delete(self):
    """Backup-restore flow with do_delete=False must skip all cleanup."""
    volume_stub = mock.MagicMock()
    backup_stub = mock.MagicMock()
    restore_stub = mock.MagicMock()
    scenario = self._get_scenario(volume_stub, backup_stub, restore_stub)

    scenario.create_and_restore_volume_backup(
        1, do_delete=False, create_volume_kwargs={"some_var": "zaq"})

    scenario._create_volume.assert_called_once_with(1, some_var="zaq")
    scenario._create_backup.assert_called_once_with(volume_stub.id)
    scenario._restore_backup.assert_called_once_with(backup_stub.id)
    self.assertFalse(scenario._delete_volume.called)
    self.assertFalse(scenario._delete_backup.called)
def test_create_and_list_volume_backups(self):
    """Backup-list flow with do_delete=True removes volume and backup."""
    volume_stub = mock.MagicMock()
    backup_stub = mock.MagicMock()
    scenario = self._get_scenario(volume_stub, backup_stub)

    scenario.create_and_list_volume_backups(
        1, detailed=True, do_delete=True,
        create_volume_kwargs={"some_var": "zaq"})

    scenario._create_volume.assert_called_once_with(1, some_var="zaq")
    scenario._create_backup.assert_called_once_with(volume_stub.id)
    scenario._list_backups.assert_called_once_with(True)
    scenario._delete_volume.assert_called_once_with(volume_stub)
    scenario._delete_backup.assert_called_once_with(backup_stub)
def test_create_and_list_volume_backups_no_delete(self):
    """Backup-list flow with do_delete=False must skip all cleanup."""
    volume_stub = mock.MagicMock()
    backup_stub = mock.MagicMock()
    scenario = self._get_scenario(volume_stub, backup_stub)

    scenario.create_and_list_volume_backups(
        1, detailed=True, do_delete=False,
        create_volume_kwargs={"some_var": "zaq"})

    scenario._create_volume.assert_called_once_with(1, some_var="zaq")
    scenario._create_backup.assert_called_once_with(volume_stub.id)
    scenario._list_backups.assert_called_once_with(True)
    self.assertFalse(scenario._delete_volume.called)
    self.assertFalse(scenario._delete_backup.called)
@ddt.data({},
          {"nested_level": 2},
          {"image": "img"})
@ddt.unpack
def test_create_volume_and_clone(self, nested_level=1, image=None):
    """Cloning issues one extra _create_volume call per nesting level."""
    volume_stubs = [mock.Mock(size=1) for _ in range(nested_level + 1)]
    scenario = volumes.CinderVolumes(self.context)
    scenario._create_volume = mock.MagicMock(side_effect=volume_stubs)

    scenario.create_volume_and_clone(1, image=image,
                                     nested_level=nested_level,
                                     fakearg="fake")

    if image:
        expected = [mock.call(1, imageRef=image, fakearg="fake")]
    else:
        expected = [mock.call(1, fakearg="fake")]
    # Each clone reuses its parent volume's size and id.
    expected.extend(
        mock.call(parent.size, source_volid=parent.id,
                  atomic_action=False, fakearg="fake")
        for parent in volume_stubs[:nested_level])
    self._test_atomic_action_timer(scenario.atomic_actions(),
                                   "cinder.clone_volume")
    scenario._create_volume.assert_has_calls(expected)
def test_create_volume_from_snapshot(self):
    """Volume-from-snapshot flow, first with cleanup and then without."""
    snapshot_stub = mock.MagicMock(id=1)
    volume_stub = mock.MagicMock()
    scenario = volumes.CinderVolumes(self._get_context())
    scenario._create_snapshot = mock.MagicMock(return_value=snapshot_stub)
    scenario._create_volume = mock.MagicMock(return_value=volume_stub)
    scenario._delete_snapshot = mock.MagicMock()
    scenario._delete_volume = mock.MagicMock()

    # First run: default do_delete=True must clean everything up.
    scenario.create_volume_from_snapshot(fakearg="f")
    scenario._create_snapshot.assert_called_once_with("uuid")
    scenario._create_volume.assert_called_once_with(
        1, snapshot_id=snapshot_stub.id, fakearg="f")
    scenario._delete_snapshot.assert_called_once_with(snapshot_stub)
    scenario._delete_volume.assert_called_once_with(volume_stub)

    for mocked in (scenario._create_snapshot, scenario._create_volume,
                   scenario._delete_snapshot, scenario._delete_volume):
        mocked.reset_mock()

    # Second run: do_delete=False plus extra snapshot kwargs.
    scenario.create_volume_from_snapshot(
        do_delete=False,
        create_snapshot_kwargs={"force": False},
        fakearg="f"
    )
    scenario._create_snapshot.assert_called_once_with("uuid", force=False)
    scenario._create_volume.assert_called_once_with(
        1, snapshot_id=snapshot_stub.id, fakearg="f")
    self.assertFalse(scenario._delete_snapshot.called)
    self.assertFalse(scenario._delete_volume.called)
| |
import math
import numpy as np
import ROOT
import parspec
class TemplateSource(object):
    """
    Accumulate information for a source process to the spectrum. Note that by
    convention, low and high refer to absolute values, whereas down and up
    refer to differences relative to the central value.
    """

    def __init__(self, name, data):
        """
        Each source must have a unique name, and a distribution of data it
        contributes to the spectrum.

        :param name: str
            unique name for this source
        :param data: [float]
            distribution contributing to spectrum
        """
        self._name = name
        self._data = np.array(data)
        self._lumi = False          # True once use_lumi() is called
        self._stat_errs = list()    # per-bin statistical errors (use_stats)
        self._xsec = tuple()        # (log nominal, log low, log high)
        self._systematics = list()  # (name, shift, polarity, stats) entries
        self._templates = list()    # (expr, shift, pars, grads) entries

    def set_xsec(self, nominal, low=None, high=None):
        """
        Assign a cross section parameter for this source. If the low and high
        uncertainties aren't provided, the parameter won't be regularized.

        :param nominal: float
            nominal cross section value
        :param low: float
            cross section value below nominal which causes 1-sigma penalty
        :param high: float
            cross section value above nominal which causes 1-sigma penalty
        """
        # Values are stored in log space (the spectrum exponentiates them).
        # Guard the optional bounds: math.log(None) raises TypeError, so
        # omitting low/high used to crash instead of skipping regularization.
        # NOTE(review): when unregularized the None bounds are forwarded to
        # parspec's set_prior via prepare() -- confirm parspec accepts that.
        self._xsec = (
            math.log(nominal),
            math.log(low) if low is not None else None,
            math.log(high) if high is not None else None)

    def use_lumi(self):
        """
        Allow this source to change with luminosity.
        """
        self._lumi = True

    def use_stats(self, errs):
        """
        The data values in this source are subject to statistical uncertainties
        (i.e. the source is simulated with MC).

        :param errs: [float]
            statistical error on each bin
        """
        self._stat_errs = list(errs)

    def add_syst(self, name, data, stats=None, polarity=None):
        """
        Add a systematic variation to this source. This adds a parameter to
        the spectrum (or re-uses the parameter if the systematic name has been
        introduced in another source). This parameter is regularized such that
        the loglikelihood is halved when it reaches +/- 1.

        Note: data is given as differences from nominal.

        :param name: str
            name of the systematic parameter (can be shared with other sources)
        :param data: [float]
            difference of the nominal spectrum when the systematic parameter
            takes on a value of +/- 1 sigma (depends on polarity)
        :param stats: [float]
            statistical uncertainty on the *difference* of each bin under the
            influence of the systematic shape
        :param polarity: {'up', 'down'}
            this shape applies only if the systematic parameter is positive
            for 'up', or negative for 'down'
        :raises ValueError: if polarity is neither 'up' nor 'down'
        """
        if polarity is not None and polarity not in ['up', 'down']:
            raise ValueError("Unrecognized polarity %s" % polarity)
        data = np.array(data)
        if polarity == 'down':
            # Store the shift relative to a positive parameter value: a
            # 'down' shape is negated once here instead of at evaluation.
            data *= -1
        self._systematics.append((name, data, polarity, stats))

    def add_template(self, expr, data, pars=None, grads=None):
        """
        Add a template variation to this source. This adds parameters to the
        spectrum (or re-uses them if they are present in other sources). The
        parameters are not regularized, they are allowed to float.

        Note: data is given as differences from nominal.

        :param expr: str
            C++ expression which yields the normalization for the template
        :param data: [float]
            difference of the nominal spectrum when the template expression
            evaluates to 1
        :param pars: [str]
            names of parameters used in the expression
        :param grads: [str]
            C++ expression which yields the dexpr/dpar for each parameter
        """
        data = np.array(data)
        self._templates.append((expr, data, pars, grads))
class TemplateMeasurement(object):
    """
    Accumulate information to build a templated spectrum.

    Typical usage: create sources with new_source(), configure them, set the
    luminosity prior if any source uses it, then call build().
    """

    def __init__(self, name='Templates'):
        """
        Build a templated spectrum with the given name. The name should be
        unique in a session and work directory.

        :param name: str
            name of parametrized spectrum to generate
        """
        self._name = name
        self._sources = dict()  # mapping source names to TemplateSource
        self._lumi = tuple()  # nominal, low, high prior for luminosity
        self.spec = None  # ParSpec object

    def new_source(self, name, data):
        """
        Add a new source to the measurement, and return its object to configure.

        :param name: str
            TemplateSource name
        :param data: [float]
            TemplateSource data
        :return: the newly created TemplateSource
        :raises RuntimeError: if a source with this name already exists
        """
        if name in self._sources:
            raise RuntimeError("Source name already used: %s" % name)
        source = TemplateSource(name, data)
        self._sources[name] = source
        return source

    def set_lumi(self, nominal, err):
        """
        Set the prior information for the luminosity parameter.

        :param nominal: float
            nominal luminosity value
        :param err: float
            +/- uncertainty on luminosity
        """
        # Stored in log space, matching the TemplateSource cross sections.
        # NOTE(review): these bounds are asymmetric -- log(nominal/(nominal+err))
        # is a small offset near 0 while log(nominal*(nominal+err)) is close
        # to 2*log(nominal). Confirm the intent isn't log(nominal-err) and
        # log(nominal+err).
        self._lumi = (
            math.log(nominal),
            math.log(nominal/(nominal+err)),
            math.log(nominal*(nominal+err)))

    def prepare(self):
        """
        Prepare and return the builder for the spectrum.

        Translates each TemplateSource into parspec.Source objects (nominal
        shape plus one shape-source per systematic and per template), wires
        the luminosity/cross-section scale factors, and sets the priors.

        :return: parspec.SpecBuilder ready to build()
        :raises RuntimeError: if a source uses luminosity but set_lumi()
            was never called
        """
        builder = parspec.SpecBuilder(self._name)
        uses_lumi = False  # determine if lumi is needed from sources
        for temp_src in self._sources.values():
            par_src = parspec.Source(temp_src._data)
            # Remember if any source is subject to lumi uncertainty
            uses_lumi |= temp_src._lumi
            xsec_name = 'xsec_%s' % temp_src._name
            if temp_src._lumi and temp_src._xsec:
                # Factor is product of lumi and cross section, need to give
                # parameter names and derivatives
                par_src.set_expression(
                    'std::exp(lumi+%s)' % xsec_name,
                    ['lumi', xsec_name],
                    ['std::exp(lumi+%s)'%xsec_name]*2)
            elif temp_src._lumi:
                # Factor is just luminosity, derivative is inferred
                par_src.set_expression(
                    'std::exp(lumi)',
                    ['lumi'],
                    ['std::exp(lumi)'])
            elif temp_src._xsec:
                # Factor is just xsec, derivative is inferred
                par_src.set_expression(
                    'std::exp(%s)'%xsec_name,
                    [xsec_name],
                    ['std::exp(%s)'%xsec_name])
            if temp_src._stat_errs:
                par_src.use_stats(temp_src._stat_errs)
            builder.add_source(par_src)
            # Add sources for systematic shifts; each varies the shape of
            # the nominal source (shapeof=par_src)
            for syst in temp_src._systematics:
                syst_name = 'syst_%s' % syst[0]
                src = parspec.Source(syst[1], shapeof=par_src)
                if syst[3] is not None:
                    src.use_stats(syst[3])
                src.set_expression(syst_name, polarity=syst[2])
                builder.add_source(src)
            # Add sources for templates (free parameters, no prior)
            for temp in temp_src._templates:
                src = parspec.Source(temp[1], shapeof=par_src)
                src.set_expression(temp[0], pars=temp[2], grads=temp[3])
                builder.add_source(src)
            # Add regularization for x-section if provided
            if temp_src._xsec:
                builder.set_prior(
                    xsec_name,
                    *temp_src._xsec,
                    #constraint='lognormal')
                    constraint='normal')
            # Add regularization for systematics (overwrites a previous
            # prior if the same name is in another source, but doesn't
            # matter: it is always the same unit-width prior)
            for syst in temp_src._systematics:
                syst_name = 'syst_%s' % syst[0]
                builder.set_prior(
                    syst_name,
                    0, -1, 1,
                    constraint='normal')
        if uses_lumi:
            if not self._lumi:
                raise RuntimeError("No luminosity uncertainty set")
            builder.set_prior(
                'lumi',
                *self._lumi,
                #constraint='lognormal')
                constraint='normal')
        return builder

    def build(self, builder=None):
        """
        Build the ParSpec object for this templated measurement.

        :param builder: parspec.SpecBuilder or None
            pre-configured builder; when None, prepare() is called first.
            The result is stored on self.spec.
        """
        if builder is None:
            builder = self.prepare()
        self.spec = builder.build()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.