gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of models which operate on variable-length sequences.
"""
import math
from official import models, video_level_models
import tensorflow as tf
from official import model_utils as utils
import tensorflow.contrib.slim as slim
from tensorflow import flags
FLAGS = flags.FLAGS

# Command-line hyperparameters shared by the frame-level models below.
# DBoF sampling: number of frames drawn per video each step.
flags.DEFINE_integer("iterations", 30,
                     "Number of frames per batch for DBoF.")
flags.DEFINE_bool("dbof_add_batch_norm", True,
                  "Adds batch normalization to the DBoF model.")
flags.DEFINE_bool(
    "sample_random_frames", True,
    "If true samples random frames (for frame level models). If false, a random"
    "sequence of frames is sampled instead.")
flags.DEFINE_integer("dbof_cluster_size", 8192,
                     "Number of units in the DBoF cluster layer.")
flags.DEFINE_integer("dbof_hidden_size", 1024,
                     "Number of units in the DBoF hidden layer.")
flags.DEFINE_string("dbof_pooling_method", "max",
                    "The pooling method used in the DBoF cluster layer. "
                    "Choices are 'average' and 'max'.")
# Name of the class in video_level_models used to classify pooled features.
flags.DEFINE_string("video_level_classifier_model", "MoeModel",
                    "Some Frame-Level models can be decomposed into a "
                    "generalized pooling operation followed by a "
                    "classifier layer")
# LSTM model hyperparameters.
flags.DEFINE_integer("lstm_cells", 1024, "Number of LSTM cells.")
flags.DEFINE_integer("lstm_layers", 2, "Number of LSTM layers.")
class FrameLevelLogisticModel(models.BaseModel):
  """Logistic classifier applied to mean-pooled frame-level features."""

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Averages the frame features of each video and classifies the result.

    This class is intended to be an example for implementors of frame level
    models. If you want to train a model over averaged features it is more
    efficient to average them beforehand rather than on the fly.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    frame_counts = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    num_features = model_input.get_shape().as_list()[2]
    # Each video is divided by its true (pre-padding) frame count so that
    # padded frames do not dilute the average.
    divisors = tf.reshape(
        tf.tile(frame_counts, [1, num_features]), [-1, num_features])
    mean_features = tf.reduce_sum(model_input, axis=[1]) / divisors
    predictions = slim.fully_connected(
        mean_features,
        vocab_size,
        activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-8))
    return {"predictions": predictions}
class DbofModel(models.BaseModel):
  """Creates a Deep Bag of Frames model.

  The model projects the features for each frame into a higher dimensional
  'clustering' space, pools across frames in that space, and then
  uses a configurable video-level model to classify the now aggregated
  features. The model will randomly sample either frames or sequences of
  frames during training to speed up convergence.
  """

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_frames,
                   iterations=None,
                   add_batch_norm=None,
                   sample_random_frames=None,
                   cluster_size=None,
                   hidden_size=None,
                   is_training=True,
                   **unused_params):
    """Builds the DBoF graph.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).
      iterations: Frames sampled per video; defaults to FLAGS.iterations.
      add_batch_norm: Whether to batch-normalize activations; defaults to
        FLAGS.dbof_add_batch_norm.
      sample_random_frames: If True sample independent random frames,
        otherwise a random contiguous sequence; defaults to
        FLAGS.sample_random_frames.
      cluster_size: Units in the cluster layer; defaults to
        FLAGS.dbof_cluster_size.
      hidden_size: Units in the hidden layer; defaults to
        FLAGS.dbof_hidden_size.
      is_training: Whether batch-norm layers run in training mode.

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    # Resolve overrides against FLAGS with `is None` (not `or`) so that an
    # explicit False/0 argument is honored instead of being silently
    # replaced by the flag default.
    iterations = FLAGS.iterations if iterations is None else iterations
    add_batch_norm = (FLAGS.dbof_add_batch_norm
                      if add_batch_norm is None else add_batch_norm)
    random_frames = (FLAGS.sample_random_frames
                     if sample_random_frames is None else sample_random_frames)
    cluster_size = (FLAGS.dbof_cluster_size
                    if cluster_size is None else cluster_size)
    hidden1_size = (FLAGS.dbof_hidden_size
                    if hidden_size is None else hidden_size)

    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    if random_frames:
      model_input = utils.SampleRandomFrames(model_input, num_frames,
                                             iterations)
    else:
      model_input = utils.SampleRandomSequence(model_input, num_frames,
                                               iterations)
    max_frames = model_input.get_shape().as_list()[1]
    feature_size = model_input.get_shape().as_list()[2]
    # Collapse batch and frame dimensions so every frame is a row.
    reshaped_input = tf.reshape(model_input, [-1, feature_size])
    tf.summary.histogram("input_hist", reshaped_input)
    if add_batch_norm:
      reshaped_input = slim.batch_norm(
          reshaped_input,
          center=True,
          scale=True,
          is_training=is_training,
          scope="input_bn")

    cluster_weights = tf.get_variable(
        "cluster_weights",
        [feature_size, cluster_size],
        initializer=tf.random_normal_initializer(
            stddev=1 / math.sqrt(feature_size)))
    tf.summary.histogram("cluster_weights", cluster_weights)
    activation = tf.matmul(reshaped_input, cluster_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="cluster_bn")
    else:
      # Bug fix: the original passed tf.random_normal (a tensor op, and with
      # no shape argument) as the initializer, which raised whenever batch
      # norm was disabled. tf.random_normal_initializer is the correct API,
      # matching the weight initializers above.
      cluster_biases = tf.get_variable(
          "cluster_biases",
          [cluster_size],
          initializer=tf.random_normal_initializer(
              stddev=1 / math.sqrt(feature_size)))
      tf.summary.histogram("cluster_biases", cluster_biases)
      activation += cluster_biases
    activation = tf.nn.relu6(activation)
    tf.summary.histogram("cluster_output", activation)

    # Restore the frame dimension and pool across frames.
    activation = tf.reshape(activation, [-1, max_frames, cluster_size])
    activation = utils.FramePooling(activation, FLAGS.dbof_pooling_method)

    hidden1_weights = tf.get_variable(
        "hidden1_weights",
        [cluster_size, hidden1_size],
        initializer=tf.random_normal_initializer(
            stddev=1 / math.sqrt(cluster_size)))
    tf.summary.histogram("hidden1_weights", hidden1_weights)
    activation = tf.matmul(activation, hidden1_weights)
    if add_batch_norm:
      activation = slim.batch_norm(
          activation,
          center=True,
          scale=True,
          is_training=is_training,
          scope="hidden1_bn")
    else:
      hidden1_biases = tf.get_variable(
          "hidden1_biases",
          [hidden1_size],
          initializer=tf.random_normal_initializer(stddev=0.01))
      tf.summary.histogram("hidden1_biases", hidden1_biases)
      activation += hidden1_biases
    activation = tf.nn.relu6(activation)
    tf.summary.histogram("hidden1_output", activation)

    # Hand the pooled representation to the configured video-level model.
    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=activation,
        vocab_size=vocab_size,
        **unused_params)
class LstmModel(models.BaseModel):
  """Stacked-LSTM encoder followed by a configurable video-level model."""

  def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a stack of LSTMs to represent the video.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
        input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
        frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    lstm_size = FLAGS.lstm_cells
    number_of_layers = FLAGS.lstm_layers

    stacked_lstm = tf.contrib.rnn.MultiRNNCell([
        tf.contrib.rnn.BasicLSTMCell(
            lstm_size, forget_bias=1.0, state_is_tuple=False)
        for _ in range(number_of_layers)
    ])
    # Cleanup: removed a dead `loss = 0.0` local that was never used.
    with tf.variable_scope("RNN"):
      # Only the final state feeds the classifier; the per-step outputs
      # are intentionally discarded.
      _, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                   sequence_length=num_frames,
                                   dtype=tf.float32)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=state,
        vocab_size=vocab_size,
        **unused_params)
| |
# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.utils
import urllib
from boto.connection import AWSQueryConnection
from boto.rds.dbinstance import DBInstance
from boto.rds.dbsecuritygroup import DBSecurityGroup
from boto.rds.parametergroup import ParameterGroup
from boto.rds.dbsnapshot import DBSnapshot
from boto.rds.event import Event
from boto.rds.regioninfo import RDSRegionInfo
def regions():
    """
    Get all available regions for the RDS service.

    :rtype: list
    :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
    """
    # Every RDS endpoint follows the same rds.<region>.amazonaws.com
    # pattern, so the list is generated from the region names.
    region_names = ['us-east-1',
                    'eu-west-1',
                    'us-west-1',
                    'us-west-2',
                    'sa-east-1',
                    'ap-northeast-1',
                    'ap-southeast-1']
    return [RDSRegionInfo(name=name,
                          endpoint='rds.%s.amazonaws.com' % name)
            for name in region_names]
def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a connection to the RDS service in
    that region.

    Any additional parameters after the region_name are passed on to
    the connect method of the region object.

    :type region_name: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.rds.RDSConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    # Doc fix: the original docstring advertised an EC2Connection and had a
    # malformed ':type:' field; this module produces RDS connections.
    for region in regions():
        if region.name == region_name:
            return region.connect(**kw_params)
    return None
#boto.set_stream_logger('rds')
class RDSConnection(AWSQueryConnection):
    # Region and endpoint used when the caller supplies no region object.
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'rds.us-east-1.amazonaws.com'
    # Version of the RDS query API this client speaks.
    APIVersion = '2011-04-01'
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/'):
        # Fall back to the class-level default region (us-east-1) when the
        # caller does not supply one.
        if not region:
            region = RDSRegionInfo(self, self.DefaultRegionName,
                                   self.DefaultRegionEndpoint)
        self.region = region
        # NOTE: arguments are passed positionally, so this order must match
        # AWSQueryConnection.__init__ exactly.
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path)
    def _required_auth_capability(self):
        # Capability tag used by boto to select the request-signing scheme
        # for RDS.
        return ['rds']
# DB Instance methods
def get_all_dbinstances(self, instance_id=None, max_records=None,
marker=None):
"""
Retrieve all the DBInstances in your account.
:type instance_id: str
:param instance_id: DB Instance identifier. If supplied, only
information this instance will be returned.
Otherwise, info about all DB Instances will
be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbinstance.DBInstance`
"""
params = {}
if instance_id:
params['DBInstanceIdentifier'] = instance_id
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBInstances', params,
[('DBInstance', DBInstance)])
def create_dbinstance(self, id, allocated_storage, instance_class,
master_username, master_password, port=3306,
engine='MySQL5.1', db_name=None, param_group=None,
security_groups=None, availability_zone=None,
preferred_maintenance_window=None,
backup_retention_period=None,
preferred_backup_window=None,
multi_az=False,
engine_version=None,
auto_minor_version_upgrade=True):
"""
Create a new DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
Must contain 1-63 alphanumeric characters.
First character must be a letter.
May not end with a hyphen or contain two consecutive hyphens
:type allocated_storage: int
:param allocated_storage: Initially allocated storage size, in GBs.
Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of
the DBInstance. Valid values are:
* db.m1.small
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type engine: str
:param engine: Name of database engine. Must be MySQL5.1 for now.
:type master_username: str
:param master_username: Name of master user for the DBInstance.
Must be 1-15 alphanumeric characters, first
must be a letter.
:type master_password: str
:param master_password: Password of master user for the DBInstance.
Must be 4-16 alphanumeric characters.
:type port: int
:param port: Port number on which database accepts connections.
Valid values [1115-65535]. Defaults to 3306.
:type db_name: str
:param db_name: Name of a database to create when the DBInstance
is created. Default is to create no databases.
:type param_group: str
:param param_group: Name of DBParameterGroup to associate with
this DBInstance. If no groups are specified
no parameter groups will be used.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to authorize on
this DBInstance.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in UTC)
during which maintenance can occur.
Default is Sun:05:00-Sun:09:00
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
enabled). Must be in h24:mi-hh24:mi
format (UTC).
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
:type engine_version: str
:param engine_version: Version number of the database engine to use.
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
upgrades will be applied
automatically to the Read Replica
during the maintenance window.
Default is True.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
params = {'DBInstanceIdentifier': id,
'AllocatedStorage': allocated_storage,
'DBInstanceClass': instance_class,
'Engine': engine,
'MasterUsername': master_username,
'MasterUserPassword': master_password,
'Port': port,
'MultiAZ': str(multi_az).lower(),
'AutoMinorVersionUpgrade':
str(auto_minor_version_upgrade).lower()}
if db_name:
params['DBName'] = db_name
if param_group:
params['DBParameterGroupName'] = param_group
if security_groups:
l = []
for group in security_groups:
if isinstance(group, DBSecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
if availability_zone:
params['AvailabilityZone'] = availability_zone
if preferred_maintenance_window:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
params['PreferredBackupWindow'] = preferred_backup_window
if engine_version:
params['EngineVersion'] = engine_version
return self.get_object('CreateDBInstance', params, DBInstance)
def create_dbinstance_read_replica(self, id, source_id,
instance_class=None,
port=3306,
availability_zone=None,
auto_minor_version_upgrade=None):
"""
Create a new DBInstance Read Replica.
:type id: str
:param id: Unique identifier for the new instance.
Must contain 1-63 alphanumeric characters.
First character must be a letter.
May not end with a hyphen or contain two consecutive hyphens
:type source_id: str
:param source_id: Unique identifier for the DB Instance for which this
DB Instance will act as a Read Replica.
:type instance_class: str
:param instance_class: The compute and memory capacity of the
DBInstance. Default is to inherit from
the source DB Instance.
Valid values are:
* db.m1.small
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type port: int
:param port: Port number on which database accepts connections.
Default is to inherit from source DB Instance.
Valid values [1115-65535]. Defaults to 3306.
:type availability_zone: str
:param availability_zone: Name of the availability zone to place
DBInstance into.
:type auto_minor_version_upgrade: bool
:param auto_minor_version_upgrade: Indicates that minor engine
upgrades will be applied
automatically to the Read Replica
during the maintenance window.
Default is to inherit this value
from the source DB Instance.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The new db instance.
"""
params = {'DBInstanceIdentifier' : id,
'SourceDBInstanceIdentifier' : source_id}
if instance_class:
params['DBInstanceClass'] = instance_class
if port:
params['Port'] = port
if availability_zone:
params['AvailabilityZone'] = availability_zone
if auto_minor_version_upgrade is not None:
if auto_minor_version_upgrade is True:
params['AutoMinorVersionUpgrade'] = 'true'
else:
params['AutoMinorVersionUpgrade'] = 'false'
return self.get_object('CreateDBInstanceReadReplica',
params, DBInstance)
def modify_dbinstance(self, id, param_group=None, security_groups=None,
preferred_maintenance_window=None,
master_password=None, allocated_storage=None,
instance_class=None,
backup_retention_period=None,
preferred_backup_window=None,
multi_az=False,
apply_immediately=False):
"""
Modify an existing DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
:type security_groups: list of str or list of DBSecurityGroup objects
:param security_groups: List of names of DBSecurityGroup to authorize on
this DBInstance.
:type preferred_maintenance_window: str
:param preferred_maintenance_window: The weekly time range (in UTC)
during which maintenance can
occur.
Default is Sun:05:00-Sun:09:00
:type master_password: str
:param master_password: Password of master user for the DBInstance.
Must be 4-15 alphanumeric characters.
:type allocated_storage: int
:param allocated_storage: The new allocated storage size, in GBs.
Valid values are [5-1024]
:type instance_class: str
:param instance_class: The compute and memory capacity of the
DBInstance. Changes will be applied at
next maintenance window unless
apply_immediately is True.
Valid values are:
* db.m1.small
* db.m1.large
* db.m1.xlarge
* db.m2.xlarge
* db.m2.2xlarge
* db.m2.4xlarge
:type apply_immediately: bool
:param apply_immediately: If true, the modifications will be applied
as soon as possible rather than waiting for
the next preferred maintenance window.
:type backup_retention_period: int
:param backup_retention_period: The number of days for which automated
backups are retained. Setting this to
zero disables automated backups.
:type preferred_backup_window: str
:param preferred_backup_window: The daily time range during which
automated backups are created (if
enabled). Must be in h24:mi-hh24:mi
format (UTC).
:type multi_az: bool
:param multi_az: If True, specifies the DB Instance will be
deployed in multiple availability zones.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The modified db instance.
"""
params = {'DBInstanceIdentifier' : id}
if param_group:
params['DBParameterGroupName'] = param_group
if security_groups:
l = []
for group in security_groups:
if isinstance(group, DBSecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'DBSecurityGroups.member')
if preferred_maintenance_window:
params['PreferredMaintenanceWindow'] = preferred_maintenance_window
if master_password:
params['MasterUserPassword'] = master_password
if allocated_storage:
params['AllocatedStorage'] = allocated_storage
if instance_class:
params['DBInstanceClass'] = instance_class
if backup_retention_period is not None:
params['BackupRetentionPeriod'] = backup_retention_period
if preferred_backup_window:
params['PreferredBackupWindow'] = preferred_backup_window
if multi_az:
params['MultiAZ'] = 'true'
if apply_immediately:
params['ApplyImmediately'] = 'true'
return self.get_object('ModifyDBInstance', params, DBInstance)
def delete_dbinstance(self, id, skip_final_snapshot=False,
final_snapshot_id=''):
"""
Delete an existing DBInstance.
:type id: str
:param id: Unique identifier for the new instance.
:type skip_final_snapshot: bool
:param skip_final_snapshot: This parameter determines whether a final
db snapshot is created before the instance
is deleted. If True, no snapshot
is created. If False, a snapshot
is created before deleting the instance.
:type final_snapshot_id: str
:param final_snapshot_id: If a final snapshot is requested, this
is the identifier used for that snapshot.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The deleted db instance.
"""
params = {'DBInstanceIdentifier' : id}
if skip_final_snapshot:
params['SkipFinalSnapshot'] = 'true'
else:
params['SkipFinalSnapshot'] = 'false'
params['FinalDBSnapshotIdentifier'] = final_snapshot_id
return self.get_object('DeleteDBInstance', params, DBInstance)
def reboot_dbinstance(self, id):
"""
Reboot DBInstance.
:type id: str
:param id: Unique identifier of the instance.
:rtype: :class:`boto.rds.dbinstance.DBInstance`
:return: The rebooting db instance.
"""
params = {'DBInstanceIdentifier' : id}
return self.get_object('RebootDBInstance', params, DBInstance)
# DBParameterGroup methods
def get_all_dbparameter_groups(self, groupname=None, max_records=None,
marker=None):
"""
Get all parameter groups associated with your account in a region.
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
If not provided, all DBParameter groups will be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.ec2.parametergroup.ParameterGroup`
"""
params = {}
if groupname:
params['DBParameterGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBParameterGroups', params,
[('DBParameterGroup', ParameterGroup)])
def get_all_dbparameters(self, groupname, source=None,
max_records=None, marker=None):
"""
Get all parameters associated with a ParameterGroup
:type groupname: str
:param groupname: The name of the DBParameter group to retrieve.
:type source: str
:param source: Specifies which parameters to return.
If not specified, all parameters will be returned.
Valid values are: user|system|engine-default
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: :class:`boto.ec2.parametergroup.ParameterGroup`
:return: The ParameterGroup
"""
params = {'DBParameterGroupName' : groupname}
if source:
params['Source'] = source
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
pg = self.get_object('DescribeDBParameters', params, ParameterGroup)
pg.name = groupname
return pg
    def create_parameter_group(self, name, engine='MySQL5.1', description=''):
        """
        Create a new dbparameter group for your account.

        :type name: string
        :param name: The name of the new dbparameter group

        :type engine: str
        :param engine: Name of database engine.

        :type description: string
        :param description: The description of the new dbparameter group

        :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
        :return: The newly created ParameterGroup
        """
        # The engine name doubles as the DBParameterGroupFamily value.
        params = {'DBParameterGroupName': name,
                  'DBParameterGroupFamily': engine,
                  'Description' : description}
        return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
def modify_parameter_group(self, name, parameters=None):
"""
Modify a parameter group for your account.
:type name: string
:param name: The name of the new parameter group
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The new parameters
:rtype: :class:`boto.rds.parametergroup.ParameterGroup`
:return: The newly created ParameterGroup
"""
params = {'DBParameterGroupName': name}
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_list('ModifyDBParameterGroup', params,
ParameterGroup, verb='POST')
def reset_parameter_group(self, name, reset_all_params=False,
parameters=None):
"""
Resets some or all of the parameters of a ParameterGroup to the
default value
:type key_name: string
:param key_name: The name of the ParameterGroup to reset
:type parameters: list of :class:`boto.rds.parametergroup.Parameter`
:param parameters: The parameters to reset. If not supplied,
all parameters will be reset.
"""
params = {'DBParameterGroupName':name}
if reset_all_params:
params['ResetAllParameters'] = 'true'
else:
params['ResetAllParameters'] = 'false'
for i in range(0, len(parameters)):
parameter = parameters[i]
parameter.merge(params, i+1)
return self.get_status('ResetDBParameterGroup', params)
    def delete_parameter_group(self, name):
        """
        Delete a DBParameterGroup from your account.

        :type name: string
        :param name: The name of the DBParameterGroup to delete
        """
        params = {'DBParameterGroupName':name}
        return self.get_status('DeleteDBParameterGroup', params)
# DBSecurityGroup methods
def get_all_dbsecurity_groups(self, groupname=None, max_records=None,
marker=None):
"""
Get all security groups associated with your account in a region.
:type groupnames: list
:param groupnames: A list of the names of security groups to retrieve.
If not provided, all security groups will
be returned.
:type max_records: int
:param max_records: The maximum number of records to be returned.
If more results are available, a MoreToken will
be returned in the response that can be used to
retrieve additional records. Default is 100.
:type marker: str
:param marker: The marker provided by a previous request.
:rtype: list
:return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
"""
params = {}
if groupname:
params['DBSecurityGroupName'] = groupname
if max_records:
params['MaxRecords'] = max_records
if marker:
params['Marker'] = marker
return self.get_list('DescribeDBSecurityGroups', params,
[('DBSecurityGroup', DBSecurityGroup)])
def create_dbsecurity_group(self, name, description=None):
"""
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
params = {'DBSecurityGroupName':name}
if description:
params['DBSecurityGroupDescription'] = description
group = self.get_object('CreateDBSecurityGroup', params,
DBSecurityGroup)
group.name = name
group.description = description
return group
def delete_dbsecurity_group(self, name):
"""
Delete a DBSecurityGroup from your account.
:type key_name: string
:param key_name: The name of the DBSecurityGroup to delete
"""
params = {'DBSecurityGroupName':name}
return self.get_status('DeleteDBSecurityGroup', params)
def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
ec2_security_group_name=None,
ec2_security_group_owner_id=None):
"""
Add a new rule to an existing security group.
You need to pass in either src_security_group_name and
src_security_group_owner_id OR a CIDR block but not both.
:type group_name: string
:param group_name: The name of the security group you are adding
the rule to.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group
you are granting access to.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The ID of the owner of the EC2
security group you are granting
access to.
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:rtype: bool
:return: True if successful.
"""
params = {'DBSecurityGroupName':group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
if cidr_ip:
params['CIDRIP'] = urllib.quote(cidr_ip)
return self.get_object('AuthorizeDBSecurityGroupIngress', params,
DBSecurityGroup)
def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None,
ec2_security_group_owner_id=None, cidr_ip=None):
"""
Remove an existing rule from an existing security group.
You need to pass in either ec2_security_group_name and
ec2_security_group_owner_id OR a CIDR block.
:type group_name: string
:param group_name: The name of the security group you are removing
the rule from.
:type ec2_security_group_name: string
:param ec2_security_group_name: The name of the EC2 security group
from which you are removing access.
:type ec2_security_group_owner_id: string
:param ec2_security_group_owner_id: The ID of the owner of the EC2
security from which you are
removing access.
:type cidr_ip: string
:param cidr_ip: The CIDR block from which you are removing access.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:rtype: bool
:return: True if successful.
"""
params = {'DBSecurityGroupName':group_name}
if ec2_security_group_name:
params['EC2SecurityGroupName'] = ec2_security_group_name
if ec2_security_group_owner_id:
params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
if cidr_ip:
params['CIDRIP'] = cidr_ip
return self.get_object('RevokeDBSecurityGroupIngress', params,
DBSecurityGroup)
# For backwards compatibility. This method was improperly named
# in previous versions. I have renamed it to match the others.
revoke_security_group = revoke_dbsecurity_group
# DBSnapshot methods
def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None,
                        max_records=None, marker=None):
    """
    Get information about DB Snapshots.

    :type snapshot_id: str
    :param snapshot_id: The unique identifier of an RDS snapshot.
                        If not provided, all RDS snapshots will be returned.

    :type instance_id: str
    :param instance_id: The identifier of a DBInstance.  If provided,
                        only the DBSnapshots related to that instance will
                        be returned.  If not provided, all RDS snapshots
                        will be returned.

    :type max_records: int
    :param max_records: The maximum number of records to be returned.
                        If more results are available, a MoreToken will
                        be returned in the response that can be used to
                        retrieve additional records.  Default is 100.

    :type marker: str
    :param marker: The marker provided by a previous request.

    :rtype: list
    :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot`
    """
    params = {}
    # Every filter is optional; include only the ones that were supplied.
    for key, value in (('DBSnapshotIdentifier', snapshot_id),
                       ('DBInstanceIdentifier', instance_id),
                       ('MaxRecords', max_records),
                       ('Marker', marker)):
        if value:
            params[key] = value
    return self.get_list('DescribeDBSnapshots', params,
                         [('DBSnapshot', DBSnapshot)])
def create_dbsnapshot(self, snapshot_id, dbinstance_id):
    """
    Create a new DB snapshot.

    :type snapshot_id: string
    :param snapshot_id: The identifier for the DBSnapshot

    :type dbinstance_id: string
    :param dbinstance_id: The source identifier for the RDS instance from
                          which the snapshot is created.

    :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
    :return: The newly created DBSnapshot
    """
    params = {
        'DBSnapshotIdentifier': snapshot_id,
        'DBInstanceIdentifier': dbinstance_id,
    }
    return self.get_object('CreateDBSnapshot', params, DBSnapshot)
def delete_dbsnapshot(self, identifier):
    """
    Delete a DBSnapshot.

    :type identifier: string
    :param identifier: The identifier of the DBSnapshot to delete
    """
    return self.get_object('DeleteDBSnapshot',
                           {'DBSnapshotIdentifier': identifier},
                           DBSnapshot)
def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
                                       instance_class, port=None,
                                       availability_zone=None):
    """
    Create a new DBInstance from a DB snapshot.

    :type identifier: string
    :param identifier: The identifier for the DBSnapshot

    :type instance_id: string
    :param instance_id: The identifier for the new RDS instance that will
                        be created from the snapshot.

    :type instance_class: str
    :param instance_class: The compute and memory capacity of the
                           DBInstance.  Valid values are:
                           db.m1.small | db.m1.large | db.m1.xlarge |
                           db.m2.2xlarge | db.m2.4xlarge

    :type port: int
    :param port: Port number on which database accepts connections.
                 Valid values [1115-65535].  Defaults to 3306.

    :type availability_zone: str
    :param availability_zone: Name of the availability zone to place
                              DBInstance into.

    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The newly created DBInstance
    """
    params = {'DBSnapshotIdentifier': identifier,
              'DBInstanceIdentifier': instance_id,
              'DBInstanceClass': instance_class}
    # Optional placement parameters are omitted unless supplied.
    for key, value in (('Port', port),
                       ('AvailabilityZone', availability_zone)):
        if value:
            params[key] = value
    return self.get_object('RestoreDBInstanceFromDBSnapshot',
                           params, DBInstance)
def restore_dbinstance_from_point_in_time(self, source_instance_id,
                                          target_instance_id,
                                          use_latest=False,
                                          restore_time=None,
                                          dbinstance_class=None,
                                          port=None,
                                          availability_zone=None):
    """
    Create a new DBInstance from a point in time.

    :type source_instance_id: string
    :param source_instance_id: The identifier for the source DBInstance.

    :type target_instance_id: string
    :param target_instance_id: The identifier of the new DBInstance.

    :type use_latest: bool
    :param use_latest: If True, the latest snapshot available will
                       be used.

    :type restore_time: datetime
    :param restore_time: The date and time to restore from.  Only
                         used if use_latest is False.

    :type dbinstance_class: str
    :param dbinstance_class: The compute and memory capacity of the
                             DBInstance.  Valid values are:
                             db.m1.small | db.m1.large | db.m1.xlarge |
                             db.m2.2xlarge | db.m2.4xlarge

    :type port: int
    :param port: Port number on which database accepts connections.
                 Valid values [1115-65535].  Defaults to 3306.

    :type availability_zone: str
    :param availability_zone: Name of the availability zone to place
                              DBInstance into.

    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The newly created DBInstance
    """
    params = {'SourceDBInstanceIdentifier' : source_instance_id,
              'TargetDBInstanceIdentifier' : target_instance_id}
    # use_latest wins over an explicit restore_time if both are given.
    if use_latest:
        params['UseLatestRestorableTime'] = 'true'
    elif restore_time:
        params['RestoreTime'] = restore_time.isoformat()
    if dbinstance_class:
        params['DBInstanceClass'] = dbinstance_class
    if port:
        params['Port'] = port
    if availability_zone:
        params['AvailabilityZone'] = availability_zone
    return self.get_object('RestoreDBInstanceToPointInTime',
                           params, DBInstance)
# Events
def get_all_events(self, source_identifier=None, source_type=None,
                   start_time=None, end_time=None,
                   max_records=None, marker=None):
    """
    Get information about events related to your DBInstances,
    DBSecurityGroups and DBParameterGroups.

    :type source_identifier: str
    :param source_identifier: If supplied, the events returned will be
                              limited to those that apply to the identified
                              source.  The value of this parameter depends
                              on the value of source_type.  If neither
                              parameter is specified, all events in the time
                              span will be returned.  Note that
                              source_identifier is only honored when
                              source_type is also supplied.

    :type source_type: str
    :param source_type: Specifies how the source_identifier should
                        be interpreted.  Valid values are:
                        db-instance | db-security-group |
                        db-parameter-group | db-snapshot

    :type start_time: datetime
    :param start_time: The beginning of the time interval for events.
                       If not supplied, all available events will
                       be returned.

    :type end_time: datetime
    :param end_time: The ending of the time interval for events.
                     If not supplied, all available events will
                     be returned.

    :type max_records: int
    :param max_records: The maximum number of records to be returned.
                        If more results are available, a MoreToken will
                        be returned in the response that can be used to
                        retrieve additional records.  Default is 100.

    :type marker: str
    :param marker: The marker provided by a previous request.

    :rtype: list
    :return: A list of class:`boto.rds.event.Event`
    """
    params = {}
    # Both values are required to scope the query; a lone identifier or
    # lone type is silently ignored by this client.
    if source_identifier and source_type:
        params['SourceIdentifier'] = source_identifier
        params['SourceType'] = source_type
    if start_time:
        params['StartTime'] = start_time.isoformat()
    if end_time:
        params['EndTime'] = end_time.isoformat()
    if max_records:
        params['MaxRecords'] = max_records
    if marker:
        params['Marker'] = marker
    return self.get_list('DescribeEvents', params, [('Event', Event)])
| |
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
class StackAdvisor(object):
    """
    Abstract class implemented by all stack advisors.  Stack advisors advise
    on stack specific questions.

    Currently stack advisors provide the following abilities:
    - Recommend where services should be installed in cluster
    - Recommend configurations based on host hardware
    - Validate user selection of where services are installed on cluster
    - Validate user configuration values

    Each of the methods below is passed parameters about the services and
    hosts involved, as described here.

    @type services: dictionary
    @param services: Dictionary containing all information about services
      selected by the user.
      Example: {
        "services": [
          {
            "StackServices": {
              "service_name" : "HDFS",
              "service_version" : "2.6.0.2.2",
            },
            "components" : [
              {
                "StackServiceComponents" : {
                  "cardinality" : "1+",
                  "component_category" : "SLAVE",
                  "component_name" : "DATANODE",
                  "display_name" : "DataNode",
                  "service_name" : "HDFS",
                  "hostnames" : []
                },
                "dependencies" : []
              }, {
                "StackServiceComponents" : {
                  "cardinality" : "1-2",
                  "component_category" : "MASTER",
                  "component_name" : "NAMENODE",
                  "display_name" : "NameNode",
                  "service_name" : "HDFS",
                  "hostnames" : []
                },
                "dependencies" : []
              },
              ...
            ]
          },
          ...
        ]
      }

    @type hosts: dictionary
    @param hosts: Dictionary containing all information about hosts in this
      cluster.
      Example: {
        "items": [
          {
            Hosts: {
              "host_name": "c6401.ambari.apache.org",
              "public_host_name" : "c6401.ambari.apache.org",
              "ip": "192.168.1.101",
              "cpu_count" : 1,
              "disk_info" : [
                {
                  "available" : "4564632",
                  "used" : "5230344",
                  "percent" : "54%",
                  "size" : "10319160",
                  "type" : "ext4",
                  "mountpoint" : "/"
                },
                {
                  "available" : "1832436",
                  "used" : "0",
                  "percent" : "0%",
                  "size" : "1832436",
                  "type" : "tmpfs",
                  "mountpoint" : "/dev/shm"
                }
              ],
              "host_state" : "HEALTHY",
              "os_arch" : "x86_64",
              "os_type" : "centos6",
              "total_mem" : 3664872
            }
          },
          ...
        ]
      }

    Each of the methods can either return recommendations or validations.
    Recommendations are made in an Ambari Blueprints friendly format.
    Validations are an array of validation objects.
    """

    def recommendComponentLayout(self, services, hosts):
        """
        Returns recommendation of which hosts various service components
        should be installed on.

        This function takes as input all details about services being
        installed, and hosts they are being installed into, to generate
        hostname assignments to various components of each service.

        @type services: dictionary
        @param services: Dictionary containing all information about services
                         selected by the user.
        @type hosts: dictionary
        @param hosts: Dictionary containing all information about hosts in
                      this cluster.
        @rtype: dictionary
        @return: Layout recommendation of service components on cluster hosts
                 in Ambari Blueprints friendly format.
          Example: {
            "resources" : [
              {
                "hosts" : [
                  "c6402.ambari.apache.org",
                  "c6401.ambari.apache.org"
                ],
                "services" : [
                  "HDFS"
                ],
                "recommendations" : {
                  "blueprint" : {
                    "host_groups" : [
                      {
                        "name" : "host-group-2",
                        "components" : [
                          { "name" : "JOURNALNODE" },
                          { "name" : "ZKFC" },
                          { "name" : "DATANODE" },
                          { "name" : "SECONDARY_NAMENODE" }
                        ]
                      },
                      {
                        "name" : "host-group-1",
                        "components" : [
                          { "name" : "HDFS_CLIENT" },
                          { "name" : "NAMENODE" },
                          { "name" : "JOURNALNODE" },
                          { "name" : "ZKFC" },
                          { "name" : "DATANODE" }
                        ]
                      }
                    ]
                  },
                  "blueprint_cluster_binding" : {
                    "host_groups" : [
                      {
                        "name" : "host-group-1",
                        "hosts" : [ { "fqdn" : "c6401.ambari.apache.org" } ]
                      },
                      {
                        "name" : "host-group-2",
                        "hosts" : [ { "fqdn" : "c6402.ambari.apache.org" } ]
                      }
                    ]
                  }
                }
              }
            ]
          }
        """
        pass  # abstract: concrete advisors must override

    def validateComponentLayout(self, services, hosts):
        """
        Returns array of Validation issues with service component layout on
        hosts.

        This function takes as input all details about services being
        installed along with hosts the components are being installed on
        (the hostnames property is populated for each component).

        @type services: dictionary
        @param services: Dictionary containing information about services and
                         host layout selected by the user.
        @type hosts: dictionary
        @param hosts: Dictionary containing all information about hosts in
                      this cluster.
        @rtype: dictionary
        @return: Dictionary containing array of validation items.
          Example: {
            "items": [
              {
                "type" : "host-group",
                "level" : "ERROR",
                "message" : "NameNode and Secondary NameNode should not be hosted on the same machine",
                "component-name" : "NAMENODE",
                "host" : "c6401.ambari.apache.org"
              },
              ...
            ]
          }
        """
        pass  # abstract: concrete advisors must override

    def recommendConfigurations(self, services, hosts):
        """
        Returns recommendation of service configurations based on
        host-specific layout of components.

        This function takes as input all details about services being
        installed, and hosts they are being installed into, to recommend
        host-specific configurations.

        @type services: dictionary
        @param services: Dictionary containing all information about services
                         and component layout selected by the user.
        @type hosts: dictionary
        @param hosts: Dictionary containing all information about hosts in
                      this cluster.
        @rtype: dictionary
        @return: Layout recommendation of service components on cluster hosts
                 in Ambari Blueprints friendly format.
          Example: {
            "services": [
              "HIVE",
              "TEZ",
              "YARN"
            ],
            "recommendations": {
              "blueprint": {
                "host_groups": [],
                "configurations": {
                  "yarn-site": {
                    "properties": {
                      "yarn.scheduler.minimum-allocation-mb": "682",
                      "yarn.scheduler.maximum-allocation-mb": "2048",
                      "yarn.nodemanager.resource.memory-mb": "2048"
                    }
                  },
                  "tez-site": {
                    "properties": {
                      "tez.am.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC",
                      "tez.am.resource.memory.mb": "682"
                    }
                  },
                  "hive-site": {
                    "properties": {
                      "hive.tez.container.size": "682",
                      "hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC",
                      "hive.auto.convert.join.noconditionaltask.size": "238026752"
                    }
                  }
                }
              },
              "blueprint_cluster_binding": {
                "host_groups": []
              }
            },
            "hosts": [
              "c6401.ambari.apache.org",
              "c6402.ambari.apache.org",
              "c6403.ambari.apache.org"
            ]
          }
        """
        pass  # abstract: concrete advisors must override

    def validateConfigurations(self, services, hosts):
        """
        Returns array of Validation issues with configurations provided by
        the user.

        This function takes as input all details about services being
        installed along with configuration values entered by the user.
        These configurations can be validated against service requirements,
        or host hardware, to generate validation issues.

        @type services: dictionary
        @param services: Dictionary containing information about services and
                         user configurations.
        @type hosts: dictionary
        @param hosts: Dictionary containing all information about hosts in
                      this cluster.
        @rtype: dictionary
        @return: Dictionary containing array of validation items.
          Example: {
            "items": [
              {
                "config-type": "yarn-site",
                "message": "Value is less than the recommended default of 682",
                "type": "configuration",
                "config-name": "yarn.scheduler.minimum-allocation-mb",
                "level": "WARN"
              }
            ]
          }
        """
        pass  # abstract: concrete advisors must override
class DefaultStackAdvisor(StackAdvisor):
    """
    Default stack advisor implementation.

    This implementation is used when a stack-version, or its hierarchy, does
    not have an advisor.  Stack-versions can extend this class to provide
    their own implementation.
    """

    def recommendComponentLayout(self, services, hosts):
        """Returns Services object with hostnames array populated for components."""
        stackName = services["Versions"]["stack_name"]
        stackVersion = services["Versions"]["stack_version"]
        hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
        servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
        layoutRecommendations = self.createComponentLayoutRecommendations(services, hosts)
        recommendations = {
            "Versions": {"stack_name": stackName, "stack_version": stackVersion},
            "hosts": hostsList,
            "services": servicesList,
            "recommendations": layoutRecommendations
        }
        return recommendations

    def createComponentLayoutRecommendations(self, services, hosts):
        """
        Builds the blueprint / host-group layout for all MASTER, SLAVE and
        CLIENT components, honoring any hostnames the user already chose.
        """
        recommendations = {
            "blueprint": {
                "host_groups": [ ]
            },
            "blueprint_cluster_binding": {
                "host_groups": [ ]
            }
        }

        hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
        # Maps host name -> list of {"name": component} dicts placed on it.
        hostsComponentsMap = {}
        for hostName in hostsList:
            if hostName not in hostsComponentsMap:
                hostsComponentsMap[hostName] = []

        # extend 'hostsComponentsMap' with MASTER components
        for service in services["services"]:
            masterComponents = [component for component in service["components"] if self.isMasterComponent(component)]
            for component in masterComponents:
                componentName = component["StackServiceComponents"]["component_name"]
                if self.isComponentHostsPopulated(component):
                    # The user already assigned hosts; keep that assignment.
                    hostsForComponent = component["StackServiceComponents"]["hostnames"]
                else:
                    availableHosts = hostsList
                    # On multi-node clusters, keep certain masters off the
                    # Ambari server host (per getNotPreferableOnServerComponents).
                    if len(hostsList) > 1 and self.isComponentNotPreferableOnAmbariServerHost(component):
                        availableHosts = [hostName for hostName in hostsList if not self.isLocalHost(hostName)]

                    if self.isMasterComponentWithMultipleInstances(component):
                        hostsCount = self.getMinComponentCount(component, hosts)
                        if hostsCount > 1: # get first 'hostsCount' available hosts
                            if len(availableHosts) < hostsCount:
                                hostsCount = len(availableHosts)
                            hostsForComponent = availableHosts[:hostsCount]
                        else:
                            hostsForComponent = [self.getHostForComponent(component, availableHosts)]
                    else:
                        hostsForComponent = [self.getHostForComponent(component, availableHosts)]

                # extend 'hostsComponentsMap' with 'hostsForComponent'
                for hostName in hostsForComponent:
                    hostsComponentsMap[hostName].append( { "name":componentName } )

        # extend 'hostsComponentsMap' with Slave and Client Components.
        # "free" hosts are hosts not already used by any valuable component.
        componentsListList = [service["components"] for service in services["services"]]
        componentsList = [item for sublist in componentsListList for item in sublist]
        usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
        utilizedHosts = [item for sublist in usedHostsListList for item in sublist]
        freeHosts = [hostName for hostName in hostsList if hostName not in utilizedHosts]

        for service in services["services"]:
            slaveClientComponents = [component for component in service["components"]
                                     if self.isSlaveComponent(component) or self.isClientComponent(component)]
            for component in slaveClientComponents:
                componentName = component["StackServiceComponents"]["component_name"]
                if self.isComponentHostsPopulated(component):
                    hostsForComponent = component["StackServiceComponents"]["hostnames"]
                elif component["StackServiceComponents"]["cardinality"] == "ALL":
                    # cardinality "ALL" components go on every host.
                    hostsForComponent = hostsList
                else:
                    if len(freeHosts) == 0:
                        # No free host left: fall back to the last cluster host.
                        hostsForComponent = hostsList[-1:]
                    else: # len(freeHosts) >= 1
                        hostsForComponent = freeHosts
                        if self.isClientComponent(component):
                            # Clients need only a single host.
                            hostsForComponent = freeHosts[0:1]

                # extend 'hostsComponentsMap' with 'hostsForComponent'
                for hostName in hostsForComponent:
                    if hostName not in hostsComponentsMap:
                        hostsComponentsMap[hostName] = []
                    hostsComponentsMap[hostName].append( { "name": componentName } )

        # prepare 'host-group's from 'hostsComponentsMap'
        host_groups = recommendations["blueprint"]["host_groups"]
        bindings = recommendations["blueprint_cluster_binding"]["host_groups"]
        index = 0
        for key in hostsComponentsMap.keys():
            index += 1
            host_group_name = "host-group-{0}".format(index)
            host_groups.append( { "name": host_group_name, "components": hostsComponentsMap[key] } )
            bindings.append( { "name": host_group_name, "hosts": [{ "fqdn": socket.getfqdn(key) }] } )

        return recommendations
    pass  # NOTE(review): stray no-op kept from the original; harmless.

    def createValidationResponse(self, services, validationItems):
        """Wraps validation items with the stack name/version envelope."""
        stackName = services["Versions"]["stack_name"]
        stackVersion = services["Versions"]["stack_version"]
        validations = {
            "Versions": {"stack_name": stackName, "stack_version": stackVersion},
            "items": validationItems
        }
        return validations

    def validateComponentLayout(self, services, hosts):
        """Returns array of Validation objects about issues with component layout."""
        validationItems = self.getComponentLayoutValidations(services, hosts)
        return self.createValidationResponse(services, validationItems)

    def validateConfigurations(self, services, hosts):
        """Returns array of Validation objects about issues with configurations."""
        validationItems = self.getConfigurationsValidationItems(services, hosts)
        return self.createValidationResponse(services, validationItems)

    def getComponentLayoutValidations(self, services, hosts):
        # Default: no layout validations; subclasses override.
        return []

    def getConfigurationClusterSummary(self, servicesList, hosts, components):
        # Default: no cluster summary; subclasses override.
        pass

    def getConfigurationsValidationItems(self, services, hosts):
        # Default: no configuration validations; subclasses override.
        return []

    def recommendConfigurations(self, services, hosts):
        """Runs each service's configuration recommender over the cluster summary."""
        stackName = services["Versions"]["stack_name"]
        stackVersion = services["Versions"]["stack_version"]
        hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
        servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
        components = [component["StackServiceComponents"]["component_name"]
                      for service in services["services"]
                      for component in service["components"]]

        clusterSummary = self.getConfigurationClusterSummary(servicesList, hosts, components)

        recommendations = {
            "Versions": {"stack_name": stackName, "stack_version": stackVersion},
            "hosts": hostsList,
            "services": servicesList,
            "recommendations": {
                "blueprint": {
                    "configurations": {},
                    "host_groups": []
                },
                "blueprint_cluster_binding": {
                    "host_groups": []
                }
            }
        }

        configurations = recommendations["recommendations"]["blueprint"]["configurations"]

        # Each recommender mutates 'configurations' in place.
        for service in servicesList:
            calculation = self.getServiceConfigurationRecommender(service)
            if calculation is not None:
                calculation(configurations, clusterSummary)

        return recommendations

    def getServiceConfigurationRecommender(self, service):
        return self.getServiceConfigurationRecommenderDict().get(service, None)

    def getServiceConfigurationRecommenderDict(self):
        # Default: no per-service recommenders; subclasses override.
        return {}

    # Recommendation helper methods
    def isComponentHostsPopulated(self, component):
        """True if the user already assigned at least one host to this component."""
        hostnames = self.getComponentAttribute(component, "hostnames")
        if hostnames is not None:
            return len(hostnames) > 0
        return False

    def isClientComponent(self, component):
        return self.getComponentAttribute(component, "component_category") == 'CLIENT'

    def isSlaveComponent(self, component):
        return self.getComponentAttribute(component, "component_category") == 'SLAVE'

    def isMasterComponent(self, component):
        return self.getComponentAttribute(component, "is_master")

    def getComponentAttribute(self, component, attribute):
        """Safely reads component["StackServiceComponents"][attribute], else None."""
        serviceComponent = component.get("StackServiceComponents", None)
        if serviceComponent is None:
            return None
        return serviceComponent.get(attribute, None)

    def isLocalHost(self, hostName):
        # True when hostName resolves to the host this advisor runs on
        # (i.e. the Ambari server host).
        return socket.getfqdn(hostName) == socket.getfqdn()

    def isMasterComponentWithMultipleInstances(self, component):
        componentName = self.getComponentName(component)
        masters = self.getMastersWithMultipleInstances()
        return componentName in masters

    def isComponentNotValuable(self, component):
        componentName = self.getComponentName(component)
        service = self.getNotValuableComponents()
        return componentName in service

    def getMinComponentCount(self, component, hosts):
        componentName = self.getComponentName(component)
        return self.getComponentCardinality(componentName, hosts)["min"]

    # Helper dictionaries
    def getComponentCardinality(self, componentName, hosts):
        # Defaults to a singleton component when no cardinality is declared.
        return self.getCardinalitiesDict(hosts).get(componentName, {"min": 1, "max": 1})

    def getHostForComponent(self, component, hostsList):
        """
        Picks one host for a component, consulting the component's layout
        scheme (cluster-size -> host index) when more than one host exists.
        """
        componentName = self.getComponentName(component)

        if len(hostsList) != 1:
            scheme = self.getComponentLayoutScheme(componentName)
            if scheme is not None:
                # 'long' is Python 2; scheme keys are cluster-size thresholds.
                for key in scheme.keys():
                    if isinstance(key, (int, long)):
                        if len(hostsList) < key:
                            return hostsList[scheme[key]]
                return hostsList[scheme['else']]
        return hostsList[0]

    def getComponentLayoutScheme(self, componentName):
        """
        Provides a scheme for laying out given component on different number
        of hosts.
        """
        return self.getComponentLayoutSchemes().get(componentName, None)

    def getComponentName(self, component):
        return self.getComponentAttribute(component, "component_name")

    def isComponentNotPreferableOnAmbariServerHost(self, component):
        componentName = self.getComponentName(component)
        service = self.getNotPreferableOnServerComponents()
        return componentName in service

    def getMastersWithMultipleInstances(self):
        # Default: none; subclasses override.
        return []

    def getNotValuableComponents(self):
        # Default: none; subclasses override.
        return []

    def getNotPreferableOnServerComponents(self):
        # Default: none; subclasses override.
        return []

    def getCardinalitiesDict(self, hosts):
        # Default: empty; subclasses override.
        return {}

    def getComponentLayoutSchemes(self):
        """
        Provides layout scheme dictionaries for components.

        The scheme dictionary basically maps the number of hosts to the
        host index where the component should exist.
        """
        return {}
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.core.backends.chrome import misc_web_contents_backend
from telemetry.core.forwarders import cros_forwarder
class CrOSBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
    """Telemetry browser backend that drives Chrome on a Chrome OS device."""

    def __init__(self, cros_platform_backend, browser_options, cri, is_guest,
                 extensions_to_load):
        # cri is the Chrome OS device interface used to run commands, push
        # files, and manage ports on the device under test.
        super(CrOSBrowserBackend, self).__init__(
            cros_platform_backend, supports_tab_control=True,
            supports_extensions=not is_guest,
            browser_options=browser_options,
            output_profile_path=None, extensions_to_load=extensions_to_load)
        assert browser_options.IsCrosBrowserOptions()
        # Initialize fields so that an explosion during init doesn't break in Close.
        self._cri = cri
        self._is_guest = is_guest
        self._forwarder = None

        # Map each local WPR port to its device-side counterpart.
        # NOTE(review): the https pair passes wpr_port_pairs.http.local_port
        # (not .https) to GetRemotePort — looks like a copy/paste slip;
        # confirm intent before changing.
        self.wpr_port_pairs = forwarders.PortPairs(
            http=forwarders.PortPair(self.wpr_port_pairs.http.local_port,
                                     self.GetRemotePort(
                                         self.wpr_port_pairs.http.local_port)),
            https=forwarders.PortPair(self.wpr_port_pairs.https.local_port,
                                      self.GetRemotePort(
                                          self.wpr_port_pairs.http.local_port)),
            dns=None)

        self._remote_debugging_port = self._cri.GetRemotePort()
        self._port = self._remote_debugging_port

        # Copy extensions to temp directories on the device.
        # Note that we also perform this copy locally to ensure that
        # the owner of the extensions is set to chronos.
        for e in extensions_to_load:
            extension_dir = cri.RunCmdOnDevice(
                ['mktemp', '-d', '/tmp/extension_XXXXX'])[0].rstrip()
            cri.PushFile(e.path, extension_dir)
            cri.Chown(extension_dir)
            e.local_path = os.path.join(extension_dir, os.path.basename(e.path))

        # Restart the UI so a fresh Chrome session picks up our state.
        self._cri.RestartUI(self.browser_options.clear_enterprise_policy)
        util.WaitFor(self.IsBrowserRunning, 20)

        # Delete test user's cryptohome vault (user data directory).
        if not self.browser_options.dont_override_profile:
            self._cri.RunCmdOnDevice(['cryptohome', '--action=remove', '--force',
                                      '--user=%s' % self._username])
        if self.browser_options.profile_dir:
            # Seed the device profile from the locally supplied profile dir.
            cri.RmRF(self.profile_directory)
            cri.PushFile(self.browser_options.profile_dir + '/Default',
                         self.profile_directory)
            cri.Chown(self.profile_directory)

    def GetBrowserStartupArgs(self):
        """Returns the Chrome command-line flags used when restarting the UI."""
        args = super(CrOSBrowserBackend, self).GetBrowserStartupArgs()
        args.extend([
            '--enable-smooth-scrolling',
            '--enable-threaded-compositing',
            '--enable-per-tile-painting',
            # Allow devtools to connect to chrome.
            '--remote-debugging-port=%i' % self._remote_debugging_port,
            # Open a maximized window.
            '--start-maximized',
            # Skip user image selection screen, and post login screens.
            '--oobe-skip-postlogin',
            # Debug logging.
            '--vmodule=*/chromeos/net/*=2,*/chromeos/login/*=2'])

        # Disable GAIA services unless we're using GAIA login, or if there's an
        # explicit request for it.
        if (self.browser_options.disable_gaia_services and
            not self.browser_options.gaia_login):
            args.append('--disable-gaia-services')

        return args

    @property
    def pid(self):
        # Chrome's pid on the device; falsy when the browser is not running.
        return self._cri.GetChromePid()

    @property
    def browser_directory(self):
        result = self._cri.GetChromeProcess()
        if result and 'path' in result:
            return os.path.dirname(result['path'])
        return None

    @property
    def profile_directory(self):
        # Fixed device-side location of the test user's profile.
        return '/home/chronos/Default'

    def GetRemotePort(self, port):
        # On a local (same-machine) device no forwarding is needed.
        if self._cri.local:
            return port
        return self._cri.GetRemotePort()

    def __del__(self):
        self.Close()

    def Start(self):
        # Escape all commas in the startup arguments we pass to Chrome
        # because dbus-send delimits array elements by commas
        startup_args = [a.replace(',', '\\,') for a in self.GetBrowserStartupArgs()]

        # Restart Chrome with the login extension and remote debugging.
        logging.info('Restarting Chrome with flags and login')
        args = ['dbus-send', '--system', '--type=method_call',
                '--dest=org.chromium.SessionManager',
                '/org/chromium/SessionManager',
                'org.chromium.SessionManagerInterface.EnableChromeTesting',
                'boolean:true',
                'array:string:"%s"' % ','.join(startup_args)]
        self._cri.RunCmdOnDevice(args)

        if not self._cri.local:
            # Forward a free local port to the device's debugging port.
            self._port = util.GetUnreservedAvailableLocalPort()
            self._forwarder = self.forwarder_factory.Create(
                forwarders.PortPairs(
                    http=forwarders.PortPair(self._port, self._remote_debugging_port),
                    https=None,
                    dns=None), use_remote_port_forwarding=False)

        # Wait for oobe.
        self._WaitForBrowserToComeUp(wait_for_extensions=False)
        util.WaitFor(lambda: self.oobe_exists, 10)

        if self.browser_options.auto_login:
            try:
                if self._is_guest:
                    pid = self.pid
                    self.oobe.NavigateGuestLogin()
                    # Guest browsing shuts down the current browser and launches an
                    # incognito browser in a separate process, which we need to wait for.
                    util.WaitFor(lambda: pid != self.pid, 10)
                elif self.browser_options.gaia_login:
                    self.oobe.NavigateGaiaLogin(self._username, self._password)
                else:
                    self.oobe.NavigateFakeLogin(self._username, self._password)
                self._WaitForLogin()
            except util.TimeoutException:
                self._cri.TakeScreenShot('login-screen')
                raise exceptions.LoginException('Timed out going through login screen')

        logging.info('Browser is up!')

    def Close(self):
        super(CrOSBrowserBackend, self).Close()

        if self._cri:
            self._cri.RestartUI(False) # Logs out.
            self._cri.CloseConnection()

        util.WaitFor(lambda: not self._IsCryptohomeMounted(), 180)

        if self._forwarder:
            self._forwarder.Close()
            self._forwarder = None

        if self._cri:
            # Remove the extension temp dirs we created in __init__.
            for e in self._extensions_to_load:
                self._cri.RmRF(os.path.dirname(e.local_path))

        self._cri = None

    @property
    @decorators.Cache
    def forwarder_factory(self):
        return cros_forwarder.CrOsForwarderFactory(self._cri)

    def IsBrowserRunning(self):
        return bool(self.pid)

    def GetStandardOutput(self):
        return 'Cannot get standard output on CrOS'

    def GetStackTrace(self):
        return 'Cannot get stack trace on CrOS'

    @property
    @decorators.Cache
    def misc_web_contents_backend(self):
        """Access to chrome://oobe/login page."""
        return misc_web_contents_backend.MiscWebContentsBackend(self)

    @property
    def oobe(self):
        return self.misc_web_contents_backend.GetOobe()

    @property
    def oobe_exists(self):
        return self.misc_web_contents_backend.oobe_exists

    @property
    def _username(self):
        return self.browser_options.username

    @property
    def _password(self):
        return self.browser_options.password

    def _IsCryptohomeMounted(self):
        username = '$guest' if self._is_guest else self._username
        return self._cri.IsCryptohomeMounted(username, self._is_guest)

    def _IsLoggedIn(self):
        """Returns True if cryptohome has mounted, the browser is
        responsive to devtools requests, and the oobe has been dismissed."""
        return (self._IsCryptohomeMounted() and
                self.HasBrowserFinishedLaunching() and
                not self.oobe_exists)

    def _WaitForLogin(self):
        # Wait for cryptohome to mount.
        util.WaitFor(self._IsLoggedIn, 60)

        # Wait for extensions to load.
        self._WaitForBrowserToComeUp()
| |
from gensim.models import Word2Vec
import re
from flask import jsonify, make_response
from flask.ext.httpauth import HTTPBasicAuth
from flask.ext.restful import Resource, reqparse
from flask.ext.restful.representations.json import output_json
import json
import ast # de-string incoming list
import itertools
import io
import sys
# Flag mirrored from petrarch2's PETRglobals.WriteActorRoot.
# NOTE(review): not referenced in the visible code — confirm it is used
# by read_actor_dictionary before removing.
PETRglobals_WriteActorRoot = True
# Module-level actor dictionary consulted by DictionaryLookupAPI lookups;
# presumably populated by read_actor_dictionary at startup — verify.
PETRglobals_ActorDict = {}
class DictionaryLookupAPI(Resource):
def __init__(self):
    # Parser for the JSON body: 'syns' is the (stringified) synonym list
    # produced by the /get_synonyms endpoint.  'unicode' is Python 2.
    self.reqparse = reqparse.RequestParser()
    self.reqparse.add_argument('syns', type=unicode, location='json')
    # NOTE(review): path is hard-coded to the deployment container layout.
    # Also note lookups below read the module-global PETRglobals_ActorDict,
    # not self.ActorDict — confirm read_actor_dictionary fills the global.
    self.ActorDict = self.read_actor_dictionary("/app/resources/Phoenix.Countries.actors.txt")
    super(DictionaryLookupAPI, self).__init__()
def get(self):
    # GET simply returns a usage hint; the real work happens in post().
    return """This service takes in the output of /get_synonyms as input and returns possible matches from the OEDA event data dictionaries."""
def post(self):
    """Parse the 'syns' argument and return actor-dictionary matches."""
    args = self.reqparse.parse_args()
    print args  # Python 2 debug output to stdout
    x = args['syns']
    # The synonym list arrives stringified; literal_eval rebuilds it safely
    # (no arbitrary code execution, unlike eval).
    syns = ast.literal_eval(x)
    matches = self.syns_to_dict(syns)
    return matches
def syns_to_dict(self, syns):
    """Look up (at most) the first ten synonyms in the actor dictionary.

    Each synonym is split on underscores into its constituent terms and
    walked through the module-level PETRglobals_ActorDict via recurse();
    truthy hits are collected and returned as a list.
    """
    hits = []
    for candidate in syns[:10]:
        entry = self.recurse(PETRglobals_ActorDict, candidate.split("_"))
        if entry:
            hits.append(entry)
    return hits
# This is done in a different endpoint. word2vec is so big that I don't want to load it twice.
# Instead, for now, users will have to pass in the output of self.get_syns
#def syns_to_dict(self, word):
# if not isinstance(word, list):
# word = [word]
# syns = self.get_syns(word)
# print syns
# matches = []
# for s in syns[0:10]:
# syn_split = s.split("_")
# o = self.recurse(PETRglobals_ActorDict, syn_split)
# if o:
# matches.append(o)
# # what happens if there are no matches?
# # we try the last word alone
# if not matches and len(word) == 1:
# split_word = word[0].split("_")
# if len(split_word) > 1:
# sw = [split_word[-1]]
# syns = self.get_syns(sw)
# print syns
# for s in syns[0:10]:
# syn_split = s.split("_")
# o = recurse(PETRglobals_ActorDict, syn_split)
# if o:
# matches.append(o)
# return matches
def recurse(self, actor_dict, terms):
try:
new_dict = actor_dict[terms[0]]
terms = terms[1:]
nd = []
try:
nd = new_dict["#"]
return nd
except KeyError:
nd = self.recurse(new_dict, terms)
return nd
except KeyError:
pass
except IndexError:
pass # IndexError: list index out of range: new_dict = actor_dict[terms[0]]
# all code below is (lightly modified and) taken from https://github.com/openeventdata/petrarch2/
def open_FIN(self, filename, descrstr):
# opens the global input stream fin using filename;
# descrstr provides information about the file in the event it isn't found
global FIN
global FINline, FINnline, CurrentFINname
try:
FIN = io.open(filename, 'r', encoding='utf-8')
CurrentFINname = filename
FINnline = 0
except IOError:
print("\aError: Could not find the", descrstr, "file:", filename)
print("Terminating program")
sys.exit()
def close_FIN(self):
# closes the global input stream fin.
# IOError should only happen during debugging or if something has seriously gone wrong
# with the system, so exit if this occurs.
global FIN
try:
FIN.close()
except IOError:
print("\aError: Could not close the input file")
print("Terminating program")
sys.exit()
def read_FIN_line(self):
global FIN
global FINline, FINnline
line = FIN.readline()
FINnline += 1
while True:
# print '==',line,
if len(line) == 0:
break # calling function needs to handle EOF
# deal with simple lines we need to skip
if line[0] == '#' or line[0] == '\n' or line[
0:2] == '<!' or len(line.strip()) == 0:
line = FIN.readline()
FINnline += 1
continue
if not line: # handle EOF
print("EOF hit in read_FIN_line()")
raise EOFError
return line
if ('#' in line):
line = line[:line.find('#')]
if ('<!--' in line):
if ('-->' in line): # just remove the substring
pline = line.partition('<!--')
line = pline[0] + pline[2][pline[2].find('-->') + 3:]
else:
while ('-->' not in line):
line = FIN.readline()
FINnline += 1
line = FIN.readline()
FINnline += 1
if len(line.strip()) > 0:
break
line = FIN.readline()
FINnline += 1
# print "++",line
FINline = line
return line
def read_actor_dictionary(self, actorfile):
""" This is a simple dictionary of dictionaries indexed on the words in the actor string. The final node has the
key '#' and contains codes with their date restrictions and, optionally, the root phrase in the case
of synonyms.
Example:
UFFE_ELLEMANN_JENSEN_ [IGOEUREEC 820701-821231][IGOEUREEC 870701-871231] # president of the CoEU from DENMARK# IGOrulers.txt
the actor above is stored as:
{u'UFFE': {u'ELLEMANN': {u'JENSEN': {u'#': [(u'IGOEUREEC', [u'820701', u'821231']), (u'IGOEUREEC', [u'870701', u'871231'])]}}}}
"""
self.open_FIN(actorfile, "actor")
line = self.read_FIN_line().strip()
current_acts = []
datelist = []
while len(line) > 0:
if line[0] == '[': # Date
data = line[1:-1].split()
code = data[0]
try:
if '-' in data[1]:
dates = data[1].split('-')
else:
dates = [data[1]]
except:
dates = []
datelist.append((code, dates))
else:
if line[0] == '+': # Synonym
actor = line[1:].replace("_", ' ').split()
else: # Base actor
# add previous actor entry to dictionary:
if PETRglobals_WriteActorRoot and len(
current_acts) > 0: # store the root phrase if we're only to use it
datelist.append(current_acts[0])
for targ in current_acts:
list = PETRglobals_ActorDict
while targ != []:
if targ[0] in [' ', '']:
targ = targ[1:]
continue
if not isinstance(list, dict):
print("BADNESS", list)
exit()
list = list.setdefault(targ[0], {})
targ = targ[1:]
list["#"] = datelist
datelist = [] # reset for the new actor
current_acts = []
temp = line.split('\t')
if len(temp)==1:
temp = line.split(" ")
if len(temp)>1:
datestring = temp[1].strip().replace("\n","").split(']')
for i in range(len(datestring)):
if len(datestring[i])==0:
continue
data = datestring[i][datestring[i].find('[')+1:].split()
code = data[0].replace(']','')
try:
date = data[1].replace(']','')
if '-' in date:
dates = date.split('-')
else:
dates = [date]
except:
dates = []
datelist.append((code, dates))
#print(datelist)
actor = temp[0].replace("_", ' ').split()
current_acts.append(actor)
line = self.read_FIN_line().strip()
| |
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
from .fixes import _import_fft
from .utils import (sizeof_fmt, logger, get_config, warn, _explain_exception,
verbose)
_cuda_capable = False
def get_cuda_memory(kind='available'):
    """Get the amount of free memory for CUDA operations.

    Parameters
    ----------
    kind : str
        Can be "available" or "total".

    Returns
    -------
    memory : str
        The amount of available or total memory as a human-readable string.
    """
    if _cuda_capable:
        import cupy
        # memGetInfo() returns (free, total); pick the requested entry.
        index = {'available': 0, 'total': 1}[kind]
        mem = cupy.cuda.runtime.memGetInfo()[index]
    else:
        warn('CUDA not enabled, returning zero for memory')
        mem = 0
    return sizeof_fmt(mem)
@verbose
def init_cuda(ignore_config=False, verbose=None):
    """Initialize CUDA functionality.

    This function attempts to load the necessary interfaces
    (hardware connectivity) to run CUDA-based filtering. This
    function should only need to be run once per session.

    If the config var (set via mne.set_config or in ENV)
    MNE_USE_CUDA == 'true', this function will be executed when
    the first CUDA setup is performed. If this variable is not
    set, this function can be manually executed.

    Parameters
    ----------
    ignore_config : bool
        If True, ignore the config value MNE_USE_CUDA and force init.
    %(verbose)s
    """
    global _cuda_capable
    if _cuda_capable:
        return
    if not ignore_config and (get_config('MNE_USE_CUDA', 'false').lower() !=
                              'true'):
        logger.info('CUDA not enabled in config, skipping initialization')
        return
    # Triage possible errors for informative messaging
    _cuda_capable = False
    try:
        import cupy  # noqa
    except ImportError:
        warn('module cupy not found, CUDA not enabled')
        return
    device_id = int(get_config('MNE_CUDA_DEVICE', '0'))
    try:
        # Initialize CUDA
        _set_cuda_device(device_id, verbose)
    except Exception:
        # Fixed garbled warning text (was "so CUDA device could be ...").
        warn('No CUDA device could be initialized, likely a hardware error, '
             'CUDA not enabled%s' % _explain_exception())
        return
    _cuda_capable = True
    # Figure out limit for CUDA FFT calculations
    logger.info('Enabling CUDA with %s available memory' % get_cuda_memory())
@verbose
def set_cuda_device(device_id, verbose=None):
    """Set the CUDA device temporarily for the current session.

    Parameters
    ----------
    device_id : int
        Numeric ID of the CUDA-capable device you want MNE-Python to use.
    %(verbose)s
    """
    if not _cuda_capable:
        if get_config('MNE_USE_CUDA', 'false').lower() != 'true':
            warn('Could not set CUDA device because CUDA is not enabled; either '
                 'run mne.cuda.init_cuda() first, or set the MNE_USE_CUDA config '
                 'variable to "true".')
            return
        # Config says CUDA is wanted but it has not been initialized yet.
        init_cuda()
    _set_cuda_device(device_id, verbose)
@verbose
def _set_cuda_device(device_id, verbose=None):
    """Set the CUDA device."""
    import cupy
    # Make the chosen device current for subsequent CuPy allocations.
    cupy.cuda.Device(device_id).use()
    logger.info('Now using CUDA device {}'.format(device_id))
###############################################################################
# Repeated FFT multiplication
def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft,
                                      kind='FFT FIR filtering'):
    """Set up repeated CUDA FFT multiplication with a given filter.

    Parameters
    ----------
    n_jobs : int | str
        If n_jobs == 'cuda', the function will attempt to set up for CUDA
        FFT multiplication.
    h : array
        The filtering function that will be used repeatedly.
    n_fft : int
        The number of points in the FFT.
    kind : str
        The kind to report to the user.

    Returns
    -------
    n_jobs : int
        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
        original n_jobs is passed.
    cuda_dict : dict
        Dictionary with the keys n_fft, rfft, irfft and h_fft; the
        callables are either NumPy-based (CPU fallback) or CUDA-based.

    Notes
    -----
    This function is designed to be used with fft_multiply_repeated().
    """
    rfft, irfft = _import_fft(('rfft', 'irfft'))
    # CPU defaults; only replaced if CUDA setup fully succeeds below.
    cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft,
                     h_fft=rfft(h, n=n_fft))
    if n_jobs == 'cuda':
        n_jobs = 1
        init_cuda()
        if _cuda_capable:
            import cupy
            try:
                # do the IFFT normalization now so we don't have to later
                h_fft = cupy.array(cuda_dict['h_fft'])
                logger.info('Using CUDA for %s' % kind)
            except Exception as exp:
                logger.info('CUDA not used, could not instantiate memory '
                            '(arrays may be too large: "%s"), falling back to '
                            'n_jobs=1' % str(exp))
            else:
                # Fix: only install the GPU callbacks when the allocation
                # succeeded (mirrors _setup_cuda_fft_resample); previously the
                # update ran regardless, referencing an undefined h_fft and/or
                # enabling CUDA paths after a failure.
                cuda_dict.update(h_fft=h_fft,
                                 rfft=_cuda_upload_rfft,
                                 irfft=_cuda_irfft_get)
        else:
            logger.info('CUDA not used, CUDA could not be initialized, '
                        'falling back to n_jobs=1')
    return n_jobs, cuda_dict
def _fft_multiply_repeated(x, cuda_dict):
    """Multiply a signal by a precomputed filter in the frequency domain.

    Parameters
    ----------
    x : 1-d array
        The array to filter.
    cuda_dict : dict
        Dictionary constructed using setup_cuda_multiply_repeated();
        supplies the rfft/irfft callables, the FFT length n_fft, and the
        precomputed filter spectrum h_fft.

    Returns
    -------
    x : 1-d array
        Filtered version of x.
    """
    n_fft = cuda_dict['n_fft']
    # Forward transform, pointwise multiply by the filter, inverse transform.
    spectrum = cuda_dict['rfft'](x, n_fft)
    spectrum *= cuda_dict['h_fft']
    return cuda_dict['irfft'](spectrum, n_fft)
###############################################################################
# FFT Resampling
def _setup_cuda_fft_resample(n_jobs, W, new_len):
    """Set up CUDA FFT resampling.

    Parameters
    ----------
    n_jobs : int | str
        If n_jobs == 'cuda', the function will attempt to set up for CUDA
        FFT resampling.
    W : array
        The filtering function to be used during resampling.
        If n_jobs='cuda', this function will be shortened (since CUDA
        assumes FFTs of real signals are half the length of the signal)
        and turned into a gpuarray.
    new_len : int
        The size of the array following resampling.

    Returns
    -------
    n_jobs : int
        Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise
        original n_jobs is passed.
    cuda_dict : dict
        Dictionary with keys use_cuda, rfft, irfft and W.

    Notes
    -----
    This function is designed to be used with fft_resample().
    """
    rfft, irfft = _import_fft(('rfft', 'irfft'))
    cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft)
    # Only the non-negative-frequency half of a real FFT is stored.
    rfft_len_x = len(W) // 2 + 1
    # fold the window onto itself (should be symmetric) and truncate
    W = W.copy()
    W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][:rfft_len_x - 1]) / 2.
    W = W[:rfft_len_x]
    if n_jobs == 'cuda':
        n_jobs = 1
        init_cuda()
        if _cuda_capable:
            try:
                import cupy
                # do the IFFT normalization now so we don't have to later
                W = cupy.array(W)
                logger.info('Using CUDA for FFT resampling')
            except Exception:
                logger.info('CUDA not used, could not instantiate memory '
                            '(arrays may be too large), falling back to '
                            'n_jobs=1')
            else:
                # GPU allocation succeeded: switch to the CUDA callables.
                cuda_dict.update(use_cuda=True,
                                 rfft=_cuda_upload_rfft,
                                 irfft=_cuda_irfft_get)
        else:
            logger.info('CUDA not used, CUDA could not be initialized, '
                        'falling back to n_jobs=1')
    cuda_dict['W'] = W
    return n_jobs, cuda_dict
def _cuda_upload_rfft(x, n, axis=-1):
    """Upload and compute rfft."""
    import cupy
    # Copy host data to the GPU and transform there; result stays on the GPU.
    return cupy.fft.rfft(cupy.array(x), n=n, axis=axis)
def _cuda_irfft_get(x, n, axis=-1):
    """Compute irfft and get."""
    import cupy
    # Inverse transform on the GPU, then copy the result back to the host.
    return cupy.fft.irfft(x, n=n, axis=axis).get()
def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None,
                  pad='reflect_limited'):
    """Do FFT resampling with a filter function (possibly using CUDA).

    Parameters
    ----------
    x : 1-d array
        The array to resample. Will be converted to float64 if necessary.
    new_len : int
        The size of the output array (before removing padding).
    npads : tuple of int
        Amount of padding to apply to the start and end of the
        signal before resampling.
    to_removes : tuple of int
        Number of samples to remove after resampling.
    cuda_dict : dict
        Dictionary constructed using setup_cuda_multiply_repeated().
    pad : str
        The type of padding to use. Supports all :func:`np.pad` ``mode``
        options. Can also be "reflect_limited" (default), which pads with a
        reflected version of each vector mirrored on the first and last values
        of the vector, followed by zeros.

        .. versionadded:: 0.15

    Returns
    -------
    x : 1-d array
        Filtered version of x.
    """
    # NOTE(review): the bare dict(use_cuda=False) default lacks the
    # 'rfft'/'irfft'/'W' keys used below, so callers must pass the dict from
    # _setup_cuda_fft_resample — confirm the None default is ever exercised.
    cuda_dict = dict(use_cuda=False) if cuda_dict is None else cuda_dict
    # add some padding at beginning and end to make this work a little cleaner
    if x.dtype != np.float64:
        x = x.astype(np.float64)
    x = _smart_pad(x, npads, pad)
    old_len = len(x)
    shorter = new_len < old_len
    use_len = new_len if shorter else old_len
    x_fft = cuda_dict['rfft'](x, None)
    if use_len % 2 == 0:
        # Adjust the Nyquist bin so energy is conserved across lengths.
        nyq = use_len // 2
        x_fft[nyq:nyq + 1] *= 2 if shorter else 0.5
    x_fft *= cuda_dict['W']
    y = cuda_dict['irfft'](x_fft, new_len)
    # now let's trim it back to the correct size (if there was padding)
    if (to_removes > 0).any():
        y = y[to_removes[0]:y.shape[0] - to_removes[1]]
    return y
###############################################################################
# Misc
# this has to go in mne.cuda instead of mne.filter to avoid import errors
def _smart_pad(x, n_pad, pad='reflect_limited'):
    """Pad vector x.

    ``pad='reflect_limited'`` mirrors the signal about its first and last
    samples (point reflection, so the edge value itself is not repeated) and
    zero-fills any padding that extends beyond one full reflection; any other
    ``pad`` string is forwarded to :func:`numpy.pad` as its ``mode``.
    """
    n_pad = np.asarray(n_pad)
    assert n_pad.shape == (2,)
    if (n_pad < 0).any():
        raise RuntimeError('n_pad must be non-negative')
    if not n_pad.any():
        return x
    if pad != 'reflect_limited':
        return np.pad(x, (tuple(n_pad),), pad)
    n_left, n_right = n_pad
    # Zero-fill whatever a single reflection cannot cover (len(x) <= pad).
    zeros_left = np.zeros(max(n_left - len(x) + 1, 0), dtype=x.dtype)
    zeros_right = np.zeros(max(n_right - len(x) + 1, 0), dtype=x.dtype)
    mirrored_left = 2 * x[0] - x[n_left:0:-1]
    mirrored_right = 2 * x[-1] - x[-2:-n_right - 2:-1]
    return np.concatenate([zeros_left, mirrored_left, x,
                           mirrored_right, zeros_right])
| |
import numpy as np
from gpkit import Model, Variable, SignomialsEnabled, SignomialEquality, VarKey, units, Vectorize
# Importing atmospheric model
from gpkitmodels.SP.atmosphere.atmosphere import Atmosphere
# SimPleAC with mission design and flight segments, and lapse rate and BSFC model (3.4.2)
class SimPleAC(Model):
    """Top-level aircraft model: engine + wing + fuselage plus fuel volume."""
    def setup(self):
        self.engine = Engine()
        self.wing = Wing()
        self.fuse = Fuselage()
        self.components = [self.engine, self.wing, self.fuse]
        # Environmental constants
        g = Variable("g", 9.81, "m/s^2", "gravitational acceleration")
        rho_f = Variable("\\rho_f", 817, "kg/m^3", "density of fuel")
        # Free Variables
        W = Variable("W", "N", "maximum takeoff weight")
        W_f = Variable("W_f", "N", "maximum fuel weight")
        V_f = Variable("V_f", "m^3", "maximum fuel volume")
        V_f_avail = Variable("V_{f_{avail}}", "m^3", "fuel volume available")
        constraints = []
        # Fuel volume model
        with SignomialsEnabled():
            constraints += [V_f == W_f / g / rho_f,
                            V_f_avail <= self.wing['V_{f_{wing}}'] + self.fuse['V_{f_{fuse}}'], #[SP]
                            V_f_avail >= V_f]
        return constraints, self.components
    def dynamic(self,state):
        # Performance (flight-condition-dependent) counterpart of this model.
        return SimPleACP(self,state)
class SimPleACP(Model):
    """Aircraft performance model evaluated at a given flight state."""
    def setup(self,aircraft,state):
        self.aircraft = aircraft
        self.engineP = aircraft.engine.dynamic(state)
        self.wingP = aircraft.wing.dynamic(state)
        self.fuseP = aircraft.fuse.dynamic(state)
        self.Pmodels = [self.engineP, self.wingP, self.fuseP]
        # Free variables
        C_D = Variable("C_D", "-", "drag coefficient")
        D = Variable("D", "N", "total drag force")
        LoD = Variable('L/D','-','lift-to-drag ratio')
        V = Variable("V", "m/s", "cruising speed")
        constraints = []
        # Thrust power limit, drag build-up, and Reynolds-number definitions.
        constraints += [self.engineP['T'] * V <= self.aircraft.engine['\\eta_{prop}'] * self.engineP['P_{shaft}'],
                        C_D >= self.fuseP['C_{D_{fuse}}'] + self.wingP['C_{D_{wpar}}'] + self.wingP['C_{D_{ind}}'],
                        D >= 0.5 * state['\\rho'] * self.aircraft['S'] * C_D * V ** 2,
                        self.wingP['Re'] == (state['\\rho'] / state['\\mu']) * V * (self.aircraft['S'] / self.aircraft['A']) ** 0.5,
                        self.fuseP['Re_{fuse}'] == state['\\rho']*V*self.aircraft.fuse['l_{fuse}']/state['\\mu'],
                        LoD == self.wingP['C_L'] / C_D]
        return constraints, self.Pmodels
class Fuselage(Model):
    """Fuselage geometry, volume, and form-factor model."""
    def setup(self):
        # Free Variables
        S = Variable('S_{fuse}', 'm^2', 'fuselage surface area')
        l = Variable('l_{fuse}', 'm', 'fuselage length')
        r = Variable('r_{fuse}', 'm', 'fuselage minor radius')
        f = Variable('f_{fuse}', '-', 'fuselage fineness ratio', fix = True)
        k = Variable('k_{fuse}', '-', 'fuselage form factor')
        # Free variables (fixed for performance eval.)
        V = Variable('V_{fuse}', 'm^3', 'total volume in the fuselage', fix = True)
        V_f_fuse = Variable('V_{f_{fuse}}', 'm^3', 'fuel volume in the fuselage')
        W_fuse = Variable('W_{fuse}', 'N', 'fuselage weight')
        # p = 1.6075: exponent of an ellipsoid surface-area approximation
        # (Thomsen-style) — confirm against the source derivation.
        p = 1.6075
        constraints = [f == l/r/2,
                       f <= 6,
                       k >= 1 + 60/f**3 + f/400,
                       3*(S/np.pi)**p >= 2*(l*2*r)**p + (2*r)**(2*p),
                       V == 4./6.*np.pi*r**2*l,
                       # Tiny lower bound keeps the variable GP-compatible.
                       V_f_fuse >= 1*10**-10*units('m^3'),
                       ]
        return constraints
    def dynamic(self,state):
        return FuselageP(self,state)
class FuselageP(Model):
    """Fuselage drag at a flight state via a skin-friction correlation."""
    def setup(self,fuselage,state):
        # Constants
        Cfref = Variable('C_{f_{fuse,ref}}', 0.455, '-', 'fuselage reference skin friction coefficient', pr=10.)
        # Free Variables
        Re = Variable('Re_{fuse}', '-', 'fuselage Reynolds number')
        Cf = Variable('C_{f_{fuse}}', '-', 'fuselage skin friction coefficient')
        Cd = Variable('C_{D_{fuse}}', '-', 'fuselage drag coefficient')
        # Skin friction falls with Reynolds number; drag scales by form factor.
        constraints = [Cf >= Cfref/Re**0.3,
                       Cd >= fuselage['k_{fuse}']*Cf,
                       ]
        return constraints
class Wing(Model):
    """Wing sizing model: structure, skin weight, and wing fuel volume."""
    def setup(self):
        # Non-dimensional constants
        C_Lmax = Variable("C_{L,max}", 1.6, "-", "lift coefficient at stall", pr=5.)
        e = Variable("e", 0.92, "-", "Oswald efficiency factor", pr=3.)
        N_ult = Variable("N_{ult}", 3, "-", "ultimate load factor", pr=15.)
        tau = Variable("\\tau", "-", "airfoil thickness to chord ratio", fix = True)
        tau_ref = Variable("\\tau_{ref}", 0.12, "-", "reference airfoil thickness to chord ratio")
        # Dimensional constants
        W_w_coeff1 = Variable("W_{w_{coeff1}}", 2e-5, "1/m",
                              "wing weight coefficient 1", pr= 30.) #orig 12e-5
        W_w_coeff2 = Variable("W_{w_{coeff2}}", 60., "Pa",
                              "wing weight coefficient 2", pr=10.)
        # Free Variables (fixed for performance eval.)
        A = Variable("A", "-", "aspect ratio",fix = True)
        S = Variable("S", "m^2", "total wing area", fix = True)
        W_w = Variable("W_w", "N", "wing weight")
        W_w_strc = Variable('W_{w_{strc}}','N','wing structural weight', fix = True)
        W_w_surf = Variable('W_{w_{surf}}','N','wing skin weight', fix = True)
        V_f_wing = Variable("V_{f_{wing}}",'m^3','fuel volume in the wing', fix = True)
        constraints = []
        # Structural model
        constraints += [W_w_surf >= W_w_coeff2 * S,
                        W_w >= W_w_surf + W_w_strc]
        # Wing fuel and form factor model
        constraints += [V_f_wing**2 <= 0.0009*S**3/A*tau**2, # linear with b and tau, quadratic with chord
                        tau >= 0.08, tau <= 0.23,
                        ]
        # Form factor model
        return constraints
    def dynamic(self,state):
        # Flight-condition-dependent wing performance model.
        return WingP(self,state)
class WingP(Model):
    """Wing aerodynamic performance: induced and profile drag at a state."""
    def setup(self,wing,state):
        self.wing = wing
        # Free Variables
        C_D_ind = Variable('C_{D_{ind}}', '-', "wing induced drag coefficient")
        C_D_wpar = Variable('C_{D_{wpar}}', '-', "wing profile drag coefficient")
        C_L = Variable("C_L", "-", "wing lift coefficient")
        Re = Variable("Re", "-", "Reynolds number")
        Re_ref = Variable("Re_{ref}", 1500000, "-", "reference Reynolds number")
        constraints = []
        # Drag model
        w = C_D_wpar
        u_1 = C_L
        u_2 = Re/Re_ref
        u_3 = self.wing['\\tau']/self.wing['\\tau_{ref}']
        # Presumably a fitted posynomial surrogate for profile drag in terms
        # of C_L, Re, and thickness ratio — confirm the fit's data source.
        nc = w**0.00488697 >= 0.000347324 * (u_1)**6.64787 * (u_2)**-0.00842527 * (u_3)**-0.406817 + \
            0.974515 * (u_1)**-0.00206058 * (u_2)**-0.00117649 * (u_3)**-0.000597604 + \
            0.000211504 * (u_1)**1.35483 * (u_2)**-0.252459 * (u_3)**3.91243
        nc.name = 'drag'
        constraints += [C_D_ind == C_L ** 2 / (np.pi * self.wing['A'] * self.wing['e']),
                        nc]
        return constraints
class Engine(Model):
    """Engine sizing: weight as a power law of MSL maximum shaft power."""
    def setup(self):
        # Dimensional constants
        BSFC_ref = Variable("BSFC_{ref}", 0.32, "lbf/(hp*hr)", "reference brake specific fuel consumption")
        eta_prop = Variable("\\eta_{prop}", 0.8, '-',"propeller efficiency")
        P_shaft_ref = Variable("P_{shaft,ref}", 10, "hp", "reference MSL maximum shaft power")
        W_e_ref = Variable("W_{e,ref}", 10, "lbf","reference engine weight")
        h_ref = Variable("h_{ref}", 15000,'ft','engine lapse reference altitude')
        # Free variables
        P_shaft_max = Variable("P_{shaft,max}","kW","MSL maximum shaft power")
        W_e = Variable("W_e", "N", "engine weight", fix = True)
        # Engine weight scales with max shaft power via a fitted power law.
        constraints = [(W_e/W_e_ref) == 1.27847 * (P_shaft_max/P_shaft_ref)**0.772392]
        return constraints
    def dynamic(self,state):
        # Flight-condition-dependent engine performance model.
        return EngineP(self,state)
class EngineP(Model):
    """Engine performance: altitude power lapse and BSFC models."""
    def setup(self,engine,state):
        self.engine = engine
        # Dimensional constants
        # Free variables
        BSFC = Variable("BSFC", "lbf/(hp*hr)", "brake specific fuel consumption")
        P_shaft = Variable("P_{shaft}", "kW","shaft power")
        P_shaft_alt = Variable("P_{shaft,alt}", "kW", 'maximum shaft power at altitude')
        Thrust = Variable("T", "N", "propeller thrust")
        L = Variable("L","-","power lapse percentage")
        constraints = []
        with SignomialsEnabled():
            # Lapse model ties available power at altitude to MSL max power;
            # the BSFC constraints penalize running at partial throttle.
            constraints += [P_shaft <= P_shaft_alt,
                            L == (0.937 * (state['h']/self.engine['h_{ref}'])**0.0922)**10,
                            SignomialEquality(1, L + P_shaft_alt / self.engine['P_{shaft,max}']),
                            (BSFC/self.engine['BSFC_{ref}'])**(0.1) >= 0.984*(P_shaft/P_shaft_alt)**-0.0346,
                            BSFC/self.engine['BSFC_{ref}'] >= 1.,
                            ]
        return constraints
class Mission(Model):
    """Multi-segment mission flown by `aircraft`: climb/cruise profile,
    per-segment weights and fuel burn, plus aircraft-level closure
    constraints (MTOW, stall, wing weight, fuselage volume)."""
    def setup(self,aircraft,Nsegments):
        self.aircraft = aircraft
        W_f_m = Variable('W_{f_m}','N','total mission fuel')
        t_m = Variable('t_m','hr','total mission time')
        with Vectorize(Nsegments):
            # Segment-indexed (vectorized) variables.
            Wavg = Variable('W_{avg}','N','segment average weight')
            Wstart = Variable('W_{start}', 'N', 'weight at the beginning of flight segment')
            Wend = Variable('W_{end}', 'N', 'weight at the end of flight segment')
            h = Variable('h','m','final segment flight altitude')
            havg = Variable('h_{avg}','m','average segment flight altitude')
            dhdt = Variable('\\frac{dh}{dt}','m/hr','climb rate')
            W_f_s = Variable('W_{f_s}','N', 'segment fuel burn')
            t_s = Variable('t_s','hr','time spent in flight segment')
            R_s = Variable('R_s','km','range flown in segment')
            state = Atmosphere()
            self.aircraftP = self.aircraft.dynamic(state)
        # Mission variables
        hcruise = Variable('h_{cruise_m}', 'm', 'minimum cruise altitude')
        Range = Variable("Range_m", "km", "aircraft range")
        W_p = Variable("W_{p_m}", "N", "payload weight", pr=20.)
        rho_p = Variable("\\rho_{p_m}", "kg/m^3", "payload density", pr = 10.)
        V_min = Variable("V_{min_m}", "m/s", "takeoff speed", pr=20.)
        TOfac = Variable('T/O factor_m', '-','takeoff thrust factor')
        cost_index = Variable("C_m", '1/hr','hourly cost index')
        constraints = []
        # Setting up the mission
        with SignomialsEnabled():
            constraints += [havg == state['h'], # Linking states
                            h[1:Nsegments-1] >= hcruise, # Adding minimum cruise altitude
                            # Weights at beginning and end of mission
                            Wstart[0] >= W_p + self.aircraft.wing['W_w'] + self.aircraft.engine['W_e'] + self.aircraft.fuse['W_{fuse}'] + W_f_m,
                            Wend[Nsegments-1] >= W_p + self.aircraft.wing['W_w'] + self.aircraft.engine['W_e'] + self.aircraft.fuse['W_{fuse}'],
                            # Lift, and linking segment start and end weights
                            Wavg <= 0.5 * state['\\rho'] * self.aircraft['S'] * self.aircraftP.wingP['C_L'] * self.aircraftP['V'] ** 2,
                            Wstart >= Wend + W_f_s, # Making sure fuel gets burnt!
                            Wstart[1:Nsegments] == Wend[:Nsegments-1],
                            Wavg == Wstart ** 0.5 * Wend ** 0.5,
                            # Altitude changes
                            h[0] == t_s[0]*dhdt[0], # Starting altitude
                            dhdt >= 1.*units('m/hr'),
                            havg[0] == 0.5*h[0],
                            havg[1:Nsegments] == (h[1:Nsegments]*h[0:Nsegments-1])**(0.5),
                            SignomialEquality(h[1:Nsegments],h[:Nsegments-1] + t_s[1:Nsegments]*dhdt[1:Nsegments]),
                            # Thrust and fuel burn
                            W_f_s >= self.aircraftP.engineP['BSFC'] * self.aircraftP.engineP['P_{shaft}'] * t_s,
                            self.aircraftP.engineP['T'] * self.aircraftP['V'] >= self.aircraftP['D'] * self.aircraftP['V'] + Wavg * dhdt,
                            # Max MSL thrust at least 2*climb thrust
                            self.aircraft.engine['P_{shaft,max}'] >= TOfac*self.aircraftP.engineP['P_{shaft}'][0],
                            # Flight time
                            t_s == R_s/self.aircraftP['V'],
                            # Aggregating segment variables
                            self.aircraft['W_f'] >= W_f_m,
                            R_s == Range/Nsegments, # Dividing into equal range segments
                            W_f_m >= sum(W_f_s),
                            t_m >= sum(t_s)
                            ]
        # Maximum takeoff weight
        constraints += [self.aircraft['W'] >= W_p + self.aircraft.wing['W_w'] + self.aircraft['W_f'] +
                        self.aircraft.engine['W_e'] + self.aircraft.fuse['W_{fuse}']]
        # Stall constraint
        constraints += [self.aircraft['W'] <= 0.5 * state['\\rho'] *
                        self.aircraft['S'] * self.aircraft['C_{L,max}'] * V_min ** 2]
        # Wing weight model
        constraints += [self.aircraft.wing['W_{w_{strc}}']**2. >=
                        self.aircraft.wing['W_{w_{coeff1}}']**2. / self.aircraft.wing['\\tau']**2. *
                        (self.aircraft.wing['N_{ult}']**2. * self.aircraft.wing['A'] ** 3. *
                        ((W_p + self.aircraft.fuse['W_{fuse}'] +
                        self.aircraft['W_e'] + self.aircraft.fuse['V_{f_{fuse}}']*self.aircraft['g']*self.aircraft['\\rho_f']) *
                        self.aircraft['W'] * self.aircraft.wing['S']))]
        # Fuselage volume and weight
        constraints += [self.aircraft.fuse['V_{fuse}'] >=
                        self.aircraft.fuse['V_{f_{fuse}}'] + W_p/(rho_p*self.aircraft['g']),
                        self.aircraft.fuse['W_{fuse}'] == self.aircraft.fuse['S_{fuse}']*self.aircraft.wing['W_{w_{coeff2}}'],
                        ]
        # Upper bounding variables
        constraints += [t_m <= 100000*units('hr'),
                        W_f_m <= 1e10*units('N'),
                        cost_index >= 1e-10*units('1/hr')]
        return constraints, state, self.aircraft, self.aircraftP
def test():
    """Build and (locally) solve a 4-segment SimPleAC example mission."""
    m = Mission(SimPleAC(),4)
    # Fix the mission's operating parameters to example values.
    m.substitutions.update({
        'h_{cruise_m}' :5000*units('m'),
        'Range_m' :3000*units('km'),
        'W_{p_m}' :3000*units('N'),
        '\\rho_{p_m}' :1500*units('kg/m^3'),
        'C_m' :120*units('1/hr'),
        'V_{min_m}' :35*units('m/s'),
        'T/O factor_m' :2,
        })
    # Objective: fuel burn plus time cost (signomial program => localsolve).
    m.cost = m['W_{f_m}']*units('1/N') + m['C_m']*m['t_m']
    sol = m.localsolve(verbosity=0)
# Run the example mission solve when executed as a script.
if __name__ == "__main__":
    test()
| |
# Copyright (c) 2015 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from barbican.common import config
from barbican.common import exception
from barbican.common import hrefs
from barbican.common import resources as res
from barbican.model import repositories as repo
# All negative values will be treated as unlimited
UNLIMITED_VALUE = -1
# A quota of exactly zero disables the resource entirely.
DISABLED_VALUE = 0
CONF = config.CONF
class QuotaDriver(object):
    """Driver to enforce quotas and obtain quota information."""

    def __init__(self):
        self.repo = repo.get_project_quotas_repository()

    def _get_resources(self):
        """Names of the resource types that quotas can constrain."""
        return ['secrets', 'orders', 'containers', 'consumers', 'cas']

    def _get_defaults(self):
        """Map each constrainable resource to its configured default quota."""
        conf_quotas = CONF.quotas
        return {
            'secrets': conf_quotas.quota_secrets,
            'orders': conf_quotas.quota_orders,
            'containers': conf_quotas.quota_containers,
            'consumers': conf_quotas.quota_consumers,
            'cas': conf_quotas.quota_cas,
        }

    def _extract_project_quotas(self, project_quotas_model):
        """Convert a project-quotas model into a plain resource->value dict.

        :param project_quotas_model: Model containing quota information
        :return: Python dict containing quota information
        """
        return {name: getattr(project_quotas_model, name)
                for name in self._get_resources()}

    def _compute_effective_quotas(self, configured_quotas):
        """Overlay configured values onto the defaults.

        Any resource whose configured value is None falls back to the
        configured default for that resource.

        :param configured_quotas: configured quota values
        :return: effective quotas
        """
        defaults = self._get_defaults()
        return {name: defaults[name] if value is None else value
                for name, value in configured_quotas.items()}

    def get_effective_quotas(self, external_project_id):
        """Collect and return the effective quotas for a project.

        :param external_project_id: external ID of current project
        :return: dict with effective quotas
        """
        try:
            configured = self.repo.get_by_external_project_id(
                external_project_id)
        except exception.NotFound:
            # No project-specific configuration: the defaults apply as-is.
            return self._get_defaults()
        return self._compute_effective_quotas(
            self._extract_project_quotas(configured))

    def is_unlimited_value(self, v):
        """True when ``v`` denotes an unlimited quota (any value <= -1)."""
        return v <= UNLIMITED_VALUE

    def is_disabled_value(self, v):
        """True when ``v`` denotes a disabled resource (exactly 0)."""
        return v == DISABLED_VALUE

    def set_project_quotas(self, external_project_id, parsed_project_quotas):
        """Create a new database entry, or update an existing one.

        :param external_project_id: ID of project whose quotas are to be set
        :param parsed_project_quotas: quota values to save in database
        :return: None
        """
        project = res.get_or_create_project(external_project_id)
        self.repo.create_or_update_by_project_id(project.id,
                                                 parsed_project_quotas)
        # Commit immediately so an enforcer running on another thread sees
        # the new limits right away.
        repo.commit()

    def get_project_quotas(self, external_project_id):
        """Retrieve configured quota information from the database.

        :param external_project_id: ID of project for whose values are wanted
        :return: the values, or None when the project has none configured
        """
        try:
            model = self.repo.get_by_external_project_id(external_project_id)
        except exception.NotFound:
            return None
        return {'project_quotas': self._extract_project_quotas(model)}

    def get_project_quotas_list(self, offset_arg=None, limit_arg=None):
        """Return a paged dict of all configured quota information.

        :return: a dict and list of a page of quota config info
        """
        models, offset, limit, total = self.repo.get_by_create_date(
            offset_arg=offset_arg, limit_arg=limit_arg,
            suppress_exception=True)
        page = [{'project_id': model.project.external_id,
                 'project_quotas': self._extract_project_quotas(model)}
                for model in models]
        resp = {'project_quotas': page}
        resp_overall = hrefs.add_nav_hrefs(
            'project_quotas', offset, limit, total, resp)
        resp_overall.update({'total': total})
        return resp_overall

    def delete_project_quotas(self, external_project_id):
        """Remove configured quota information from the database.

        :param external_project_id: ID of project whose quotas will be deleted
        :raises NotFound: if project has no configured values
        :return: None
        """
        self.repo.delete_by_external_project_id(external_project_id)

    def get_quotas(self, external_project_id):
        """Get the effective (configured merged with default) quotas.

        :param external_project_id: ID of project for which to get quotas
        :return: dict of effective quota values
        """
        return {'quotas': self.get_effective_quotas(external_project_id)}
class QuotaEnforcer(object):
    """Checks quota limits against current resource usage levels."""

    def __init__(self, resource_type, resource_repo):
        self.quota_driver = QuotaDriver()
        self.resource_type = resource_type
        self.resource_repo = resource_repo

    def enforce(self, project):
        """Enforce the quota limit for the resource.

        :param project: the project object corresponding to the sender
        :raises QuotaReached: exception raised if quota forbids request
        :return: None
        """
        quotas = self.quota_driver.get_effective_quotas(project.external_id)
        quota = quotas[self.resource_type]
        if self.quota_driver.is_unlimited_value(quota):
            # Unlimited quota: nothing to check, no count query needed.
            return
        count = 0
        if self.quota_driver.is_disabled_value(quota):
            over_limit = True
        else:
            count = self.resource_repo.get_count(project.id)
            over_limit = count >= quota
        if over_limit:
            raise exception.QuotaReached(
                external_project_id=project.external_id,
                resource_type=self.resource_type,
                quota=quota)
| |
"""The test for switch device automation."""
from datetime import timedelta
from unittest.mock import patch
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.switch import DOMAIN
from homeassistant.const import CONF_PLATFORM, STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa
@pytest.fixture
def device_reg(hass):
    """Provide an empty, loaded device registry bound to hass."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass):
    """Provide an empty, loaded entity registry bound to hass."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass):
    """Capture calls made to the mock test.automation service."""
    captured = async_mock_service(hass, "test", "automation")
    return captured
async def test_get_conditions(hass, device_reg, entity_reg):
    """Test we get the expected conditions from a switch."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)

    # One device condition is expected per supported state check, in
    # this order.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition_type,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_5678",
        }
        for condition_type in ("is_off", "is_on")
    ]

    conditions = await async_get_device_automations(hass, "condition", device_entry.id)

    assert conditions == expected_conditions
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a switch condition."""
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)

    # Every switch condition supports the same optional "for" duration.
    expected_capabilities = {
        "extra_fields": [
            {"name": "for", "optional": True, "type": "positive_time_period_dict"}
        ]
    }

    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    for cond in conditions:
        caps = await async_get_device_automation_capabilities(hass, "condition", cond)
        assert caps == expected_capabilities
async def test_if_state(hass, calls):
    """Test for turn_on and turn_off conditions."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()

    ent1, _, _ = platform.ENTITIES

    def automation_for(condition_type, event_type):
        """Build one automation gated on a device condition of the switch."""
        return {
            "trigger": {"platform": "event", "event_type": event_type},
            "condition": [
                {
                    "condition": "device",
                    "domain": DOMAIN,
                    "device_id": "",
                    "entity_id": ent1.entity_id,
                    "type": condition_type,
                }
            ],
            "action": {
                "service": "test.automation",
                "data_template": {
                    # Same template string the original built with
                    # "%s" % "...".join(...).
                    "some": condition_type
                    + " {{ trigger.platform}} - {{ trigger.event.event_type }}"
                },
            },
        }

    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                automation_for("is_on", "test_event1"),
                automation_for("is_off", "test_event2"),
            ]
        },
    )
    await hass.async_block_till_done()

    # Switch starts on: only the is_on automation may fire.
    assert hass.states.get(ent1.entity_id).state == STATE_ON
    assert len(calls) == 0

    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "is_on event - test_event1"

    # After turning the switch off, only the is_off automation may fire.
    hass.states.async_set(ent1.entity_id, STATE_OFF)
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "is_off event - test_event2"
async def test_if_fires_on_for_condition(hass, calls):
    """Test for firing if condition is on with delay."""
    # Three fixed points in time, 10 seconds apart, used to simulate the
    # passage of time around the 5-second "for" requirement.
    point1 = dt_util.utcnow()
    point2 = point1 + timedelta(seconds=10)
    point3 = point2 + timedelta(seconds=10)
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    ent1, ent2, ent3 = platform.ENTITIES
    with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
        mock_utcnow.return_value = point1
        # Automation fires on test_event1 only if the switch has been off
        # for at least 5 seconds.
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: [
                    {
                        "trigger": {"platform": "event", "event_type": "test_event1"},
                        "condition": {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": ent1.entity_id,
                            "type": "is_off",
                            "for": {"seconds": 5},
                        },
                        "action": {
                            "service": "test.automation",
                            "data_template": {
                                "some": "is_off {{ trigger.%s }}"
                                % "}} - {{ trigger.".join(
                                    ("platform", "event.event_type")
                                )
                            },
                        },
                    }
                ]
            },
        )
        await hass.async_block_till_done()
        assert hass.states.get(ent1.entity_id).state == STATE_ON
        assert len(calls) == 0
        # Switch is on: condition fails regardless of timing.
        hass.bus.async_fire("test_event1")
        await hass.async_block_till_done()
        assert len(calls) == 0
        # Time travel 10 secs into the future
        mock_utcnow.return_value = point2
        hass.bus.async_fire("test_event1")
        await hass.async_block_till_done()
        assert len(calls) == 0
        # Turn the switch off; the "for 5 seconds" clock starts at point2.
        hass.states.async_set(ent1.entity_id, STATE_OFF)
        hass.bus.async_fire("test_event1")
        await hass.async_block_till_done()
        assert len(calls) == 0
        # Time travel 20 secs into the future
        mock_utcnow.return_value = point3
        # Now off for 10 seconds (> 5): the automation fires.
        hass.bus.async_fire("test_event1")
        await hass.async_block_till_done()
        assert len(calls) == 1
        assert calls[0].data["some"] == "is_off event - test_event1"
| |
#! /usr/bin/env python
import argparse
import textwrap
import os.path as osp
import json
import os
import os.path as osp
class InputFileMissing(Exception):
    """Raised when a file declared as a pipeline-step input is absent."""
class ALFAHelper(object):
    '''Returns filenames from existing items in the ALFA repository
    or suggests new ones according to the Axon ontology,
    generates commands based on various pipelines steps (parse_command)
    and identifies the last performed step in the processing of a subject'''

    def __init__(self, directory='/home/grg/data/ALFA_DWI', jsonfile='/home/grg/git/alfa/alfa_dwi_pipeline_io_aug2016.json'):
        '''Open the Axon database at `directory` and load the mapping from
        pipeline step name to Axon type list from `jsonfile`.'''
        # Imported lazily: initializing Axon processes is heavyweight.
        from brainvisa import axon
        axon.initializeProcesses()
        import neuroHierarchy
        self.__db = neuroHierarchy.databases._databases[directory]
        # step name -> list of decorated Axon type names ('@', '>', '#', '!').
        self.args_types = json.load(open(jsonfile))

    def find_diskitem(self, subject, axontype='Any Type', fmt=None):
        '''Return the existing disk item for (subject, axontype). When none
        exists and `fmt` is given, create a filename for a new item in that
        format; otherwise raise.'''
        res = self.__db.findDiskItem(exactType=True, **{'_type': axontype, 'subject':subject})
        if res is None:
            if fmt is None:
                raise Exception('No diskitem found. Provide a format to generate a filename for a new item.')
            else:
                res = self.__db.findOrCreateDiskItem(**{'_type': axontype, 'subject':subject, '_format': fmt})
        return res

    def get_ALFA_types(self):
        '''Return the distinct Axon type names present in the database.'''
        import neuroProcesses
        return list(set([each.type.name for each in self.__db.findDiskItems(**{'_type': 'Any Type'})]))

    def parse_command(self, subject, name, jsonfile='/home/grg/git/alfa/alfa_dwi_pipeline_aug2016.json'):
        '''Types starting with @ indicate that the corresponding files must exist before execution (ReadDiskItems). \n
        Types starting with > indicate that the corresponding files must exist after execution (WriteDiskItems). \n
        Strings starting with # designate hard-coded filenames.
        Types starting with ! indicate that the filenames will be returned without extension.
        '''
        j = json.load(open(jsonfile))
        types = self.args_types[name]
        dsk = []
        for each in types:
            # Strip marker characters to recover the bare Axon type name.
            t = each.strip('@#!>')
            d = t
            if not each.startswith('#'):
                # Choose the expected file format for this type.
                if 'DTIFIT' in t:
                    fmt = 'Directory'
                elif 'Bvec' in t:
                    fmt = 'Bvec file'
                elif 'stats' in t:
                    fmt = 'CSV file'
                else:
                    fmt = 'gz compressed NIFTI-1 image'
                d = self.find_diskitem(subject, t, fmt=fmt).fullPath()
            if each.startswith('@'):
                # Declared input: fail early if the file/dir is absent.
                if not osp.isfile(d) and not osp.isdir(d):
                    raise InputFileMissing('%s not found (type %s) while declared as input (or remove the leading @)'%(d, t))
            if each.startswith('!'):
                # NOTE(review): relies on `fmt` from the '#'-branch above;
                # an entry starting '#!' would hit an undefined `fmt`.
                d = self.find_diskitem(subject, t, fmt=fmt).fullPath()
                # Truncate at the first dot to drop the file extension.
                d = d[:d.index('.')]
            dsk.append(d)
        # Fill the module-level '%s' command template with resolved paths.
        res = parse_command(j[name], dsk)
        return res

    def current_stage(self, subject):
        '''Scan the pipeline steps in order and report the first one whose
        declared files are missing for `subject`: returns '<step>-1' when
        an input (@) is missing, '<step>' when an output (>) is missing,
        and 0 when every step's files exist.'''
        steps = ['denoising', 'eddycorrect', 'extractb0', 'fslbet.25', 'fslfast', 'denoise_t1', 'spm12', 'dilate', 'ants_t1_to_dwi', 'ants_dwi_to_t1', 'ants_mni_to_t1', 'rotcorr', 'dtifit', 'warp_aal_to_t1', 'warp_aal_to_dwi', 'warp_md_to_t1', 'warp_md_to_mni', 'roistats_md', 'roistats_md_mni']
        def __sort_input_first__(a):
            # Reorder a step's types so inputs (@) are checked before
            # outputs (>), then everything else.
            inp = []
            out = []
            oth = []
            for each in a:
                if each.startswith('@'):
                    inp.append(each)
                elif each.startswith('>'):
                    out.append(each)
                else:
                    oth.append(each)
            res = []
            for each in [inp, out, oth]:
                res.extend(each)
            return res
        existing_all = True
        for i, each in enumerate(steps):
            for filetype in __sort_input_first__(self.args_types[each]):
                t = filetype.strip('@#!>')
                if filetype[0] in '@>':
                    res = [e for e in self.__db.findDiskItems(_type=t, subject=subject)]
                    if len(res) == 0:
                        existing_all = False
                        if filetype.startswith('@'):
                            print subject, 'is stuck at step before', each, '(missing: %s)'%t
                            return '%s-1'%each
                        else:
                            print subject, 'is stuck at step', each, '(missing: %s)'%t
                            return each
            # NOTE(review): both missing-file branches above return
            # immediately, so existing_all can never be False here; this
            # block appears unreachable.
            if not existing_all:
                print subject, 'is stuck at step', each
                return each
        print subject, 'is complete'
        return 0

    def generate_batch(self, subjects, step, batchfile, create_workflow=True):
        '''Write one command line per subject for `step` into `batchfile`,
        skipping subjects whose declared inputs are missing. Optionally
        builds a workflow file alongside. Returns the subjects written.'''
        w = open(batchfile, 'w')
        succeeded = []
        for each in subjects:
            try:
                w.write('%s\n'%self.parse_command(each, step))
                succeeded.append(each)
            except InputFileMissing as e:
                # Best-effort: report and continue with the next subject.
                print 'Failed', each, e
        w.close()
        if create_workflow:
            try:
                import create_workflow as cw
                import os.path as osp
                cw.create_workflow(batchfile, "%s.workflow"%osp.splitext(batchfile)[0], names=succeeded)
            except ImportError:
                print 'create_workflow command not found. Check that create_workflow.py is in the same folder as thesaurus.'
                raise
        return succeeded
def parse_command(cmd, args):
    '''Substitute each '%s' placeholder in `cmd` with the corresponding
    entry of `args`, left to right, and return the resulting string.

    BUG FIX: the original used repeated cmd.replace('%s', arg, 1), which
    re-scans from the start of the string each time; an argument that
    itself contained '%s' would be corrupted by the next substitution.
    Splitting on the placeholder and re-joining is immune to that.

    :param cmd: command template containing '%s' placeholders
    :param args: sequence of strings, one per placeholder
    :raises Exception: when the number of args does not match the template
    '''
    parts = cmd.split('%s')
    # len(parts) - 1 equals the number of '%s' placeholders in cmd.
    if len(args) != len(parts) - 1:
        raise Exception('%s (%s)\n%s\n%s'%(
            args, len(args),
            cmd,
            'Please check the number of arguments respect the command.'))
    pieces = [parts[0]]
    for arg, tail in zip(args, parts[1:]):
        pieces.append(arg)
        pieces.append(tail)
    return ''.join(pieces)
if __name__ == '__main__':
    # Command-line front-end: store, run or list shell commands kept in a
    # JSON "thesaurus" file keyed by alias.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
            description=textwrap.dedent('''\
            Associating a command with a unique alias for simple recall.
            '''))
    parser.add_argument("--cmd", dest='command', type=str, required=False, help='Command')
    parser.add_argument("--name", dest='name', type=str, required=False, help='Command alias')
    parser.add_argument("-n", dest='dontdo', action="store_true", help='Print the command without running it')
    # Action selector: 1 = store, 2 = run (default), 3 = list.
    parser.add_argument("--store", dest='action', action='store_const', const=1)
    parser.add_argument("--run", dest='action', action='store_const', const=2)
    parser.add_argument("--list", dest='action', action='store_const', const=3)
    parser.add_argument("json", type=str, help='JSON file containing all the commands')
    parser.add_argument("args", nargs='*', default=None, type=str, help='Arguments of the command')
    args = parser.parse_args()
    action = args.action
    if action is None:
        # No explicit flag given: default to running the command.
        action = 2
    arg = args.args
    name = args.name
    if action == 2:
        # Run command
        j = json.load(open(args.json))
        if args.name is None:
            raise Exception('Command name should be provided')
        if not name in j:
            raise KeyError('Command with key %s not found.'%name)
        cmd = j[name]
        a = arg if not arg is None else []
        # Substitute positional args into the stored '%s' template.
        cmd = parse_command(cmd, a)
        print cmd
        if not args.dontdo:
            import time
            start_time = time.time()
            res = os.system(cmd)
            print('Command was:\n%s'%cmd)
            # NOTE(review): os.system returns an encoded wait status; the
            # comparison with 2 presumably targets a specific interrupt
            # convention - confirm against the shells used.
            if res == 0:
                print('Execution complete. Elapsed time: %f seconds'% (time.time() - start_time))
            elif res == 2:
                print('Execution interrupted. Elapsed time: %f seconds'% (time.time() - start_time))
            print('===')
    elif action == 1:
        #Store command
        if args.name is None:
            raise Exception('Command name should be provided')
        if not arg is None:
            print 'Warning: command called in store mode, provided arguments will get ignored'
        if args.command is None:
            raise Exception('A command should be provided when calling in store mode')
        # Add or overwrite the alias, rewriting the JSON file in place.
        j = json.load(open(args.json))
        j[name] = args.command
        json.dump(j, open(args.json,'w'), indent=2)
    elif action == 3:
        #List command
        j = json.load(open(args.json))
        from pprint import pprint
        pprint(j, indent=2)
| |
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
import sys
import PySide.QtGui as qg
import numpy
import popupcad
import popupcad.filetypes
from popupcad.filetypes.operation import Operation
from popupcad.filetypes.laminate import Laminate
from popupcad.filetypes.design import NoOperation
from popupcad.filetypes.design import Design
from dev_tools.enum import enum
from popupcad.algorithms.points import calctransformfrom2lines
from popupcad.filetypes.sketch import Sketch
class Dialog(qg.QDialog):
    """Configuration dialog for a placement operation.

    Collects the placement sketch, the source subdesign and one of its
    operations, the transform type (place/stretch/scale), and layer
    shift/flip options. On acceptance the chosen values are exposed as
    ``self.acceptdata`` in the order PlaceOperation4.editdata expects.
    """

    def __init__(
            self,
            design,
            prioroperations,
            sketch=None,
            subdesign=None,
            subopid=None,
            transformtype=None,
            shift=0,
            flip=False):
        """Build the dialog widgets, seeding them with any prior values.

        :param design: parent design the operation will be added to
        :param prioroperations: operations preceding this one in the design
        :param sketch: existing placement sketch, or None to start fresh
        :param subdesign: previously selected subdesign when editing
        :param subopid: id of the previously selected subdesign operation
        :param transformtype: one of PlaceOperation4.transformtypes
        :param shift: previously chosen layer shift
        :param flip: previously chosen layer flip flag
        """
        super(Dialog, self).__init__()

        if sketch is None:
            self.sketch = Sketch()
        else:
            self.sketch = sketch
        # Default transform type is straight placement.
        if transformtype is None:
            self.transformtype = PlaceOperation4.transformtypes.place
        else:
            self.transformtype = transformtype

        self.combobox = qg.QComboBox()
        self.subdesign = subdesign
        self.subopid = subopid
        self.prioroperations = prioroperations
        self.design = design

        # Read-only display of the selected subdesign filename.
        self.lineedit = qg.QLineEdit()
        self.lineedit.setReadOnly(True)

        button3 = qg.QPushButton('...')
        button3.clicked.connect(self.getfile)
        button_sketch = qg.QPushButton('Edit Sketch')
        button_sketch.clicked.connect(self.opensketch)

        self.radiobox_place = qg.QRadioButton('Place')
        self.radiobox_stretch = qg.QRadioButton('Stretch')
        self.radiobox_scale = qg.QRadioButton('Scale')

        layout_stretch_scale = qg.QHBoxLayout()
        layout_stretch_scale.addWidget(self.radiobox_place)
        layout_stretch_scale.addWidget(self.radiobox_stretch)
        layout_stretch_scale.addWidget(self.radiobox_scale)

        layout5 = qg.QHBoxLayout()
        layout5.addWidget(qg.QLabel('Flip Layers'))
        self.flip = qg.QCheckBox()
        self.flip.setChecked(flip)
        layout5.addWidget(self.flip)

        layout4 = qg.QHBoxLayout()
        layout4.addWidget(qg.QLabel('Shift Layers'))
        self.sb = qg.QSpinBox()
        self.sb.setRange(-100, 100)
        self.sb.setSingleStep(1)
        self.sb.setValue(shift)
        layout4.addWidget(self.sb)

        layout3 = qg.QHBoxLayout()
        layout3.addWidget(self.lineedit)
        layout3.addWidget(button3)

        button1 = qg.QPushButton('Ok')
        button1.clicked.connect(self.accept)
        button2 = qg.QPushButton('Cancel')
        button2.clicked.connect(self.reject)

        layout2 = qg.QHBoxLayout()
        layout2.addWidget(button1)
        layout2.addWidget(button2)

        layout = qg.QVBoxLayout()
        layout.addLayout(layout3)
        layout.addWidget(self.combobox)
        layout.addWidget(button_sketch)
        layout.addLayout(layout_stretch_scale)
        layout.addLayout(layout5)
        layout.addLayout(layout4)
        layout.addLayout(layout2)
        self.setLayout(layout)

        # Reflect the current transform type in the radio buttons.
        self.radiobox_place.setChecked(False)
        self.radiobox_scale.setChecked(False)
        self.radiobox_stretch.setChecked(False)
        if self.transformtype == PlaceOperation4.transformtypes.place:
            self.radiobox_place.setChecked(True)
        elif self.transformtype == PlaceOperation4.transformtypes.stretch:
            self.radiobox_stretch.setChecked(True)
        elif self.transformtype == PlaceOperation4.transformtypes.scale:
            self.radiobox_scale.setChecked(True)

        if self.subdesign is not None:
            self.validatename()

    def opensketch(self):
        """Open the sketcher GUI to create or edit the placement sketch."""
        from popupcad.guis.sketcher import Sketcher
        # NOTE: removed an unused local ("seededrefop") that the original
        # computed from prioroperations but never passed anywhere.
        self.sketcherdialog = Sketcher(
            self,
            self.sketch,
            self.design,
            accept_method=self.addsketchop,
            selectops=True)
        self.sketcherdialog.show()
        self.sketcherdialog.activateWindow()
        self.sketcherdialog.raise_()

    def addsketchop(self, sketch):
        """Sketcher accept callback: store the edited sketch."""
        self.sketch = sketch

    def validatename(self):
        """Populate the operation combobox from the selected subdesign."""
        self.combobox.clear()
        self.combobox.addItems([str(op) for op in self.subdesign.operations])
        try:
            ii = self.subdesign.operation_index(self.subopid)
        except NoOperation:
            # Stored op id no longer exists; fall back to the last design op.
            self.subopid = self.subdesign.findlastdesignop().id
            ii = self.subdesign.operation_index(self.subopid)
        self.combobox.setCurrentIndex(ii)
        self.lineedit.setText(self.subdesign.get_basename())

    def getfile(self):
        """Prompt for a design file and load it as the subdesign."""
        design = Design.open(self)
        if design is not None:
            self.subdesign = design
            self.validatename()
        else:
            self.subdesign = None

    def accept(self):
        """Validate selections, populate acceptdata, and close the dialog."""
        if self.subdesign is not None:
            if self.radiobox_scale.isChecked():
                transformtype = PlaceOperation4.transformtypes.scale
            elif self.radiobox_stretch.isChecked():
                transformtype = PlaceOperation4.transformtypes.stretch
            elif self.radiobox_place.isChecked():
                transformtype = PlaceOperation4.transformtypes.place
            ii = self.combobox.currentIndex()
            self.subopid = self.subdesign.operations[ii].id
            # Register the sketch and subdesign with the parent design so
            # the new operation can reference them by id.
            self.design.sketches[self.sketch.id] = self.sketch
            self.design.subdesigns[self.subdesign.id] = self.subdesign
            self.acceptdata = (self.sketch.id, self.subdesign.id,
                               self.subopid, transformtype,
                               self.sb.value(), self.flip.isChecked())
            super(Dialog, self).accept()
        else:
            # BUG FIX: the original did qg.QMessageBox('Please Select a
            # Design') and discarded the result — QMessageBox's first
            # positional argument is a parent widget, not message text, so
            # no warning was ever shown. Use the static helper instead.
            qg.QMessageBox.warning(self, 'Placement', 'Please Select a Design')
class PlaceOperation4(Operation):
    """Operation that places geometry from a subdesign into this design.

    Geometry from a chosen operation of a subdesign is affine-transformed
    onto each placement line of a sketch, with optional layer shift and
    layer-order flip.
    """
    name = 'PlacementOp'
    operationtypes = ['placement']
    transformtypes = enum(place='place', stretch='stretch', scale='scale')

    def __init__(self, *args):
        """Store editable data; args match editdata()'s parameters."""
        super(PlaceOperation4, self).__init__()
        self.editdata(*args)
        self.id = id(self)

    def copy(self, identical=True):
        """Return a copy; when identical, the copy keeps the same id."""
        new = PlaceOperation4(
            self.sketchid,
            self.subdesignid,
            self.subopid,
            self.transformtype,
            self.shift,
            self.flip)
        new.customname = self.customname
        if identical:
            new.id = self.id
        return new

    def editdata(
            self,
            sketchid,
            subdesignid,
            subopid,
            transformtype,
            shift,
            flip):
        """Replace this operation's configuration in one call."""
        super(PlaceOperation4, self).editdata()
        self.sketchid = sketchid
        self.subdesignid = subdesignid
        self.subopid = subopid
        self.transformtype = transformtype
        self.shift = shift
        self.flip = flip

    def operate(self, design):
        """Compute and return the placed Laminate for this operation.

        :param design: parent design providing sketches and subdesigns
        :return: Laminate holding the transformed subdesign geometry
        """
        import shapely.affinity as aff
        subdesign = design.subdesigns[self.subdesignid]
        locateline = subdesign.findlocateline()

        # The subdesign's outputs may not have been generated yet; rebuild
        # them once on demand and retry.
        try:
            designgeometry = subdesign.operations[
                subdesign.operation_index(
                    self.subopid)].output[
                self.getoutputref()].csg
        except AttributeError:
            subdesign.reprocessoperations()
            designgeometry = subdesign.operations[
                subdesign.operation_index(
                    self.subopid)].output[
                self.getoutputref()].csg

        sketch = design.sketches[self.sketchid]

        # A scale of None lets calctransformfrom2lines derive the scaling
        # from the line pair; 1. pins that axis to unit scale.
        # (Changed the final `if` to `elif` for consistency — behavior is
        # identical since the three transform types are exclusive.)
        if self.transformtype == self.transformtypes.place:
            scale_x = 1.
            scale_y = 1.
        elif self.transformtype == self.transformtypes.stretch:
            scale_x = None
            scale_y = 1.
        elif self.transformtype == self.transformtypes.scale:
            scale_x = None
            scale_y = None

        lsout = Laminate(design.return_layer_definition())

        # Flip reverses the source layer order; shift offsets destination
        # (positive) or source (negative) layers.
        step = -1 if self.flip else 1
        if self.shift > 0:
            outshift = self.shift
            inshift = 0
        elif self.shift < 0:
            outshift = 0
            inshift = -self.shift
        else:
            outshift = 0
            inshift = 0

        layers_out = design.return_layer_definition().layers[outshift:]
        layers_in = subdesign.return_layer_definition().layers[::step][inshift:]
        for layerout, layerin in zip(layers_out, layers_in):
            newgeoms = []
            # Place a copy of the source layer geometry on every sketch line.
            for geom in sketch.operationgeometry:
                for designgeom in designgeometry.layer_sequence[layerin].geoms:
                    newgeoms.append(
                        aff.affine_transform(
                            designgeom,
                            calctransformfrom2lines(
                                locateline.exteriorpoints(),
                                geom.exteriorpoints(),
                                scale_x=scale_x,
                                scale_y=scale_y)))
            newgeoms = popupcad.algorithms.csg_shapely.unary_union_safe(newgeoms)
            newgeoms = popupcad.algorithms.csg_shapely.condition_shapely_entities(newgeoms)
            lsout.replacelayergeoms(layerout, newgeoms)
        return lsout

    def parentrefs(self):
        """This operation depends on no parent operations."""
        return []

    def subdesignrefs(self):
        """Return ids of subdesigns this operation references."""
        return [self.subdesignid]

    def sketchrefs(self):
        """Return ids of sketches this operation references."""
        return [self.sketchid]

    def fromQTransform(self, tin):
        """Convert a QTransform into a transposed 3x3 numpy array."""
        tout = numpy.array([[tin.m11(), tin.m12(), tin.m13()], [
            tin.m21(), tin.m22(), tin.m23()], [tin.m31(), tin.m32(), tin.m33()]]).T
        return tout

    def toQTransform(self, tin):
        """Convert a 3x3 array back into a QTransform.

        BUG FIX: the original indexed tin[1][1]..tin[3][3]; numpy arrays
        are 0-indexed, so tin[3] raised IndexError on any 3x3 input and
        the remaining indices read the wrong elements. Elements are now
        read row-major from tin[0][0]..tin[2][2].
        NOTE(review): fromQTransform transposes its result; confirm
        whether this inverse conversion should transpose back as well.
        """
        tout = qg.QTransform(
            tin[0][0], tin[0][1], tin[0][2],
            tin[1][0], tin[1][1], tin[1][2],
            tin[2][0], tin[2][1], tin[2][2])
        return tout

    @classmethod
    def new(cls, parent, design, currentop, newsignal):
        """Show the configuration dialog and emit a new operation."""
        dialog = Dialog(design, design.operations)
        if dialog.exec_() == dialog.Accepted:
            operation = cls(*dialog.acceptdata)
            newsignal.emit(operation)

    def edit(self, parent, design, editedsignal):
        """Show the dialog pre-filled with this operation's data."""
        sketch = design.sketches[self.sketchid]
        subdesign = design.subdesigns[self.subdesignid]
        dialog = Dialog(
            design,
            design.prioroperations(self),
            sketch=sketch,
            subdesign=subdesign,
            subopid=self.subopid,
            transformtype=self.transformtype,
            shift=self.shift,
            flip=self.flip)
        if dialog.exec_() == dialog.Accepted:
            self.editdata(*dialog.acceptdata)
            editedsignal.emit(self)

    def upgrade(self, *args, **kwargs):
        """Convert to PlaceOperation5, which adds unit x/y scale values."""
        from popupcad_deprecated.placeop5 import PlaceOperation5
        new = PlaceOperation5(
            self.sketchid,
            self.subdesignid,
            self.subopid,
            self.transformtype,
            self.shift,
            self.flip,
            1.,
            1.)
        new.customname = self.customname
        new.id = self.id
        return new
if __name__ == "__main__":
    # Standalone entry point: start a Qt application event loop so the
    # dialog/operation classes above can be exercised manually.
    app = qg.QApplication(sys.argv)
    sys.exit(app.exec_())
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For miscellaneous util methods used with compute."""
import string
from nova.compute import instance_types
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.network import api as network_api
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova import test
from nova.tests import fake_network
import nova.tests.image.fake
from nova import utils
# Module logger for these tests.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Register options declared in other modules so the tests can read
# CONF.compute_manager and CONF.compute_driver.
CONF.import_opt('compute_manager', 'nova.config')
CONF.import_opt('compute_driver', 'nova.virt.driver')
class ComputeValidateDeviceTestCase(test.TestCase):
    """Tests for compute_utils.get_device_name_for_instance.

    Tests whose name contains "xen" run against the xenapi driver and use
    /dev/xvd* device names; all others use the default /dev/vd* names.
    """

    def setUp(self):
        super(ComputeValidateDeviceTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        # check if test name includes "xen"
        if 'xen' in self.id():
            self.flags(compute_driver='xenapi.XenAPIDriver')
            self.instance = {
                'uuid': 'fake',
                'root_device_name': None,
                'instance_type_id': 'fake',
            }
        else:
            self.instance = {
                'uuid': 'fake',
                'root_device_name': '/dev/vda',
                'default_ephemeral_device': '/dev/vdb',
                'instance_type_id': 'fake',
            }
        # Block device mappings returned by the stubbed DB call below;
        # individual tests populate this list before validating.
        self.data = []
        # NOTE: removed an unused local fake_get() the original defined
        # here but never registered; tests needing an instance-type stub
        # install their own lambda via self.stubs.
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: self.data)

    def _validate_device(self, device=None):
        """Run the function under test against self.instance/self.data."""
        return compute_utils.get_device_name_for_instance(self.context,
                                                          self.instance,
                                                          device)

    @staticmethod
    def _fake_bdm(device):
        """Return a minimal block-device-mapping dict for `device`."""
        return {
            'device_name': device,
            'no_device': None,
            'volume_id': 'fake',
            'snapshot_id': None
        }

    def test_wrap(self):
        """After vdc..vdz are taken, the next name wraps to vdaa."""
        self.data = []
        for letter in string.ascii_lowercase[2:]:
            self.data.append(self._fake_bdm('/dev/vd' + letter))
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdaa')

    def test_wrap_plus_one(self):
        """With vdaa also taken, the next name is vdab."""
        self.data = []
        for letter in string.ascii_lowercase[2:]:
            self.data.append(self._fake_bdm('/dev/vd' + letter))
        self.data.append(self._fake_bdm('/dev/vdaa'))
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdab')

    def test_later(self):
        """The next free name after the highest used one is chosen."""
        self.data = [
            self._fake_bdm('/dev/vdc'),
            self._fake_bdm('/dev/vdd'),
            self._fake_bdm('/dev/vde'),
        ]
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdf')

    def test_gap(self):
        """A gap in the used names is filled before extending the range."""
        self.data = [
            self._fake_bdm('/dev/vdc'),
            self._fake_bdm('/dev/vde'),
        ]
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdd')

    def test_no_bdms(self):
        """With no mappings, vdc follows root (vda) and ephemeral (vdb)."""
        self.data = []
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdc')

    def test_lxc_names_work(self):
        """Single-letter LXC-style device names are supported."""
        self.instance['root_device_name'] = '/dev/a'
        self.instance['ephemeral_device_name'] = '/dev/b'
        self.data = []
        device = self._validate_device()
        self.assertEqual(device, '/dev/c')

    def test_name_conversion(self):
        """Requested sd*/xvd*/bare names are normalized to vd*."""
        self.data = []
        device = self._validate_device('/dev/c')
        self.assertEqual(device, '/dev/vdc')
        device = self._validate_device('/dev/sdc')
        self.assertEqual(device, '/dev/vdc')
        device = self._validate_device('/dev/xvdc')
        self.assertEqual(device, '/dev/vdc')

    def test_invalid_bdms(self):
        """A malformed root device name raises InvalidDevicePath."""
        self.instance['root_device_name'] = "baddata"
        self.assertRaises(exception.InvalidDevicePath,
                          self._validate_device)

    def test_invalid_device_prefix(self):
        """A requested device outside /dev raises InvalidDevicePath."""
        self.assertRaises(exception.InvalidDevicePath,
                          self._validate_device, '/baddata/vdc')

    def test_device_in_use(self):
        """Requesting the ephemeral device's name raises DevicePathInUse."""
        self.assertRaises(exception.DevicePathInUse,
                          self._validate_device, '/dev/vdb')

    def test_swap(self):
        """The default swap device name is skipped over."""
        self.instance['default_swap_device'] = "/dev/vdc"
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdd')

    def test_swap_no_ephemeral(self):
        """Swap occupying vdb still leaves vdc as the next free name."""
        del self.instance['default_ephemeral_device']
        self.instance['default_swap_device'] = "/dev/vdb"
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdc')

    def test_ephemeral_xenapi(self):
        """xen: ephemeral disk reserves xvdb, so xvdc is next."""
        self.instance_type = {
            'ephemeral_gb': 10,
            'swap': 0,
        }
        self.stubs.Set(instance_types, 'get_instance_type',
                       lambda instance_type_id, ctxt=None: self.instance_type)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdc')

    def test_swap_xenapi(self):
        """xen: no ephemeral and swap present leaves xvdb free."""
        self.instance_type = {
            'ephemeral_gb': 0,
            'swap': 10,
        }
        self.stubs.Set(instance_types, 'get_instance_type',
                       lambda instance_type_id, ctxt=None: self.instance_type)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdb')

    def test_swap_and_ephemeral_xenapi(self):
        """xen: ephemeral and swap reserve xvdb/xvdc, so xvdd is next."""
        self.instance_type = {
            'ephemeral_gb': 10,
            'swap': 10,
        }
        self.stubs.Set(instance_types, 'get_instance_type',
                       lambda instance_type_id, ctxt=None: self.instance_type)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdd')

    def test_swap_and_one_attachment_xenapi(self):
        """xen: attaching xvdb leaves xvdd free (xvdc reserved for swap)."""
        self.instance_type = {
            'ephemeral_gb': 0,
            'swap': 10,
        }
        self.stubs.Set(instance_types, 'get_instance_type',
                       lambda instance_type_id, ctxt=None: self.instance_type)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdb')
        self.data.append(self._fake_bdm(device))
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdd')
class UsageInfoTestCase(test.TestCase):
    def setUp(self):
        """Wire stubs, a fake compute driver and a clean notifier."""
        # Network-info stub; also asserts the lookup is made with an
        # admin-elevated context.
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
                                                          spectacular=True)
        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)
        # Start each test with a clean notifier driver list and restore it.
        notifier_api._reset_drivers()
        self.addCleanup(notifier_api._reset_drivers)
        self.flags(use_local=True, group='conductor')
        # Route notifications into test_notifier.NOTIFICATIONS for asserts.
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   notification_driver=[test_notifier.__name__],
                   network_manager='nova.network.manager.FlatManager')
        self.compute = importutils.import_object(CONF.compute_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []
        # Minimal image-service stub for the image lookups made by the
        # usage-notification code.
        def fake_show(meh, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
        self.stubs.Set(nova.tests.image.fake._FakeImageService,
                       'show', fake_show)
        fake_network.set_stub_network_methods(self.stubs)
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst.update(params)
return db.instance_create(self.context, inst)['id']
    def test_notify_usage_exists(self):
        """Ensure 'exists' notification generates appropriate usage data."""
        instance_id = self._create_instance()
        instance = db.instance_get(self.context, instance_id)
        # Set some system metadata
        sys_metadata = {'image_md_key1': 'val1',
                        'image_md_key2': 'val2',
                        'other_data': 'meow'}
        db.instance_system_metadata_update(self.context, instance['uuid'],
                                           sys_metadata, False)
        # Re-read so the instance dict reflects the metadata just stored.
        instance = db.instance_get(self.context, instance_id)
        compute_utils.notify_usage_exists(self.context, instance)
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], 'INFO')
        self.assertEquals(msg['event_type'], 'compute.instance.exists')
        payload = msg['payload']
        self.assertEquals(payload['tenant_id'], self.project_id)
        self.assertEquals(payload['user_id'], self.user_id)
        self.assertEquals(payload['instance_id'], instance['uuid'])
        self.assertEquals(payload['instance_type'], 'm1.tiny')
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        self.assertEquals(str(payload['instance_type_id']), str(type_id))
        # All audit fields must be present even where values vary per run.
        for attr in ('display_name', 'created_at', 'launched_at',
                     'state', 'state_description',
                     'bandwidth', 'audit_period_beginning',
                     'audit_period_ending', 'image_meta'):
            self.assertTrue(attr in payload,
                            msg="Key %s not in payload" % attr)
        # Only the image_* metadata keys surface in image_meta, with the
        # "image_" prefix stripped; 'other_data' is excluded.
        self.assertEquals(payload['image_meta'],
                          {'md_key1': 'val1', 'md_key2': 'val2'})
        image_ref_url = "%s/images/1" % utils.generate_glance_url()
        self.assertEquals(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance)
    def test_notify_usage_exists_deleted_instance(self):
        """Ensure 'exists' notification generates appropriate usage data."""
        instance_id = self._create_instance()
        instance = db.instance_get(self.context, instance_id)
        # Set some system metadata
        sys_metadata = {'image_md_key1': 'val1',
                        'image_md_key2': 'val2',
                        'other_data': 'meow'}
        db.instance_system_metadata_update(self.context, instance['uuid'],
                                           sys_metadata, False)
        # Delete the instance, then fetch it with read_deleted='yes' to
        # verify the exists notification still works for deleted rows.
        self.compute.terminate_instance(self.context, instance)
        instance = db.instance_get(self.context.elevated(read_deleted='yes'),
                                   instance_id)
        compute_utils.notify_usage_exists(self.context, instance)
        msg = test_notifier.NOTIFICATIONS[-1]
        self.assertEquals(msg['priority'], 'INFO')
        self.assertEquals(msg['event_type'], 'compute.instance.exists')
        payload = msg['payload']
        self.assertEquals(payload['tenant_id'], self.project_id)
        self.assertEquals(payload['user_id'], self.user_id)
        self.assertEquals(payload['instance_id'], instance['uuid'])
        self.assertEquals(payload['instance_type'], 'm1.tiny')
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        self.assertEquals(str(payload['instance_type_id']), str(type_id))
        # All audit fields must be present even where values vary per run.
        for attr in ('display_name', 'created_at', 'launched_at',
                     'state', 'state_description',
                     'bandwidth', 'audit_period_beginning',
                     'audit_period_ending', 'image_meta'):
            self.assertTrue(attr in payload,
                            msg="Key %s not in payload" % attr)
        # Metadata stored before deletion still surfaces in image_meta.
        self.assertEquals(payload['image_meta'],
                          {'md_key1': 'val1', 'md_key2': 'val2'})
        image_ref_url = "%s/images/1" % utils.generate_glance_url()
        self.assertEquals(payload['image_ref_url'], image_ref_url)
    def test_notify_usage_exists_instance_not_found(self):
        """Ensure 'exists' notification generates appropriate usage data."""
        instance_id = self._create_instance()
        instance = db.instance_get(self.context, instance_id)
        self.compute.terminate_instance(self.context, instance)
        # No system metadata was set and the instance has been terminated;
        # the notification must still be emitted, with empty image_meta.
        compute_utils.notify_usage_exists(self.context, instance)
        msg = test_notifier.NOTIFICATIONS[-1]
        self.assertEquals(msg['priority'], 'INFO')
        self.assertEquals(msg['event_type'], 'compute.instance.exists')
        payload = msg['payload']
        self.assertEquals(payload['tenant_id'], self.project_id)
        self.assertEquals(payload['user_id'], self.user_id)
        self.assertEquals(payload['instance_id'], instance['uuid'])
        self.assertEquals(payload['instance_type'], 'm1.tiny')
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        self.assertEquals(str(payload['instance_type_id']), str(type_id))
        for attr in ('display_name', 'created_at', 'launched_at',
                     'state', 'state_description',
                     'bandwidth', 'audit_period_beginning',
                     'audit_period_ending', 'image_meta'):
            self.assertTrue(attr in payload,
                            msg="Key %s not in payload" % attr)
        # Without any 'image_' system metadata, image_meta is empty.
        self.assertEquals(payload['image_meta'], {})
        image_ref_url = "%s/images/1" % utils.generate_glance_url()
        self.assertEquals(payload['image_ref_url'], image_ref_url)
    def test_notify_about_instance_usage(self):
        """Ensure notify_about_instance_usage emits one notification that
        merges both system metadata and extra_usage_info into the payload."""
        instance_id = self._create_instance()
        instance = db.instance_get(self.context, instance_id)
        # Set some system metadata
        sys_metadata = {'image_md_key1': 'val1',
                        'image_md_key2': 'val2',
                        'other_data': 'meow'}
        extra_usage_info = {'image_name': 'fake_name'}
        db.instance_system_metadata_update(self.context, instance['uuid'],
                sys_metadata, False)
        compute_utils.notify_about_instance_usage(self.context, instance,
        'create.start', extra_usage_info=extra_usage_info)
        # Exactly one notification should have been emitted.
        self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
        msg = test_notifier.NOTIFICATIONS[0]
        self.assertEquals(msg['priority'], 'INFO')
        self.assertEquals(msg['event_type'], 'compute.instance.create.start')
        payload = msg['payload']
        self.assertEquals(payload['tenant_id'], self.project_id)
        self.assertEquals(payload['user_id'], self.user_id)
        self.assertEquals(payload['instance_id'], instance['uuid'])
        self.assertEquals(payload['instance_type'], 'm1.tiny')
        type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
        self.assertEquals(str(payload['instance_type_id']), str(type_id))
        for attr in ('display_name', 'created_at', 'launched_at',
                     'state', 'state_description', 'image_meta'):
            self.assertTrue(attr in payload,
                            msg="Key %s not in payload" % attr)
        self.assertEquals(payload['image_meta'],
                {'md_key1': 'val1', 'md_key2': 'val2'})
        # The extra_usage_info entry is merged into the payload top level.
        self.assertEquals(payload['image_name'], 'fake_name')
        image_ref_url = "%s/images/1" % utils.generate_glance_url()
        self.assertEquals(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance)
class MetadataToDictTestCase(test.TestCase):
    """Tests for compute_utils.metadata_to_dict()."""

    def test_metadata_to_dict(self):
        # A list of key/value records collapses into a plain dict.
        metadata = [{'key': 'foo1', 'value': 'bar'},
                    {'key': 'foo2', 'value': 'baz'}]
        expected = {'foo1': 'bar', 'foo2': 'baz'}
        self.assertEqual(compute_utils.metadata_to_dict(metadata), expected)

    def test_metadata_to_dict_empty(self):
        # An empty metadata list maps to an empty dict.
        self.assertEqual(compute_utils.metadata_to_dict([]), {})
| |
#!/usr/bin/env python
import time
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
from pygimli.solver import parseArgToArray
from pygimli.physics.gravimetry import solveGravimetry
def density(poro, densMatrix=2510, densFluid=1000, satur=1,
            mesh=None):
    r"""
    Return the bulk density per mesh cell.

    densMatrix, densFluid in kg/m^3
    """
    nCells = mesh.cellCount()
    # Broadcast every argument (scalar, per-cell array or callable) onto
    # the cells of the mesh.
    poro = parseArgToArray(poro, nCells, mesh)
    densMatrix = parseArgToArray(densMatrix, nCells, mesh)
    densFluid = parseArgToArray(densFluid, nCells, mesh)
    satur = parseArgToArray(satur, nCells, mesh)
    matrixPart = np.array(densMatrix * (1. - poro))
    fluidPart = densFluid * poro * satur
    return matrixPart + fluidPart
class Gravimetry():
    """
    General Gravimetry Method Manager

    Bundles a gravimetry forward operator (self.fop) with a pygimli
    inversion instance (self.inv) using identity transformations for
    data and model.
    """
    def __init__(self, verbose=False):
        """Default constructor."""
        self.fop = self.createFOP(verbose)
        self.tD = None
        self.tM = None
        self.inv = self.createInv(verbose)

    def createFOP(self, verbose):
        """Create and return the gravimetry forward operator."""
        return pg.physics.gravimetry.GravimetryModelling(verbose=verbose)

    def createInv(self, verbose):
        """Create the inversion instance with identity transforms."""
        self.tD = pg.trans.Trans()
        self.tM = pg.trans.Trans()
        inv = pg.Inversion(verbose=verbose, dosave=False)
        inv.setTransData(self.tD)
        inv.setTransModel(self.tM)
        return inv

    def setParaMesh(self, mesh):
        """
        Set the parameter mesh for any inversion.

        Parameters
        ----------
        mesh : pg.Mesh
            Parameter mesh; the forward mesh is derived from it without
            refinement.
        """
        self.fop.setMesh(mesh)
        self.fop.createRefinedForwardMesh(refine=False, pRefine=False)

    def invert(self, sensorPositions, gz, errAbs,
               verbose=0, **kwargs):
        """
        Invert gravity data gz measured at sensorPositions.

        Parameters
        ----------
        sensorPositions : iterable of positions
            Sensor locations for the forward operator.
        gz : iterable of float
            Measured vertical gravity component.
        errAbs : float
            Absolute data error.
        verbose : int
            Verbosity forwarded to the forward operator.
        **kwargs
            maxiter (default 10), lambd (default 10) and the required
            parameter mesh 'mesh'.

        Returns
        -------
        model
            The inverted model vector.
        """
        self.fop.setVerbose(verbose)
        self.inv.setMaxIter(kwargs.pop('maxiter', 10))
        self.inv.setLambda(kwargs.pop('lambd', 10))
        self.fop.setSensorPositions(sensorPositions)
        mesh = kwargs.pop('mesh', None)
        if mesh is None:
            # BUG FIX: the original did ``raise('implement me')`` --
            # raising a plain string is itself a TypeError; raise a
            # proper exception instead.
            raise ValueError("invert() requires a 'mesh' keyword argument")
        self.setParaMesh(mesh)
        startModel = pg.Vector(self.fop.regionManager().parameterCount(), 0.0)
        self.inv.setForwardOperator(self.fop)
        self.fop.regionManager().setConstraintType(10)
        # TODO: check err here
        self.inv.setData(gz)
        self.inv.setAbsoluteError(errAbs)
        self.inv.setModel(startModel)
        model = self.inv.run()
        # NOTE: an unreachable time-lapse prototype that followed this
        # return (and referenced an undefined name 'data') was removed.
        return model

    def simulate(self, mesh, dDensity):
        self.fop.setMesh(mesh)
        # TODO!
def calcInvBlock(mesh, dens, out='gravInv'):
    """
    Invert the gravity response of the density anomaly (cell marker 2).

    Parameters
    ----------
    mesh : pg.Mesh
        Mesh with cell markers; marker 2 denotes the anomalous block,
        markers 1 and 3 the background.
    dens : array-like
        Cell density values.
    out : str
        Basename for the saved response figure.

    Returns
    -------
    (Gravimetry, pg.Vector)
        The manager holding the inversion state and the delta-density
        vector used to create the synthetic data.
    """
    # extract block delta density: background cells (markers 1 and 3) get
    # the block's value, then that value is subtracted everywhere so only
    # the anomaly remains
    densBlock = pg.Vector(dens)
    densMarker2 = dens[pg.find(mesh.cellMarker() == 2)[0]]
    densBlock[pg.find((mesh.cellMarker() == 1) | (mesh.cellMarker() == 3))] = \
        densMarker2
    densBlock -= densMarker2
    # define measurement positions along the surface
    gravPointsX = np.linspace(-20, 20, 41)
    sensorPositions = np.vstack((gravPointsX, np.zeros(len(gravPointsX)))).T
    # solve analytically
    gz = solveGravimetry(mesh, densBlock, pnts=sensorPositions, complete=False)
    # add absolute gaussian noise
    errAbs = 0.00001
    dzerr = np.random.randn(len(sensorPositions)) * errAbs
    gz = gz + dzerr
    # create parameter mesh
    paraMesh = pg.createGrid(x=np.linspace(-20, 20, 41),
                             y=np.linspace(-20, 0, 21))
    # init Gravimetry manager (should do meshing, simulation and noisying)
    Grav = Gravimetry(verbose=True)
    model = Grav.invert(sensorPositions, gz, errAbs, verbose=1, mesh=paraMesh)
    fig, ax = plt.subplots()
    ax.plot(pg.x(sensorPositions), gz, label='gz')
    ax.plot(pg.x(sensorPositions), Grav.inv.response(), label='response')
    ax.legend()
    ax.grid()
    ax.set_xlabel('$x$ [m]')
    # BUG FIX: use a raw string so '\p' is not interpreted as a
    # (deprecated) string escape sequence.
    ax.set_ylabel(r'$\partial u / \partial z$ [mGal]')
    plt.show(block=False)
    ax.figure.savefig(out, bbox_inches='tight')
    return Grav, densBlock
#savefig(mesh, plc, densBlock, '$\Delta$ Density [kg$/$m$^3$]')
def simulateGravimetry(mesh, dDens):
    """
    Compute (or load a cached) gravity kernel and the noisy responses
    for each row of dDens.

    NOTE(review): ``createCacheName`` and ``times`` are not defined in
    this module -- presumably provided elsewhere via star-import; confirm
    before running, otherwise this raises NameError.
    """
    gravPointsX = np.arange(-19, 19.1, 1)
    gravPoints = np.vstack((gravPointsX, np.zeros(len(gravPointsX)))).T
    solutionName = createCacheName('grav', mesh, times)
    try:
        #vel = pg.load(solutionName + '.bmat')
        Gdg = np.load(solutionName + '.bmat.npy')
    except Exception as e:
        # Cache miss (or unreadable cache): rebuild the kernel and save it.
        print(e)
        print("Building .... ")
        #Gdg, Gdgz = solveGravimetry(mesh, None, pnts=gravPoints, complete=True)
        Gdg = solveGravimetry(mesh, None, pnts=gravPoints)
        np.save(solutionName + '.bmat', Gdg)
    #dz = Gdg.dot(dDens.transpose([1,0])).T
    # Apply the kernel row by row, adding fresh gaussian noise per step.
    dz = np.zeros((len(dDens), len(gravPoints)))
    for i in range(len(dDens)):
        dzerr = np.random.randn(len(gravPoints)) * 0.01
        dz[i] = Gdg.dot(dDens[i]) + dzerr
    print(Gdg.shape, dDens.shape, dz.shape)
    return gravPoints, dz
def invertGravimetry(gravPoints, dz):
    """
    Invert (noisified) gravity data dz measured at gravPoints and show
    the resulting model and response.

    Parameters
    ----------
    gravPoints : array-like of positions
        Sensor positions.
    dz : array-like
        Gravity data; additional gaussian noise is added here.
    """
    errAbs = 0.0001
    dzerr = np.random.randn(len(gravPoints)) * errAbs
    dz = dz + dzerr
    mesh = pg.createGrid(x=np.linspace(-20, 20, 41),
                         y=np.linspace(-20, 0, 21))
    grav = Gravimetry(verbose=True)
    # BUG FIX: Gravimetry.invert() takes errAbs as its third positional
    # argument; the original call omitted it and raised a TypeError.
    model = grav.invert(gravPoints, dz, errAbs, verbose=1, mesh=mesh)
    plt.plot(pg.x(gravPoints), dz)
    plt.plot(pg.x(gravPoints), grav.inv.response())
    paraDomain = grav.fop.regionManager().paraDomain()
    pg.show(paraDomain, model, colorBar=1, hold=1)
    pg.showNow()
    plt.show()
def animateGravimetry(mesh, dDens, gravPoints, dz):
    """
    Animate the delta-density model alongside its gravity response,
    one frame per row of dz.

    NOTE(review): ``createColorbar`` is not defined in this module --
    presumably imported elsewhere via star-import; confirm before use.
    """
    dpi=92
    orientation = 'horizontal'
    fig = plt.figure(facecolor='white', figsize=(2*800/dpi, 2*490/dpi), dpi=dpi)
    axDDe = fig.add_subplot(3,1,1)
    axGra = fig.add_subplot(3,1,2)
    #axGra = fig.fig.add_subplot(1,3,2)
    # ** Density **
    gciDDe = pg.viewer.mpl.drawModel(axDDe, mesh, data=dDens[1],
                                     cMin=0, cMax=20,
                                     )
    cbar = createColorbar(gciDDe, orientation=orientation,
                          label='Delta density in kg/m$^3$')
    def ani(i):
        # Redraw the gravity curve and update the density image for frame i.
        axGra.clear()
        axGra.plot(pg.x(gravPoints), dz[i])
        axGra.plot(pg.x(gravPoints), pg.y(gravPoints), 'v', color='black')
        axGra.set_ylabel('Grav in mGal')
        axGra.set_xlim((-20, 20))
        axGra.set_ylim((0, 0.001))
        axGra.grid()
        pg.viewer.mpl.setMappableData(gciDDe, abs(dDens[i]),
                                      cMin=0, cMax=20,
                                      logScale=False)
    for i in range(len(dz)):
        ani(i)
        plt.pause(0.001)
if __name__ == "__main__":
    # Intentionally a no-op; the helpers above are meant to be driven
    # from other scripts or an interactive session.
    pass
| |
import copy
from mock import Mock
import pytest
from sigopt.xgboost.constants import (
DEFAULT_CLASSIFICATION_METRIC,
DEFAULT_EVALS_NAME,
DEFAULT_REGRESSION_METRIC,
PARAMETER_INFORMATION,
SUPPORTED_AUTOBOUND_PARAMS,
)
from sigopt.xgboost.experiment import XGBExperiment
# Baseline single-metric experiment configuration; tests deep-copy and
# mutate this template.
EXPERIMENT_CONFIG_BASE = {
    'name': 'Single metric optimization',
    'type': 'offline',
    'parameters': [
        {
            'name': 'eta',
            'type': 'double',
            'bounds': {'min': 0.1, 'max': 0.5},
        },
        {
            'name': 'max_depth',
            'type': 'int',
            'bounds': {'min': 2, 'max': 6},
        },
        {
            'name': 'num_boost_round',
            'type': 'int',
            'bounds': {'min': 2, 'max': 6},
        },
    ],
    'metrics': [
        {
            'name': 'accuracy',
            'strategy': 'optimize',
            'objective': 'maximize',
        },
    ],
    'parallel_bandwidth': 1,
    'budget': 2,
}
# Baseline XGBoost training params; 'lambda' is XGBoost's L2 key (a
# Python keyword, hence the literal-dict form).
PARAMS_BASE = {'num_class': 3, 'lambda': 1}
def verify_experiment_config_integrity(experiment_config):
  """Assert that a parsed experiment config has a well-formed structure.

  Checks the required top-level keys, that every parameter carries a
  name/type plus bounds (numeric) or categorical_values (categorical),
  and that every metric has name, strategy and objective.
  """
  assert isinstance(experiment_config, dict)
  for required_key in ('type', 'parameters', 'metrics', 'budget'):
    assert required_key in experiment_config
  for parameter in experiment_config['parameters']:
    assert 'name' in parameter
    assert 'type' in parameter
    if parameter['type'] in ('int', 'double'):
      domain_key = 'bounds'
    else:
      domain_key = 'categorical_values'
    assert domain_key in parameter
  for metric in experiment_config['metrics']:
    for field in ('name', 'strategy', 'objective'):
      assert field in metric
def parse_and_create_experiment_config(experiment_config, params):
  """Build an XGBExperiment from a raw config and run its parsing steps.

  d_train and evals are replaced by Mock placeholders because only the
  config/params parsing is exercised here; num_boost_round and
  run_options are deliberately left unset.
  """
  xgb_experiment = XGBExperiment(
    experiment_config,
    Mock(),  # d_train placeholder
    Mock(),  # evals placeholder
    params,
    None,    # num_boost_round
    None,    # run_options
  )
  xgb_experiment.parse_and_create_metrics()
  xgb_experiment.parse_and_create_parameters()
  return xgb_experiment
class TestExperimentConfig:
  """Tests for XGBExperiment's parsing of user experiment configs.

  Each test deep-copies EXPERIMENT_CONFIG_BASE / PARAMS_BASE, mutates the
  copy, and verifies either that the parsed config is structurally sound
  or that parsing raises ValueError.
  """
  def verify_integrity(self, experiment_config, params):
    # Parse the config and validate the resulting parsed structure.
    xgb_experiment = parse_and_create_experiment_config(experiment_config, params)
    verify_experiment_config_integrity(xgb_experiment.experiment_config_parsed)
  def test_base(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    self.verify_integrity(experiment_config, params)
  def test_config_no_search_space(self):
    # Without 'parameters', a search space must be auto-generated.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    del experiment_config['parameters']
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_name_only(self):
    # Parameters given by name only: type/bounds must be auto-filled.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    for parameter in experiment_config['parameters']:
      del parameter['type']
      del parameter['bounds']
    self.verify_integrity(experiment_config, params)
  def test_config_detect_log_transformation(self):
    # Autobounded 'eta' should get a log transformation.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'] = [dict(name='eta')]
    xgb_experiment = parse_and_create_experiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['parameters'][0]['transformation'] == 'log'
  def test_config_search_space_mixed(self):
    # Mix fully-specified parameters with a name-only one.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    del experiment_config['parameters'][2]['type']
    del experiment_config['parameters'][2]['bounds']
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_wrong_type(self):
    # 'eta' declared int (without bounds) conflicts with its known type.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'][0]['type'] = 'int'
    del experiment_config['parameters'][0]['bounds']
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_search_space_no_type(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    del experiment_config['parameters'][0]['type']
    del experiment_config['parameters'][1]['type']
    del experiment_config['parameters'][2]['type']
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_categories_no_type(self):
    # categorical_values alone should imply the categorical type.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='tree_method',
        categorical_values=['auto', 'exact', 'hist', 'gpu_hist'],
      )
    )
    self.verify_integrity(experiment_config, params)
  def test_config_search_space_no_categorical_values(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='tree_method',
        type='categorical',
      )
    )
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_search_space_wrong_categories(self):
    # Known categorical parameter with an unknown category value.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='tree_method',
        type='categorical',
        categorical_values=['auto', 'exact', 'hist', 'gpu_hist', 'WrongCategory'],
      )
    )
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_search_space_fake_categories(self):
    # Unknown parameter name: arbitrary categories are accepted as-is.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['parameters'].append(
      dict(
        name='foo',
        type='categorical',
        categorical_values=['auto', 'exact', 'hist', 'gpu_hist', 'WrongCategory'],
      )
    )
    self.verify_integrity(experiment_config, params)
  def test_config_no_supported_bounds(self):
    # 'max_leaves' has no autobound support, so name-only fails.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['parameters'].append(dict(name='max_leaves'))
    params = copy.deepcopy(PARAMS_BASE)
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_autodetect_metric_from_objective(self):
    # Without explicit metrics, the default metric follows the objective
    # family (classification vs. regression).
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    del experiment_config['metrics']
    params = copy.deepcopy(PARAMS_BASE)
    params['objective'] = 'binary:logistic'
    xgb_experiment = parse_and_create_experiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['metrics'][0]['name'] == '-'.join(
      (DEFAULT_EVALS_NAME, DEFAULT_CLASSIFICATION_METRIC)
    )
    params['objective'] = 'multi:softmax'
    xgb_experiment = parse_and_create_experiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['metrics'][0]['name'] == '-'.join(
      (DEFAULT_EVALS_NAME, DEFAULT_CLASSIFICATION_METRIC)
    )
    params['objective'] = 'reg:squarederror'
    xgb_experiment = parse_and_create_experiment_config(experiment_config, params)
    assert xgb_experiment.experiment_config_parsed['metrics'][0]['name'] == '-'.join(
      (DEFAULT_EVALS_NAME, DEFAULT_REGRESSION_METRIC)
    )
  def test_config_metric_string_only(self):
    # A bare metric-name string is also accepted.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    experiment_config['metrics'] = 'accuracy'
    self.verify_integrity(experiment_config, params)
  def test_config_metric_list(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['metrics'].append(dict(
      name='f1',
      strategy='store',
      objective='maximize'
    ))
    params = copy.deepcopy(PARAMS_BASE)
    self.verify_integrity(experiment_config, params)
  def test_config_param_defined_twice(self):
    # A parameter may not be both fixed in params and searched over.
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    params['eta'] = 0.1
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_num_boost_round_defined_twice(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    params = copy.deepcopy(PARAMS_BASE)
    params['num_boost_round'] = 10
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_wrong_metric_string(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['metrics'] = 'NOT_A_METRIC_SUPPORTED'
    params = copy.deepcopy(PARAMS_BASE)
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
  def test_config_wrong_metric_list(self):
    experiment_config = copy.deepcopy(EXPERIMENT_CONFIG_BASE)
    experiment_config['metrics'][0]['name'] = 'NOT_A_METRIC_SUPPORTED'
    params = copy.deepcopy(PARAMS_BASE)
    with pytest.raises(ValueError):
      self.verify_integrity(experiment_config, params)
| |
import numpy as np
class lattice():
    """Contains functions to help out calculate matrices
    in the Fermi-Hubbard model.

    NOTE: this module uses Python 2 print statements.
    """
    def __init__(self, xs,ys,zs):
        # 3D site coordinate grids plus the lattice dimensions.
        self.x, self.y, self.z = np.mgrid[ 0:xs, 0:ys, 0:zs]
        self.xs = xs
        self.ys = ys
        self.zs = zs
    def show(self,spins):
        # Print a state as a flat row of site codes.
        for i in np.ravel(spins):
            print "%d "%i,
        print
    def state(self,m):
        # Each site can have 4 possible configurations, we have
        # labeled them as follows:
        #
        # 0 = vacuum
        # 1 = spin up
        # 2 = spin down
        # 3 = doubly occupied
        #
        # Decode integer m as a base-4 number, one digit per site;
        # returns None when m needs more digits than there are sites.
        spins = np.zeros_like( self.x)
        i = 0
        end = False
        while m > 0:
            if i>=spins.size:
                end =True
                break
            spins.flat[i] = (m%4)
            m = m /4
            i = i +1
        if end:
            return None
        else:
            return spins
    def sector(self):
        # Calculates the spin sector (net spin) for the current state:
        # +1 per up site, -1 per down site.
        s = 0
        for i in self.spins.flat:
            if i == 0 : s = s+0
            elif i == 1 : s = s+1
            elif i == 2 : s = s-1
            elif i == 3 : s = s+0
        return s
    def filling(self):
        # Calculates the filling (total particle count) for the current
        # state; doubly-occupied sites count twice.
        f = 0
        for i in self.spins.flat:
            if i == 0 : f = f+0
            elif i == 1 : f = f+1
            elif i == 2 : f = f+1
            elif i == 3 : f = f+2
        return f
    def defstates(self):
        '''This function calculates the half filling states of the
        Fermi-Hubbard model in a 3D lattice.

        NOTE(review): enumeration is capped at 300 candidate integers,
        which only covers lattices of up to a few sites -- confirm before
        using on larger lattices.
        '''
        end = False
        n = 0
        self.states = {}
        while n < 300:
            self.spins = self.state(n)
            # The condition on this if specifies only HALF-FILLING states
            if self.spins is not None and self.filling() == self.spins.size:
                sec = self.sector()
                if sec in self.states.keys():
                    self.states[ sec].append(self.spins)
                else:
                    self.states[ sec]=[self.spins]
            n = n+1
        for k in self.states.keys():
            print "Sector %d, %d states:"%(k,len(self.states[k]))
            for spins in self.states[k]:
                self.show(spins)
    def nearest(self):
        '''This function makes a list of the nearest neighbor
        pairs in the lattice (pairs of flat site indices at unit
        squared distance).'''
        print "\nNearest neighbors:"
        sites = []
        for i in range(self.x.size):
            sites.append( (self.x.flat[i], self.y.flat[i], self.z.flat[i], i))
        neighbors = []
        for i,s1 in enumerate(sites):
            for j,s2 in enumerate(sites):
                if j > i:
                    d2 = (s1[0]-s2[0])**2 + (s1[1]-s2[1])**2 + (s1[2]-s2[2])**2
                    print s1,"--",s2," = ",d2
                    if d2 == 1:
                        neighbors.append( (s1[3],s2[3]))
        print "Neighbor list: "
        print neighbors
        self.neighbors = neighbors
    def kinetic0(self):
        '''This function calculates the kinetic energy matrix
        in the spin=0 sector.'''
        # Site-pair configurations connected by a single hop, with the
        # fermionic sign of each hop in tsign.
        connected = [(0,3,1,2),\
                     (0,3,2,1),\
                     (3,0,1,2),\
                     (3,0,2,1),\
                     (1,2,0,3),\
                     (1,2,3,0),\
                     (2,1,0,3),\
                     (2,1,3,0)]
        tsign = [ 1, -1, 1, -1, 1, 1, -1, -1]
        print
        msize = len(self.states[0])
        kinetic = np.zeros((msize,msize))
        for i,s1 in enumerate(self.states[0]):
            for j,s2 in enumerate(self.states[0]):
                for n in self.neighbors:
                    # Here we have two sites with two states, we will write them
                    # in a tuple as:
                    c = (s1.flat[n[0]], s1.flat[n[1]], s2.flat[n[0]], s2.flat[n[1]])
                    print "States %d,%d"%(i,j),"Neighbor pair ",n,\
                          " --> %d,%d and %d,%d"%c,
                    if c in connected:
                        kinetic[i,j] = kinetic[i,j] - tsign[ connected.index(c) ]
                        print " -t"
                    else:
                        print
        print "\nKinetic energy matrix: ",kinetic.shape
        print kinetic
        self.kinetic = kinetic
    def interaction0(self):
        '''This function calculates the interaction energy matrix
        in the spin=0 sector.'''
        print
        msize = len(self.states[0])
        inter = np.zeros((msize,msize))
        # The basis we have chosen is of number states,
        # so the interaction energy is diagonal
        for i,s1 in enumerate(self.states[0]):
            for site in s1.flat:
                if site == 3: # 3=double occupancy
                    inter[i,i] = inter[i,i] + 1
        print "\nInteraction energy matrix:i ",inter.shape
        print inter
        self.inter = inter
    def diagonal0(self):
        '''This function calculates a diagonal (identity-like) matrix
        in the spin=0 sector.'''
        print
        msize = len(self.states[0])
        diag = np.zeros((msize,msize))
        # The basis we have chosen is of number states,
        # so the interaction energy is diagonal
        for i,s1 in enumerate(self.states[0]):
            for site in s1.flat:
                diag[i,i] = 1.0
        self.diag = diag
def latex(state):
    """Render a lattice state array as a LaTeX ket string,
    e.g. $|\\!\\uparrow,\\!\\downarrow\\rangle$."""
    symbols = {0: '0',
               1: r'\!\uparrow',
               2: r'\!\downarrow',
               3: r'\!\uparrow\! \downarrow'}
    out = r"$|"
    for j, code in enumerate(np.ravel(state)):
        out += symbols.get(code, '')
        # Separate sites with commas; close the ket after the last one.
        out += ',' if j + 1 < state.size else r'\rangle'
    out += r'$'
    return out
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rc
# Use a serif font family for all matplotlib text.
rc('font',**{'family':'serif'})
if __name__=="__main__":
    # Build the two-site lattice, its spin-0 matrices, and save them.
    b = lattice(2,1,1)
    b.defstates()
    b.nearest()
    b.kinetic0()
    b.interaction0()
    b.diagonal0()
    np.savetxt('211_t.dat', b.kinetic, fmt='%01d')
    np.savetxt('211_U.dat', b.inter, fmt='%01d')
    # Sweep the interaction strength U at fixed hopping t and diagonalize
    # H = t*T + U*V for each value.
    t = 1.
    U = np.concatenate( ( np.linspace(0.1,2.,12), np.linspace(2.0,10.,8)))
    eva = []
    eve = []
    for u in U:
        H = t*b.kinetic + u*b.inter
        ##print H
        evals,evecs = np.linalg.eigh(H)
        ##print "U = ",u
        ##print evals
        ##print evecs
        # Sort the eigenvals and eigenvecs
        index = np.argsort(evals)
        eva.append(evals[index])
        # Ensure the eigenvecs have correct phase
        vecs=[]
        # NOTE(review): the loop variable i is immediately rebound inside
        # the body; this relies on index being a permutation iterated in
        # order -- confirm the double use of i is intended.
        for i in index:
            vec = evecs[:,index[i]]
            #Find first entry that is non-zero
            i = list(np.abs(vec) > 1e-5).index(True)
            vec = vec / np.sign(vec[i])
            vecs.append(vec)
        vecs = np.transpose( np.array(vecs) )
        eve.append(vecs)
        #eve.append(evecs[index])
        ##print
        ##print evals[index]
        ##print evecs[index]
        ##print "#################"
        ##print index
        ##print
        ##for i in index:
        ##    print "Eigenvalue %d  = "%i, evals[index[i]]
        ##    print "Eigenvector %d = "%i, evecs[:,index[i]]
        ##    print "H*ev %d = "%i, np.dot(H, evecs[:,index[i]])
        ##    #print np.dot(H, evecs[index[i]]) / evecs[index[i]]
        ##    print
    eva = np.array(eva)
    eve = np.array(eve)
    # Plot eigenvalues (left) and eigenvector components (right panels).
    from matplotlib import rc
    rc('font', **{'family':'serif'})
    rc('text', usetex=True)
    figure = plt.figure(figsize=(8.,4))
    gs = matplotlib.gridspec.GridSpec( 2,11)
    figure.suptitle('')
    ax = plt.subplot( gs[0:2,0:5] )
    ax0 = plt.subplot( gs[0,5:8] )
    ax1 = plt.subplot( gs[1,5:8] )
    ax2 = plt.subplot( gs[0,8:11] )
    ax3 = plt.subplot( gs[1,8:11] )
    axvs = [ax0,ax1,ax2,ax3]
    c=['blue','green','red','black']
    for col in range(eva.shape[1]):
        ax.plot( U, eva[:,col], '-', c=c[col],lw=2.,\
                 label='%d'%col)
        for i,axv in enumerate(axvs):
            axv.plot( U, eve[:,i,col],\
                      '-',c=c[col],lw=1.5,\
                      label='%d'%col)
    txts = ['a', 'b', 'c', 'd']
    for i,axv in enumerate(axvs):
        # Label each component panel with the LaTeX ket of its basis state.
        axv.set_ylabel( latex( b.states[0][i]), rotation=0 , labelpad=12)
        axv.grid()
        axv.set_ylim(-1,1.)
        axv.text( 0.05, 0.02, txts[i], ha='left', va='bottom', transform=axv.transAxes)
    statestext = [latex( b.states[0][i] )[1:-1] for i in range(4) ]
    figure.text( 0.74, 0.94, r'$\psi = a\,%s + b\,%s + c\,%s + d\,%s$'% tuple(statestext ),\
                 ha='center', fontsize=16)
    ax.grid()
    ax.set_xlabel('$U/t$', fontsize=14)
    ax.set_ylabel('$E/t$', fontsize=14)
    ax.legend(loc='best',numpoints=1,\
              prop={'size':10}, \
              handlelength=1.1,handletextpad=0.5)
    gs.tight_layout(figure, rect=[0,0.0,1.0,0.92])
    outfile = 'Ut_eigenvalues_2site.png'
    figure.savefig(outfile, dpi=250)
| |
import operator
import sys
from spec import Spec, skip, eq_, raises, assert_raises
from invoke.collection import Collection
from invoke.tasks import task, Task
from invoke.vendor import six
from invoke.vendor.six.moves import reduce
from _utils import load, support_path
@task
def _mytask():
    """Module-level task fixture reused throughout these tests."""
    six.print_("woo!")
def _func():
pass
class Collection_(Spec):
    class init:
        "__init__"
        # Tests for the Collection constructor's flexible signature:
        # tasks/collections as varargs, kwargs as names, and a leading
        # string acting as the collection name.
        def can_accept_task_varargs(self):
            "can accept tasks as *args"
            @task
            def task1():
                pass
            @task
            def task2():
                pass
            c = Collection(task1, task2)
            assert 'task1' in c
            assert 'task2' in c
        def can_accept_collections_as_varargs_too(self):
            sub = Collection('sub')
            ns = Collection(sub)
            eq_(ns.collections['sub'], sub)
        def kwargs_act_as_name_args_for_given_objects(self):
            sub = Collection()
            @task
            def task1():
                pass
            ns = Collection(loltask=task1, notsub=sub)
            eq_(ns['loltask'], task1)
            eq_(ns.collections['notsub'], sub)
        def initial_string_arg_acts_as_name(self):
            sub = Collection('sub')
            ns = Collection(sub)
            eq_(ns.collections['sub'], sub)
        def initial_string_arg_meshes_with_varargs_and_kwargs(self):
            # Collection('myname', atask, acollection, othertask=taskobj, ...)
            @task
            def task1():
                pass
            @task
            def task2():
                pass
            sub = Collection('sub')
            ns = Collection('root', task1, sub, sometask=task2)
            for x, y in (
                (ns.name, 'root'),
                (ns['task1'], task1),
                (ns.collections['sub'], sub),
                (ns['sometask'], task2),
            ):
                eq_(x, y)
    class useful_special_methods:
        # Tests for __repr__ and __eq__ on Collection.
        def _meh(self):
            # Build a fresh, identically-shaped collection each call so
            # equality can be tested across distinct instances.
            @task
            def task1():
                pass
            @task
            def task2():
                pass
            return Collection('meh', task1=task1, task2=task2)
        def setup(self):
            self.c = self._meh()
        def repr_(self):
            "__repr__"
            eq_(repr(self.c), "<Collection 'meh': task1, task2>")
        def equality_should_be_useful(self):
            eq_(self.c, self._meh())
    class from_module:
        # Tests for Collection.from_module(): building a namespace from a
        # Python module, including name/config overrides and explicit
        # root namespaces.
        def setup(self):
            self.c = Collection.from_module(load('integration'))
        class parameters:
            # The optional name/config parameters of from_module().
            def setup(self):
                self.mod = load('integration')
                self.fm = Collection.from_module
            def name_override(self):
                eq_(self.fm(self.mod).name, 'integration')
                eq_(
                    self.fm(self.mod, name='not-integration').name,
                    'not-integration'
                )
            def inline_configuration(self):
                # No configuration given, none gotten
                eq_(self.fm(self.mod).configuration(), {})
                # Config kwarg given is reflected when config obtained
                eq_(
                    self.fm(self.mod, config={'foo': 'bar'}).configuration(),
                    {'foo': 'bar'}
                )
            def name_and_config_simultaneously(self):
                # Test w/ posargs to enforce ordering, just for safety.
                c = self.fm(self.mod, 'the name', {'the': 'config'})
                eq_(c.name, 'the name')
                eq_(c.configuration(), {'the': 'config'})
        def adds_tasks(self):
            assert 'print_foo' in self.c
        def derives_collection_name_from_module_name(self):
            eq_(self.c.name, 'integration')
        def submodule_names_are_stripped_to_last_chunk(self):
            with support_path():
                from package import module
                c = Collection.from_module(module)
            eq_(module.__name__, 'package.module')
            eq_(c.name, 'module')
            assert 'mytask' in c # Sanity
        def honors_explicit_collections(self):
            coll = Collection.from_module(load('explicit_root'))
            assert 'top_level' in coll.tasks
            assert 'sub' in coll.collections
            # The real key test
            assert 'sub_task' not in coll.tasks
        def allows_tasks_with_explicit_names_to_override_bound_name(self):
            coll = Collection.from_module(load('subcollection_task_name'))
            assert 'explicit_name' in coll.tasks # not 'implicit_name'
        def returns_unique_Collection_objects_for_same_input_module(self):
            # Ignoring self.c for now, just in case it changes later.
            # First, a module with no root NS
            mod = load('integration')
            c1 = Collection.from_module(mod)
            c2 = Collection.from_module(mod)
            assert c1 is not c2
            # Now one *with* a root NS (which was previously buggy)
            mod2 = load('explicit_root')
            c3 = Collection.from_module(mod2)
            c4 = Collection.from_module(mod2)
            assert c3 is not c4
        class explicit_root_ns:
            # Behavior when the module defines its own root namespace (ns).
            def setup(self):
                mod = load('explicit_root')
                mod.ns.configure({'key': 'builtin', 'otherkey': 'yup'})
                mod.ns.name = 'builtin_name'
                self.unchanged = Collection.from_module(mod)
                self.changed = Collection.from_module(
                    mod,
                    name='override_name',
                    config={'key': 'override'}
                )
            def inline_config_with_root_namespaces_overrides_builtin(self):
                eq_(self.unchanged.configuration()['key'], 'builtin')
                eq_(self.changed.configuration()['key'], 'override')
            def inline_config_overrides_via_merge_not_replacement(self):
                assert 'otherkey' in self.changed.configuration()
            def inline_name_overrides_root_namespace_object_name(self):
                eq_(self.unchanged.name, 'builtin_name')
                eq_(self.changed.name, 'override_name')
            def root_namespace_object_name_overrides_module_name(self):
                # Duplicates part of previous test for explicitness' sake.
                # I.e. proves that the name doesn't end up 'explicit_root'.
                eq_(self.unchanged.name, 'builtin_name')
    class add_task:
        # Tests for Collection.add_task(): naming precedence, default
        # tasks, and name collisions with subcollections.
        def setup(self):
            self.c = Collection()
        def associates_given_callable_with_given_name(self):
            self.c.add_task(_mytask, 'foo')
            eq_(self.c['foo'], _mytask)
        def uses_function_name_as_implicit_name(self):
            self.c.add_task(_mytask)
            assert '_mytask' in self.c
        def prefers_name_kwarg_over_task_name_attr(self):
            self.c.add_task(Task(_func, name='notfunc'), name='yesfunc')
            assert 'yesfunc' in self.c
            assert 'notfunc' not in self.c
        def prefers_task_name_attr_over_function_name(self):
            self.c.add_task(Task(_func, name='notfunc'))
            assert 'notfunc' in self.c
            assert '_func' not in self.c
        @raises(ValueError)
        def raises_ValueError_if_no_name_found(self):
            # Can't use a lambda here as they are technically real functions.
            class Callable(object):
                def __call__(self):
                    pass
            self.c.add_task(Task(Callable()))
        @raises(ValueError)
        def raises_ValueError_on_multiple_defaults(self):
            t1 = Task(_func, default=True)
            t2 = Task(_func, default=True)
            self.c.add_task(t1, 'foo')
            self.c.add_task(t2, 'bar')
        @raises(ValueError)
        def raises_ValueError_if_task_added_mirrors_subcollection_name(self):
            self.c.add_collection(Collection('sub'))
            self.c.add_task(_mytask, 'sub')
        def allows_specifying_task_defaultness(self):
            self.c.add_task(_mytask, default=True)
            eq_(self.c.default, '_mytask')
        def specifying_default_False_overrides_task_setting(self):
            @task(default=True)
            def its_me():
                pass
            self.c.add_task(its_me, default=False)
            eq_(self.c.default, None)
    class add_collection:
        # Tests for Collection.add_collection(): nesting, module inputs,
        # and name-collision/naming errors.
        def setup(self):
            self.c = Collection()
        def adds_collection_as_subcollection_of_self(self):
            c2 = Collection('foo')
            self.c.add_collection(c2)
            assert 'foo' in self.c.collections
        def can_take_module_objects(self):
            self.c.add_collection(load('integration'))
            assert 'integration' in self.c.collections
        @raises(ValueError)
        def raises_ValueError_if_collection_without_name(self):
            # Aka non-root collections must either have an explicit name given
            # via kwarg, have a name attribute set, or be a module with
            # __name__ defined.
            root = Collection()
            sub = Collection()
            root.add_collection(sub)
        @raises(ValueError)
        def raises_ValueError_if_collection_named_same_as_task(self):
            self.c.add_task(_mytask, 'sub')
            self.c.add_collection(Collection('sub'))
    class getitem:
        "__getitem__"
        # Lookup semantics: own tasks, dotted subcollection paths,
        # aliases, and default tasks.
        def setup(self):
            self.c = Collection()
        def finds_own_tasks_by_name(self):
            # TODO: duplicates an add_task test above, fix?
            self.c.add_task(_mytask, 'foo')
            eq_(self.c['foo'], _mytask)
        def finds_subcollection_tasks_by_dotted_name(self):
            sub = Collection('sub')
            sub.add_task(_mytask)
            self.c.add_collection(sub)
            eq_(self.c['sub._mytask'], _mytask)
        def honors_aliases_in_own_tasks(self):
            t = Task(_func, aliases=['bar'])
            self.c.add_task(t, 'foo')
            eq_(self.c['bar'], t)
        def honors_subcollection_task_aliases(self):
            self.c.add_collection(load('decorator'))
            assert 'decorator.bar' in self.c
        def honors_own_default_task_with_no_args(self):
            # An empty-string lookup resolves to the default task.
            t = Task(_func, default=True)
            self.c.add_task(t)
            eq_(self.c[''], t)
        def honors_subcollection_default_tasks_on_subcollection_name(self):
            sub = Collection.from_module(load('decorator'))
            self.c.add_collection(sub)
            # Sanity
            assert self.c['decorator.biz'] is sub['biz']
            # Real test
            assert self.c['decorator'] is self.c['decorator.biz']
        @raises(ValueError)
        def raises_ValueError_for_no_name_and_no_default(self):
            self.c['']
        @raises(ValueError)
        def ValueError_for_empty_subcol_task_name_and_no_default(self):
            self.c.add_collection(Collection('whatever'))
            self.c['whatever']
    class to_contexts:
        # Conversion of a Collection's tasks into parser Context objects.
        def setup(self):
            @task
            def mytask(text, boolean=False, number=5):
                six.print_(text)
            @task(aliases=['mytask27'])
            def mytask2():
                pass
            @task(aliases=['othertask'], default=True)
            def subtask():
                pass
            sub = Collection('sub', subtask)
            self.c = Collection(mytask, mytask2, sub)
            self.contexts = self.c.to_contexts()
            # Flatten all context aliases into one list for membership checks.
            alias_tups = [list(x.aliases) for x in self.contexts]
            self.aliases = reduce(operator.add, alias_tups, [])
            # Focus on 'mytask' as it has the more interesting sig
            self.context = [x for x in self.contexts if x.name == 'mytask'][0]

        def returns_iterable_of_Contexts_corresponding_to_tasks(self):
            eq_(self.context.name, 'mytask')
            eq_(len(self.contexts), 3)

        def allows_flaglike_access_via_flags(self):
            assert '--text' in self.context.flags

        def positional_arglist_preserves_order_given(self):
            # positional=() ordering, not signature ordering, drives
            # the context's positional_args.
            @task(positional=('second', 'first'))
            def mytask(first, second, third):
                pass
            c = Collection()
            c.add_task(mytask)
            ctx = c.to_contexts()[0]
            eq_(ctx.positional_args, [ctx.args['second'], ctx.args['first']])

        def exposes_namespaced_task_names(self):
            assert 'sub.subtask' in [x.name for x in self.contexts]

        def exposes_namespaced_task_aliases(self):
            assert 'sub.othertask' in self.aliases

        def exposes_subcollection_default_tasks(self):
            # A subcollection's default task is addressable by the bare
            # subcollection name.
            assert 'sub' in self.aliases

        def exposes_aliases(self):
            assert 'mytask27' in self.aliases
    class task_names:
        # The task_names mapping: primary dotted names -> their aliases.
        def setup(self):
            self.c = Collection.from_module(load('explicit_root'))

        def returns_all_task_names_including_subtasks(self):
            eq_(set(self.c.task_names.keys()), set(['top_level', 'sub.sub_task']))

        def includes_aliases_and_defaults_as_values(self):
            names = self.c.task_names
            eq_(names['top_level'], ['othertop'])
            # 'sub' appears because sub_task is the subcollection's default.
            eq_(names['sub.sub_task'], ['sub.othersub', 'sub'])
    class configuration:
        "Configuration methods"
        def setup(self):
            self.root = Collection()
            self.task = Task(_func, name='task')

        def basic_set_and_get(self):
            self.root.configure({'foo': 'bar'})
            eq_(self.root.configuration(), {'foo': 'bar'})

        def configure_performs_merging(self):
            # Successive configure() calls merge, not replace.
            self.root.configure({'foo': 'bar'})
            eq_(self.root.configuration()['foo'], 'bar')
            self.root.configure({'biz': 'baz'})
            eq_(set(self.root.configuration().keys()), set(['foo', 'biz']))

        def configure_allows_overwriting(self):
            self.root.configure({'foo': 'one'})
            eq_(self.root.configuration()['foo'], 'one')
            self.root.configure({'foo': 'two'})
            eq_(self.root.configuration()['foo'], 'two')

        def call_returns_dict(self):
            eq_(self.root.configuration(), {})
            self.root.configure({'foo': 'bar'})
            eq_(self.root.configuration(), {'foo': 'bar'})

        def access_merges_from_subcollections(self):
            # configuration(<dotted path>) folds in config from every
            # collection along the path.
            inner = Collection('inner', self.task)
            inner.configure({'foo': 'bar'})
            self.root.configure({'biz': 'baz'})
            # With no inner collection
            eq_(set(self.root.configuration().keys()), set(['biz']))
            # With inner collection
            self.root.add_collection(inner)
            eq_(
                set(self.root.configuration('inner.task').keys()),
                set(['foo', 'biz'])
            )

        def parents_overwrite_children_in_path(self):
            inner = Collection('inner', self.task)
            inner.configure({'foo': 'inner'})
            self.root.add_collection(inner)
            # Before updating root collection's config, reflects inner
            eq_(self.root.configuration('inner.task')['foo'], 'inner')
            self.root.configure({'foo': 'outer'})
            # After, reflects outer (since that now overrides)
            eq_(self.root.configuration('inner.task')['foo'], 'outer')

        def sibling_subcollections_ignored(self):
            # Only collections on the lookup path contribute config.
            inner = Collection('inner', self.task)
            inner.configure({'foo': 'hi there'})
            inner2 = Collection('inner2', Task(_func, name='task2'))
            inner2.configure({'foo': 'nope'})
            root = Collection(inner, inner2)
            eq_(root.configuration('inner.task')['foo'], 'hi there')
            eq_(root.configuration('inner2.task2')['foo'], 'nope')

        def subcollection_paths_may_be_dotted(self):
            leaf = Collection('leaf', self.task)
            leaf.configure({'key': 'leaf-value'})
            middle = Collection('middle', leaf)
            root = Collection('root', middle)
            eq_(root.configuration('middle.leaf.task'), {'key': 'leaf-value'})

        def invalid_subcollection_paths_result_in_KeyError(self):
            # Straight up invalid
            assert_raises(KeyError,
                Collection('meh').configuration,
                'nope.task'
            )
            # Exists but wrong level (should be 'root.task', not just
            # 'task')
            inner = Collection('inner', self.task)
            assert_raises(KeyError,
                Collection('root', inner).configuration, 'task')

        def keys_dont_have_to_exist_in_full_path(self):
            # Kinda duplicates earlier stuff; meh
            # Key only stored on leaf
            leaf = Collection('leaf', self.task)
            leaf.configure({'key': 'leaf-value'})
            middle = Collection('middle', leaf)
            root = Collection('root', middle)
            eq_(root.configuration('middle.leaf.task'), {'key': 'leaf-value'})
            # Key stored on mid + leaf but not root
            middle.configure({'key': 'whoa'})
            eq_(root.configuration('middle.leaf.task'), {'key': 'whoa'})
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import copy
import frappe
import frappe.share
from frappe import _, msgprint
from frappe.utils import cint
from frappe.query_builder import DocType
# Canonical tuple of permission types ("rights") a role can hold on a doctype;
# mirrors the boolean permission columns of DocPerm / Custom DocPerm.
rights = ("select", "read", "write", "create", "delete", "submit", "cancel", "amend",
    "print", "email", "report", "import", "export", "set_user_permissions", "share")
def check_admin_or_system_manager(user=None):
    """Throw PermissionError unless *user* (default: session user) is
    Administrator or holds the System Manager role."""
    user = user or frappe.session.user
    is_system_manager = "System Manager" in frappe.get_roles(user)
    if not (is_system_manager or user == "Administrator"):
        frappe.throw(_("Not permitted"), frappe.PermissionError)
def print_has_permission_check_logs(func):
    """Decorator for permission-check functions.

    Collects messages pushed via ``push_perm_check_log`` during the wrapped
    call and, when access is denied for the session user's own check (and
    ``raise_exception`` was not explicitly False), surfaces them with
    ``msgprint``. The log buffer is always cleared afterwards.
    """
    from functools import wraps

    # Fix: without functools.wraps the wrapper hid func's __name__/__doc__,
    # breaking introspection of the decorated permission function.
    @wraps(func)
    def inner(*args, **kwargs):
        frappe.flags['has_permission_check_logs'] = []
        result = func(*args, **kwargs)
        # user is checking their own permission when no explicit user given
        self_perm_check = True if not kwargs.get('user') else kwargs.get('user') == frappe.session.user
        raise_exception = False if kwargs.get('raise_exception') is False else True

        # print only if access denied
        # and if user is checking his own permission
        if not result and self_perm_check and raise_exception:
            msgprint(('<br>').join(frappe.flags.get('has_permission_check_logs', [])))
        frappe.flags.pop('has_permission_check_logs', None)
        return result
    return inner
@print_has_permission_check_logs
def has_permission(doctype, ptype="read", doc=None, verbose=False, user=None, raise_exception=True, parent_doctype=None):
    """Returns True if user has permission `ptype` for given `doctype`.
    If `doc` is passed, it also checks user, share and owner permissions.

    Note: if Table DocType is passed, it always returns True.
    """
    if not user: user = frappe.session.user

    if not doc and hasattr(doctype, 'doctype'):
        # first argument can be doc or doctype
        doc = doctype
        doctype = doc.doctype

    # Administrator bypasses all checks
    if user == "Administrator":
        return True

    # child (Table) doctypes are resolved against their parent document
    if frappe.is_table(doctype):
        return has_child_table_permission(doctype, ptype, doc, verbose,
            user, raise_exception, parent_doctype)

    meta = frappe.get_meta(doctype)

    if doc:
        if isinstance(doc, str):
            # a docname was passed; load the document for doc-level checks
            doc = frappe.get_doc(meta.name, doc)
        perm = get_doc_permissions(doc, user=user, ptype=ptype).get(ptype)
        if not perm: push_perm_check_log(_('User {0} does not have access to this document').format(frappe.bold(user)))
    else:
        # doctype-level check only
        if ptype=="submit" and not cint(meta.is_submittable):
            push_perm_check_log(_("Document Type is not submittable"))
            return False

        if ptype=="import" and not cint(meta.allow_import):
            push_perm_check_log(_("Document Type is not importable"))
            return False

        role_permissions = get_role_permissions(meta, user=user)
        perm = role_permissions.get(ptype)

        if not perm:
            push_perm_check_log(_('User {0} does not have doctype access via role permission for document {1}').format(frappe.bold(user), frappe.bold(doctype)))

    def false_if_not_shared():
        # fallback: document (or doctype) shared with the user may still
        # grant the requested access
        if ptype in ("read", "write", "share", "submit", "email", "print"):
            shared = frappe.share.get_shared(doctype, user,
                ["read" if ptype in ("email", "print") else ptype])

            if doc:
                doc_name = get_doc_name(doc)
                if doc_name in shared:
                    if ptype in ("read", "write", "share", "submit") or meta.permissions[0].get(ptype):
                        return True

            elif shared:
                # if atleast one shared doc of that type, then return True
                # this is used in db_query to check if permission on DocType
                return True

        return False

    if not perm:
        perm = false_if_not_shared()

    return bool(perm)
def get_doc_permissions(doc, user=None, ptype=None):
    """Returns a dict of evaluated permissions for given `doc` like `{"read":1, "write":1}`"""
    if not user: user = frappe.session.user

    # child table rows are always readable/writable; parent is checked elsewhere
    if frappe.is_table(doc.doctype): return {"read": 1, "write": 1}

    meta = frappe.get_meta(doc.doctype)

    def is_user_owner():
        # case-insensitive comparison of doc.owner with the user being checked
        return (doc.get("owner") or "").lower() == user.lower()

    # controller hooks can explicitly deny (False); None means undecided
    if has_controller_permissions(doc, ptype, user=user) is False:
        push_perm_check_log('Not allowed via controller permission check')
        return {ptype: 0}

    # deep-copy: role permissions are cached per (doctype, user) and must
    # not be mutated below
    permissions = copy.deepcopy(get_role_permissions(meta, user=user, is_owner=is_user_owner()))

    if not cint(meta.is_submittable):
        permissions["submit"] = 0

    if not cint(meta.allow_import):
        permissions["import"] = 0

    # Override with `if_owner` perms irrespective of user
    if permissions.get('has_if_owner_enabled'):
        # apply owner permissions on top of existing permissions
        # some access might be only for the owner
        # eg. everyone might have read access but only owner can delete
        permissions.update(permissions.get("if_owner", {}))

    if not has_user_permission(doc, user):
        if is_user_owner():
            # replace with owner permissions
            permissions = permissions.get("if_owner", {})
            # if_owner does not come with create rights...
            permissions['create'] = 0
        else:
            # user permissions deny access and user is not the owner
            permissions = {}

    return permissions
def get_role_permissions(doctype_meta, user=None, is_owner=None):
    """
    Returns dict of evaluated role permissions like
    {
        "read": 1,
        "write": 0,
        // if "if_owner" is enabled
        "if_owner":
            {
                "read": 1,
                "write": 0
            }
    }

    Results are cached per (doctype, user) in frappe.local.role_permissions.
    """
    if isinstance(doctype_meta, str):
        doctype_meta = frappe.get_meta(doctype_meta) # assuming doctype name was passed

    if not user: user = frappe.session.user

    cache_key = (doctype_meta.name, user)

    if user == 'Administrator':
        return allow_everything()

    if not frappe.local.role_permissions.get(cache_key):
        perms = frappe._dict(
            if_owner={}
        )

        roles = frappe.get_roles(user)

        def is_perm_applicable(perm):
            # only level-0 rules for one of the user's roles count here
            return perm.role in roles and cint(perm.permlevel)==0

        def has_permission_without_if_owner_enabled(ptype):
            # True if some applicable rule grants ptype unconditionally
            return any(p.get(ptype, 0) and not p.get('if_owner', 0) for p in applicable_permissions)

        applicable_permissions = list(filter(is_perm_applicable, getattr(doctype_meta, 'permissions', [])))
        has_if_owner_enabled = any(p.get('if_owner', 0) for p in applicable_permissions)
        perms['has_if_owner_enabled'] = has_if_owner_enabled

        for ptype in rights:
            pvalue = any(p.get(ptype, 0) for p in applicable_permissions)
            # check if any perm object allows perm type
            perms[ptype] = cint(pvalue)
            if (
                pvalue
                and has_if_owner_enabled
                and not has_permission_without_if_owner_enabled(ptype)
                and ptype != 'create'
            ):
                # the right is granted only through if_owner rules
                perms['if_owner'][ptype] = cint(pvalue and is_owner)
                # has no access if not owner
                # only provide select or read access so that user is able to at-least access list
                # (and the documents will be filtered based on owner in further checks)
                perms[ptype] = 1 if ptype in ('select', 'read') else 0

        frappe.local.role_permissions[cache_key] = perms

    return frappe.local.role_permissions[cache_key]
def get_user_permissions(user):
    """Thin module-level wrapper over the User Permission doctype helper."""
    from frappe.core.doctype.user_permission.user_permission import get_user_permissions as _get_user_permissions
    return _get_user_permissions(user)
def has_user_permission(doc, user=None):
    '''Returns True if User is allowed to view considering User Permissions'''
    from frappe.core.doctype.user_permission.user_permission import get_user_permissions
    user_permissions = get_user_permissions(user)

    if not user_permissions:
        # no user permission rules specified for this doctype
        return True

    # user can create own role permissions, so nothing applies
    if get_role_permissions('User Permission', user=user).get('write'):
        return True

    apply_strict_user_permissions = frappe.get_system_settings('apply_strict_user_permissions')

    doctype = doc.get('doctype')
    docname = doc.get('name')

    # STEP 1: ---------------------
    # check user permissions on self
    if doctype in user_permissions:
        allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(doctype, []), doctype)

        # if allowed_docs is empty it states that there is no applicable permission under the current doctype
        # only check if allowed_docs is not empty
        if allowed_docs and docname not in allowed_docs:
            # no user permissions for this doc specified
            push_perm_check_log(_('Not allowed for {0}: {1}').format(_(doctype), docname))
            return False

    # STEP 2: ---------------------------------
    # check user permissions in all link fields

    def check_user_permission_on_link_fields(d):
        # check user permissions for all the link fields of the given
        # document object d
        #
        # called for both parent and child records

        meta = frappe.get_meta(d.get("doctype"))

        # check all link fields for user permissions
        for field in meta.get_link_fields():
            if field.ignore_user_permissions: continue

            # empty value, do you still want to apply user permissions?
            if not d.get(field.fieldname) and not apply_strict_user_permissions:
                # nah, not strict
                continue

            # no user permission rule restricts this linked doctype
            if field.options not in user_permissions:
                continue

            # get the list of all allowed values for this link
            allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(field.options, []), doctype)

            if allowed_docs and d.get(field.fieldname) not in allowed_docs:
                # restricted for this link field, and no matching values found
                # make the right message and exit
                if d.get('parentfield'):
                    # "Not allowed for Company = Restricted Company in Row 3. Restricted field: reference_type"
                    msg = _('Not allowed for {0}: {1} in Row {2}. Restricted field: {3}').format(
                        _(field.options), d.get(field.fieldname), d.idx, field.fieldname)
                else:
                    # "Not allowed for Company = Restricted Company. Restricted field: reference_type"
                    msg = _('Not allowed for {0}: {1}. Restricted field: {2}').format(
                        _(field.options), d.get(field.fieldname), field.fieldname)

                push_perm_check_log(msg)

                return False

        return True

    if not check_user_permission_on_link_fields(doc):
        return False

    # child rows must also satisfy link-field user permissions
    for d in doc.get_all_children():
        if not check_user_permission_on_link_fields(d):
            return False

    return True
def has_controller_permissions(doc, ptype, user=None):
    """Return the verdict of hook-registered ``has_permission`` controllers
    for *doc*, or None when no controller decides."""
    user = user or frappe.session.user

    hooks = frappe.get_hooks("has_permission").get(doc.doctype, [])
    # later hooks take precedence, hence the reversed order
    for hook in reversed(hooks):
        verdict = frappe.call(frappe.get_attr(hook), doc=doc, ptype=ptype, user=user)
        if verdict is not None:
            return verdict

    # no hooks registered, or none of them decided on True or False
    return None
def get_doctypes_with_read():
    """Return the distinct doctypes on which the current user holds any valid permission."""
    # isinstance() instead of an exact type() comparison (idiomatic, and
    # correct for str subclasses); the .encode fallback for non-str parents
    # is preserved as-is.
    return list({p.parent if isinstance(p.parent, str) else p.parent.encode('UTF8') for p in get_valid_perms()})
def get_valid_perms(doctype=None, user=None):
    '''Get valid permissions for the current user from DocPerm and Custom DocPerm'''
    roles = get_roles(user)

    perms = get_perms_for(roles)
    custom_perms = get_perms_for(roles, 'Custom DocPerm')

    doctypes_with_custom_perms = get_doctypes_with_custom_docperms()
    # a standard DocPerm row applies only where no Custom DocPerm overrides
    # its doctype (idiom fix: `not x in` -> `x not in`)
    for p in perms:
        if p.parent not in doctypes_with_custom_perms:
            custom_perms.append(p)

    if doctype:
        return [p for p in custom_perms if p.parent == doctype]
    return custom_perms
def get_all_perms(role):
    '''Returns valid permissions for a given role'''
    standard = frappe.get_all('DocPerm', fields='*', filters=dict(role=role))
    custom = frappe.get_all('Custom DocPerm', fields='*', filters=dict(role=role))

    # a standard DocPerm applies only if its doctype has no custom override
    overridden_doctypes = frappe.get_all("Custom DocPerm", pluck="parent", distinct=True)
    custom.extend(p for p in standard if p.parent not in overridden_doctypes)
    return custom
def get_roles(user=None, with_standard=True):
    """Return the list of roles held by *user* (default: session user).

    Results are cached under the "roles" cache key. When ``with_standard``
    is False the implicit All/Guest/Administrator roles are stripped.
    """
    if not user:
        user = frappe.session.user

    if user=='Guest':
        return ['Guest']

    def get():
        if user == 'Administrator':
            return frappe.get_all("Role", pluck="name") # return all available roles
        else:
            table = DocType("Has Role")
            roles = frappe.qb.from_(table).where(
                (table.parent == user) & (table.role.notin(["All", "Guest"]))
            ).select(table.role).run(pluck=True)
            return roles + ['All', 'Guest']

    roles = frappe.cache().hget("roles", user, get)

    # filter standard if required
    if not with_standard:
        # Fix: materialize as a list — the previous lazy `filter` object could
        # be consumed only once and broke repeated membership tests by callers.
        roles = [r for r in roles if r not in ('All', 'Guest', 'Administrator')]

    return roles
def get_doctype_roles(doctype, access_type="read"):
    """Returns a list of roles that are allowed to access passed doctype."""
    permission_rows = frappe.get_meta(doctype).get("permissions")
    return [row.role for row in permission_rows if row.get(access_type)]
def get_perms_for(roles, perm_doctype='DocPerm'):
    '''Get perms for given roles'''
    # only level-0, non-cancelled rules for the given roles
    return frappe.db.get_all(
        perm_doctype,
        fields=['*'],
        filters={
            'permlevel': 0,
            'docstatus': 0,
            'role': ['in', roles],
        },
    )
def get_doctypes_with_custom_docperms():
    '''Returns all the doctypes with Custom Docperms'''
    rows = frappe.db.get_all('Custom DocPerm', fields=['parent'], distinct=1)
    return [row.parent for row in rows]
def can_set_user_permissions(doctype, docname=None):
    """Whether the session user may create User Permission records for *doctype*."""
    # System Manager can always set user permissions
    if frappe.session.user == "Administrator" or "System Manager" in frappe.get_roles():
        return True

    meta = frappe.get_meta(doctype)

    # check if current user has read permission for docname
    if docname and not has_permission(doctype, "read", docname):
        return False

    # finally, the user needs a role carrying the set_user_permissions right
    return get_role_permissions(meta).set_user_permissions == 1
def set_user_permission_if_allowed(doctype, name, user, with_message=False):
    # NOTE(review): the `!= 1` guard looks inverted for a function named
    # "...if_allowed" — as written, the User Permission is added only when the
    # user's roles do NOT carry the set_user_permissions right. Confirm intent
    # against callers before changing; behavior preserved here as-is.
    if get_role_permissions(frappe.get_meta(doctype), user).set_user_permissions!=1:
        add_user_permission(doctype, name, user)
def add_user_permission(doctype, name, user, ignore_permissions=False, applicable_for=None,
        is_default=0, hide_descendants=0):
    '''Add user permission'''
    from frappe.core.doctype.user_permission.user_permission import user_permission_exists

    # nothing to do if an equivalent rule already exists
    if user_permission_exists(user, doctype, name, applicable_for):
        return

    if not frappe.db.exists(doctype, name):
        frappe.throw(_("{0} {1} not found").format(_(doctype), name), frappe.DoesNotExistError)

    frappe.get_doc({
        'doctype': 'User Permission',
        'user': user,
        'allow': doctype,
        'for_value': name,
        'is_default': is_default,
        'applicable_for': applicable_for,
        'hide_descendants': hide_descendants,
    }).insert(ignore_permissions=ignore_permissions)
def remove_user_permission(doctype, name, user):
    """Delete the User Permission record matching (user, doctype, name)."""
    record_name = frappe.db.get_value(
        'User Permission',
        {'user': user, 'allow': doctype, 'for_value': name},
    )
    frappe.delete_doc('User Permission', record_name)
def clear_user_permissions_for_doctype(doctype, user=None):
    """Drop every User Permission on *doctype*, optionally scoped to one *user*."""
    filters = {'allow': doctype}
    if user:
        filters['user'] = user

    for row in frappe.db.get_all('User Permission', filters=filters):
        frappe.delete_doc('User Permission', row.name)
def can_import(doctype, raise_exception=False):
    """True if the session user may import *doctype*; optionally raise instead
    of returning False."""
    allowed = "System Manager" in frappe.get_roles() or has_permission(doctype, "import")
    if allowed:
        return True
    if raise_exception:
        raise frappe.PermissionError("You are not allowed to import: {doctype}".format(doctype=doctype))
    return False
def can_export(doctype, raise_exception=False):
    """True if the session user may export *doctype*; optionally raise instead
    of returning a falsy value."""
    if "System Manager" in frappe.get_roles():
        return True
    else:
        role_permissions = frappe.permissions.get_role_permissions(doctype)
        # Fix: default the `if_owner` lookup to {} — a missing key previously
        # raised AttributeError (NoneType has no .get) instead of denying.
        has_access = role_permissions.get('export') or \
            role_permissions.get('if_owner', {}).get('export')
        if not has_access and raise_exception:
            raise frappe.PermissionError(_("You are not allowed to export {} doctype").format(doctype))
        return has_access
def update_permission_property(doctype, role, permlevel, ptype, value=None, validate=True):
    '''Update a property in Custom Perm'''
    from frappe.core.doctype.doctype.doctype import validate_permissions_for_doctype
    # make sure Custom DocPerm rows exist for this doctype
    # (True if they had to be created just now)
    out = setup_custom_perms(doctype)

    # locate the matching Custom DocPerm row and set the ptype column
    name = frappe.get_value('Custom DocPerm', dict(parent=doctype, role=role,
        permlevel=permlevel))

    table = DocType("Custom DocPerm")
    frappe.qb.update(table).set(ptype, value).where(table.name == name).run()

    if validate:
        validate_permissions_for_doctype(doctype)

    return out
def setup_custom_perms(parent):
    '''Copy the standard DocPerm rules of *parent* into Custom DocPerm if none
    exist yet; returns True only when the copy was performed.'''
    already_customised = frappe.db.exists('Custom DocPerm', dict(parent=parent))
    if already_customised:
        return None
    copy_perms(parent)
    return True
def add_permission(doctype, role, permlevel=0, ptype=None):
    '''Add a new permission rule to the given doctype
    for the given Role and Permission Level'''
    from frappe.core.doctype.doctype.doctype import validate_permissions_for_doctype
    # ensure Custom DocPerm rows exist before adding to them
    setup_custom_perms(doctype)

    # bail out silently if an equivalent (non if_owner) rule already exists
    if frappe.db.get_value('Custom DocPerm', dict(parent=doctype, role=role,
        permlevel=permlevel, if_owner=0)):
        return

    if not ptype:
        ptype = 'read'

    custom_docperm = frappe.get_doc({
        "doctype":"Custom DocPerm",
        "__islocal": 1,
        "parent": doctype,
        "parenttype": "DocType",
        "parentfield": "permissions",
        "role": role,
        "permlevel": permlevel,
        ptype: 1,
    })

    custom_docperm.save()

    validate_permissions_for_doctype(doctype)
    # name of the newly created Custom DocPerm (None when rule pre-existed)
    return custom_docperm.name
def copy_perms(parent):
    '''Copy all DocPerm in to Custom DocPerm for the given document'''
    standard_rules = frappe.get_all('DocPerm', fields='*', filters=dict(parent=parent))
    for rule in standard_rules:
        custom_rule = frappe.new_doc('Custom DocPerm')
        custom_rule.update(rule)
        custom_rule.insert(ignore_permissions=True)
def reset_perms(doctype):
    """Reset permissions for given doctype by deleting its Custom DocPerm rows."""
    from frappe.desk.notifications import delete_notification_count_for

    # cached open-count notifications depend on permissions; invalidate them
    delete_notification_count_for(doctype)
    frappe.db.delete("Custom DocPerm", {"parent": doctype})
def get_linked_doctypes(dt):
    """Return *dt* plus every doctype it references through Link fields that
    participate in user permissions (excluding '[Select]' pseudo-links)."""
    link_fields = frappe.get_meta(dt).get("fields", {
        "fieldtype": "Link",
        "ignore_user_permissions": ("!=", 1),
        "options": ("!=", "[Select]")
    })
    linked = {dt}
    for field in link_fields:
        linked.add(field.options)
    return list(linked)
def get_doc_name(doc):
    """Return the document's name: None for falsy input, the string itself
    when a docname was passed, else ``doc.name``."""
    if not doc:
        return None
    if isinstance(doc, str):
        return doc
    return doc.name
def allow_everything():
    '''
    returns a dict with access to everything
    eg. {"read": 1, "write": 1, ...}
    '''
    return dict.fromkeys(rights, 1)
def get_allowed_docs_for_doctype(user_permissions, doctype):
    ''' Returns all the docs from the passed user_permissions that are
    allowed under provided doctype — list-only variant of
    filter_allowed_docs_for_doctype. '''
    return filter_allowed_docs_for_doctype(
        user_permissions, doctype, with_default_doc=False)
def filter_allowed_docs_for_doctype(user_permissions, doctype, with_default_doc=True):
    ''' Returns all the docs from the passed user_permissions that are
    allowed under provided doctype along with default doc value if with_default_doc is set '''
    allowed = []
    default = None

    for perm in user_permissions:
        applicable = perm.get('applicable_for')
        # a rule applies when unscoped, or scoped to this doctype
        if applicable and applicable != doctype:
            continue
        allowed.append(perm.get('doc'))
        # explicit default, or a lone rule when a default was requested
        if perm.get('is_default') or (len(user_permissions) == 1 and with_default_doc):
            default = perm.get('doc')

    if with_default_doc:
        return (allowed, default)
    return allowed
def push_perm_check_log(log):
    """Append a translated *log* message to the active permission-check log,
    if one is currently being collected by the decorator."""
    logs = frappe.flags.get('has_permission_check_logs')
    if logs is None:
        return
    logs.append(_(log))
def has_child_table_permission(child_doctype, ptype="read", child_doc=None,
        verbose=False, user=None, raise_exception=True, parent_doctype=None):
    # Child (Table) doctypes are never accessed standalone, so their
    # permission check is delegated to the parent document/doctype.
    parent_doc = None

    if child_doc:
        # derive the parent from the child row itself
        parent_doctype = child_doc.get("parenttype")
        parent_doc = frappe.get_cached_doc({
            "doctype": parent_doctype,
            "docname": child_doc.get("parent")
        })

    if parent_doctype:
        if not is_parent_valid(child_doctype, parent_doctype):
            frappe.throw(_("{0} is not a valid parent DocType for {1}").format(
                frappe.bold(parent_doctype),
                frappe.bold(child_doctype)
            ), title=_("Invalid Parent DocType"))
    else:
        # without either a child_doc or an explicit parent_doctype the check
        # cannot be resolved
        frappe.throw(_("Please specify a valid parent DocType for {0}").format(
            frappe.bold(child_doctype)
        ), title=_("Parent DocType Required"))

    return has_permission(parent_doctype, ptype=ptype, doc=parent_doc,
        verbose=verbose, user=user, raise_exception=raise_exception)
def is_parent_valid(child_doctype, parent_doctype):
    """True when *parent_doctype* is itself not a table and holds a table
    field whose options point at *child_doctype*."""
    from frappe.core.utils import find

    parent_meta = frappe.get_meta(parent_doctype)
    table_field = find(
        parent_meta.get_table_fields(),
        lambda field: field.options == child_doctype,
    )
    return not parent_meta.istable and table_field
| |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/process -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_process
short_description: Module to process openshift templates
description:
- Process openshift templates programmatically.
options:
state:
description:
- State has a few different meanings when it comes to process.
- state: present - This state runs an `oc process <template>`. When used in
- conjunction with 'create: True' the process will be piped to | oc create -f
- state: absent - will remove a template
- state: list - will perform an `oc get template <template_name>`
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
template_name:
description:
- Name of the openshift template that is being processed.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the template lives.
required: false
default: default
aliases: []
content:
description:
- Template content that will be processed.
required: false
default: None
aliases: []
params:
description:
- A list of parameters that will be inserted into the template.
required: false
default: None
aliases: []
create:
description:
- Whether or not to create the template after being processed. e.g. oc process | oc create -f -
required: False
default: False
aliases: []
reconcile:
description:
- Whether or not to attempt to determine if there are updates or changes in the incoming template.
default: true
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: process the cloud volume provisioner template with variables
oc_process:
namespace: openshift-infra
template_name: online-volume-provisioner
create: True
params:
PLAT: rhel7
register: processout
run_once: true
- debug: var=processout
'''
# -*- -*- -*- End included fragment: doc/process -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
class YeditException(Exception):
    '''Raised for any error encountered while editing YAML content via Yedit.'''
# pylint: disable=too-many-public-methods
class Yedit(object):
    ''' Class to modify yaml files '''
    # validates an entire key path; %s is filled with the alternate separators
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    # captures either a [index] array reference or a single path component
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    # separator characters recognised in key paths
    com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        '''Create an editor over *filename* and/or in-memory *content*.

        separator: character separating key-path components.
        backup: when True, write() first copies the file to <name>.orig.
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        # populate __yaml_dict from content/file straight away
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator
@separator.setter
def separator(self):
''' getter method for yaml_dict '''
return self._separator
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        # name-mangled attribute set in __init__
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key

        Returns True on removal, None when the key path cannot be resolved.
        '''
        # empty key: clear the whole container in place
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True

        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # walk down to the parent of the element being removed
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                # path component missing or container type mismatch
                return None

        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True

        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
        # falls through (returning None) when the final element was absent
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set *item* at the key-path location inside *data*, creating
        intermediate dicts along the way.

        d = {'a': {'b': 'c'}}
        key = a#b -> assigns at d['a']['b']
        Returns the assigned data, or None/raises on an unusable path.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None

        key_indexes = Yedit.parse_key(key, sep)
        # walk (and build) the path down to the parent of the target
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue

                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))

                # create the missing intermediate dict
                data[dict_key] = {}
                data = data[dict_key]

            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))

        if key == '':
            data = item

        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item

        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item

        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))

        return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
    def write(self):
        ''' Serialize self.yaml_dict to self.filename and return
            (True, yaml_dict).  Raises YeditException when no filename is set.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        # preserve a pristine copy of the current file when backups requested
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        # Try to set format attributes if supported (ruamel.yaml round-trip
        # objects carry an `fa` attribute; plain dicts do not).
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass
        # Try to use RoundTripDumper if supported; under plain PyYAML the
        # yaml.RoundTripDumper attribute lookup raises AttributeError and we
        # fall back to safe_dump.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' Check whether value exists at path.

        Entry at path is a list  -> membership test.
        Entry at path is a dict  -> subset match when value is a dict,
                                    otherwise a membership (key) test.
        Anything else            -> plain equality test.
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                # NOTE(review): entry[key] raises KeyError when value carries
                # a key that entry lacks -- presumably callers only pass keys
                # known to exist; confirm before relying on this.
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval
            return value in entry
        return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' Update the entry at path.

        dict entry : value (which must itself be a dict) is merged in.
        list entry : the element matching curr_value (or at index) is
                     replaced with value; otherwise value is appended if
                     not already present.
        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in ' +
                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            # locate the element to replace: by current value first, then by
            # explicit index
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' Set path to value in the loaded document.

        Works on a round-trip copy first so a failed add leaves
        self.yaml_dict untouched.  Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        # no-op when the value is already in place
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        # (dump/load round-trip preserves ruamel's formatting metadata, which
        # copy.deepcopy does not)
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)
        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        # commit the mutated copy only on success
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' Create a yaml file seeded with value at path, but only when the
            target file does not already exist.

        Returns (changed, yaml_dict).
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            # (dump/load round-trip preserves ruamel's formatting metadata)
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)
            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''Coerce an incoming string value according to the requested vtype.

        'bool' in vtype : validates the string looks like a boolean.
        'str' in vtype  : bools are stringified; strings pass through as-is.
        otherwise       : strings are yaml-loaded to infer their type.
        Raises YeditException on an unparseable value.
        '''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
                                     % (inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # If vtype is not str then go ahead and attempt to yaml load it.
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; values here come from the ansible module args.
        if isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming ' +
                                     'value. value=[%s] vtype=[%s]'
                                     % (type(inc_value), vtype))
        return inc_value
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=module.params['src'],
backup=module.params['backup'],
separator=module.params['separator'])
if module.params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and \
module.params['state'] != 'present':
return {'failed': True,
'msg': 'Error opening file [%s]. Verify that the ' +
'file exists, that it is has correct' +
' permissions, and is valid yaml.'}
if module.params['state'] == 'list':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
rval = yamlfile.get(module.params['key']) or {}
return {'changed': False, 'result': rval, 'state': "list"}
elif module.params['state'] == 'absent':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
rval = yamlfile.pop(module.params['key'],
module.params['value'])
else:
rval = yamlfile.delete(module.params['key'])
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
module.params['value'] is None:
return {'changed': False,
'result': yamlfile.yaml_dict,
'state': "present"}
yamlfile.yaml_dict = content
# we were passed a value; parse it
if module.params['value']:
value = Yedit.parse_value(module.params['value'],
module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
elif module.params['append']:
rval = yamlfile.append(key, value)
else:
rval = yamlfile.put(key, value)
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0],
'result': rval[1], 'state': "present"}
# no edits to make
if module.params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': "present"}
return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Raised for errors coming from the openshiftcli wrappers.'''
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
    ''' Return the full path to the oc binary, or the bare name 'oc' when it
        cannot be located. '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'
    # Prefer shutil.which (python3); older pythons lack it and raise
    # AttributeError, so fall back to a manual scan of the search paths.
    try:
        located = shutil.which(binary, path=os.pathsep.join(search_paths))
        if located is not None:
            binary = located
    except AttributeError:
        for candidate in search_paths:
            full_path = os.path.join(candidate, binary)
            if os.path.exists(full_path):
                binary = full_path
                break
    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Thin wrapper around the `oc` / `oc adm` command line tools. '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI.

        Works on a private temp copy of the kubeconfig so concurrent runs
        cannot clobber the shared credentials file.
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])
    def _delete(self, resource, rname, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource, rname]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template
           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # template content arrives on stdin
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        elif rname:
            cmd.append(rname)
        cmd.extend(['-o', 'json'])
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods
            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)
        stdout, stderr = proc.communicate(input_data)
        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc.

        Returns a dict with returncode, results, cmd, and stdout/stderr/err
        details on failure.
        '''
        cmds = [self.oc_binary]
        if oadm:
            cmds.append('adm')
        cmds.extend(cmd)
        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE: 'emtpy' typo preserved -- callers historically pass the
        # misspelled sentinel to suppress the -n flag.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print(' '.join(cmds))
        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
        rval = {"returncode": returncode,
                "results": results,
                "cmd": ' '.join(cmds)}
        if returncode == 0:
            if output:
                if output_type == 'json':
                    # BUG FIX: Python 3 unbinds the `except ... as err` name
                    # when the handler exits, so reusing `err` here made the
                    # later `if err:` raise UnboundLocalError whenever JSON
                    # decoding failed.  Capture into a separate name instead.
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as verr:
                        if "No JSON object could be decoded" in verr.args:
                            err = verr.args
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print("STDOUT: {0}".format(stdout))
                print("STDERR: {0}".format(stderr))
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds})
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {}})
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(contents)
    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        tmp = Utils.create_tmpfile(prefix=rname)
        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp
    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        # BUG FIX: close the source handle promptly (the original
        # open(inc_file).read() leaked the file descriptor until GC).
        with open(inc_file) as handle:
            Utils._write(tmpfile, handle.read())
        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile
    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name
    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output into {tool: version} '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']
        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]
        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]
        return version_dict
    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings ({tool}_numeric / {tool}_short) '''
        versions_dict = {}
        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]
            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]
        return versions_dict
    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed via the rpm database '''
        import yum
        yum_base = yum.YumBase()
        if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
            return True
        return False
    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.  '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False
            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False
        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic holder for a resource's cli configuration options.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options
    @property
    def config_options(self):
        ''' return config options '''
        return self._options
    def to_option_list(self):
        '''return all options as a string'''
        return self.stringify()
    def stringify(self):
        ''' Render the options hash as a list of --key=value cli params.

        An option is included when its 'include' flag is set and its value is
        truthy or an int (so an explicit 0 still makes the cut).
        '''
        params = []
        for opt_name, opt in self.config_options.items():
            wanted = opt['include'] and (opt['value'] or isinstance(opt['value'], int))
            if wanted:
                params.append('--%s=%s' % (opt_name.replace('_', '-'), opt['value']))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_process.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCProcess(OpenShiftCLI):
    ''' Wrap `oc process` for template expansion and idempotent creation of
        the objects a template describes. '''
    # pylint allows 5. we need 6
    # pylint: disable=too-many-arguments
    def __init__(self,
                 namespace,
                 tname=None,
                 params=None,
                 create=False,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 tdata=None,
                 verbose=False):
        ''' Constructor for OpenshiftOC '''
        super(OCProcess, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = tname
        self.data = tdata
        self.params = params
        self.create = create
        # lazily populated by the `template` property
        self._template = None
    @property
    def template(self):
        '''template property -- processes the template on first access and
           caches the resulting item list'''
        if self._template is None:
            results = self._process(self.name, False, self.params, self.data)
            if results['returncode'] != 0:
                raise OpenShiftCLIError('Error processing template [%s].' % self.name)
            self._template = results['results']['items']
        return self._template
    def get(self):
        '''get the template; a missing template is normalized to
           returncode 0 with exists=False and empty results'''
        results = self._get('template', self.name)
        if results['returncode'] != 0:
            # Does the template exist??
            if 'not found' in results['stderr']:
                results['returncode'] = 0
                results['exists'] = False
                results['results'] = []
        return results
    def delete(self, obj):
        '''delete a resource'''
        return self._delete(obj['kind'], obj['metadata']['name'])
    def create_obj(self, obj):
        '''create a resource'''
        return self._create_from_content(obj['metadata']['name'], obj)
    def process(self, create=None):
        '''process a template; an explicit create argument overrides the
           constructor's create flag'''
        do_create = False
        if create != None:
            do_create = create
        else:
            do_create = self.create
        return self._process(self.name, do_create, self.params, self.data)
    def exists(self):
        '''return whether the template exists'''
        # Always return true if we're being passed template data
        if self.data:
            return True
        t_results = self._get('template', self.name)
        if t_results['returncode'] != 0:
            # Does the template exist??
            if 'not found' in t_results['stderr']:
                return False
            else:
                raise OpenShiftCLIError('Something went wrong. %s' % t_results)
        return True
    def needs_update(self):
        '''attempt to process the template and return it for comparison with oc objects

        Returns a list of (obj, needs_update) tuples, one per template item.
        '''
        obj_results = []
        for obj in self.template:
            # build a list of types to skip: these fields are autogenerated
            # or mutated server-side and would always show as different
            skip = []
            if obj['kind'] == 'ServiceAccount':
                skip.extend(['secrets', 'imagePullSecrets'])
            if obj['kind'] == 'BuildConfig':
                skip.extend(['lastTriggeredImageID'])
            if obj['kind'] == 'ImageStream':
                skip.extend(['generation'])
            if obj['kind'] == 'DeploymentConfig':
                skip.extend(['lastTriggeredImage'])
            # fetch the current object
            curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
            if curr_obj_results['returncode'] != 0:
                # Does the template exist??
                if 'not found' in curr_obj_results['stderr']:
                    obj_results.append((obj, True))
                    continue
            # check the generated object against the existing object
            if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
                obj_results.append((obj, True))
                continue
            obj_results.append((obj, False))
        return obj_results
    # pylint: disable=too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode):
        '''run the ansible idempotent code

        Returns a result dict for exit_json/fail_json.
        '''
        ocprocess = OCProcess(params['namespace'],
                              params['template_name'],
                              params['params'],
                              params['create'],
                              kubeconfig=params['kubeconfig'],
                              tdata=params['content'],
                              verbose=params['debug'])
        state = params['state']
        api_rval = ocprocess.get()
        if state == 'list':
            if api_rval['returncode'] != 0:
                return {"failed": True, "msg" : api_rval}
            return {"changed" : False, "results": api_rval, "state": state}
        elif state == 'present':
            if check_mode and params['create']:
                return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."}
            if not ocprocess.exists() or not params['reconcile']:
                #FIXME: this code will never get run in a way that succeeds when
                # module.params['reconcile'] is true. Because oc_process doesn't
                # create the actual template, the check of ocprocess.exists()
                # is meaningless. Either it's already here and this code
                # won't be run, or this code will fail because there is no
                # template available for oc process to use. Have we conflated
                # the template's existence with the existence of the objects
                # it describes?
                # Create it here
                api_rval = ocprocess.process()
                if api_rval['returncode'] != 0:
                    return {"failed": True, "msg": api_rval}
                if params['create']:
                    return {"changed": True, "results": api_rval, "state": state}
                return {"changed": False, "results": api_rval, "state": state}
            # verify results: delete and recreate any object that drifted
            update = False
            rval = []
            all_results = ocprocess.needs_update()
            for obj, status in all_results:
                if status:
                    ocprocess.delete(obj)
                    results = ocprocess.create_obj(obj)
                    results['kind'] = obj['kind']
                    rval.append(results)
                    update = True
            if not update:
                return {"changed": update, "results": api_rval, "state": state}
            for cmd in rval:
                if cmd['returncode'] != 0:
                    return {"failed": True, "changed": update, "msg": rval, "state": state}
            return {"changed": update, "results": rval, "state": state}
# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_process.py -*- -*- -*-
def main():
    '''
    ansible oc module for processing templates

    Builds the AnsibleModule argument spec, delegates to
    OCProcess.run_ansible, and maps the result onto exit_json/fail_json.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str', choices=['present', 'list']),
            debug=dict(default=False, type='bool'),
            namespace=dict(default='default', type='str'),
            template_name=dict(default=None, type='str'),
            content=dict(default=None, type='str'),
            params=dict(default=None, type='dict'),
            create=dict(default=False, type='bool'),
            reconcile=dict(default=True, type='bool'),
        ),
        supports_check_mode=True,
    )
    rval = OCProcess.run_ansible(module.params, module.check_mode)
    if 'failed' in rval:
        module.fail_json(**rval)
    module.exit_json(**rval)
if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_process.py -*- -*- -*-
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_profile_sip
short_description: Manage SIP profiles on a BIG-IP
description:
- Manage SIP profiles on a BIG-IP system.
version_added: "1.0.0"
options:
name:
description:
- Specifies the name of the SIP profile to manage.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(sip) profile.
type: str
community:
description:
- When the C(dialog_aware) is C(yes) and the configuration requires multiple SIP virtual server-profile pairings,
this string value indicates whether the pair belongs to the same SIP proxy functional group.
type: str
description:
description:
- Description of the profile.
- To remove the entry completely, set a value of C('').
type: str
dialog_aware:
description:
- When C(yes), the system gathers SIP dialog information and automatically forwards SIP messages belonging to
the known SIP dialog.
type: bool
enable_sip_firewall:
description:
- Specifies whether the Advanced Firewall Manager (AFM) policy is enabled.
- When C(yes), the SIP Security settings configured in the DoS Profile in AFM apply to the virtual servers that
use this profile.
type: bool
insert_record_route_header:
description:
- When C(yes), inserts a Record-Route SIP header, which indicates the next hop for the following SIP request
messages.
type: bool
insert_via_header:
description:
- When C(yes), inserts a Via header in the forwarded SIP request.
- Via headers indicate the path taken through proxy devices and transports used. The response message uses this
routing information.
type: bool
user_via_header:
description:
- When C(insert_via_header) is C(yes), specifies the Via value the system inserts as the top Via header in a
SIP REQUEST message.
- "The valid value must include SIP protocol and sent_by settings, for example: C(SIP/2.0/UDP 10.10.10.10:5060)."
- To remove the entry completely, set a value of C('').
type: str
log_profile:
description:
- Specifies the logging settings the publisher uses to send log messages.
- The format of the name can either be prepended by partition (C(/Common/foo)), or specified
just as an object name (C(foo)).
- To remove the entry, set a value of C(''); however, the profile C(log_publisher)
must also be set as C('').
type: str
log_publisher:
description:
- Specifies the publisher defined to log messages.
- Format of the name can either be prepended by partition (C(/Common/foo)), or specified
just as an object name (C(foo)).
- To remove the entry, set a value of C(''); however, the profile C(log_profile)
must also be set as C('').
type: str
secure_via_header:
description:
- When checked (enabled), inserts a secure Via header in the forwarded SIP request.
- A secure Via header indicates where the message originated.
- This parameter causes the inserted Via header to specify Transport Layer Security. For this option to take
effect, C(insert_via_header) must be set to C(yes).
type: bool
security:
description:
- "When C(yes). enables the use of enhanced Horizontal Security Layer (HSL) security checking."
type: bool
terminate_on_bye:
description:
- When C(yes), closes a connection when a BYE transaction finishes.
- A BYE transaction is a message an application sends to another application when it is ready to close the
connection between the two.
type: bool
max_size:
description:
- Specifies the maximum SIP message size that the BIG-IP system accepts.
- The accepted value range is C(0 - 4294967295) bytes.
type: int
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a SIP profile
bigip_profile_sip:
name: foo
parent: sip
log_profile: alg_log
log_publisher: foo-publisher
description: this is a new profile
security: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Update SIP profile
bigip_profile_sip:
name: foo
insert_record_route_header: yes
enable_sip_firewall: yes
insert_via_header: yes
user_via_header: "SIP/2.0/UDP 10.10.10.10:5060"
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Delete a SIP profile
bigip_profile_sip:
name: foo
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: Description of the profile.
returned: changed
type: str
sample: "custom description"
community:
description: Indicates whether the pair belongs to the same SIP proxy functional group.
returned: changed
type: str
sample: foo_community
parent:
description: Specifies the profile from which this profile inherits settings.
returned: changed
type: str
sample: /Common/sip
dialog_aware:
description: Specifies if the system gathers SIP dialog information.
returned: changed
type: bool
sample: no
enable_sip_firewall:
description: Specifies whether the Advanced Firewall Manager policy is enabled.
returned: changed
type: bool
sample: yes
insert_record_route_header:
description: Specifies if the system will insert a Record-Route SIP header.
returned: changed
type: bool
sample: yes
insert_via_header:
description: Specifies if the system will insert a Via header in the forwarded SIP request.
returned: changed
type: bool
sample: yes
user_via_header:
description: The value the system inserts as the top Via header in a SIP REQUEST message.
returned: changed
type: str
sample: "SIP/2.0/UDP 10.10.10.10:5060"
log_profile:
description: The logging settings the publisher uses to send log messages.
returned: changed
type: str
sample: "/Common/alg_profile"
log_publisher:
description: The publisher defined to log messages.
returned: changed
type: str
sample: "/Common/foo_publisher"
secure_via_header:
description: Specifies if the system will insert a secure Via header in the forwarded SIP request.
returned: changed
type: bool
sample: no
security:
description: Enables the use of enhanced Horizontal Security Layer security checking.
returned: changed
type: bool
sample: yes
terminate_on_bye:
description: Specifies if the system will close a connection when a BYE transaction finishes.
returned: changed
type: bool
sample: no
max_size:
description: Specifies the maximum SIP message size the BIG-IP system accepts.
returned: changed
type: int
sample: 2048
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec, flatten_boolean, fq_name
)
from ..module_utils.icontrol import tmos_version
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
    # Declarative metadata consumed by the AnsibleF5Parameters base class.
    # Maps BIG-IP REST attribute names (camelCase) to this module's option names.
    api_map = {
        'defaultsFrom': 'parent',
        'dialogAware': 'dialog_aware',
        'enableSipFirewall': 'enable_sip_firewall',
        'insertRecordRouteHeader': 'insert_record_route_header',
        'insertViaHeader': 'insert_via_header',
        'userViaHeader': 'user_via_header',
        'logProfile': 'log_profile',
        'logPublisher': 'log_publisher',
        'secureViaHeader': 'secure_via_header',
        'terminateOnBye': 'terminate_on_bye',
        'maxSize': 'max_size',
    }
    # Attributes sent to the BIG-IP REST API on create/update.
    api_attributes = [
        'community',
        'description',
        'defaultsFrom',
        'dialogAware',
        'enableSipFirewall',
        'insertRecordRouteHeader',
        'insertViaHeader',
        'logProfile',
        'logPublisher',
        'secureViaHeader',
        'security',
        'terminateOnBye',
        'userViaHeader',
        'maxSize',
    ]
    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'community',
        'parent',
        'dialog_aware',
        'enable_sip_firewall',
        'insert_record_route_header',
        'insert_via_header',
        'user_via_header',
        'log_profile',
        'log_publisher',
        'secure_via_header',
        'security',
        'terminate_on_bye',
        'max_size',
    ]
    # Options diffed against the device to decide whether an update is needed.
    updatables = [
        'description',
        'community',
        'parent',
        'dialog_aware',
        'enable_sip_firewall',
        'insert_record_route_header',
        'insert_via_header',
        'user_via_header',
        'log_profile',
        'log_publisher',
        'secure_via_header',
        'security',
        'terminate_on_bye',
        'max_size',
    ]
class ApiParameters(Parameters):
    # Parameters as returned by the BIG-IP REST API; the generic mappings in
    # Parameters are sufficient, so nothing is overridden here.
    pass
class ModuleParameters(Parameters):
    """Parameters supplied by the user, normalized into API form.

    Six of the boolean options share the exact same normalization (Ansible
    bool -> 'enabled'/'disabled'); that logic is factored into
    ``_flag_to_state`` instead of being repeated per property.
    """

    def _flag_to_state(self, key):
        # Normalize an Ansible boolean into the 'enabled'/'disabled' strings
        # the BIG-IP API expects. Returns None when the option was not given.
        if self._values[key] is None:
            return None
        result = flatten_boolean(self._values[key])
        if result == 'yes':
            return 'enabled'
        if result == 'no':
            return 'disabled'

    @property
    def parent(self):
        # Fully-qualify the parent profile name with the partition.
        if self._values['parent'] is None:
            return None
        return fq_name(self.partition, self._values['parent'])

    @property
    def security(self):
        return self._flag_to_state('security')

    @property
    def dialog_aware(self):
        return self._flag_to_state('dialog_aware')

    @property
    def enable_sip_firewall(self):
        # This attribute is sent as 'yes'/'no', not 'enabled'/'disabled'.
        if self._values['enable_sip_firewall'] is None:
            return None
        return flatten_boolean(self._values['enable_sip_firewall'])

    @property
    def insert_via_header(self):
        return self._flag_to_state('insert_via_header')

    @property
    def secure_via_header(self):
        return self._flag_to_state('secure_via_header')

    @property
    def terminate_on_bye(self):
        return self._flag_to_state('terminate_on_bye')

    @property
    def insert_record_route_header(self):
        return self._flag_to_state('insert_record_route_header')

    @property
    def max_size(self):
        # Range documented by the module: 0 - 4294967295 bytes.
        if self._values['max_size'] is None:
            return None
        if 0 <= self._values['max_size'] <= 4294967295:
            return self._values['max_size']
        raise F5ModuleError(
            "Valid 'max_size' must be in range 0 - 4294967295 bytes."
        )

    @property
    def log_profile(self):
        # '' is passed through unchanged: it means "remove the setting".
        if self._values['log_profile'] in [None, '']:
            return self._values['log_profile']
        return fq_name(self.partition, self._values['log_profile'])

    @property
    def log_publisher(self):
        # '' is passed through unchanged: it means "remove the setting".
        if self._values['log_publisher'] in [None, '']:
            return self._values['log_publisher']
        return fq_name(self.partition, self._values['log_publisher'])
class Changes(Parameters):
    def to_return(self):
        """Render the tracked changes as a plain dict for module output.

        Any exception raised while resolving a property is deliberately
        swallowed so that a partially built result is still returned.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    # Changes in the form sent to the BIG-IP API; no extra massaging needed.
    pass
class ReportableChanges(Changes):
    """Changes rendered for user-facing module output.

    Boolean-like values that were converted to API form ('enabled'/'disabled')
    are flattened back to the 'yes'/'no' strings users supplied.
    """

    @property
    def security(self):
        return flatten_boolean(self._values['security'])

    @property
    def dialog_aware(self):
        return flatten_boolean(self._values['dialog_aware'])

    @property
    def enable_sip_firewall(self):
        return flatten_boolean(self._values['enable_sip_firewall'])

    @property
    def insert_via_header(self):
        return flatten_boolean(self._values['insert_via_header'])

    @property
    def terminate_on_bye(self):
        return flatten_boolean(self._values['terminate_on_bye'])

    @property
    def insert_record_route_header(self):
        return flatten_boolean(self._values['insert_record_route_header'])

    @property
    def secure_via_header(self):
        return flatten_boolean(self._values['secure_via_header'])
class Difference(object):
    """Compares desired (want) against on-device (have) parameters.

    ``compare(param)`` returns the value that must be pushed to the device,
    or None when no change is needed. The four string options that support
    removal-by-empty-string shared four identical property bodies; they now
    delegate to ``_compare_clearable``.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the updated value for ``param`` or None if unchanged.

        A property defined on this class overrides the default comparison.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # 'have' lacks the attribute entirely; the wanted value is new.
            return attr1

    def _compare_clearable(self, param):
        # Shared comparison for string attributes that can be removed by
        # setting them to '': an empty want matches a missing/"none" have.
        want = getattr(self.want, param)
        if want is None:
            return None
        if want == '':
            if getattr(self.have, param) in [None, "none"]:
                return None
        if want != getattr(self.have, param):
            return want

    @property
    def description(self):
        return self._compare_clearable('description')

    @property
    def user_via_header(self):
        return self._compare_clearable('user_via_header')

    @property
    def log_profile(self):
        return self._compare_clearable('log_profile')

    @property
    def log_publisher(self):
        return self._compare_clearable('log_publisher')
class ModuleManager(object):
    """Orchestrates the SIP-profile lifecycle against the BIG-IP REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # want: user-supplied state; have: device state (filled on update).
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every user-supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Diff want vs have; returns True when something must be updated.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict result merges into the change set; scalars are keyed.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def _announce_deprecations(self, result):
        # NOTE(review): most F5 modules call self.module.deprecate here;
        # going through self.client.module looks suspect — confirm.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def exec_module(self):
        """Entry point: converge to the desired state, return result dict."""
        start = datetime.now().isoformat()
        version = tmos_version(self.client)
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        # Convert API-form changes back to user-facing form for reporting.
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        # Usage telemetry (TEEM).
        send_teem(start, self.module, version)
        return result

    def present(self):
        # Create or update depending on whether the profile already exists.
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # In check mode, report the pending change without touching the device.
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Deleting then still existing means the delete silently failed.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True/False for profile existence; raise on API errors.

        NOTE(review): an unexpected HTTP status not covered below falls
        through and implicitly returns None (falsy) — confirm intended.
        """
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/sip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
            return True
        errors = [401, 403, 409, 500, 501, 502, 503, 504]
        if resp.status in errors or 'code' in response and response['code'] in errors:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def create_on_device(self):
        # POST the collected changes as a new profile object.
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/sip/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 409]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True

    def update_on_device(self):
        # PATCH only the changed attributes onto the existing profile.
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/sip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/sip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """Fetch the profile's current attributes as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/sip/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Defines the Ansible argument spec for the bigip_profile_sip module."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            parent=dict(),
            security=dict(type='bool'),
            description=dict(),
            community=dict(),
            dialog_aware=dict(type='bool'),
            enable_sip_firewall=dict(type='bool'),
            insert_via_header=dict(type='bool'),
            user_via_header=dict(),
            secure_via_header=dict(type='bool'),
            terminate_on_bye=dict(type='bool'),
            max_size=dict(type='int'),
            log_profile=dict(),
            log_publisher=dict(),
            insert_record_route_header=dict(type='bool'),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Start from the shared F5 connection options, then layer the
        # module-specific options on top.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    main()
| |
#Author: Aaron Graubert agraubert@wustl.edu
import random
import traceback
from copy import copy
random.seed()
class bracket:
    """A node in a binary tournament bracket.

    ``left``/``right`` are either competitor names (str) or child brackets;
    ``victor`` is a competitor name, the string "FUTURE" for an undecided
    match, or one of the child brackets. Scoring against an answer key works
    by first "mapping" two structurally identical brackets onto each other
    (``mapTo``), which assigns each paired node a shared random id.

    Fixes vs the original: ``mapTo`` and ``nodes`` used mutable default
    arguments (``used=set()`` / ``indecies=set()``) — a classic Python
    pitfall — now replaced by None sentinels with identical behavior.
    """

    def __init__(self, left, right, victor):
        self.left = left
        self.right = right
        self.victor = victor
        self.mapped = False
        self.mappedTo = None
        self.num = 0
        self.status = 0
        self.generation = 0

    def mapTo(self, target, used=None, gen=0):
        """Pair this bracket with a structurally identical one.

        Assigns each node pair a unique id (the root always gets 0, since
        the first candidate id is 0 and the used-set starts empty) and
        records the mapping on both sides. Returns the set of used ids.
        """
        if used is None:
            used = set()
        number = 0
        self.generation = gen
        target.generation = gen
        while number in used:
            number = random.randint(1, 10000)
        used = used | {number}
        self.num = number
        target.num = number
        self.mapped = True
        self.mappedTo = target
        target.mapped = True
        target.mappedTo = self
        if isinstance(self.left, bracket):
            used = used | self.left.mapTo(target.left, used, gen + 1)
        if isinstance(self.right, bracket):
            used = used | self.right.mapTo(target.right, used, gen + 1)
        return used

    def nodes(self, indecies=None):
        """Return the set of mapped node ids in this subtree (None if unmapped)."""
        if not self.mapped:
            return
        if indecies is None:
            indecies = set()
        indecies = indecies | {self.num}
        if isinstance(self.left, bracket):
            indecies = indecies | self.left.nodes(indecies)
        if isinstance(self.right, bracket):
            indecies = indecies | self.right.nodes(indecies)
        return indecies

    def dump(self):
        """Serialize as one 'left::right::victor::id' line per node."""
        clone = copy(self)
        indecies = list(self.mapTo(clone))
        for item in indecies:
            target = clone.fetch(item)
            # Record either the competitor name or the child's node id.
            target.leftnum = target.left if isinstance(target.left, str) else target.left.num
            target.rightnum = target.right if isinstance(target.right, str) else target.right.num
            target.vicnum = target.victor if isinstance(target.victor, str) else target.victor.num
        output = ""
        for item in indecies:
            target = clone.fetch(item)
            output += str(target.leftnum) + "::" + str(target.rightnum) + "::" + str(target.vicnum) + "::" + str(item) + "\n"
        self.unmap()
        return output.rstrip()

    def unmap(self):
        """Undo a previous mapTo on both this tree and its partner."""
        if not self.mapped:
            return
        self.mapped = False
        if isinstance(self.left, bracket):
            self.left.unmap()
        if isinstance(self.right, bracket):
            self.right.unmap()
        self.mappedTo.mapped = False
        self.mappedTo.mappedTo = None
        self.mappedTo = None

    def correctness(self, key):
        """Serialize this bracket with per-node status versus the answer key.

        status: 1 = correct pick, -1 = wrong pick, 0 = undecided in the key
        (demoted to -1 if the picked competitor has already lost upstream).
        """
        indecies = list(self.mapTo(key))
        for item in indecies:
            target = self.fetch(item)
            target.status = 0 if key.fetch(item).findVictor() == "FUTURE" else (1 if target.findVictor() == key.fetch(item).findVictor() else -1)
        for item in indecies:
            target = self.fetch(item)
            if target.status == 0:
                target.status = target.recursiveUpdate()
        self.unmap()
        clone = copy(self)
        indecies = list(self.mapTo(clone))
        for item in indecies:
            target = clone.fetch(item)
            target.leftnum = target.left if isinstance(target.left, str) else target.left.num
            target.rightnum = target.right if isinstance(target.right, str) else target.right.num
            target.vicnum = target.victor if isinstance(target.victor, str) else target.victor.num
        output = ""
        for item in indecies:
            target = clone.fetch(item)
            output += str(target.leftnum) + "::" + str(target.rightnum) + "::" + str(target.vicnum) + "::" + str(target.status) + "::" + str(item) + "\n"
        self.unmap()
        return output.rstrip()

    def recursiveUpdate(self):
        """Resolve an undecided node: -1 if its pick already lost downstream."""
        if isinstance(self.victor, str):
            return 0
        elif self.victor.status == 0:
            return self.victor.recursiveUpdate()
        elif self.victor.status == -1:
            return -1
        else:
            return 0

    def ppr(self, key, points):
        """Points Possible Remaining: sum of points for still-open picks."""
        indecies = list(self.mapTo(key))
        for item in indecies:
            target = self.fetch(item)
            target.status = 0 if key.fetch(item).findVictor() == "FUTURE" else (1 if target.findVictor() == key.fetch(item).findVictor() else -1)
        for item in indecies:
            target = self.fetch(item)
            if target.status == 0:
                target.status = target.recursiveUpdate()
        pointsRemaining = 0
        for item in indecies:
            target = self.fetch(item)
            if target.status == 0:
                pointsRemaining += points[target.generation]
        return pointsRemaining

    def numCorrect(self, key):
        """Count picks that match the answer key's decided results."""
        indecies = list(self.mapTo(key))
        for item in indecies:
            target = self.fetch(item)
            target.status = 0 if key.fetch(item).findVictor() == "FUTURE" else (1 if target.findVictor() == key.fetch(item).findVictor() else -1)
        total = 0
        for item in indecies:
            target = self.fetch(item)
            if target.status == 1:
                total += 1
        return total

    def score(self, key, points):
        """Total points earned: points[generation] for each correct pick."""
        indecies = list(self.mapTo(key))
        total = 0
        for item in indecies:
            if self.fetch(item).findVictor() == key.fetch(item).findVictor():
                total += points[self.fetch(item).generation]
        self.unmap()
        return total

    def fetch(self, target):
        """Return the mapped node with id ``target``, or None."""
        if not self.mapped:
            return None
        if self.num == target:
            return self
        if isinstance(self.left, bracket):
            resLeft = self.left.fetch(target)
        else:
            resLeft = None
        if isinstance(self.right, bracket):
            resRight = self.right.fetch(target)
        else:
            resRight = None
        if resLeft != None:
            return resLeft
        if resRight != None:
            return resRight
        return None

    def __str__(self):
        return self.makeString()

    def findVictor(self):
        """Follow victor links down to the winning competitor's name."""
        if isinstance(self.victor, bracket):
            return self.victor.findVictor()
        return self.victor

    def makeString(self, tablevel=0):
        """Render the bracket sideways, one indent level per round."""
        output = ""
        tabs = ""
        for i in range(tablevel):
            tabs += "\t"
        if isinstance(self.right, bracket):
            output += self.right.makeString(tablevel + 1)
        else:
            output += tabs + "\t" + self.right
        output += "\n" + tabs + "("
        output += "" + self.findVictor() + ")"
        output += "\n"
        if isinstance(self.left, bracket):
            output += self.left.makeString(tablevel + 1)
        else:
            output += tabs + "\t" + self.left
        return output

    def seekNext(self, indecies):
        """Return ids of the deepest generation that still has a FUTURE match."""
        gens = []
        highest = 0
        for item in indecies:
            gen = self.fetch(item).generation
            if (gen > highest) and self.fetch(item).victor == "FUTURE":
                highest = gen
        for item in indecies:
            if self.fetch(item).generation == highest:
                gens.append(item)
        return gens

    def setNext(self, target):
        """Copy the next round of results from ``target`` into this bracket."""
        indecies = list(self.mapTo(target))
        nums = self.seekNext(indecies)
        for num in nums:
            if target.fetch(num).victor == target.fetch(num).left:
                self.fetch(num).victor = self.fetch(num).left
            else:
                self.fetch(num).victor = self.fetch(num).right
        self.unmap()
def assignBracket(nodes, target):
    """Resolve string node-id references into direct object references.

    ``nodes`` maps id strings to bracket nodes whose left/right/victor may
    still be id strings. Recursively rewires nodes[target]'s children and
    victor, returning the resolved node.
    """
    node = nodes[target]
    # Record which side won BEFORE the child links are rewritten, since the
    # comparison is on the original id strings.
    winner_side = 0
    if node.victor.isnumeric():
        if node.victor == node.left:
            winner_side = -1
        elif node.victor == node.right:
            winner_side = 1
    if node.left.isnumeric():
        node.left = assignBracket(nodes, node.left)
    if node.right.isnumeric():
        node.right = assignBracket(nodes, node.right)
    if winner_side == -1:
        node.victor = node.left
    elif winner_side == 1:
        node.victor = node.right
    return node
def bracketBuilder(source):
    """Rebuild a bracket tree from its serialized dump.

    Each non-blank line has the form 'left::right::victor::id'; node "0" is
    the root. Raises SyntaxError on a malformed line.
    """
    nodes = {}
    for raw in source.rstrip().rsplit("\n"):
        if raw == "":
            print("Skipping blank line")
            continue
        fields = raw.rsplit("::")
        try:
            nodes[fields[3]] = bracket(fields[0], fields[1], fields[2])
        except IndexError:
            print(fields)
            traceback.print_exc()
            raise SyntaxError("STOP")
    return assignBracket(nodes, "0")
| |
#!/usr/bin/env python
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import os
import sys
import optparse
from optparse import make_option
import traceback
# Attempt to import readline.
try:
import readline
except ImportError:
pass
import pyOCD
from pyOCD import __version__
from pyOCD.board import MbedBoard
from pyOCD.target.family import target_kinetis
from pyOCD.pyDAPAccess import DAPAccess
from pyOCD.core.target import Target
from pyOCD.utility import mask
# Make disasm optional.
try:
import capstone
isCapstoneAvailable = True
except ImportError:
isCapstoneAvailable = False
# Map of --log/'log' option strings to stdlib logging levels.
LEVELS = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL
}
# Human-readable names for the target run states shown by 'status'.
CORE_STATUS_DESC = {
    Target.TARGET_HALTED: "Halted",
    Target.TARGET_RUNNING: "Running",
    Target.TARGET_RESET: "Reset",
    Target.TARGET_SLEEPING: "Sleeping",
    Target.TARGET_LOCKUP: "Lockup",
}
# Display names for the vector-catch sources (see the 'vector-catch' option).
VC_NAMES_MAP = {
    Target.CATCH_HARD_FAULT: "hard fault",
    Target.CATCH_BUS_FAULT: "bus fault",
    Target.CATCH_MEM_FAULT: "memory fault",
    Target.CATCH_INTERRUPT_ERR: "interrupt error",
    Target.CATCH_STATE_ERR: "state error",
    Target.CATCH_CHECK_ERR: "check error",
    Target.CATCH_COPROCESSOR_ERR: "coprocessor error",
    Target.CATCH_CORE_RESET: "core reset",
}
# DP register byte offsets to DAPAccess register identifiers.
DP_REGS_MAP = {
    0x0: DAPAccess.REG.DP_0x0,
    0x4: DAPAccess.REG.DP_0x4,
    0x8: DAPAccess.REG.DP_0x8,
    0xc: DAPAccess.REG.DP_0xC
}
## Default SWD clock in kHz.
DEFAULT_CLOCK_FREQ_KHZ = 1000
## Command info and help.
# Fixes vs the original: 'writedp' and 'writeap' help strings wrongly said
# "Read ..." (copy-paste from their read counterparts), and 'set' listed
# info names instead of the option names it actually accepts.
COMMAND_INFO = {
    'list': {
        'aliases': [],
        'args': "",
        'help': "Show available targets"
    },
    'erase': {
        'aliases': [],
        'args': "ADDR [COUNT]",
        'help': "Erase internal flash sectors"
    },
    'unlock': {
        'aliases': [],
        'args': "",
        'help': "Unlock security on the target"
    },
    'status': {
        'aliases': ['stat'],
        'args': "",
        'help': "Show the target's current state"
    },
    'reg': {
        'aliases': [],
        'args': "[-f] [REG]",
        'help': "Print all or one register"
    },
    'wreg': {
        'aliases': [],
        'args': "REG VALUE",
        'help': "Set the value of a register"
    },
    'reset': {
        'aliases': [],
        'args': "[-h/--halt]",
        'help': "Reset the target"
    },
    'savemem': {
        'aliases': [],
        'args': "ADDR LEN FILENAME",
        'help': "Save a range of memory to a binary file"
    },
    'loadmem': {
        'aliases': [],
        'args': "ADDR FILENAME",
        'help': "Load a binary file to an address in memory"
    },
    'read8': {
        'aliases': ['read', 'r', 'rb'],
        'args': "ADDR [LEN]",
        'help': "Read 8-bit bytes"
    },
    'read16': {
        'aliases': ['r16', 'rh'],
        'args': "ADDR [LEN]",
        'help': "Read 16-bit halfwords"
    },
    'read32': {
        'aliases': ['r32', 'rw'],
        'args': "ADDR [LEN]",
        'help': "Read 32-bit words"
    },
    'write8': {
        'aliases': ['write', 'w', 'wb'],
        'args': "ADDR DATA...",
        'help': "Write 8-bit bytes"
    },
    'write16': {
        'aliases': ['w16', 'wh'],
        'args': "ADDR DATA...",
        'help': "Write 16-bit halfwords"
    },
    'write32': {
        'aliases': ['w32', 'ww'],
        'args': "ADDR DATA...",
        'help': "Write 32-bit words"
    },
    'go': {
        'aliases': ['g'],
        'args': "",
        'help': "Resume execution of the target"
    },
    'step': {
        'aliases': ['s'],
        'args': "",
        'help': "Step one instruction"
    },
    'halt': {
        'aliases': ['h'],
        'args': "",
        'help': "Halt the target"
    },
    'break': {
        'aliases': [],
        'args': "ADDR",
        'help': "Set a breakpoint address"
    },
    'rmbreak': {
        'aliases': [],
        'args': "ADDR",
        'help': "Remove a breakpoint"
    },
    'lsbreak': {
        'aliases': [],
        'args': "",
        'help': "List breakpoints"
    },
    'help': {
        'aliases': ['?'],
        'args': "[CMD]",
        'help': "Show help for commands"
    },
    'disasm': {
        'aliases': ['d'],
        'args': "[-c/--center] ADDR [LEN]",
        'help': "Disassemble instructions at an address",
        'extra_help': "Only available if the capstone library is installed."
    },
    'exit': {
        'aliases': ['quit'],
        'args': "",
        'help': "Quit pyocd-tool"
    },
    'core': {
        'aliases': [],
        'args': "[NUM]",
        'help': "Select CPU core by number or print selected core"
    },
    'readdp': {
        'aliases': ['rdp'],
        'args': "ADDR",
        'help': "Read DP register"
    },
    'writedp': {
        'aliases': ['wdp'],
        'args': "ADDR DATA",
        'help': "Write DP register"
    },
    'readap': {
        'aliases': ['rap'],
        'args': "[APSEL] ADDR",
        'help': "Read AP register"
    },
    'writeap': {
        'aliases': ['wap'],
        'args': "[APSEL] ADDR DATA",
        'help': "Write AP register"
    },
    'reinit': {
        'aliases': [],
        'args': "",
        'help': "Reinitialize the target object"
    },
    'show': {
        'aliases': [],
        'args': "INFO",
        'help': "Report info about the target",
    },
    'set': {
        'aliases': [],
        'args': "NAME VALUE",
        'help': "Set an option value",
        'extra_help': "Available option names: vector-catch (vc), step-into-interrupt (si), nreset, log, clock.",
    },
}
# Help text for the 'show' command's info names.
INFO_HELP = {
    'map': {
        'aliases': [],
        'help': "Target memory map.",
    },
    'peripherals': {
        'aliases': [],
        'help': "List of target peripheral instances.",
    },
    'uid': {
        'aliases': [],
        'help': "Target's unique ID",
    },
    'cores': {
        'aliases': [],
        'help': "Information about CPU cores in the target.",
    },
    'target': {
        'aliases': [],
        'help': "General target information.",
    },
}
# Help text for the 'set' command's option names.
OPTION_HELP = {
    'vector-catch': {
        'aliases': ['vc'],
        'help': "Control enabled vector catch sources. Value is a concatenation of one letter per enabled source in any order, or 'all' or 'none'. (h=hard fault, b=bus fault, m=mem fault, i=irq err, s=state err, c=check err, p=nocp, r=reset, a=all, n=none).",
    },
    'step-into-interrupt': {
        'aliases': ['si'],
        'help': "Set whether to enable or disable interrupts when single stepping. Set to 1 to enable."
    },
    'nreset': {
        'aliases': [],
        'help': "Set nRESET signal state. Accepts a value of 0 or 1."
    },
    'log': {
        'aliases': [],
        'help': "Set log level to one of debug, info, warning, error, critical"
    },
    'clock': {
        'aliases': [],
        'help': "Set SWD or JTAG clock frequency"
    },
}
def hex_width(value, width):
    """Format an integer as a zero-padded hex string for a bit width.

    value -- integer to format.
    width -- register width in bits; must be 8, 16, or 32.

    Raises ToolError for any other width. (Bug fix: the original error path
    referenced an undefined name 'reg', raising NameError instead of the
    intended ToolError carrying the bad width.)
    """
    if width == 8:
        return "%02x" % value
    elif width == 16:
        return "%04x" % value
    elif width == 32:
        return "%08x" % value
    else:
        raise ToolError("unrecognized register width (%d)" % width)
def dumpHexData(data, startAddress=0, width=8):
    """Print a hex dump of 'data' to stdout, one address-prefixed row per line.

    data         -- sequence of integer words, each already sized to 'width' bits.
    startAddress -- address printed at the start of the first row.
    width        -- word width in bits: 8 (16 words/row, space every 4),
                    16 (8 words/row), or 32 (4 words/row).

    Python 2 print statements with trailing commas are used to build each row
    without newlines; the bare 'print' at the bottom ends the row.
    """
    i = 0
    # Outer loop emits one row; inner loop fills it until the row is full
    # (via break) or the data runs out.
    while i < len(data):
        print "%08x: " % (startAddress + i),
        while i < len(data):
            d = data[i]
            i += 1
            if width == 8:
                print "%02x" % d,
                # Extra spacer between each group of 4 bytes.
                if i % 4 == 0:
                    print "",
                if i % 16 == 0:
                    break
            elif width == 16:
                print "%04x" % d,
                if i % 8 == 0:
                    break
            elif width == 32:
                print "%08x" % d,
                if i % 4 == 0:
                    break
        print
class ToolError(Exception):
    """Raised for user-visible command failures in pyocd-tool."""
class ToolExitException(Exception):
    """Raised to unwind out of the command loop when the user quits."""
def cmdoptions(opts):
    """Decorator factory that pre-parses a command's raw argument list.

    'opts' is a list of optparse Option objects. The decorated handler is
    called as fn(inst, options, remaining_args), where 'options' and
    'remaining_args' are the results of parsing the raw args against 'opts'.
    Help is disabled so '-h' can be used as a command option.
    """
    def process_opts(fn):
        parser = optparse.OptionParser(add_help_option=False)
        for option in opts:
            parser.add_option(option)

        def wrapper(inst, args):
            options, remaining = parser.parse_args(args)
            return fn(inst, options, remaining)

        return wrapper
    return process_opts
class PyOCDConsole(object):
    """Interactive read-eval-print loop for pyocd-tool.

    Reads lines from stdin, splits them on ';' into individual commands, and
    dispatches each through the owning PyOCDTool's command table. An empty
    line repeats the previous command.
    """
    # Prompt string shown before each input line.
    PROMPT = '>>> '

    def __init__(self, tool):
        # tool -- the PyOCDTool instance whose command_list we dispatch into.
        self.tool = tool
        # Most recent non-empty line, replayed when the user hits Enter alone.
        self.last_command = ''

    def run(self):
        """Run the REPL until EOF (Ctrl-D/Ctrl-Z); Ctrl-C aborts one line."""
        try:
            while True:
                try:
                    line = raw_input(self.PROMPT)
                    line = line.strip()
                    if line:
                        self.process_command_line(line)
                        self.last_command = line
                    elif self.last_command:
                        # Empty input repeats the last command.
                        self.process_command(self.last_command)
                except KeyboardInterrupt:
                    print
        except EOFError:
            # Print a newline when we get a Ctrl-D on a Posix system.
            # Windows exits with a Ctrl-Z+Return, so there is no need for this.
            if os.name != "nt":
                print

    def process_command_line(self, line):
        """Split a line on ';' and run each piece as a command."""
        for cmd in line.split(';'):
            self.process_command(cmd)

    def process_command(self, cmd):
        """Parse and dispatch a single command string.

        '$' prefix evaluates the rest as a Python expression; '!' runs it as
        a shell command; a bare core-register name prints that register.
        All expected errors are caught and reported so the REPL keeps going;
        ToolExitException propagates to end the session.
        """
        try:
            firstChar = (cmd.strip())[0]
            if firstChar in '$!':
                cmd = cmd[1:].strip()
                if firstChar == '$':
                    self.tool.handle_python(cmd)
                elif firstChar == '!':
                    os.system(cmd)
                return
            args = pyOCD.utility.cmdline.split_command_line(cmd)
            cmd = args[0].lower()
            args = args[1:]
            # Handle register name as command.
            if cmd in pyOCD.coresight.cortex_target.CORE_REGISTER:
                self.tool.handle_reg([cmd])
                return
            # Check for valid command.
            if cmd not in self.tool.command_list:
                print "Error: unrecognized command '%s'" % cmd
                return
            # Run command.
            handler = self.tool.command_list[cmd]
            handler(args)
        except ValueError:
            print "Error: invalid argument"
            traceback.print_exc()
        except DAPAccess.TransferError as e:
            print "Error:", e
            traceback.print_exc()
        except ToolError as e:
            print "Error:", e
        except ToolExitException:
            # Let 'exit'/'quit' unwind out of the REPL.
            raise
        except Exception as e:
            print "Unexpected exception:", e
            traceback.print_exc()
class PyOCDTool(object):
    """Implementation of the pyocd-tool command-line debugger.

    Holds the connection state (board, target, link, flash) and the dispatch
    tables mapping command/info/option names (and their aliases) to handler
    methods.
    """
    def __init__(self):
        # Connected board object; None until run() connects.
        self.board = None
        # Process exit code returned from run().
        self.exitCode = 0
        # Whether single-stepping may enter interrupt handlers (see 'set si').
        self.step_into_interrupt = False
        # Command name (and alias) -> handler method.
        self.command_list = {
                'list' :    self.handle_list,
                'erase' :   self.handle_erase,
                'unlock' :  self.handle_unlock,
                'status' :  self.handle_status,
                'stat' :    self.handle_status,
                'reg' :     self.handle_reg,
                'wreg' :    self.handle_write_reg,
                'reset' :   self.handle_reset,
                'savemem' : self.handle_savemem,
                'loadmem' : self.handle_loadmem,
                'read' :    self.handle_read8,
                'read8' :   self.handle_read8,
                'read16' :  self.handle_read16,
                'read32' :  self.handle_read32,
                'r' :       self.handle_read8,
                'rb' :      self.handle_read8,
                'r16' :     self.handle_read16,
                'rh' :      self.handle_read16,
                'r32' :     self.handle_read32,
                'rw' :      self.handle_read32,
                'write' :   self.handle_write8,
                'write8' :  self.handle_write8,
                'write16' : self.handle_write16,
                'write32' : self.handle_write32,
                'w' :       self.handle_write8,
                'wb' :      self.handle_write8,
                'w16' :     self.handle_write16,
                'wh' :      self.handle_write16,
                'w32' :     self.handle_write32,
                'ww' :      self.handle_write32,
                'go' :      self.handle_go,
                'g' :       self.handle_go,
                'step' :    self.handle_step,
                's' :       self.handle_step,
                'halt' :    self.handle_halt,
                'h' :       self.handle_halt,
                'break' :   self.handle_breakpoint,
                'rmbreak' : self.handle_remove_breakpoint,
                'lsbreak' : self.handle_list_breakpoints,
                'disasm' :  self.handle_disasm,
                'd' :       self.handle_disasm,
                'exit' :    self.handle_exit,
                'quit' :    self.handle_exit,
                'core' :    self.handle_core,
                'readdp' :  self.handle_readdp,
                'writedp' : self.handle_writedp,
                'readap' :  self.handle_readap,
                'writeap' : self.handle_writeap,
                'rdp' :     self.handle_readdp,
                'wdp' :     self.handle_writedp,
                'rap' :     self.handle_readap,
                'wap' :     self.handle_writeap,
                'reinit' :  self.handle_reinit,
                'show' :    self.handle_show,
                'set' :     self.handle_set,
                'help' :    self.handle_help,
                '?' :       self.handle_help,
                }
        # 'show' sub-command name -> handler method.
        self.info_list = {
                'map' :             self.handle_show_map,
                'peripherals' :     self.handle_show_peripherals,
                'uid' :             self.handle_show_unique_id,
                'cores' :           self.handle_show_cores,
                'target' :          self.handle_show_target,
                }
        # 'set' option name (and alias) -> handler method.
        self.option_list = {
                'vector-catch' :        self.handle_set_vectorcatch,
                'vc' :                  self.handle_set_vectorcatch,
                'step-into-interrupt' : self.handle_set_step_interrupts,
                'si' :                  self.handle_set_step_interrupts,
                'nreset' :              self.handle_set_nreset,
                'log' :                 self.handle_set_log,
                'clock' :               self.handle_set_clock,
                }
def get_args(self):
    """Build the argparse parser and return the parsed command-line args.

    The epilog lists every registered command name so '--help' doubles as a
    quick command reference.
    """
    debug_levels = LEVELS.keys()
    epi = "Available commands:\n" + ', '.join(sorted(self.command_list.keys()))
    parser = argparse.ArgumentParser(description='Target inspection utility', epilog=epi)
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument("-H", "--halt", action="store_true", help="Halt core upon connect.")
    parser.add_argument("-N", "--no-init", action="store_true", help="Do not init debug system.")
    parser.add_argument('-k', "--clock", metavar='KHZ', default=DEFAULT_CLOCK_FREQ_KHZ, type=int, help="Set SWD speed in kHz. (Default 1 MHz.)")
    parser.add_argument('-b', "--board", action='store', metavar='ID', help="Use the specified board. ")
    parser.add_argument('-t', "--target", action='store', metavar='TARGET', help="Override target.")
    parser.add_argument("-d", "--debug", dest="debug_level", choices=debug_levels, default='warning', help="Set the level of system logging output. Supported choices are: " + ", ".join(debug_levels), metavar="LEVEL")
    parser.add_argument("cmd", nargs='?', default=None, help="Command")
    parser.add_argument("args", nargs='*', help="Arguments for the command.")
    parser.add_argument("-da", "--daparg", dest="daparg", nargs='+', help="Send setting to DAPAccess layer.")
    return parser.parse_args()
def configure_logging(self):
    """Apply the -d/--debug level to the root logger (default WARNING)."""
    level = LEVELS.get(self.args.debug_level, logging.WARNING)
    logging.basicConfig(level=level)
def run(self):
    """Top-level entry point: parse args, connect, then run one command or
    the interactive console.

    Returns the process exit code (0 success, 1 tool error, 2 transfer
    error). 'list' and 'help' are handled before connecting to a board.
    The board is always uninited in the finally block.
    """
    try:
        # Read command-line arguments.
        self.args = self.get_args()
        self.cmd = self.args.cmd
        if self.cmd:
            self.cmd = self.cmd.lower()

        # Set logging level
        self.configure_logging()
        DAPAccess.set_args(self.args.daparg)

        # Check for a valid command.
        if self.cmd and self.cmd not in self.command_list:
            print "Error: unrecognized command '%s'" % self.cmd
            return 1

        # Handle certain commands without connecting.
        if self.cmd == 'list':
            self.handle_list([])
            return 0
        elif self.cmd == 'help':
            self.handle_help(self.args.args)
            return 0

        if self.args.clock != DEFAULT_CLOCK_FREQ_KHZ:
            print "Setting SWD clock to %d kHz" % self.args.clock

        # Connect to board.
        self.board = MbedBoard.chooseBoard(board_id=self.args.board, target_override=self.args.target, init_board=False, frequency=(self.args.clock * 1000))
        self.board.target.setAutoUnlock(False)
        self.board.target.setHaltOnConnect(False)
        try:
            if not self.args.no_init:
                self.board.init()
        except DAPAccess.TransferFaultError as e:
            # A fault on a locked device is expected; only report it when
            # the target is not locked.
            if not self.board.target.isLocked():
                print "Transfer fault while initing board: %s" % e
                traceback.print_exc()
                self.exitCode = 1
                return self.exitCode
        except Exception as e:
            print "Exception while initing board: %s" % e
            traceback.print_exc()
            self.exitCode = 1
            return self.exitCode

        # Cache frequently used objects from the board.
        self.target = self.board.target
        self.link = self.board.link
        self.flash = self.board.flash

        # Build lowercase-name -> peripheral map from the SVD, if present.
        self.svd_device = self.target.svd_device
        self.peripherals = {}
        if self.svd_device:
            for p in self.svd_device.peripherals:
                self.peripherals[p.name.lower()] = p

        # Halt if requested.
        if self.args.halt:
            self.handle_halt([])

        # Handle a device with flash security enabled.
        self.didErase = False
        if not self.args.no_init and self.target.isLocked() and self.cmd != 'unlock':
            print "Warning: Target is locked, limited operations available. Use unlock command to mass erase and unlock."

        # If no command, enter interactive mode.
        if not self.cmd:
            if not self.args.no_init:
                try:
                    # Say what we're connected to.
                    print "Connected to %s [%s]: %s" % (self.target.part_number,
                        CORE_STATUS_DESC[self.target.getState()], self.board.getUniqueID())
                except DAPAccess.TransferFaultError:
                    pass

            # Run the command line.
            console = PyOCDConsole(self)
            console.run()
        else:
            # Invoke action handler.
            result = self.command_list[self.cmd](self.args.args)
            if result is not None:
                self.exitCode = result
    except ToolExitException:
        self.exitCode = 0
    except ValueError:
        print "Error: invalid argument"
    except DAPAccess.TransferError:
        print "Error: transfer failed"
        traceback.print_exc()
        self.exitCode = 2
    except ToolError as e:
        print "Error:", e
        self.exitCode = 1
    finally:
        if self.board != None:
            # Pass false to prevent target resume.
            self.board.uninit(False)

    return self.exitCode
def handle_list(self, args):
    """'list' command: print all connected boards."""
    MbedBoard.listConnectedBoards()
def handle_status(self, args):
    """'status' command: print lock state, Kinetis MDM-AP status, and the
    run state of each core (cores are only readable when unlocked)."""
    if self.target.isLocked():
        print "Security:       Locked"
    else:
        print "Security:       Unlocked"
    if isinstance(self.target, target_kinetis.Kinetis):
        print "MDM-AP Status:  0x%08x" % self.target.mdm_ap.read_reg(target_kinetis.MDM_STATUS)
    if not self.target.isLocked():
        for i, c in enumerate(self.target.cores):
            core = self.target.cores[c]
            print "Core %d status:  %s" % (i, CORE_STATUS_DESC[core.getState()])
def handle_reg(self, args):
    """'reg' command: print core register(s) or SVD peripheral register(s).

    No args           -> dump all core registers.
    -f REG            -> show bit fields of a peripheral register.
    NAME              -> core register, or 'periph' / 'periph.reg' from SVD.
    Raises ToolError for unknown names.
    """
    # If there are no args, print all register values.
    if len(args) < 1:
        self.dump_registers()
        return

    if len(args) == 2 and args[0].lower() == '-f':
        del args[0]
        show_fields = True
    else:
        show_fields = False

    reg = args[0].lower()
    if reg in pyOCD.coresight.cortex_target.CORE_REGISTER:
        value = self.target.readCoreRegister(reg)
        if type(value) is int or type(value) is long:
            print "%s = 0x%08x (%d)" % (reg, value, value)
        elif type(value) is float:
            # Floating-point registers (s0-s31, etc.) print as float.
            print "%s = %g" % (reg, value)
        else:
            raise ToolError("Unknown register value type")
    else:
        # Not a core register: look it up as 'peripheral[.register]' in SVD.
        subargs = reg.split('.')
        if self.peripherals.has_key(subargs[0]):
            p = self.peripherals[subargs[0]]
            if len(subargs) > 1:
                r = [x for x in p.registers if x.name.lower() == subargs[1]]
                if len(r):
                    # A single named register always shows its fields.
                    self._dump_peripheral_register(p, r[0], True)
                else:
                    raise ToolError("invalid register '%s' for %s" % (subargs[1], p.name))
            else:
                for r in p.registers:
                    self._dump_peripheral_register(p, r, show_fields)
        else:
            raise ToolError("invalid peripheral '%s'" % (subargs[0]))
def handle_write_reg(self, args):
    """'wreg' command: write a core register or an SVD peripheral register.

    args[0] -- register name: core register, 'periph.reg', or
               'periph.reg.field' (read-modify-write of just that field).
    args[1] -- value to write.
    Raises ToolError on missing args or unknown names.
    """
    if len(args) < 1:
        raise ToolError("No register specified")
    if len(args) < 2:
        raise ToolError("No value specified")

    reg = args[0].lower()
    if reg in pyOCD.coresight.cortex_target.CORE_REGISTER:
        # FP registers (s0-s31, but not 'sp') take a float value.
        if reg.startswith('s') and reg != 'sp':
            value = float(args[1])
        else:
            value = self.convert_value(args[1])
        self.target.writeCoreRegister(reg, value)
    else:
        value = self.convert_value(args[1])
        subargs = reg.split('.')
        if len(subargs) < 2:
            raise ToolError("no register specified")
        if self.peripherals.has_key(subargs[0]):
            p = self.peripherals[subargs[0]]
            r = [x for x in p.registers if x.name.lower() == subargs[1]]
            if len(r):
                r = r[0]
                addr = p.base_address + r.address_offset
                if len(subargs) == 2:
                    # Whole-register write.
                    print "writing 0x%x to 0x%x:%d (%s)" % (value, addr, r.size, r.name)
                    self.target.writeMemory(addr, value, r.size)
                elif len(subargs) == 3:
                    # Field write: read, insert the field bits, write back.
                    f = [x for x in r.fields if x.name.lower() == subargs[2]]
                    if len(f):
                        f = f[0]
                        msb = f.bit_offset + f.bit_width - 1
                        lsb = f.bit_offset
                        originalValue = self.target.readMemory(addr, r.size)
                        value = mask.bfi(originalValue, msb, lsb, value)
                        print "writing 0x%x to 0x%x[%d:%d]:%d (%s.%s)" % (value, addr, msb, lsb, r.size, r.name, f.name)
                        self.target.writeMemory(addr, value, r.size)
                else:
                    raise ToolError("too many dots")
                # Show the register after writing.
                self._dump_peripheral_register(p, r, True)
            else:
                raise ToolError("invalid register '%s' for %s" % (subargs[1], p.name))
        else:
            raise ToolError("invalid peripheral '%s'" % (subargs[0]))
@cmdoptions([make_option('-h', "--halt", action="store_true")])
def handle_reset(self, args, other):
    """'reset' command: reset the target; -h/--halt stops at the reset
    vector and reports whether the halt succeeded."""
    print "Resetting target"
    if args.halt:
        self.target.resetStopOnReset()

        status = self.target.getState()
        if status != Target.TARGET_HALTED:
            print "Failed to halt device on reset"
        else:
            print "Successfully halted device on reset"
    else:
        self.target.reset()
def handle_set_nreset(self, args):
    """'set nreset' option: drive the nRESET line; args[0] is 0 or 1.

    Note the signal is active-low: a value of 0 asserts reset.
    """
    if len(args) != 1:
        print "Missing reset state"
        return
    state = int(args[0], base=0)
    print "nRESET = %d" % (state)
    self.target.dp.assert_reset((state == 0))
@cmdoptions([make_option('-c', "--center", action="store_true")])
def handle_disasm(self, args, other):
    """'disasm' command: disassemble COUNT bytes (default 6) at ADDR.

    With -c/--center the given address is placed in the middle of the
    disassembled range. Requires the capstone library.
    """
    if len(other) == 0:
        print "Error: no address specified"
        return 1
    addr = self.convert_value(other[0])
    if len(other) < 2:
        count = 6
    else:
        count = self.convert_value(other[1])

    if args.center:
        addr -= count // 2

    # Since we're disassembling, make sure the Thumb bit is cleared.
    addr &= ~1

    # Print disasm of data.
    data = self.target.readBlockMemoryUnaligned8(addr, count)
    self.print_disasm(str(bytearray(data)), addr)
def handle_read8(self, args):
    """'read8'/'r'/'rb' command: hex dump memory as bytes."""
    return self.do_read(args, 8)
def handle_read16(self, args):
    """'read16'/'rh' command: hex dump memory as halfwords."""
    return self.do_read(args, 16)
def handle_read32(self, args):
    """'read32'/'rw' command: hex dump memory as words."""
    return self.do_read(args, 32)
def handle_write8(self, args):
    """'write8'/'w'/'wb' command: write byte values to memory."""
    return self.do_write(args, 8)
def handle_write16(self, args):
    """'write16'/'wh' command: write halfword values to memory."""
    return self.do_write(args, 16)
def handle_write32(self, args):
    """'write32'/'ww' command: write word values to memory."""
    return self.do_write(args, 32)
def handle_savemem(self, args):
    """'savemem' command: read COUNT bytes at ADDR and write them to FILENAME.

    args -- [ADDR, COUNT, FILENAME]; returns 1 on missing arguments.
    """
    if len(args) < 3:
        print "Error: missing argument"
        return 1
    addr = self.convert_value(args[0])
    count = self.convert_value(args[1])
    filename = args[2]

    data = bytearray(self.target.readBlockMemoryUnaligned8(addr, count))

    with open(filename, 'wb') as f:
        f.write(data)
    print "Saved %d bytes to %s" % (count, filename)
def handle_loadmem(self, args):
    """'loadmem' command: write the contents of FILENAME to memory at ADDR.

    args -- [ADDR, FILENAME]; returns 1 on missing arguments.
    NOTE(review): writes straight to memory with no flash programming path,
    unlike do_write() — presumably intended for RAM; confirm.
    """
    if len(args) < 2:
        print "Error: missing argument"
        return 1
    addr = self.convert_value(args[0])
    filename = args[1]

    with open(filename, 'rb') as f:
        data = bytearray(f.read())

    self.target.writeBlockMemoryUnaligned8(addr, data)
    print "Loaded %d bytes to 0x%08x" % (len(data), addr)
def do_read(self, args, width):
    """Shared implementation of the read commands.

    args  -- [ADDR, optional COUNT-in-bytes]; default count is one element.
    width -- element width in bits (8, 16, or 32).
    Reads raw bytes and converts to the element width, then hex dumps.
    """
    if len(args) == 0:
        print "Error: no address specified"
        return 1
    addr = self.convert_value(args[0])
    if len(args) < 2:
        # Default to reading a single element of the requested width.
        count = width // 8
    else:
        count = self.convert_value(args[1])

    if width == 8:
        data = self.target.readBlockMemoryUnaligned8(addr, count)
        byteData = data
    elif width == 16:
        byteData = self.target.readBlockMemoryUnaligned8(addr, count)
        data = pyOCD.utility.conversion.byteListToU16leList(byteData)
    elif width == 32:
        byteData = self.target.readBlockMemoryUnaligned8(addr, count)
        data = pyOCD.utility.conversion.byteListToU32leList(byteData)

    # Print hex dump of output.
    dumpHexData(data, addr, width=width)
def do_write(self, args, width):
    """Shared implementation of the write commands.

    args  -- [ADDR, VALUE...]; each value is one element of 'width' bits.
    width -- element width in bits (8, 16, or 32).
    Values are converted to a little-endian byte list; if the range lies in
    flash the flash programming path is used, otherwise a plain memory write.
    """
    if len(args) == 0:
        print "Error: no address specified"
        return 1
    addr = self.convert_value(args[0])
    if len(args) <= 1:
        print "Error: no data for write"
        return 1
    else:
        data = [self.convert_value(d) for d in args[1:]]

    if width == 8:
        pass
    elif width == 16:
        data = pyOCD.utility.conversion.u16leListToByteList(data)
    elif width == 32:
        data = pyOCD.utility.conversion.u32leListToByteList(data)

    if self.isFlashWrite(addr, width, data):
        self.target.flash.init()
        self.target.flash.programPhrase(addr, data)
    else:
        self.target.writeBlockMemoryUnaligned8(addr, data)
    self.target.flush()
def handle_erase(self, args):
    """'erase' command: erase COUNT flash pages (default 1) starting at the
    page containing ADDR. Raises ToolError if no address was given."""
    if len(args) < 1:
        raise ToolError("invalid arguments")
    addr = self.convert_value(args[0])
    if len(args) < 2:
        count = 1
    else:
        count = self.convert_value(args[1])
    self.flash.init()
    while count:
        # Erase the whole page containing addr, then advance by its size
        # (pages may vary in size across regions).
        info = self.flash.getPageInfo(addr)
        self.flash.erasePage(info.base_addr)
        print "Erased page 0x%08x" % info.base_addr
        count -= 1
        addr += info.size
def handle_unlock(self, args):
    """'unlock' command: mass erase the device to clear flash security."""
    # Currently the same as erase.
    # NOTE(review): didErase is initialized False in run() and never set
    # True anywhere visible here — confirm whether the guard is vestigial.
    if not self.didErase:
        self.target.massErase()
def handle_go(self, args):
    """'go' command: resume the target and report the resulting state."""
    self.target.resume()
    status = self.target.getState()
    if status == Target.TARGET_RUNNING:
        print "Successfully resumed device"
    else:
        print "Failed to resume device"
def handle_step(self, args):
    """'step' command: single step one instruction, then show the new PC
    (disassembled if capstone is available, plain hex otherwise).

    Interrupts are masked during the step unless 'set si 1' was used.
    """
    self.target.step(disable_interrupts=not self.step_into_interrupt)
    addr = self.target.readCoreRegister('pc')
    if isCapstoneAvailable:
        # Clear the Thumb bit before reading the instruction.
        addr &= ~1
        data = self.target.readBlockMemoryUnaligned8(addr, 4)
        self.print_disasm(str(bytearray(data)), addr, maxInstructions=1)
    else:
        print "PC = 0x%08x" % (addr)
def handle_halt(self, args):
    """'halt' command: halt the target; returns 1 if it did not halt."""
    self.target.halt()

    status = self.target.getState()
    if status != Target.TARGET_HALTED:
        print "Failed to halt device"
        return 1
    else:
        print "Successfully halted device"
def handle_breakpoint(self, args):
    """'break' command: set a breakpoint at the address in args[0].

    Raises ToolError if no address was provided.
    """
    if len(args) < 1:
        raise ToolError("no breakpoint address provided")
    addr = self.convert_value(args[0])
    if self.target.setBreakpoint(addr):
        self.target.selected_core.bp_manager.flush()
        print "Set breakpoint at 0x%08x" % addr
    else:
        print "Failed to set breakpoint at 0x%08x" % addr
def handle_remove_breakpoint(self, args):
if len(args) < 1:
raise ToolError("no breakpoint address provided")
addr = self.convert_value(args[0])
try:
type = self.target.getBreakpointType(addr)
self.target.removeBreakpoint(addr)
self.target.selected_core.bp_manager.flush()
print "Removed breakpoint at 0x%08x" % addr
except:
print "Failed to remove breakpoint at 0x%08x" % addr
def handle_list_breakpoints(self, args):
    """'lsbreak' command: list installed breakpoints and how many hardware
    breakpoints remain available on the selected core."""
    availableBpCount = self.target.selected_core.availableBreakpoint()
    print "%d hardware breakpoints available" % availableBpCount
    bps = self.target.selected_core.bp_manager.get_breakpoints()
    if not len(bps):
        print "No breakpoints installed"
    else:
        for i, addr in enumerate(bps):
            print "%d: 0x%08x" % (i, addr)
def handle_set_log(self, args):
    """'set log' option: change the root logger level; args[0] must be one
    of the names in LEVELS. Returns 1 on bad input."""
    if len(args) < 1:
        print "Error: no log level provided"
        return 1
    if args[0].lower() not in LEVELS:
        print "Error: log level must be one of {%s}" % ','.join(LEVELS.keys())
        return 1
    logging.getLogger().setLevel(LEVELS[args[0].lower()])
def handle_set_clock(self, args):
if len(args) < 1:
print "Error: no clock frequency provided"
return 1
try:
freq_Hz = self.convert_value(args[0]) * 1000
except:
print "Error: invalid frequency"
return 1
self.link.set_clock(freq_Hz)
if self.link.get_swj_mode() == DAPAccess.PORT.SWD:
swd_jtag = 'SWD'
else:
swd_jtag = 'JTAG'
if freq_Hz >= 1000000:
nice_freq = "%.2f MHz" % (freq_Hz / 1000000)
elif freq_Hz > 1000:
nice_freq = "%.2f kHz" % (freq_Hz / 1000)
else:
nice_freq = "%d Hz" % freq_Hz
print "Changed %s frequency to %s" % (swd_jtag, nice_freq)
def handle_exit(self, args):
    """'exit'/'quit' command: unwind out of the console/run loop."""
    raise ToolExitException()
def handle_python(self, args):
    """Evaluate a '$'-prefixed console line as a Python expression.

    The expression sees 'board', 'target', 'link', and 'flash' as locals.
    Integer results print in hex+decimal; anything else prints as-is.
    SECURITY NOTE: eval() of arbitrary user input — acceptable only because
    this is an interactive local debugger session.
    """
    try:
        env = {
                'board' : self.board,
                'target' : self.target,
                'link' : self.link,
                'flash' : self.flash,
            }
        result = eval(args, globals(), env)
        if result is not None:
            if type(result) is int:
                print "0x%08x (%d)" % (result, result)
            else:
                print result
    except Exception as e:
        print "Exception while executing expression:", e
        traceback.print_exc()
def handle_core(self, args):
    """'core' command: select the CPU core numbered in args[0], or print
    the currently selected core when called with no arguments."""
    if len(args) < 1:
        print "Core %d is selected" % self.target.selected_core.core_number
        return
    core = int(args[0], base=0)
    self.target.select_core(core)
    print "Selected core %d" % core
def handle_readdp(self, args):
    """'readdp'/'rdp' command: read and print the DP register at args[0].

    The numeric address is mapped through DP_REGS_MAP to the register enum.
    """
    if len(args) < 1:
        print "Missing DP address"
        return
    addr_int = self.convert_value(args[0])
    addr = DP_REGS_MAP[addr_int]
    result = self.target.dp.read_reg(addr)
    print "DP register 0x%x = 0x%08x" % (addr_int, result)
def handle_writedp(self, args):
    """'writedp'/'wdp' command: write args[1] to the DP register at args[0].

    The numeric address is mapped through DP_REGS_MAP to the register enum.
    """
    if len(args) < 1:
        print "Missing DP address"
        return
    if len(args) < 2:
        print "Missing value"
        return
    addr_int = self.convert_value(args[0])
    addr = DP_REGS_MAP[addr_int]
    data = self.convert_value(args[1])
    self.target.dp.write_reg(addr, data)
def handle_readap(self, args):
    """'readap'/'rap' command: read an AP register.

    One arg: full address. Two args: [APSEL, ADDR] combined as
    (APSEL << 24) | ADDR.
    NOTE(review): more than two args leaves 'addr' unbound and raises
    NameError — confirm whether extra args should be rejected explicitly.
    """
    if len(args) < 1:
        print "Missing AP address"
        return
    if len(args) == 1:
        addr = self.convert_value(args[0])
    elif len(args) == 2:
        addr = (self.convert_value(args[0]) << 24) | self.convert_value(args[1])
    result = self.target.dp.readAP(addr)
    print "AP register 0x%x = 0x%08x" % (addr, result)
def handle_writeap(self, args):
    """'writeap'/'wap' command: write an AP register.

    Two args: [ADDR, DATA]. Three args: [APSEL, ADDR, DATA], combined as
    (APSEL << 24) | ADDR.
    NOTE(review): more than three args leaves 'addr'/'data_arg' unbound
    and raises NameError — confirm intended behavior.
    """
    if len(args) < 1:
        print "Missing AP address"
        return
    if len(args) < 2:
        print "Missing value"
        return
    if len(args) == 2:
        addr = self.convert_value(args[0])
        data_arg = 1
    elif len(args) == 3:
        addr = (self.convert_value(args[0]) << 24) | self.convert_value(args[1])
        data_arg = 2
    data = self.convert_value(args[data_arg])
    self.target.dp.writeAP(addr, data)
def handle_reinit(self, args):
    """'reinit' command: re-run target initialization."""
    self.target.init()
def handle_show(self, args):
    """'show' command: dispatch to the info handler named by args[0].

    Remaining args are passed through to the handler. Raises ToolError for
    a missing or unrecognized info name.
    """
    if len(args) < 1:
        raise ToolError("missing info name argument")
    infoName = args[0]
    try:
        # Look up first so a KeyError raised *inside* a handler is not
        # misreported as an unknown info name (original wrapped the call).
        handler = self.info_list[infoName]
    except KeyError:
        # Bug fix: corrected "unkown" typo in the error message.
        raise ToolError("unknown info name '%s'" % infoName)
    handler(args[1:])
def handle_show_unique_id(self, args):
    """'show uid': print the board's unique ID."""
    print "Unique ID:    %s" % self.board.getUniqueID()
def handle_show_target(self, args):
    """'show target': print the target part number and DAP IDCODE."""
    print "Target:       %s" % self.target.part_number
    print "DAP IDCODE:   0x%08x" % self.target.readIDCode()
def handle_show_cores(self, args):
    """'show cores': print the number and type of CPU cores (unavailable
    when the target is locked)."""
    if self.target.isLocked():
        print "Target is locked"
    else:
        print "Cores:        %d" % len(self.target.cores)
        for i, c in enumerate(self.target.cores):
            core = self.target.cores[c]
            print "Core %d type:  %s" % (i, pyOCD.coresight.cortex_target.CORE_TYPE_NAME[core.core_type])
def handle_show_map(self, args):
    """'show map': print the target memory map as a table; blocksize shows
    '-' for non-flash regions."""
    print "Region          Start         End           Size      Blocksize"
    for region in self.target.getMemoryMap():
        print "{:<15} {:#010x}    {:#010x}    {:#10x}    {}".format(region.name, region.start, region.end, region.length, region.blocksize if region.isFlash else '-')
def handle_show_peripherals(self, args):
    """'show peripherals': list SVD peripherals ordered by base address."""
    for periph in sorted(self.peripherals.values(), key=lambda x:x.base_address):
        print "0x%08x: %s" % (periph.base_address, periph.name)
def handle_set(self, args):
    """'set' command: dispatch to the option handler named by args[0].

    Remaining args are passed through to the handler. Raises ToolError for
    a missing or unrecognized option name.
    """
    if len(args) < 1:
        raise ToolError("missing option name argument")
    name = args[0]
    try:
        # Look up first so a KeyError raised *inside* a handler is not
        # misreported as an unknown option name (original wrapped the call).
        handler = self.option_list[name]
    except KeyError:
        # Bug fix: corrected "unkown" typo in the error message.
        raise ToolError("unknown option name '%s'" % name)
    handler(args[1:])
def handle_set_vectorcatch(self, args):
    """'set vc' option: show or change enabled vector catch sources.

    With no args, prints the ON/OFF state of each source; otherwise parses
    args[0] (letters/'all'/'none') and applies it.
    """
    if len(args) == 0:
        catch = self.target.getVectorCatch()

        print "Vector catch:"
        for mask in sorted(VC_NAMES_MAP.iterkeys()):
            name = VC_NAMES_MAP[mask]
            s = "ON" if (catch & mask) else "OFF"
            print " {:3} {}".format(s, name)
    else:
        try:
            self.target.setVectorCatch(pyOCD.utility.cmdline.convert_vector_catch(args[0]))
        except ValueError as e:
            print e
def handle_set_step_interrupts(self, args):
    """'set si' option: show or change whether single stepping may enter
    interrupt handlers; truthy values are '1'/'true'/'yes'/'on'."""
    if len(args) == 0:
        print "Interrupts while stepping:", ("enabled" if self.step_into_interrupt else "disabled")
    else:
        self.step_into_interrupt = (args[0] in ('1', 'true', 'yes', 'on'))
def handle_help(self, args):
    """'help'/'?' command: with no args show the full command listing;
    otherwise print usage, aliases, and help for the named command."""
    if not args:
        self.list_commands()
    else:
        cmd = args[0]
        # Match either the canonical name or any alias.
        for name, info in COMMAND_INFO.iteritems():
            if cmd == name or cmd in info['aliases']:
                print "Usage: {cmd} {args}".format(cmd=cmd, args=info['args'])
                if len(info['aliases']):
                    print "Aliases:", ", ".join(info['aliases'])
                print info['help']
                if info.has_key('extra_help'):
                    print info['extra_help']
def list_commands(self):
    """Print the full command, info, and option reference tables built from
    COMMAND_INFO, INFO_HELP, and OPTION_HELP, plus usage footnotes."""
    cmds = sorted(COMMAND_INFO.keys())
    print "Commands:\n---------"
    for cmd in cmds:
        info = COMMAND_INFO[cmd]
        print "{cmd:<25} {args:<20} {help}".format(
            cmd=', '.join(sorted([cmd] + info['aliases'])),
            **info)
    print "\nInfo:\n---------"
    for name in sorted(INFO_HELP.keys()):
        info = INFO_HELP[name]
        print "{name:<25} {help}".format(
            name=', '.join(sorted([name] + info['aliases'])),
            help=info['help'])
    print "\nOptions:\n---------"
    for name in sorted(OPTION_HELP.keys()):
        info = OPTION_HELP[name]
        print "{name:<25} {help}".format(
            name=', '.join(sorted([name] + info['aliases'])),
            help=info['help'])
    print """
All register names are also available as commands that print the register's value.
Any ADDR or LEN argument will accept a register name.
Prefix line with $ to execute a Python expression.
Prefix line with ! to execute a shell command."""
def isFlashWrite(self, addr, width, data):
    """Return True if the write of 'data' at 'addr' falls entirely within a
    flash region of the memory map.

    width -- element width in bits used to compute the byte length.
    NOTE(review): a width other than 8/16/32 leaves 'l' unbound and raises
    NameError on the final line — callers only pass those widths.
    """
    mem_map = self.board.target.getMemoryMap()
    region = mem_map.getRegionForAddress(addr)
    if (region is None) or (not region.isFlash):
        return False

    # Convert element count to byte length.
    if width == 8:
        l = len(data)
    elif width == 16:
        l = len(data) * 2
    elif width == 32:
        l = len(data) * 4
    return region.containsRange(addr, length=l)
## @brief Convert an argument to a 32-bit integer.
#
# Handles the usual decimal, binary, and hex numbers with the appropriate prefix.
# Also recognizes register names and address dereferencing. Dereferencing using the
# ARM assembler syntax. To dereference, put the value in brackets, i.e. '[r0]' or
# '[0x1040]'. You can also use put an offset in the brackets after a comma, such as
# '[r3,8]'. The offset can be positive or negative, and any supported base.
def convert_value(self, arg):
    """Convert a command argument to an integer; see the block comment above
    for the accepted syntax (numbers, register names, [addr,offset] deref)."""
    arg = arg.lower().replace('_', '')
    deref = (arg[0] == '[')
    if deref:
        # Strip the brackets and extract an optional ',offset' suffix.
        arg = arg[1:-1]
        offset = 0
        if ',' in arg:
            arg, offset = arg.split(',')
            arg = arg.strip()
            offset = int(offset.strip(), base=0)

    if arg in pyOCD.coresight.cortex_target.CORE_REGISTER:
        value = self.target.readCoreRegister(arg)
        print "%s = 0x%08x" % (arg, value)
    else:
        subargs = arg.split('.')
        if self.peripherals.has_key(subargs[0]) and len(subargs) > 1:
            # 'periph.reg' resolves to that register's address.
            p = self.peripherals[subargs[0]]
            r = [x for x in p.registers if x.name.lower() == subargs[1]]
            if len(r):
                value = p.base_address + r[0].address_offset
            else:
                raise ToolError("invalid register '%s' for %s" % (subargs[1], p.name))
        else:
            # Plain number in any base (0x, 0b, decimal).
            value = int(arg, base=0)

    if deref:
        # Read the 32-bit little-endian word at value+offset.
        value = pyOCD.utility.conversion.byteListToU32leList(self.target.readBlockMemoryUnaligned8(value + offset, 4))[0]
        print "[%s,%d] = 0x%08x" % (arg, offset, value)

    return value
def dump_registers(self):
    """Print all core registers in three columns (uses Python 2 print
    trailing commas to build each row; a bare print ends every third)."""
    # Registers organized into columns for display.
    regs = ['r0', 'r6', 'r12',
            'r1', 'r7', 'sp',
            'r2', 'r8', 'lr',
            'r3', 'r9', 'pc',
            'r4', 'r10', 'xpsr',
            'r5', 'r11', 'primask']

    for i, reg in enumerate(regs):
        regValue = self.target.readCoreRegister(reg)
        print "{:>8} {:#010x} ".format(reg + ':', regValue),
        if i % 3 == 2:
            print
def _dump_peripheral_register(self, periph, reg, show_fields):
    """Read and print one SVD peripheral register.

    With show_fields, also prints every non-reserved bit field with its
    bit range, hex and binary value, and the matching enumerated value
    description if one is defined.
    """
    addr = periph.base_address + reg.address_offset
    value = self.target.readMemory(addr, reg.size)
    value_str = hex_width(value, reg.size)
    print "%s.%s @ %08x = %s" % (periph.name, reg.name, addr, value_str)

    if show_fields:
        for f in reg.fields:
            if f.is_reserved:
                continue
            msb = f.bit_offset + f.bit_width - 1
            lsb = f.bit_offset
            f_value = mask.bfx(value, msb, lsb)
            # Find the enumerated value matching the field's current value.
            v_enum = None
            if f.enumerated_values:
                for v in f.enumerated_values:
                    if v.value == f_value:
                        v_enum = v
                        break
            if f.bit_width == 1:
                bits_str = "%d" % lsb
            else:
                bits_str = "%d:%d" % (msb, lsb)
            # Zero-pad hex to the field's nibble count and binary to its width.
            f_value_str = "%x" % f_value
            digits = (f.bit_width + 3) / 4
            f_value_str = "0" * (digits - len(f_value_str)) + f_value_str
            f_value_bin_str = bin(f_value)[2:]
            f_value_bin_str = "0" * (f.bit_width - len(f_value_bin_str)) + f_value_bin_str
            if v_enum:
                # 'v' here equals 'v_enum' (set just before break); fragile
                # if the loop above ever changes — prefer v_enum.name.
                f_value_enum_str = " %s: %s" % (v.name, v_enum.description)
            else:
                f_value_enum_str = ""
            print " %s[%s] = %s (%s)%s" % (f.name, bits_str, f_value_str, f_value_bin_str, f_value_enum_str)
def print_disasm(self, code, startAddr, maxInstructions=None):
    """Disassemble 'code' (a byte string) with capstone and print it.

    startAddr       -- address of the first byte, used for display.
    maxInstructions -- stop after this many instructions if given.
    The instruction at the current PC is marked with '*'. Prints a warning
    and returns if capstone is not installed.
    """
    if not isCapstoneAvailable:
        print "Warning: Disassembly is not available because the Capstone library is not installed"
        return

    # Clear the Thumb bit so the PC comparison matches instruction addresses.
    pc = self.target.readCoreRegister('pc') & ~1
    md = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_THUMB)

    addrLine = 0  # NOTE(review): unused; appears vestigial.
    text = ''
    n = 0
    for i in md.disasm(code, startAddr):
        hexBytes = ''
        for b in i.bytes:
            hexBytes += '%02x' % b
        pc_marker = '*' if (pc == i.address) else ' '
        text += "{addr:#010x}:{pc_marker} {bytes:<10}{mnemonic:<8}{args}\n".format(addr=i.address, pc_marker=pc_marker, bytes=hexBytes, mnemonic=i.mnemonic, args=i.op_str)
        n += 1
        if (maxInstructions is not None) and (n >= maxInstructions):
            break

    print text
def main():
    """Command-line entry point: run the tool and exit with its code."""
    sys.exit(PyOCDTool().run())


if __name__ == '__main__':
    main()
| |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
# Public estimators exported by this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]


# =============================================================================
# Types and constants
# =============================================================================

# NumPy dtype aliases re-exported from the compiled _tree module, used when
# validating/converting input arrays.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Mapping from criterion-name strings (constructor arguments) to the
# criterion classes in ._criterion, for classification and regression.
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}

# Mapping from splitter-name strings to splitter classes in ._splitter,
# for dense and sparse input respectively.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}

SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
             criterion,
             splitter,
             max_depth,
             min_samples_split,
             min_samples_leaf,
             min_weight_fraction_leaf,
             max_features,
             max_leaf_nodes,
             random_state,
             class_weight=None,
             presort=False):
    """Store the constructor parameters unmodified (scikit-learn
    convention: no validation here; fit() validates and resolves them).

    Abstract so only concrete subclasses can be instantiated.
    """
    self.criterion = criterion
    self.splitter = splitter
    self.max_depth = max_depth
    self.min_samples_split = min_samples_split
    self.min_samples_leaf = min_samples_leaf
    self.min_weight_fraction_leaf = min_weight_fraction_leaf
    self.max_features = max_features
    self.random_state = random_state
    self.max_leaf_nodes = max_leaf_nodes
    self.class_weight = class_weight
    self.presort = presort

    # Attributes below are populated by fit(); None marks "not fitted".
    self.n_features_ = None
    self.n_outputs_ = None
    self.classes_ = None
    self.n_classes_ = None

    self.tree_ = None
    self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        Split quality measure: "gini" for the Gini impurity or "entropy"
        for the information gain.
    splitter : string, optional (default="best")
        Split strategy at each node: "best" or "random".
    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split. An int is taken as-is; a
        float as a percentage (``int(max_features * n_features)``);
        "auto"/"sqrt" use ``sqrt(n_features)``, "log2" uses
        ``log2(n_features)``, None uses all features. The search for a
        split never stops before at least one valid partition of the node
        samples is found, even if that requires inspecting more than
        ``max_features`` features.
    max_depth : int or None, optional (default=None)
        Maximum tree depth. None expands nodes until all leaves are pure
        or hold fewer than ``min_samples_split`` samples. Ignored if
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        Minimum number of samples required to split an internal node
        (a float is a percentage: ``ceil(min_samples_split * n_samples)``).
    min_samples_leaf : int, float, optional (default=1)
        Minimum number of samples required at a leaf node (a float is a
        percentage: ``ceil(min_samples_leaf * n_samples)``).
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree best-first with at most this many leaves (best nodes
        defined by relative impurity reduction). None means unlimited; if
        not None, ``max_depth`` is ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
        Weights ``{class_label: weight}`` (a list of dicts for
        multi-output, in column order of y). "balanced" adjusts weights
        inversely proportional to class frequencies as
        ``n_samples / (n_classes * np.bincount(y))``; for multi-output the
        per-column weights are multiplied. Multiplied with
        ``sample_weight`` when that is passed to ``fit``.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None to use the global ``np.random`` state.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up split finding. May slow
        training on large datasets; can speed it up on small datasets or
        with restricted depth.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The class labels (one array per output for multi-output).
    feature_importances_ : array of shape = [n_features]
        Normalized total criterion reduction per feature (Gini importance).
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        Number of classes (a list for multi-output problems).
    n_features_ : int
        Number of features seen during ``fit``.
    n_outputs_ : int
        Number of outputs seen during ``fit``.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """

    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None,
                 presort=False):
        # All parameters are forwarded verbatim to BaseDecisionTree.
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            random_state=random_state,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            presort=presort)

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the
        same class in a leaf.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        raw = self.tree_.predict(X)

        def _normalize(block):
            # Scale each row to sum to one; all-zero rows are left as zeros.
            totals = block.sum(axis=1)[:, np.newaxis]
            totals[totals == 0.0] = 1.0
            block /= totals
            return block

        if self.n_outputs_ == 1:
            return _normalize(raw[:, :self.n_classes_])
        return [_normalize(raw[:, k, :self.n_classes_[k]])
                for k in range(self.n_outputs_)]

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples, ordered as
            in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        for k in range(self.n_outputs_):
            proba[k] = np.log(proba[k])
        return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        Split quality measure: "mse" for the mean squared error (equal to
        variance reduction) or "friedman_mse".
    splitter : string, optional (default="best")
        Split strategy at each node: "best" or "random".
    max_features : int, float, string or None, optional (default=None)
        Number of features considered per split. An int is taken as-is; a
        float as a percentage (``int(max_features * n_features)``);
        "auto" and None use all features, "sqrt" uses ``sqrt(n_features)``
        and "log2" uses ``log2(n_features)``. The search for a split never
        stops before at least one valid partition of the node samples is
        found, even if that requires inspecting more than ``max_features``
        features.
    max_depth : int or None, optional (default=None)
        Maximum tree depth. None expands nodes until all leaves are pure
        or hold fewer than ``min_samples_split`` samples. Ignored if
        ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        Minimum number of samples required to split an internal node
        (a float is a percentage: ``ceil(min_samples_split * n_samples)``).
    min_samples_leaf : int, float, optional (default=1)
        Minimum number of samples required at a leaf node (a float is a
        percentage: ``ceil(min_samples_leaf * n_samples)``).
    min_weight_fraction_leaf : float, optional (default=0.)
        Minimum weighted fraction of the input samples required at a leaf.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree best-first with at most this many leaves (best nodes
        defined by relative impurity reduction). None means unlimited; if
        not None, ``max_depth`` is ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed, generator, or None to use the global ``np.random`` state.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up split finding. May slow
        training on large datasets; can speed it up on small datasets or
        with restricted depth.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        Normalized total criterion reduction per feature (Gini importance).
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        Number of features seen during ``fit``.
    n_outputs_ : int
        Number of outputs seen during ``fit``.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 0.61...,  0.57..., -0.34...,  0.41...,  0.75...,
            0.07...,  0.29...,  0.33..., -1.42..., -1.77...])
    """

    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 presort=False):
        # All parameters are forwarded verbatim to BaseDecisionTree
        # (class_weight stays at its None default for regression).
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            random_state=random_state,
            max_leaf_nodes=max_leaf_nodes,
            presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees differ from classic decision trees in how they are built:
    for each of the `max_features` randomly selected candidate features a
    random split is drawn, and the best of those random splits is kept.
    Setting `max_features` to 1 therefore builds a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Identical to DecisionTreeClassifier apart from the randomized
        # defaults: splitter="random" and max_features="auto".
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            random_state=random_state,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in how they are built:
    for each of the `max_features` randomly selected candidate features a
    random split is drawn, and the best of those random splits is kept.
    Setting `max_features` to 1 therefore builds a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeRegressor apart from the randomized
        # defaults: splitter="random" and max_features="auto".
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            random_state=random_state,
            max_leaf_nodes=max_leaf_nodes)
| |
import functools
import hashlib
from django.core.cache import cache
from django.db.models.signals import pre_delete, post_save
from django.dispatch import Signal
try:
    # Python >= 2.7 provides inspect.getcallargs; prefer the stdlib version.
    from inspect import getcallargs
except ImportError:
    # Backport for older interpreters.
    import sys
    from inspect import getargspec, ismethod

    def getcallargs(func, *positional, **named):
        """Get the mapping of arguments to values.
        A dict is returned, with keys the function argument names (including the
        names of the * and ** arguments, if any), and values the respective bound
        values from 'positional' and 'named'."""
        args, varargs, varkw, defaults = getargspec(func)
        f_name = func.__name__
        arg2value = {}

        # The following closures are basically because of tuple parameter unpacking.
        assigned_tuple_params = []

        def assign(arg, value):
            # Bind one formal parameter; a tuple parameter is unpacked
            # recursively, element by element.
            if isinstance(arg, str):
                arg2value[arg] = value
            else:
                assigned_tuple_params.append(arg)
                value = iter(value)
                for i, subarg in enumerate(arg):
                    try:
                        subvalue = next(value)
                    except StopIteration:
                        raise ValueError('need more than %d %s to unpack' %
                                         (i, 'values' if i > 1 else 'value'))
                    assign(subarg,subvalue)
                # The iterator must now be exhausted, else too many values.
                try:
                    next(value)
                except StopIteration:
                    pass
                else:
                    raise ValueError('too many values to unpack')

        def is_assigned(arg):
            # A plain name is assigned when present in arg2value; tuple
            # parameters are tracked separately (by identity).
            if isinstance(arg,str):
                return arg in arg2value
            return arg in assigned_tuple_params

        if ismethod(func) and func.__self__ is not None:
            # implicit 'self' (or 'cls' for classmethods) argument
            positional = (func.__self__,) + positional
        num_pos = len(positional)
        num_total = num_pos + len(named)
        num_args = len(args)
        num_defaults = len(defaults) if defaults else 0

        # Bind positional arguments to the leading formal parameters.
        for arg, value in zip(args, positional):
            assign(arg, value)
        if varargs:
            # Surplus positionals go into the *args parameter (or an empty
            # tuple when there are none).
            if num_pos > num_args:
                assign(varargs, positional[-(num_pos-num_args):])
            else:
                assign(varargs, ())
        elif 0 < num_args < num_pos:
            raise TypeError('%s() takes %s %d %s (%d given)' % (
                f_name, 'at most' if defaults else 'exactly', num_args,
                'arguments' if num_args > 1 else 'argument', num_total))
        elif num_args == 0 and num_total:
            if varkw:
                if num_pos:
                    # XXX: We should use num_pos, but Python also uses num_total:
                    raise TypeError('%s() takes exactly 0 arguments '
                                    '(%d given)' % (f_name, num_total))
            else:
                raise TypeError('%s() takes no arguments (%d given)' %
                                (f_name, num_total))

        # Bind keyword arguments, rejecting duplicates of names already
        # bound positionally.
        for arg in args:
            if isinstance(arg, str) and arg in named:
                if is_assigned(arg):
                    raise TypeError("%s() got multiple values for keyword "
                                    "argument '%s'" % (f_name, arg))
                else:
                    assign(arg, named.pop(arg))
        if defaults:    # fill in any missing values with the defaults
            for arg, value in zip(args[-num_defaults:], defaults):
                if not is_assigned(arg):
                    assign(arg, value)
        if varkw:
            # Leftover keywords go into the **kwargs parameter.
            assign(varkw, named)
        elif named:
            unexpected = next(iter(named))
            if isinstance(unexpected, str):
                unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
            raise TypeError("%s() got an unexpected keyword argument '%s'" %
                            (f_name, unexpected))

        # Any formal parameter still unbound means too few arguments given.
        unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
        if unassigned:
            num_required = num_args - num_defaults
            raise TypeError('%s() takes %s %d %s (%d given)' % (
                f_name, 'at least' if defaults else 'exactly', num_required,
                'arguments' if num_required > 1 else 'argument', num_total))
        return arg2value
# Default cache timeout: one week, expressed in seconds.
WEEK = 7 * 24 * 60 * 60

# Fired when cached entries are invalidated; receivers get the plain
# (un-prefixed, un-hashed) cache keys in the ``keys`` argument.
# NOTE(review): ``providing_args`` was deprecated in Django 3.0 and removed
# in 4.0 -- confirm the targeted Django version still accepts it.
cache_invalidated = Signal(providing_args=['keys'])
class CacheFunction(object):
    """Decorator factory that caches a function's return value.

    An instance can decorate a function directly (``@cachefn``) or be called
    with keyword options (``@cachefn(key=..., signals=..., models=...)``)
    that are forwarded to :meth:`_decorator`.  Cached entries can be
    invalidated via connected signals or model save/delete events.
    """

    # Unique sentinel distinguishing "not in cache" from a cached None/falsy value.
    CACHE_MISS = object()

    def __init__(self, prefix='', timeout=WEEK, fhash=None, fkey=None):
        """Configure key prefix, default timeout and key/hash strategies.

        ``fhash`` defaults to :meth:`hash_key` (md5 hex digest); ``fkey``
        defaults to :meth:`generate_key` (prefix + %-formatted template).
        """
        self.prefix = prefix
        self.timeout = timeout
        if fhash is None:
            fhash = self.hash_key
        self.fhash = fhash
        if fkey is None:
            fkey = self.generate_key
        self.fkey = fkey

    def __call__(self, *args, **kwargs):
        """Support both ``@instance`` and ``@instance(**options)`` usage."""
        if args:
            # Bare decoration: the single positional arg must be the function.
            if kwargs or not callable(args[0]):
                raise TypeError("invalid usage")
            return self._decorator(args[0])
        else:
            # Parameterized decoration: defer until the function arrives.
            return functools.partial(self._decorator, **kwargs)

    def _decorator(self, func, invalidate=None, key=None, signals=(), models=(), timeout=None):
        """Wrap *func* with caching plus signal-driven invalidation.

        invalidate: key(s) deleted when a connected signal fires, or a
            callable producing them; defaults to the function name.
        key: cache-key template (or callable) passed to ``self.fkey``;
            defaults to the function name.
        signals: additional signals whose firing invalidates the cache.
        models: models whose post_save/pre_delete invalidate the cache.
        timeout: cache TTL; defaults to the instance-wide timeout.
        """
        if key is None:
            key = func.__name__
        if invalidate is None:
            invalidate = (func.__name__,)
        if timeout is None:
            timeout = self.timeout
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Look up by hashed key; compute and store on a miss.
            k = self.fhash(self.fkey(key, func, args, kwargs))
            data = cache.get(k, self.CACHE_MISS)
            if data is self.CACHE_MISS:
                data = func(*args, **kwargs)
                cache.set(k, data, timeout)
            return data
        if invalidate:
            def iwrapper(sender, **kwargs):
                # Signal receiver: resolve the keys to drop, delete their
                # hashed forms, then re-broadcast on wrapper.invalidated.
                try:
                    keys = kwargs['cache_keys']
                except KeyError:
                    if callable(invalidate):
                        keys = invalidate(sender, **kwargs)
                    else:
                        keys = invalidate
                if keys:
                    if isinstance(keys, str):
                        keys = (keys,)
                    prefixed = [ self.prefix + k for k in keys ]
                    cache.delete_many(list(map(self.fhash, prefixed)))
                    wrapper.invalidated.send(wrapper, cache_keys=keys)
            # weak=False keeps the closure alive for the process lifetime.
            for s in signals:
                s.connect(iwrapper, weak=False)
            for m in models:
                post_save.connect(iwrapper, sender=m, weak=False)
                pre_delete.connect(iwrapper, sender=m, weak=False)
        def get_from_cache(fargs):
            # Batch lookup.  Each entry of fargs is ((args, kwargs)), a bare
            # kwargs dict, or a bare args sequence.  Returns a list aligned
            # with fargs, containing CACHE_MISS for entries not cached.
            cache_keys = {}
            for ix, farg in enumerate(fargs):
                if isinstance(farg, (list, tuple))\
                        and len(farg) == 2\
                        and isinstance(farg[0], (list, tuple))\
                        and isinstance(farg[1], dict):
                    args, kwargs = farg
                elif isinstance(farg, dict):
                    args = ()
                    kwargs = farg
                else:
                    args = farg
                    kwargs = {}
                k = self.fhash(self.fkey(key, func, args, kwargs))
                cache_keys[k] = (ix, farg)
            results = cache.get_many(list(cache_keys.keys()))
            output = [ self.CACHE_MISS ] * len(fargs)
            for k, v in cache_keys.items():
                ix = v[0]
                try:
                    output[ix] = results[k]
                except KeyError:
                    pass
            return output
        wrapper.get_from_cache = get_from_cache
        wrapper.invalidated = Signal(providing_args=['cache_keys'])
        return wrapper

    def hash_key(self, key):
        """Return the md5 hex digest of *key* (utf-8 encoding str input)."""
        if not isinstance(key, bytes):
            key = key.encode('utf-8')
        return hashlib.md5(key).hexdigest()

    def generate_key(self, key, func, args, kwargs):
        """Build the cache key.

        Callable keys are invoked with (func, *args, **kwargs); string
        templates are %-formatted with the positional args, falling back to
        the full name->value mapping from getcallargs.
        """
        if callable(key):
            return key(func, *args, **kwargs)
        cargs = getcallargs(func, *args, **kwargs)
        try:
            k = key % args
        except TypeError:
            k = key % cargs
        return self.prefix + k
| |
from Tkinter import *
import tkFileDialog
import tkMessageBox
import os
import string
import time
import datetime
import csv
import numpy
import calendar
from scipy import stats
from scipy import optimize
from scipy import linspace
import math
import pylab as P
from matplotlib.backends.backend_pdf import PdfPages
import fnmatch
#confidence interval for linear regression analysis
confidence_interval=90.0
# Output folder for merged/result files.
# NOTE(review): '\D' is not a recognized escape, so this works as written
# under Python 2, but a raw string (r'C:\...') would be safer.
resultsfoldername = 'C:\DATATOOLS_UOFS'
def MergeResults():
    """Merge analyzer ``*.dat`` files overlapping the sampling period into one file.

    Reads sample start times from the CSV named by the global
    ``sampletimefilename``, walks ``C:\\UserData`` for ``*.dat`` files whose
    date (parsed from the file name as ``<prefix>-YYYYMMDD-...``) lies within
    one day of the first/last sample, and appends their rows to a new
    tab-delimited file in ``resultsfoldername``.  The new file's name is
    stored in the global ``mergedresultsfilename``.
    """
    global sampletimefilename
    global mergedresultsfilename
    global tempfilename
    # Raw string: '\U' in a plain literal is an invalid escape under Python 3.
    inputfoldername = r'C:\UserData'
    opensampletimefile = open(sampletimefilename, 'rb')
    try:
        sampletimes = numpy.genfromtxt(opensampletimefile, delimiter=',',
                                       dtype=None, names=True)
    finally:
        # BUG FIX: the original never closed this handle.
        opensampletimefile.close()
    # Convert each sample's Y/M/D H:M:S columns to a UTC epoch timestamp.
    sampleepochtimes = []
    c1 = 0
    for row in sampletimes['SampleName']:
        samplestartstr = (str(sampletimes['Year'][c1]) + " " +
                          str(sampletimes['Month'][c1]) + " " +
                          str(sampletimes['Day'][c1]) + " " +
                          str(sampletimes['Hour'][c1]) + " " +
                          str(sampletimes['Minute'][c1]) + " " +
                          str(sampletimes['Second'][c1]))
        samplestructtime = time.strptime(samplestartstr, "%Y %m %d %H %M %S")
        sampleepochtimes.append(calendar.timegm(samplestructtime))
        c1 = c1 + 1
    sampleepochtimes = sorted(sampleepochtimes)
    firstsampletime = min(sampleepochtimes)
    lastsampletime = max(sampleepochtimes)
    print(sampleepochtimes)
    os.chdir(resultsfoldername)
    mergedresultsfilename = datetime.datetime.now().strftime('%Y%m%d_%H%M%S') + '_merged_results.txt'
    openresultsfile = open(mergedresultsfilename, 'wb')
    try:
        resultswriter = csv.writer(openresultsfile, delimiter='\t')
        resultswriter.writerow(['Date','Local_Time','Epoch_Time','N2O_ppmv','d15N', 'd15Nalpha', 'd15Nbeta','alpha_ppmv','beta_ppmv', 'CO2_ppmv', 'H2O_volperc'])
        os.chdir(inputfoldername)
        for dirpath, dirs, files in os.walk(inputfoldername):
            for filename in fnmatch.filter(files, '*.dat'):
                # File names look like '<prefix>-YYYYMMDD-<rest>.dat'.
                junk, datestr, junk2 = filename.split("-", 2)
                YMD = int(datestr)
                # '//' keeps integer semantics under both Python 2 and 3
                # (plain '/' would yield floats under Python 3).
                yearoffile = YMD // 10000
                monthoffile = (YMD - yearoffile * 10000) // 100
                dayoffile = YMD - yearoffile * 10000 - monthoffile * 100
                filetimestr = str(yearoffile) + " " + str(monthoffile) + " " + str(dayoffile) + " 00 00 00"
                filestructtime = time.strptime(filetimestr, "%Y %m %d %H %M %S")
                fileepochtime = calendar.timegm(filestructtime)
                # Only merge files dated within one day (86400 s) of the run;
                # unlike the original, skip *before* opening the file.
                if fileepochtime <= firstsampletime - 86400 or fileepochtime >= lastsampletime + 86400:
                    continue
                openinputfile = open(os.path.join(dirpath, filename), 'rb')
                try:
                    fd = numpy.genfromtxt(openinputfile, dtype=None, names=True)
                finally:
                    openinputfile.close()
                c1 = 0
                for row in fd['TIME']:
                    # TIME has fractional seconds; strptime can't parse them.
                    temptime, junk = row.split(".", 1)
                    datetimestr = str(fd['DATE'][c1]) + " " + str(temptime)
                    tempstructtime = time.strptime(datetimestr, "%Y-%m-%d %H:%M:%S")
                    # mktime treats the struct as local time (analyzer clock).
                    tempepochtime = time.mktime(tempstructtime)
                    tempstructtime = time.localtime(tempepochtime)
                    gooddate = time.strftime("%Y-%m-%d", tempstructtime)
                    goodtime = time.strftime("%H:%M:%S", tempstructtime)
                    resultswriter.writerow([gooddate, goodtime, fd['EPOCH_TIME'][c1], fd['N2O'][c1], fd['d15N'][c1], fd['d15Nalpha'][c1], fd['d15Nbeta'][c1], fd['alpha_conc'][c1], fd['beta_conc'][c1], fd['co2_conc'][c1], fd['H2O_conc'][c1]])
                    c1 = c1 + 1
    finally:
        openresultsfile.close()
def askopenresultsfilename():
    """Ask for a switcherlog CSV, merge the raw data, then compute per-sample results.

    Workflow (driven by GUI globals):
      1. Prompt for the sample-times CSV; store its path in the global
         ``sampletimefilename``.
      2. Call MergeResults() to build the merged raw-data file named by the
         global ``mergedresultsfilename``.
      3. For every sample row, collect readings in three time windows
         (stabilization / sampling / 2-min tail), write means and linear
         regressions to ``<stamp>_results.csv`` and scatter plots to two
         PDF files, all in ``resultsfoldername``.
    """
    global sampletimefilename # file with the sample names and times (switcherlog)
    global mergedresultsfilename
    global tempfilename
    # get filename
    fileopen_opt = options = {}
    options['defaultextension'] = '.csv'
    options['filetypes'] = [('csv files', '.csv'),('all files', '.*')]
    options['initialdir'] = 'C:\SWITCH_CONTROL\SWITCHERLOG'
    options['initialfile'] = 'sample_times_names.csv'
    options['parent'] = root
    options['title'] = 'Choose a csv file with samplenames and times to open'
    sampletimefilename = tkFileDialog.askopenfilename(**fileopen_opt)
    # open file
    if sampletimefilename:
        tempfilename=datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        MergeResults()
        opensampletimefile=open(sampletimefilename, 'rb')
        os.chdir(resultsfoldername)
        openinputfile=open(mergedresultsfilename, 'rb')
        resultsfileName=tempfilename + '_results.csv'
        openresultsfile=open(resultsfileName, 'wb')
        pdffile1 = PdfPages(tempfilename +'_charts_sample_only.pdf')
        pdffile2 = PdfPages(tempfilename +'_charts_whole_run.pdf')
        sampletimes = numpy.genfromtxt(opensampletimefile, delimiter=',', dtype=None, names=True)
        print "amount samples"
        print len(sampletimes['SampleName'])
        iN2Odata = numpy.genfromtxt(openinputfile, delimiter='\t', dtype=None, names=True)
        print "amount datalines"
        print len(iN2Odata['Epoch_Time'])
        amountrows=len(iN2Odata['Epoch_Time'])
        resultswriter = csv.writer(openresultsfile, dialect='excel')
        # NOTE(review): this header has 24 columns but the no-data row written
        # in the else-branch below has only 19 fields -- confirm intended.
        resultswriter.writerow(['SampleName', 'Rundate','Runtime', 'Port', 'N2Omean',
        'N2Oslope', 'N2Ointercept','d15Nmean','d15Nslope','d15Nintercept', 'd15Amean',
        'd15Aslope', 'd15Aintercept','d15Bmean', 'd15Bslope', 'd15Bintercept','CO2mean',
        'H2Omean','alphaN2Omean', 'alphaN2Oslope', 'alphaN2Ointercept','betaN2Omean',
        'betaN2Oslope', 'betaN2Ointercept'])
        # GUI-configured windows: seconds skipped after a switch, and the
        # sampling window length (minutes converted to seconds).
        stabilizesec=float(pretimeentry.get())
        sampletimesec=float(sampletimeentry.get())*60
        # just a counter c1 for keeping track of where we are in the samplelist file
        c1=0
        # just a counter c2 for keeping track of where we are in the results file
        c2=0
        for row in sampletimes['SampleName']:
            # x*/y* collect every plotted reading; x*s/y*s collect only the
            # sampling-window readings used for means and regressions.
            xsec=[]; yN2O=[]; yd15N=[]; yd15A=[]; yd15B=[]; yCO2=[]; yH2O=[]; yalphaN2O=[];ybetaN2O=[];
            xsecs=[]; yN2Os=[]; yd15Ns=[]; yd15As=[]; yd15Bs=[]; yCO2s=[]; yH2Os=[]; yalphaN2Os=[];ybetaN2Os=[];
            samplestartstr=str(sampletimes['Year'][c1])+" "+str(sampletimes['Month'][c1])+" "+str(sampletimes['Day'][c1])+" "+str(sampletimes['Hour'][c1])+" "+str(sampletimes['Minute'][c1]) +" "+str(sampletimes['Second'][c1])
            samplestructtime=time.strptime(samplestartstr, "%Y %m %d %H %M %S")
            #sampleepochtime=time.mktime(samplestructtime)
            sampleepochtime=calendar.timegm(samplestructtime)
            # shift sample clock by the GUI's UTC-offset entry (hours)
            sampleepochtime=sampleepochtime-(int(UTCoffsetentry.get())*3600)
            print sampletimes['SampleName'][c1]
            print time.strftime("%d %b %Y %H:%M:%S ", samplestructtime)
            print sampleepochtime
            # discard data before sample is started and stabilized
            while sampleepochtime > iN2Odata['Epoch_Time'][c2]:
                c2=c2+1
            # stabilization window: kept for the whole-run plots only
            while sampleepochtime+stabilizesec > iN2Odata['Epoch_Time'][c2]:
                xsec.append(iN2Odata['Epoch_Time'][c2]-sampleepochtime)
                yN2O.append(iN2Odata['N2O_ppmv'][c2])
                yd15N.append(iN2Odata['d15N'][c2])
                yd15A.append(iN2Odata['d15Nalpha'][c2])
                yd15B.append(iN2Odata['d15Nbeta'][c2])
                yCO2.append(iN2Odata['CO2_ppmv'][c2])
                yH2O.append(iN2Odata['H2O_volperc'][c2])
                yalphaN2O.append(iN2Odata['alpha_ppmv'][c2])
                ybetaN2O.append(iN2Odata['beta_ppmv'][c2])
                c2=c2+1
            # sampling window: the readings used for means and regressions
            while sampleepochtime+stabilizesec+sampletimesec > iN2Odata['Epoch_Time'][c2]:
                xsecs.append(iN2Odata['Epoch_Time'][c2]-sampleepochtime)
                yN2Os.append(iN2Odata['N2O_ppmv'][c2])
                yd15Ns.append(iN2Odata['d15N'][c2])
                yd15As.append(iN2Odata['d15Nalpha'][c2])
                yd15Bs.append(iN2Odata['d15Nbeta'][c2])
                yCO2s.append(iN2Odata['CO2_ppmv'][c2])
                yH2Os.append(iN2Odata['H2O_volperc'][c2])
                yalphaN2Os.append(iN2Odata['alpha_ppmv'][c2])
                ybetaN2Os.append(iN2Odata['beta_ppmv'][c2])
                c2=c2+1
            # two extra minutes after the sampling window, plots only
            while sampleepochtime+stabilizesec+sampletimesec+120 > iN2Odata['Epoch_Time'][c2]:
                xsec.append(iN2Odata['Epoch_Time'][c2]-sampleepochtime)
                yN2O.append(iN2Odata['N2O_ppmv'][c2])
                yd15N.append(iN2Odata['d15N'][c2])
                yd15A.append(iN2Odata['d15Nalpha'][c2])
                yd15B.append(iN2Odata['d15Nbeta'][c2])
                yCO2.append(iN2Odata['CO2_ppmv'][c2])
                yH2O.append(iN2Odata['H2O_volperc'][c2])
                yalphaN2O.append(iN2Odata['alpha_ppmv'][c2])
                ybetaN2O.append(iN2Odata['beta_ppmv'][c2])
                c2=c2+1
            # rewind: the next sample re-scans the merged data from the top
            c2=0
            print 'amount readings for this sample:' + str(len(yN2Os))
            rundate=time.strftime("%Y%m%d", samplestructtime)
            runtime=time.strftime("%H%M%S", samplestructtime)
            # need at least 3 sampling-window readings for a regression
            if len(yN2Os)>2:
                N2Omean=numpy.mean(yN2Os)
                H2Omean=numpy.mean(yH2Os)
                CO2mean=numpy.mean(yCO2s)
                d15Nmean=numpy.mean(yd15Ns)
                d15Amean=numpy.mean(yd15As)
                d15Bmean=numpy.mean(yd15Bs)
                alphaN2Omean=numpy.mean(yalphaN2Os)
                betaN2Omean=numpy.mean(ybetaN2Os)
                N2Oslope, N2Ointercept, N2Olinr, N2Olinp, N2Ostd_err = stats.linregress(xsecs,yN2Os)
                d15Nslope, d15Nintercept, d15Nlinr, d15Nlinp, d15Nstd_err = stats.linregress(xsecs,yd15Ns)
                d15Aslope, d15Aintercept, d15Alinr, d15Alinp, d15Astd_err = stats.linregress(xsecs,yd15As)
                d15Bslope, d15Bintercept, d15Blinr, d15Blinp, d15Bstd_err = stats.linregress(xsecs,yd15Bs)
                alphaN2Oslope, alphaN2Ointercept, alphaN2Olinr, alphaN2Olinp, alphaN2Ostd_err = stats.linregress(xsecs,yalphaN2Os)
                betaN2Oslope, betaN2Ointercept, betaN2Olinr, betaN2Olinp, betaN2Ostd_err = stats.linregress(xsecs,ybetaN2Os)
                resultswriter.writerow([sampletimes['SampleName'][c1],rundate,runtime, sampletimes['Port'][c1],
                N2Omean, N2Oslope, N2Ointercept, d15Nmean, d15Nslope, d15Nintercept, d15Amean, d15Aslope, d15Aintercept, d15Bmean, d15Bslope, d15Bintercept,
                CO2mean, H2Omean,alphaN2Omean,alphaN2Oslope,alphaN2Ointercept,betaN2Omean,betaN2Oslope,betaN2Ointercept])
                #______________ SAMPLE ONLY PDF_______________________________
                # 4-panel scatter page: N2O, d15N, d15N-alpha, d15N-beta
                fig = P.figure(figsize=(16, 16))
                xs = numpy.array(xsecs)
                y1s = numpy.array(yN2Os)
                y2s = numpy.array(yd15Ns)
                y3s = numpy.array(yd15As)
                y4s = numpy.array(yd15Bs)
                x = numpy.array(xsec)
                y1 = numpy.array(yN2O)
                y2 = numpy.array(yd15N)
                y3 = numpy.array(yd15A)
                y4 = numpy.array(yd15B)
                # linear fits evaluated over the whole-run x values
                # (computed but not plotted; the plot calls are commented out)
                (m,b)=P.polyfit(xs,y1s,1)
                y12 = P.polyval([m,b],x)
                (m,b)=P.polyfit(xs,y2s,1)
                y22 = P.polyval([m,b],x)
                (m,b)=P.polyfit(xs,y3s,1)
                y32 = P.polyval([m,b],x)
                (m,b)=P.polyfit(xs,y4s,1)
                y42 = P.polyval([m,b],x)
                line1=fig.add_subplot(411)
                line1.scatter(xs, y1s)
                line1.set_xlim(left=0)
                line1.grid()
                line1.set_title('Sample Name: '+str(sampletimes['SampleName'][c1])+' time: '+time.strftime("%d %b %Y %H:%M:%S ", samplestructtime))
                line1.set_ylabel('N2O concentration (ppmv)', color='b')
                line2=fig.add_subplot(412)
                line2.scatter(xs, y2s)
                line2.set_xlim(left=0)
                line2.grid()
                line2.set_ylabel('d15N', color='b')
                line3=fig.add_subplot(413)
                line3.scatter(xs, y3s)
                line3.set_xlim(left=0)
                line3.grid()
                line3.set_ylabel('d15N alpha', color='b')
                line4=fig.add_subplot(414)
                line4.scatter(xs, y4s)
                line4.set_xlim(left=0)
                #line4.plot(x, y42)
                #            if doHMRfit.get()>0 and nh3HMRslope!='NA':
                #                p1=numpy.array([float(nh3HMRintercept),-float(nh3HMRslope),float(nh3HMRmixratio)])
                #                line4.plot(xHMR, fitfunc(p1, xHMR)) # Plot the HMR fit
                line4.grid()
                line4.set_ylabel('d15N beta', color='b')
                line4.set_xlabel('time (seconds)', color='b')
                pdffile1.savefig(dpi=150)
                P.close()
                #________________________ WHOLE RUN PDF_______________________________
                # same four panels, plus '+' markers for out-of-window readings
                fig = P.figure(figsize=(16, 16))
                xs = numpy.array(xsecs)
                y1s = numpy.array(yN2Os)
                y2s = numpy.array(yd15Ns)
                y3s = numpy.array(yd15As)
                y4s = numpy.array(yd15Bs)
                x = numpy.array(xsec)
                y1 = numpy.array(yN2O)
                y2 = numpy.array(yd15N)
                y3 = numpy.array(yd15A)
                y4 = numpy.array(yd15B)
                (m,b)=P.polyfit(xs,y1s,1)
                y12 = P.polyval([m,b],x)
                (m,b)=P.polyfit(xs,y2s,1)
                y22 = P.polyval([m,b],x)
                (m,b)=P.polyfit(xs,y3s,1)
                y32 = P.polyval([m,b],x)
                (m,b)=P.polyfit(xs,y4s,1)
                y42 = P.polyval([m,b],x)
                line1=fig.add_subplot(411)
                line1.scatter(xs, y1s)
                line1.scatter(x, y1, marker='+')
                line1.set_xlim(left=0)
                line1.grid()
                line1.set_title('Sample Name: '+str(sampletimes['SampleName'][c1])+' time: '+time.strftime("%d %b %Y %H:%M:%S ", samplestructtime))
                line1.set_ylabel('N2O concentration (ppmv)', color='b')
                line2=fig.add_subplot(412)
                line2.scatter(xs, y2s)
                line2.scatter(x, y2, marker='+')
                line2.set_xlim(left=0)
                line2.grid()
                line2.set_ylabel('d15N', color='b')
                line3=fig.add_subplot(413)
                line3.scatter(xs, y3s)
                line3.scatter(x, y3, marker='+')
                line3.set_xlim(left=0)
                line3.grid()
                line3.set_ylabel('d15N alpha', color='b')
                line4=fig.add_subplot(414)
                line4.scatter(xs, y4s)
                line4.scatter(x, y4, marker='+')
                line4.set_xlim(left=0)
                #line4.plot(x, y42)
                #            if doHMRfit.get()>0 and nh3HMRslope!='NA':
                #                p1=numpy.array([float(nh3HMRintercept),-float(nh3HMRslope),float(nh3HMRmixratio)])
                #                line4.plot(xHMR, fitfunc(p1, xHMR)) # Plot the HMR fit
                line4.grid()
                line4.set_ylabel('d15N beta', color='b')
                line4.set_xlabel('time (seconds)', color='b')
                pdffile2.savefig(dpi=150)
                P.close()
            else:
                # too few readings: emit a placeholder row
                resultswriter.writerow([sampletimes['SampleName'][c1],rundate,runtime, sampletimes['Port'][c1],
                'na', 'na', 'na', 'na','na','na','na', 'na', 'na', 'na','na', 'na', 'na','na', 'na'])
                print 'NO DATA FOUND FOR THIS SAMPLE'
            print '----------------------------------------------'
            c1=c1+1
        openinputfile.close()
        openresultsfile.close()
        pdffile1.close()
        pdffile2.close()
#____________________________________________________________________________________________________________
#--------------------GUI-----------------------------------------------------------------------------------
#_____________________________________________________________________________________________________________
# create a root TkInter frame
root = Tk()
root.title('iN2O results calculator 20140719')
#__________________________________LOGO&TITLE________________________________________
# NOTE(review): banner says "iCO2 calculator" while the window title says
# iN2O -- likely a copy-paste leftover; confirm the intended product name.
bigtitle = Label(root, anchor=W, font=('times', 20, 'bold'), fg='white',bg='darkgreen', text="iCO2 calculator ")
bigtitle.grid(row=0,column=0,columnspan=10,sticky=[N,S,E,W])
#____________________________OPTIONS______________________________________________________
optionstitle = Label(root, anchor=W, font=('times', 12, 'bold'), text="options:")
optionstitle.grid(row=1,column=0, columnspan=3, sticky=[N,S,E,W])
# Seconds of data ignored after each sample switch (read by askopenresultsfilename).
pretimeentrytitle = Label(root, anchor=W, text="stabilizing time to ignore at start (s):")
pretimeentrytitle.grid(row=3,column=0, columnspan=1, sticky=[E])
pretimeentry= Entry(root,width=4)
pretimeentry.insert(0,"270")
pretimeentry.grid(row=3,column=1, columnspan=1, sticky=[W])
# Length of the sampling window, in minutes.
sampletimeentrytitle = Label(root, anchor=W, text="sampling time to include (min):")
sampletimeentrytitle.grid(row=4,column=0, columnspan=1, sticky=[E])
sampletimeentry= Entry(root,width=4)
sampletimeentry.insert(0,"5")
sampletimeentry.grid(row=4,column=1, columnspan=1, sticky=[W])
# Local-time offset from UTC, in hours (e.g. Saskatchewan: -6).
UTCoffsettitle = Label(root, anchor=W, text="Offset local time UTC (SK: -6):")
UTCoffsettitle.grid(row=13,column=0, columnspan=1, sticky=[E])
UTCoffsetentry= Entry(root,width=4)
UTCoffsetentry.insert(0,"-6")
UTCoffsetentry.grid(row=13,column=1, columnspan=1, sticky=[W])
#    doHMRfit=IntVar()
#    doHMRapply = Checkbutton(root, text="Fit the exponential HMR model", variable=doHMRfit)
#    doHMRapply.grid(row=12,column=0, columnspan=5, sticky=W)
# _______________________CALC INDIVIDUAL FLUXES_____________________________________________
f0=Frame(root,height=1, width=450, bg="grey")
f0.grid(row=24,column=0, columnspan=4, pady=5,sticky=S)
calcfluxtitle = Label(root, anchor=W, font=('times', 12, 'bold'), text="Calculate results")
calcfluxtitle.grid(row=25,column=0, columnspan=4, sticky=[N,S,E,W])
calcfluxhelp = Label(root, anchor=W, text="Open a merged results file")
calcfluxhelp.grid(row=26,column=0, columnspan=4, sticky=[N,S,E,W])
calcfluxhelp2 = Label(root, anchor=W, text="input concentrations in ppmv (=ul/l)")
calcfluxhelp2.grid(row=27,column=0, columnspan=4, sticky=[N,S,E,W])
# Button triggers the whole pipeline: file dialog -> merge -> per-sample results.
buttonopenconcfile=Button(root, text='open sampletime file', command=askopenresultsfilename)
buttonopenconcfile.grid(row=28,column=1,columnspan=1,sticky=[W])
calcfluxhelp3 = Label(root, anchor=W, text="results are saved in data_tools_uofs")
calcfluxhelp3.grid(row=29,column=0, columnspan=4, sticky=[N,S,E,W])
# #_____________________________________________________________________________________________________________
root.mainloop( )
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import unittest
import paddle.fluid as fluid
from paddle.fluid.transpiler.distribute_transpiler import delete_ops
import traceback
import collections
import six
class TranspilerTest(unittest.TestCase):
    """Shared fixture for DistributeTranspiler tests.

    Builds a small 1000-wide FC regression network and exposes helpers to
    obtain the transpiled trainer and pserver programs for two fake
    pserver endpoints.
    """

    def setUp(self):
        self.trainer_id = 0
        self.trainers = 2
        self.pservers = 2
        # NOTE: we do not actually bind this port
        endpoints = ["127.0.0.1:6174", "127.0.0.1:6175"]
        self.pserver_eps = ",".join(endpoints)
        self.pserver1_ep, self.pserver2_ep = endpoints
        self.sync_mode = True
        self.transpiler = None

    def net_conf(self):
        """Default network: FC layer + squared-error loss + plain SGD."""
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        optimizer.minimize(mean_cost)

    def get_main_program(self):
        """Build the main program and stash a pristine clone in origin_prog."""
        prog = fluid.Program()
        prog.random_seed = 1
        with fluid.program_guard(prog):
            self.net_conf()
        self.origin_prog = prog.clone()
        return prog

    def get_trainer(self, config=None):
        """Return (trainer_main, trainer_startup) after transpilation."""
        pristine_startup = fluid.default_startup_program().clone()
        transpiler = self._transpiler_instance(config)
        trainer_main = transpiler.get_trainer_program(wait_port=False)
        trainer_startup = fluid.default_startup_program()
        # Transpilation must not add blocks to the startup program.
        assert (pristine_startup.num_blocks == 1)
        assert (trainer_startup.num_blocks == pristine_startup.num_blocks)
        return trainer_main, trainer_startup

    def get_pserver(self, ep, config=None, sync_mode=True):
        """Return (pserver_program, pserver_startup) for endpoint *ep*."""
        transpiler = self._transpiler_instance(config, sync_mode)
        pserver_prog = transpiler.get_pserver_program(ep)
        startup_prog = transpiler.get_startup_program(ep, pserver_prog)
        return pserver_prog, startup_prog

    def _transpiler_instance(self, config=None, sync_mode=True):
        """Lazily create and cache the transpiler for this test case."""
        if not self.transpiler:
            main = self.get_main_program()
            self.transpiler = fluid.DistributeTranspiler(config=config)
            self.transpiler.transpile(
                self.trainer_id,
                program=main,
                pservers=self.pserver_eps,
                trainers=self.trainers,
                sync_mode=sync_mode)
        return self.transpiler

    def transpiler_test_impl(self):
        """Overridden by subclasses with the actual assertions."""
        pass

    def test_transpiler(self):
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                self.transpiler_test_impl()
class TestBasicModel(TranspilerTest):
    """Default config: fc_w is sliced into two blocks across the pservers."""

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)
        trainer, trainer_startup = self.get_trainer()

        # splited var blocks should be in startup program
        startup_vars = trainer_startup.global_block().vars
        for present in ("fc_w.block0", "fc_w.block1", "fc_w", "fc_b"):
            self.assertTrue(present in startup_vars)
        for absent in ("fc_w@GRAD", "fc_b@GRAD"):
            self.assertTrue(absent not in startup_vars)

        src = [op.type for op in trainer_startup.global_block().ops]
        dst = [
            'fill_constant', 'fill_constant', 'uniform_random', 'recv', 'recv',
            'fetch_barrier', 'concat'
        ]
        self.assertEqual(src, dst)

        self.assertEqual([op.type for op in trainer.global_block().ops], [
            'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean',
            'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad',
            'elementwise_add_grad', 'send', 'mul_grad', 'split_byref', 'send',
            'send_barrier', 'recv', 'recv', 'fetch_barrier', 'concat'
        ])

        self.assertEqual(len(pserver.blocks), 3)
        # block0: listen_and_serv
        self.assertEqual([op.type for op in pserver.blocks[0].ops],
                         ["listen_and_serv"])
        # block1~2: optimize pass
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "sgd"])
        # confirm startup program
        self.assertEqual([op.type for op in startup.global_block().ops],
                         ["fill_constant", "fill_constant", "uniform_random"])
        # the variable #fc_w will be split into two blocks
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))

        # all parameters should be optimized on pserver
        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" not in op.input_names:
                        continue
                    param_name = op.input("Param")[0]
                    # strip a trailing ".blockN" suffix back to the base name
                    cut = param_name.find(".block")
                    pserver_params.append(
                        param_name if cut == -1 else param_name[:cut])
        trainer_params = [
            op.input("Param")[0]
            for op in self.origin_prog.global_block().ops
            if "Param" in op.input_names
        ]
        self.assertEqual(set(pserver_params), set(trainer_params))
class TestBasicModelWithLargeBlockSize(TranspilerTest):
    """Huge min_block_size: fc_w stays whole (no split_byref/concat ops)."""

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.min_block_size = 1048576

        pserver, startup = self.get_pserver(self.pserver1_ep, config)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep, config)
        trainer, _ = self.get_trainer(config)

        self.assertEqual([op.type for op in trainer.global_block().ops], [
            'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean',
            'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad',
            'elementwise_add_grad', 'send', 'mul_grad', 'send', 'send_barrier',
            'recv', 'recv', 'fetch_barrier'
        ])

        self.assertEqual(len(pserver.blocks), 2)
        # block0: listen_and_serv
        self.assertEqual([op.type for op in pserver.blocks[0].ops],
                         ["listen_and_serv"])
        # block1~2: optimize pass
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "sgd"])
        # confirm startup program
        self.assertEqual([op.type for op in startup.global_block().ops],
                         ["fill_constant", "fill_constant"])
        # the variable #fc_w will NOT be split
        fc_w_var = startup2.global_block().var("fc_w")
        self.assertEqual(fc_w_var.shape, (1000, 1000))

        # all parameters should be optimized on pserver
        pserver_params = []
        for prog in [pserver, pserver2]:
            for blk in prog.blocks:
                for op in blk.ops:
                    if "Param" not in op.input_names:
                        continue
                    param_name = op.input("Param")[0]
                    # strip a trailing ".blockN" suffix back to the base name
                    cut = param_name.find(".block")
                    pserver_params.append(
                        param_name if cut == -1 else param_name[:cut])
        trainer_params = [
            op.input("Param")[0]
            for op in self.origin_prog.global_block().ops
            if "Param" in op.input_names
        ]
        self.assertEqual(set(pserver_params), set(trainer_params))
class TestNoSliceVar(TranspilerTest):
    """With slice_var_up disabled, fc_w lives whole on exactly one pserver."""

    def setUp(self):
        super(TestNoSliceVar, self).setUp()

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        _, startup = self.get_pserver(self.pserver1_ep, config)
        _, startup2 = self.get_pserver(self.pserver2_ep, config)

        # The un-sliced parameter is placed on one of the two pservers.
        for prog in (startup, startup2):
            if "fc_w" in prog.global_block().vars:
                fc_w_var = prog.global_block().vars["fc_w"]
                break
        self.assertEqual(fc_w_var.shape, (1000, 1000))
class TestLRDecay(TranspilerTest):
    """Exponential LR decay becomes its own optimize block on the pserver."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.exponential_decay(
                learning_rate=1.0,
                decay_steps=2100,
                decay_rate=0.1,
                staircase=True))
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 4)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(lr_decay_ops, [
            "increment", "cast", "fill_constant", "elementwise_div", "floor",
            "fill_constant", "elementwise_pow", "fill_constant",
            "elementwise_mul"
        ])
class TestDecayedAdagrad(TranspilerTest):
    """Smoke test: a DecayedAdagrad network must transpile without error."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.1)
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()
class TestFtrl(TranspilerTest):
    """Smoke test: an Ftrl network must transpile without error."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.Ftrl(learning_rate=0.1)
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()
class TestLRDecayConditional(TranspilerTest):
    """Piecewise decay emits conditional blocks; each one holds an assign op."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.SGD(
            learning_rate=fluid.layers.piecewise_decay([10000, 20000],
                                                       [1.0, 0.5, 1.0]))
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        serv_op = pserver.blocks[0].ops[0]
        optimize_blocks = [
            blk.idx for blk in serv_op.all_attrs()["optimize_blocks"]
        ]
        sub_blocks = [
            blk.idx for blk in pserver.blocks if blk.idx not in optimize_blocks
        ]

        self.assertEqual(len(pserver.blocks), 7)
        lr_decay_ops = [op.type for op in pserver.blocks[1].ops]
        self.assertEqual(lr_decay_ops, [
            "increment", "cast", "fill_constant", "fill_constant", "less_than",
            "logical_not", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "conditional_block"
        ])
        # test the condition blocks
        for idx in sub_blocks:
            if idx == 0:
                continue
            self.assertEqual([op.type for op in pserver.blocks[idx].ops],
                             ["assign"])
class TestL2Decay(TranspilerTest):
    """L2 regularization + value clipping add clip/scale/add ops on pserver."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(
            input=feature,
            size=1000,
            act=None,
            param_attr=fluid.ParamAttr(
                name='fc_w',
                regularizer=fluid.regularizer.L2Decay(),
                gradient_clip=fluid.clip.GradientClipByValue(0.1)),
            bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.SGD(learning_rate=0.1)
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 3)
        # fc_b: clipped only; fc_w: clipped + L2 decay (scale/elementwise_add)
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "clip", "sgd"])
        self.assertEqual(
            [op.type for op in pserver.blocks[2].ops],
            ["sum", "scale", "clip", "scale", "elementwise_add", "sgd"])
        # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer
class TestL2DecayWithPiecewise(TranspilerTest):
    """Momentum + piecewise decay + L2Decay: decay conditionals land in
    block 1, regularized momentum updates in the optimize blocks."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=fluid.ParamAttr(name='fc_b'))
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        base_lr = 1.0
        boundaries = [1, 10, 20, 30]
        lr_steps = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]
        optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=boundaries, values=lr_steps),
            momentum=0.9,
            regularization=fluid.regularizer.L2Decay(1e-4))
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        trainer, _ = self.get_trainer()

        self.assertEqual(len(pserver.blocks), 9)
        self.assertEqual([op.type for op in pserver.blocks[1].ops], [
            "increment", "cast", "fill_constant", "fill_constant", "less_than",
            "logical_not", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "fill_constant", "less_than", "logical_not", "logical_and",
            "logical_and", "conditional_block", "fill_constant",
            "conditional_block"
        ])
        # both parameter blocks get the same regularized momentum update
        for blk_idx in (7, 8):
            self.assertEqual(
                [op.type for op in pserver.blocks[blk_idx].ops],
                ["sum", "scale", "scale", "elementwise_add", "momentum"])
class TestEmptyPserverOptimizeBlocks(TranspilerTest):
    """A single un-sliced parameter leaves one pserver with an empty
    optimize block."""

    def net_conf(self):
        feature = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        # only one parameter
        prediction = fluid.layers.fc(input=feature,
                                     size=1000,
                                     act=None,
                                     param_attr=fluid.ParamAttr(name='fc_w'),
                                     bias_attr=False)
        label = fluid.layers.data(name='y', shape=[1], dtype='float32')
        sq_cost = fluid.layers.square_error_cost(input=prediction, label=label)
        mean_cost = fluid.layers.mean(sq_cost)
        optimizer = fluid.optimizer.SGD(learning_rate=1.0)
        optimizer.minimize(mean_cost)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        config.slice_var_up = False

        pserver, startup = self.get_pserver(ep=self.pserver2_ep, config=config)
        self.assertEqual(len(pserver.blocks), 2)
        self.assertEqual(len(pserver.blocks[1].ops), 0)
class TestDistLookupTableBase(TranspilerTest):
    """Shared network builder for the lookup-table transpiler tests.

    Builds three embedding+pool branches: title and brand share one
    lookup table ('shared_w', optionally distributed) while profile uses
    its own always-local table ('profile_emb'); the pooled vectors feed
    an fc classifier trained with Adam.
    """

    def network_with_table(self, is_sparse, is_distributed):
        """Construct the test network.

        Args:
            is_sparse: use sparse (selected-rows) gradients for embeddings.
            is_distributed: shard the shared lookup table across pservers.
        """
        self.table_size = 1000
        self.emb_size = 64
        self.lookup_table_name = 'shared_w'

        def emb_pool(ids, table_name, is_distributed):
            # Embedding lookup followed by average sequence pooling.
            emb = fluid.layers.embedding(
                input=ids,
                size=[self.table_size, self.emb_size],
                dtype='float32',
                param_attr=table_name,
                is_sparse=is_sparse,
                is_distributed=is_distributed)
            pool = fluid.layers.sequence_pool(input=emb, pool_type='average')
            return pool

        title_ids = fluid.layers.data(
            name='title_ids', shape=[1], dtype='int64', lod_level=1)
        brand_ids = fluid.layers.data(
            name='brand_ids', shape=[1], dtype='int64', lod_level=1)
        # BUG FIX: this data layer was previously also named 'brand_ids'
        # (copy-paste error), silently aliasing the brand input variable.
        profile_ids = fluid.layers.data(
            name='profile_ids', shape=[1], dtype='int64', lod_level=1)
        title_emb = emb_pool(title_ids, self.lookup_table_name, is_distributed)
        brand_emb = emb_pool(brand_ids, self.lookup_table_name, is_distributed)
        profile_emb = emb_pool(profile_ids, "profile_emb", False)
        fc0 = fluid.layers.concat(
            input=[title_emb, brand_emb, profile_emb], axis=1)
        predict = fluid.layers.fc(input=fc0,
                                  size=2,
                                  act=None,
                                  param_attr=fluid.ParamAttr(name='fc_w'),
                                  bias_attr=fluid.ParamAttr(name='fc_b'))

        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        cost = fluid.layers.cross_entropy(input=predict, label=label)
        avg_cost = fluid.layers.mean(cost)
        optimizer = fluid.optimizer.Adam(learning_rate=0.003)
        optimizer.minimize(avg_cost)
class TestLocalLookupTable(TestDistLookupTableBase):
    """Sparse but non-distributed embeddings: every embedding table is
    optimized on the pservers like a regular (selected-rows) parameter.
    """
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)
        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 3 optimize for table 2 adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        trainer, _ = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        # Expected trainer-side op sequence: forward, backward, sends of
        # the sliced/sparse gradients, sync barriers, then recv+concat of
        # the updated parameter slices.
        ops = [
            'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool',
            'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add',
            'cross_entropy', 'mean', 'fill_constant', 'mean_grad',
            'cross_entropy_grad', 'elementwise_add_grad', 'send', 'mul_grad',
            'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'split_selected_rows', 'send', 'sequence_pool_grad',
            'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'sum', 'split_selected_rows', 'send', 'send_barrier', 'recv',
            'recv', 'recv', 'recv', 'fetch_barrier', 'concat', 'concat'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
class TestDistLookupTable(TestDistLookupTableBase):
    """Sparse *and* distributed: the shared table is served through
    prefetch/lookup_sparse_table on the pserver and updated with SGD,
    while the dense params and the local profile table use Adam.
    """
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        pserver1, startup1 = self.get_pserver(self.pserver1_ep)
        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 2 another adam optimize block (per-parameter slice)
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["sum", "scale", "adam", "scale", "scale"])
        # 3 optimize for the distributed lookup table, sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["sum", "sgd"])
        # 4 prefetch -> lookup_sparse_table serving block for the table
        self.assertEqual([op.type for op in pserver1.blocks[4].ops],
                         ["lookup_sparse_table"])
        # 5 save table (checkpoint block for the distributed table)
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])
        trainer, trainer_startup = self.get_trainer()
        self.assertEqual(len(trainer.blocks), 1)
        # Distributed-table lookups are rewritten on the trainer into
        # split_ids -> prefetch -> merge_ids; table grads go out via
        # split_ids + send instead of split_selected_rows.
        ops = [
            'split_ids', 'prefetch', 'merge_ids', 'sequence_pool',
            'sequence_pool', 'lookup_table', 'sequence_pool', 'concat', 'mul',
            'elementwise_add', 'cross_entropy', 'mean', 'fill_constant',
            'mean_grad', 'cross_entropy_grad', 'elementwise_add_grad', 'send',
            'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'split_selected_rows', 'send',
            'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'sum', 'split_ids', 'send', 'send_barrier',
            'recv', 'recv', 'recv', 'fetch_barrier', 'concat'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
        # Trainer startup: local fill_constant/uniform_random inits, recv
        # of pserver-held params, and a fake_init for the remote table.
        startup_ops = [
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'fill_constant', 'fill_constant',
            'fill_constant', 'fill_constant', 'uniform_random',
            'uniform_random', 'recv', 'recv', 'recv', 'fetch_barrier', 'concat',
            'fake_init'
        ]
        self.assertEqual([op.type for op in trainer_startup.blocks[0].ops],
                         startup_ops)
class TestAsyncLocalLookupTable(TestDistLookupTableBase):
    """Async mode: no sum/scale gradient aggregation on the pserver and
    no send_barrier/fetch_barrier ops on the trainer.
    """
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=False)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        # Third argument False presumably disables sync mode (matches the
        # "Async" in the test name) -- TODO confirm against get_pserver.
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)
        self.assertEqual(len(pserver1.blocks), 4)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam (no sum/scale in async mode)
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["adam", "scale", "scale"])
        # 2 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["adam", "scale", "scale"])
        # 3 optimize for table adam
        # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num
        self.assertEqual([op.type for op in pserver1.blocks[3].ops],
                         ["adam", "scale", "scale"])
        trainer, _ = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        # Same forward/backward as the sync case but without barriers.
        ops = [
            'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool',
            'lookup_table', 'sequence_pool', 'concat', 'mul', 'elementwise_add',
            'cross_entropy', 'mean', 'fill_constant', 'mean_grad',
            'cross_entropy_grad', 'elementwise_add_grad', 'send', 'mul_grad',
            'send', 'concat_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'split_selected_rows', 'send', 'sequence_pool_grad',
            'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad',
            'sum', 'split_selected_rows', 'send', 'recv', 'recv', 'recv',
            'recv', 'concat', 'concat'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
class TestAsyncDistLookupTable(TestDistLookupTableBase):
    """Async mode with a distributed lookup table: prefetch/sparse-table
    serving on the pserver plus barrier-free trainer communication.
    """
    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        # Third argument False presumably disables sync mode -- TODO confirm.
        pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False)
        self.assertEqual(len(pserver1.blocks), 6)
        # 0 listen_and_serv
        # 1 optimize for fc_w or fc_b adam (no sum/scale in async mode)
        self.assertEqual([op.type for op in pserver1.blocks[1].ops],
                         ["adam", "scale", "scale"])
        # 2 optimize for table adam
        self.assertEqual([op.type for op in pserver1.blocks[2].ops],
                         ["adam", "scale", "scale"])
        # 3 optimize for table sgd
        self.assertEqual([op.type for op in pserver1.blocks[3].ops], ["sgd"])
        # 4 prefetch -> lookup_sparse_table serving block for the table
        self.assertEqual([op.type for op in pserver1.blocks[4].ops],
                         ["lookup_sparse_table"])
        # 5 save table
        self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"])
        trainer, _ = self.get_trainer(config)
        self.assertEqual(len(trainer.blocks), 1)
        # No send_barrier/fetch_barrier ops in async mode.
        ops = [
            'split_ids', 'prefetch', 'merge_ids', 'sequence_pool',
            'sequence_pool', 'lookup_table', 'sequence_pool', 'concat', 'mul',
            'elementwise_add', 'cross_entropy', 'mean', 'fill_constant',
            'mean_grad', 'cross_entropy_grad', 'elementwise_add_grad', 'send',
            'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'split_selected_rows', 'send',
            'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad',
            'lookup_table_grad', 'sum', 'split_ids', 'send', 'recv', 'recv',
            'recv', 'concat'
        ]
        self.assertEqual([op.type for op in trainer.blocks[0].ops], ops)
class TestDistLookupTableSliceSize(TestDistLookupTableBase):
    """The distributed lookup table must be sliced evenly across pservers."""

    def net_conf(self):
        # Sparse + distributed exercises the table-slicing code path.
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        config = fluid.DistributeTranspilerConfig()
        ps_program, _ = self.get_pserver(self.pserver1_ep, config)

        self.assertTrue(self.transpiler.has_distributed_lookup_table)

        # Each pserver holds ceil(table_size / #pservers) rows of the table.
        table_var = ps_program.global_block().vars[self.transpiler.table_name]
        expected_rows = int(math.ceil(self.table_size / self.pservers))
        self.assertEqual(table_var.shape[0], expected_rows)
class TestDistArgsInProgram(TestDistLookupTableBase):
    """Transpilation must annotate the trainer program with distribution
    metadata (flags, lookup-table name and pserver endpoints)."""

    def net_conf(self):
        self.network_with_table(is_sparse=True, is_distributed=True)

    def transpiler_test_impl(self):
        trainer_program, _ = self.get_trainer()

        # Flags recorded on the trainer-side program.
        self.assertTrue(trainer_program._is_distributed)
        self.assertTrue(trainer_program._is_chief)
        self.assertEqual(trainer_program._distributed_lookup_table,
                         self.lookup_table_name)
        self.assertEqual(trainer_program._endpoints,
                         [self.pserver1_ep, self.pserver2_ep])
class TestRMSPropOptimizer(TranspilerTest):
    """Checks the RMSProp optimize block and that fc_w (and its moment
    accumulator "momentum_1") are sliced evenly across the two pservers.
    """
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, startup = self.get_pserver(self.pserver1_ep)
        pserver2, startup2 = self.get_pserver(self.pserver2_ep)
        self.assertEqual(len(pserver.blocks), 3)
        # block1~2: optimize pass (sum over trainers, scale, rmsprop update)
        self.assertEqual([op.type for op in pserver.blocks[1].ops],
                         ["sum", "scale", "rmsprop"])
        # the variable #fc_w will be split into two blocks of 500 rows each
        fc_w_var = startup.global_block().var("fc_w.block1")
        self.assertEqual(fc_w_var.shape, (500, 1000))
        # The optimizer accumulator var "momentum_1" is sliced like fc_w.
        moment_var = startup.global_block().var("momentum_1")
        self.assertEqual(moment_var.shape, (500, 1000))
class TestLoadSliceVar(TranspilerTest):
    """Checks the slice metadata recorded for loading sliced variables:
    the element count of the unsliced var must equal the sum of the
    element counts of the slices held by the two pservers.
    """
    def net_conf(self):
        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
        y_predict = fluid.layers.fc(input=x,
                                    size=1000,
                                    act=None,
                                    param_attr=fluid.ParamAttr(name='fc_w'),
                                    bias_attr=fluid.ParamAttr(name='fc_b'))
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)
        optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)
        optimizer.minimize(avg_cost)

    def transpiler_test_impl(self):
        pserver, _ = self.get_pserver(self.pserver1_ep)
        pserver2, _ = self.get_pserver(self.pserver2_ep)
        # Both pservers must record slice metadata for later loading.
        self.assertTrue(pserver._slice_vars_and_attrs)
        self.assertTrue(pserver2._slice_vars_and_attrs)
        for idx in six.moves.xrange(len(pserver._slice_vars_and_attrs)):
            # Entry [0] is the original (unsliced) var -- identical on
            # both pservers; entry [2] is this pserver's own slice.
            self.assertEqual(pserver._slice_vars_and_attrs[idx][0],
                             pserver2._slice_vars_and_attrs[idx][0])
            # numel(original) == numel(slice on ps1) + numel(slice on ps2),
            # i.e. the two slices cover exactly the original variable.
            total_numel = six.moves.reduce(
                lambda x, y: x * y, pserver._slice_vars_and_attrs[idx][0].shape)
            self.assertEqual(
                total_numel,
                six.moves.reduce(lambda x, y: x * y,
                                 pserver._slice_vars_and_attrs[idx][2].shape) +
                six.moves.reduce(lambda x, y: x * y,
                                 pserver2._slice_vars_and_attrs[idx][2].shape))
class TestNCCL2Transpile(TranspilerTest):
    """Smoke test for the NCCL2 (collective) transpile mode."""

    def test_nccl2_transpile(self):
        # NCCL2 mode requires CUDA; do nothing otherwise. (Previously an
        # `if/else: pass` -- replaced with a guard clause. Consider
        # self.skipTest(...) if a visible skip is preferred.)
        if not fluid.core.is_compiled_with_cuda():
            return

        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            self.net_conf()

            config = fluid.DistributeTranspilerConfig()
            config.mode = "nccl2"
            t = fluid.DistributeTranspiler(config=config)
            t.transpile(
                0,
                trainers="127.0.0.1:6174,127.0.0.1:6175",
                current_endpoint="127.0.0.1:6174",
                startup_program=startup)

        # The transpiled startup program must end by generating the NCCL
        # communicator id and must expose it as the "NCCLID" variable.
        print([op.type for op in startup.global_block().ops])
        self.assertEqual(startup.global_block().ops[-1].type, "gen_nccl_id")
        self.assertIsNotNone(startup.global_block().vars.get("NCCLID"))
# Script entry point: run the whole transpiler test suite.
if __name__ == "__main__":
    unittest.main()
| |
# django imports
import hashlib
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.db.models.signals import post_save, m2m_changed, post_delete
from django.db.models.signals import pre_save
from django.db.models.signals import pre_delete
# lfs imports
from lfs.caching.utils import clear_cache, delete_cache, invalidate_cache_group_id
from lfs.cart.models import Cart
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.models import StaticBlock
from lfs.core.models import Shop
from lfs.core.signals import cart_changed
from lfs.core.signals import product_changed
from lfs.core.signals import category_changed
from lfs.core.signals import shop_changed
from lfs.core.signals import topseller_changed
from lfs.core.signals import manufacturer_changed
from lfs.criteria.models import CountryCriterion, Criterion, WeightCriterion, WidthCriterion, ShippingMethodCriterion, \
PaymentMethodCriterion, LengthCriterion, HeightCriterion, CombinedLengthAndGirthCriterion, CartPriceCriterion
from lfs.customer_tax.models import CustomerTax
from lfs.marketing.models import Topseller
from lfs.order.models import OrderItem
from lfs.page.models import Page
from lfs.shipping.models import ShippingMethod
from lfs.tax.models import Tax
# reviews imports
from reviews.signals import review_added
# Shop
def shop_changed_listener(sender, **kwargs):
    """Flushes the complete cache whenever the shop configuration changes."""
    clear_cache()
shop_changed.connect(shop_changed_listener)
# Cart
def cart_changed_listener(sender, **kwargs):
    """Invalidates cart caches; ``sender`` is the changed cart instance."""
    update_cart_cache(sender)
cart_changed.connect(cart_changed_listener)
def cart_deleted_listener(sender, instance, **kwargs):
    """Invalidates cart caches before a Cart is deleted."""
    update_cart_cache(instance)
pre_delete.connect(cart_deleted_listener, sender=Cart)
# Category
def category_deleted_listener(sender, instance, **kwargs):
    """Invalidates category caches before a Category is deleted."""
    update_category_cache(instance)
pre_delete.connect(category_deleted_listener, sender=Category)
def category_saved_listener(sender, instance, **kwargs):
    """Invalidates category caches before a Category is saved."""
    update_category_cache(instance)
pre_save.connect(category_saved_listener, sender=Category)
def category_changed_listener(sender, **kwargs):
    """Invalidates category caches; ``sender`` is the changed Category."""
    update_category_cache(sender)
category_changed.connect(category_changed_listener)
def product_categories_changed_listener(sender, **kwargs):
    """Drops the cached category lists of products whose category
    assignment changed via the Category.products m2m relation.

    NOTE(review): no ``action`` check, so this fires for pre_* and
    post_* phases alike and clears each key twice -- harmless but
    redundant; confirm before tightening.
    """
    instance = kwargs['instance']
    reverse = kwargs['reverse']
    pk_set = kwargs['pk_set']
    if reverse:
        # Change initiated from the product side: instance is the product.
        product = instance
        delete_cache("%s-product-categories-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, product.id, True))
        delete_cache("%s-product-categories-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, product.id, False))
    else:
        # Change initiated from the category side: pk_set holds product pks.
        if pk_set:
            for product in Product.objects.filter(pk__in=pk_set):
                delete_cache("%s-product-categories-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, product.id, True))
                delete_cache("%s-product-categories-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, product.id, False))
m2m_changed.connect(product_categories_changed_listener, sender=Category.products.through)
# Manufacturer
def manufacturer_changed_listener(sender, **kwargs):
    """Invalidates manufacturer caches; ``sender`` is the Manufacturer."""
    # filtered lists of products assigned to manufacturer used at manufacturer page
    delete_cache("%s-manufacturer-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, sender.slug))
    # list of all manufacturer products
    delete_cache("%s-manufacturer-all-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, sender.pk))
    # if manufacturer assignment was changed then product navigation might be different too
    invalidate_cache_group_id('product_navigation')
manufacturer_changed.connect(manufacturer_changed_listener)
# OrderItem
def order_item_listener(sender, instance, **kwargs):
    """Deletes topseller caches after an OrderItem has been saved or
    deleted. Topsellers are calculated automatically on the basis of
    OrderItems, hence we have to take care of that here.
    """
    delete_cache("%s-topseller" % settings.CACHE_MIDDLEWARE_KEY_PREFIX)
    try:
        for category in instance.product.get_categories(with_parents=True):
            delete_cache("%s-topseller-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, category.id))
    except Exception:
        # Best effort: the related product may already be gone (e.g. the
        # item outlives its product). Previously a bare ``except:`` which
        # also swallowed SystemExit/KeyboardInterrupt.
        pass  # fail silently
pre_delete.connect(order_item_listener, sender=OrderItem)
post_save.connect(order_item_listener, sender=OrderItem)
# Page
def page_saved_listener(sender, instance, **kwargs):
    """Invalidates the cached page (by slug) and the cached page list."""
    delete_cache("%s-page-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.slug))
    delete_cache("%s-pages" % settings.CACHE_MIDDLEWARE_KEY_PREFIX)
post_save.connect(page_saved_listener, sender=Page)
# Product
def product_changed_listener(sender, **kwargs):
    """Invalidates product caches; ``sender`` is the changed Product."""
    update_product_cache(sender)
product_changed.connect(product_changed_listener)
def product_saved_listener(sender, instance, **kwargs):
    # NOTE(review): only category caches are refreshed here; the direct
    # product-cache update is disabled (commented out) -- confirm intent.
    # update_product_cache(instance)
    update_category_cache(instance)
post_save.connect(product_saved_listener, sender=Product)
def product_pre_saved_listener(sender, instance, **kwargs):
    """ If product slug was changed we should have cleared slug based product cache"""
    # check if product already exists in database
    if instance.pk:
        # Variants share cache entries with their parent product.
        if instance.is_variant():
            parent = instance.parent
        else:
            parent = instance
        delete_cache("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
        try:
            # Fetch the *stored* product: its slug is the old one, which
            # may differ from the (possibly renamed) in-memory instance.
            old_product = Product.objects.get(pk=parent.pk)
        except Product.DoesNotExist:
            pass
        else:
            delete_cache("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, old_product.slug))
pre_save.connect(product_pre_saved_listener, sender=Product)
# Shipping Method
def shipping_method_saved_listener(sender, instance, **kwargs):
    """Invalidates delivery-time caches and the active-methods list."""
    delete_cache("%s-shipping-delivery-time" % settings.CACHE_MIDDLEWARE_KEY_PREFIX)
    delete_cache("%s-shipping-delivery-time-cart" % settings.CACHE_MIDDLEWARE_KEY_PREFIX)
    delete_cache("all_active_shipping_methods")
post_save.connect(shipping_method_saved_listener, sender=ShippingMethod)
def shipping_method_deleted_listener(sender, instance, **kwargs):
    """Invalidates the cached list of active shipping methods."""
    delete_cache("all_active_shipping_methods")
post_delete.connect(shipping_method_deleted_listener, sender=ShippingMethod)
# Shop
def shop_saved_listener(sender, instance, **kwargs):
    """Invalidates the cached shop (keyed by id) after it is saved."""
    delete_cache("%s-shop-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id))
post_save.connect(shop_saved_listener, sender=Shop)
# Static blocks
def static_blocks_saved_listener(sender, instance, **kwargs):
    """Invalidates caches of a saved StaticBlock."""
    update_static_block_cache(instance)
post_save.connect(static_blocks_saved_listener, sender=StaticBlock)
# Topseller
def topseller_changed_listener(sender, **kwargs):
    """Invalidates topseller caches; ``sender`` is the changed Topseller."""
    update_topseller_cache(sender)
topseller_changed.connect(topseller_changed_listener)
def topseller_saved_listener(sender, instance, **kwargs):
    """Invalidates topseller caches after a Topseller is saved."""
    update_topseller_cache(instance)
post_save.connect(topseller_saved_listener, sender=Topseller)
def review_added_listener(sender, **kwargs):
    """Invalidates the caches of the object a new review belongs to.

    ``sender`` is the review; its content_type/content_id pair is
    resolved back to the rated object (expected to be a Product, since
    it is passed to update_product_cache).
    """
    ctype = ContentType.objects.get_for_id(sender.content_type_id)
    product = ctype.get_object_for_this_type(pk=sender.content_id)
    update_product_cache(product)
review_added.connect(review_added_listener)
def criterion_countries_changed(sender, instance, action, reverse, model, pk_set, **kwargs):
    """Drops cached country values when a CountryCriterion's country
    m2m set changes; only the post_* phases of the signal are handled.
    """
    if action not in ('post_add', 'post_remove', 'post_clear'):
        return
    # Forward relation: the criterion itself changed. Reverse relation:
    # the change came from the country side, so every affected criterion
    # pk in pk_set is invalidated.
    affected_pks = pk_set if reverse else [instance.pk]
    for criterion_pk in affected_pks:
        delete_cache(u'country_values_{}'.format(criterion_pk))
m2m_changed.connect(criterion_countries_changed, sender=CountryCriterion.value.through)
def customer_tax_created_listener(sender, instance, created, **kwargs):
    """Invalidates the customer-tax list when a CustomerTax is created."""
    if created:
        delete_cache(u'all_customer_taxes')
post_save.connect(customer_tax_created_listener, sender=CustomerTax)
def customer_tax_deleted_listener(sender, instance, **kwargs):
    """Invalidates the customer-tax list after a CustomerTax is deleted."""
    delete_cache(u'all_customer_taxes')
post_delete.connect(customer_tax_deleted_listener, sender=CustomerTax)
def tax_rate_created_listener(sender, instance, created, **kwargs):
    # Fires on every save, not only creation (the ``created`` flag is
    # unused) -- the per-pk rate cache is always dropped.
    delete_cache(u'tax_rate_{}'.format(instance.pk))
post_save.connect(tax_rate_created_listener, sender=Tax)
def tax_rate_deleted_listener(sender, instance, **kwargs):
    """Invalidates the cached tax rate after a Tax is deleted."""
    delete_cache(u'tax_rate_{}'.format(instance.pk))
post_delete.connect(tax_rate_deleted_listener, sender=Tax)
#####
def update_category_cache(instance):
    """Invalidates all caches affected by a change to the given Category.

    NOTE: ATM we simply clear the whole cache, because fine-grained
    invalidation took too long when a category holds a lot of products
    (1000s) and the shop admin changes the category. The per-key deletes
    that used to follow an early ``return`` here were unreachable dead
    code and have been removed; restore fine-grained invalidation from
    version control if it is ever needed again.
    """
    clear_cache()
def update_product_cache(instance):
    """Deletes all caches of the given product and, when the product is
    part of a variant family, of the parent and all other variants too.
    """
    # If the instance is a product with variant or a variant we have to
    # delete also the parent and all other variants
    if instance.is_variant():
        parent = instance.parent
    else:
        parent = instance
    # if product was changed then we have to clear all product_navigation caches
    invalidate_cache_group_id('product_navigation')
    invalidate_cache_group_id('properties-%s' % parent.id)
    delete_cache("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
    delete_cache("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.slug))
    delete_cache("%s-product-images-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
    delete_cache("%s-related-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
    delete_cache("%s-product-categories-%s-False" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
    delete_cache("%s-product-categories-%s-True" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
    delete_cache("%s-default-variant-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.id))
    if parent.manufacturer:
        delete_cache("%s-manufacturer-all-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.manufacturer.pk))
        delete_cache("%s-manufacturer-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.manufacturer.slug))
    # Remove the product's entry from the cached delivery-time mapping
    # (best effort: the mapping may be absent or may lack the key).
    try:
        c = cache.get("%s-shipping-delivery-time" % settings.CACHE_MIDDLEWARE_KEY_PREFIX)
        del c["%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.slug)]
        cache.set("%s-shipping-delivery-time" % settings.CACHE_MIDDLEWARE_KEY_PREFIX, c)
    except (KeyError, TypeError):
        pass
    for variant in parent.get_variants():
        delete_cache("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id))
        # NOTE(review): this deletes the *parent* slug key again inside the
        # variant loop -- looks like it was meant to be variant.slug; confirm.
        delete_cache("%s-product-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, parent.slug))
        delete_cache("%s-product-images-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id))
        delete_cache("%s-related-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id))
        delete_cache("%s-product-categories-%s-False" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id))
        delete_cache("%s-product-categories-%s-True" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.id))
        delete_cache("%s-product-shipping-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, variant.slug))
def update_cart_cache(instance):
    """Removes every cache entry related to the given cart instance."""
    prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    # The per-user cart key only exists for authenticated carts.
    if instance.user:
        delete_cache("%s-cart-%s" % (prefix, instance.user.pk))
    stale_keys = [
        "%s-cart-%s" % (prefix, instance.session),
        "%s-cart-items-%s" % (prefix, instance.id),
        "%s-cart-costs-True-%s" % (prefix, instance.id),
        "%s-cart-costs-False-%s" % (prefix, instance.id),
        "%s-shipping-delivery-time-cart" % prefix,
        "%s-shipping-delivery-time" % prefix,
    ]
    for key in stale_keys:
        delete_cache(key)
def update_static_block_cache(instance):
    """Deletes all static block relevant caches.
    """
    delete_cache("%s-static-block-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, instance.id))
    # Category pages render static blocks inline, so drop those keys too.
    for category in instance.categories.all():
        delete_cache("%s-category-inline-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, category.slug))
def update_topseller_cache(topseller):
    """Deletes all topseller relevant caches.
    """
    delete_cache("%s-topseller" % settings.CACHE_MIDDLEWARE_KEY_PREFIX)
    # Per-category topseller lists cover all parent categories as well.
    product = topseller.product
    for category in product.get_categories(with_parents=True):
        delete_cache("%s-topseller-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, category.id))
def clear_criterion_cache(sender, instance, **kwargs):
    """Removes the cached criteria list for the object the criterion
    refers to (keyed by content_id/content_type).

    Connected to both ``post_save`` and ``pre_delete``. BUG FIX:
    ``pre_delete`` does not provide a ``created`` argument, so the
    previous required ``created`` parameter made every delete-time
    dispatch raise TypeError; all signal extras (including post_save's
    ``created``) are now absorbed by ``**kwargs``.
    """
    cache_key = u'criteria_for_model_{}_{}'.format(instance.content_id, instance.content_type.pk)
    cache.delete(cache_key)

# Every concrete criterion model invalidates the criteria cache on both
# save and delete.
_CRITERION_MODELS = (
    WeightCriterion,
    CartPriceCriterion,
    CombinedLengthAndGirthCriterion,
    CountryCriterion,
    HeightCriterion,
    LengthCriterion,
    PaymentMethodCriterion,
    ShippingMethodCriterion,
    WidthCriterion,
)
for _criterion_model in _CRITERION_MODELS:
    post_save.connect(clear_criterion_cache, sender=_criterion_model)
    pre_delete.connect(clear_criterion_cache, sender=_criterion_model)
| |
#!/opt/local/bin/python2.6
###################################################################################################
#
# pyral.context - Python module for tracking Rally connection context
#
# used by pyral.restapi
#
###################################################################################################
__version__ = (0, 9, 3)
import sys, os
import platform
import subprocess
import time
import socket
import json
import re # we use compile, match
from pprint import pprint
# intra-package imports
from .rallyresp import RallyRESTResponse
###################################################################################################
__all__ = ["RallyContext", "RallyContextHelper"]
###################################################################################################
REQUEST_TIME_LIMIT = 5  # in seconds; used as the socket default timeout for the initial server check
IPV4_ADDRESS_PATT = re.compile(r'^\d+\.\d+\.\d+\.\d+$')  # dotted-quad IPv4 address, eg. 10.1.2.3
FORMATTED_ID_PATT = re.compile(r'^[A-Z]{1,2}\d+$')  # Rally FormattedID, eg. US1234, DE77
##################################################################################################
class RallyRESTAPIError(Exception): pass
##################################################################################################
class RallyContext(object):
    """Value object holding the coordinates of a Rally connection:
    server, credentials, service URL and the optional subscription /
    workspace / project scoping.
    """

    def __init__(self, server, user, password, service_url,
                 subscription=None, workspace=None, project=None):
        self.server = server
        self.user = user
        self.password = password
        self.service_url = service_url
        self.subs_name = subscription
        self.workspace = workspace
        self.project = project

    def asDict(self):
        """Returns the context as a dict; the optional subscription,
        workspace and project entries appear only when set.
        """
        context_dict = {'server': self.server,
                        'user': self.user,
                        'password': self.password,
                        'service_url': self.service_url,
                       }
        if self.subs_name:
            context_dict['subscription'] = self.subs_name
        if self.workspace:
            context_dict['workspace'] = self.workspace
        if self.project:
            context_dict['project'] = self.project
        return context_dict

    def subscription(self):
        # BUG FIX: previously returned self._subs_name, an attribute that
        # is never assigned (__init__ sets self.subs_name), so every call
        # raised AttributeError.
        return self.subs_name

    def serviceURL(self):
        return self.service_url

    def identity(self):
        """Returns a ' | '-separated identity string for this context.
        NOTE: it includes the password -- do not log the result verbatim.
        """
        workspace = self.workspace or 'None'
        project = self.project or 'None'
        return " | ".join([self.server, self.user, self.password, workspace, project])

    def __repr__(self):
        return self.identity()
##################################################################################################
class RallyContextHelper(object):
    def __init__(self, agent, server, user, password):
        """Stores the connection coordinates and initializes the
        subscription / workspace / project bookkeeping that is filled in
        lazily from the Rally server (see check()).
        """
        self.agent = agent
        self.server = server
        self.user = user
        self.password = password
        # capture this user's User, UserProfile, Subscription records to extract
        # the workspaces and projects this user has access to (and their defaults)
        self._subs_name = ""
        self._subs_workspaces = []  # a list of Workspace "shell" objects
        self._workspaces = []
        self._workspace_ref = {}
        self._defaultWorkspace = None
        self._currentWorkspace = None
        self._inflated = False  # set True once workspace/project info has been fully loaded
        self._projects = {}  # key by workspace name with list of projects per workspace
        self._project_ref = {}  # key by workspace name with dict of project_name: project_ref
        self._defaultProject = None
        self._currentProject = None
        # Start with a credentials-only context; check() refines it.
        self.context = RallyContext(server, user, password, self.agent.serviceURL())
        self.defaultContext = self.context  # to be updated on check call
    def check(self, server):
        """
        Make an initial attempt to contact the Rally web server and retrieve info
        for the user associated with the credentials supplied upon instantiation.
        Raise a RallyRESTAPIError if any problem is encountered.
        Otherwise call our internal method to set some relevant default information
        from the returned response.
        This method serves double-duty of verifying that the server can be contacted
        and speaks Rally WSAPI, and establishes the default workspace and project for
        the user.
        """
        ##
        ##        print " RallyContextHelper.check starting ..."
        ##        sys.stdout.flush()
        ##
        # cap the time any single socket operation may take before giving up
        socket.setdefaulttimeout(REQUEST_TIME_LIMIT)
        target_host = server
        # honor an https proxy if one is configured in the environment
        # (either spelling of the variable)
        big_proxy = os.environ.get('HTTPS_PROXY', False)
        small_proxy = os.environ.get('https_proxy', False)
        proxy = big_proxy if big_proxy else small_proxy if small_proxy else False
        proxy_host = False
        if proxy:
            # NOTE(review): assumes the proxy value is 'host:port'; a value with
            # a scheme prefix (e.g. 'http://host:port') would make this unpack
            # fail with a ValueError -- verify against deployment practice
            proxy_host, proxy_port = proxy.split(':')
            target_host = proxy_host or server
        # cheap reachability probe (system ping) before attempting any WSAPI call
        reachable = Pinger.ping(target_host)
        if not reachable:
            problem = "host: '%s' non-existent or unreachable" % target_host
            raise RallyRESTAPIError(problem)
        #if IPV4_ADDRESS_PATT.match(target_host): # is server an IPV4 address?
        #    try:
        #        info = socket.gethostbyaddr(target_host)
        #    except socket.herror as ex:
        #        pass
        #        problem = "IP v4 address '%s' not valid or unreachable" % target_host
        #        raise RallyRESTAPIError(problem)
        #    except Exception as ex:
        #        print "Exception detected: %s" % ex.args[0]
        #        problem = "Exception detected trying to obtain host info for: %s" % target_host
        #        raise RallyRESTAPIError(problem)
        # TODO: look for IPV6 type address also?
        #else:
        #    try:
        #        target_host = socket.gethostbyname(target_host)
        #    except socket.gaierror as ex:
        #        problem = "hostname: '%s' non-existent or unreachable" % target_host
        #        raise RallyRESTAPIError(problem)
        # note the use of the _disableAugments keyword arg in the call
        user_name_query = 'UserName = "%s"' % self.user
        try:
            # time the request so a 404 can later be classified as a timeout
            timer_start = time.time()
            response = self.agent.get('User', fetch=True, query=user_name_query,
                                      _disableAugments=True)
            timer_stop = time.time()
        except Exception as ex:
            if str(ex.args[0]).startswith('404 Service unavailable'):
                # TODO: discern whether we should mention server or target_host as the culprit
                raise RallyRESTAPIError("hostname: '%s' non-existent or unreachable" % server)
            else:
                raise
        elapsed = timer_stop - timer_start
        if response.status_code != 200:
            ##
            ##            print "context check response: %s" % response
            ##            print "request attempt elapsed time: %6.2f" % elapsed
            ##
            # classify the failure into a human-readable problem message
            if response.status_code == 404:
                if elapsed >= float(REQUEST_TIME_LIMIT):
                    problem = "Request timed out on attempt to reach %s" % server
                elif response.errors and 'certificate verify failed' in response.errors[0]:
                    problem = "SSL certificate verification failed"
                elif response.errors and 'Max retries exceeded with url' in response.errors[0]:
                    problem = "Target Rally host: '%s' non-existent or unreachable" % server
                elif response.errors and 'NoneType' in response.errors[0]:
                    problem = "Target Rally host: '%s' non-existent or unreachable" % server
                else:
                    # unrecognized 404 flavor: dump the details to stderr for diagnosis
                    sys.stderr.write("404 Response for request\n")
                    sys.stderr.write("\n".join(response.errors) + "\n")
                    if response.warnings:
                        sys.stderr.write("\n".join(response.warnings) + "\n")
                    sys.stderr.flush()
                    problem = "404 Target host: '%s' doesn't support the Rally WSAPI" % server
            else: # might be a 401 No Authentication or 401 The username or password you entered is incorrect.
                ##
                ##                print response.status_code
                ##                print response.headers
                ##                print response.errors
                ##
                if 'The username or password you entered is incorrect.' in response.errors[0]:
                    problem = "%s The username or password you entered is incorrect." % response.status_code
                else:
                    error_blurb = response.errors[0][:80] if response.errors else ""
                    problem = "%s %s" % (response.status_code, error_blurb)
            raise RallyRESTAPIError(problem)
        ##
        ##        print " RallyContextHelper.check got the User info ..."
        ##        sys.stdout.flush()
        ##
        # success: prime subscription info, defaults, and the workspace/project caches
        self._loadSubscription()
        self._getDefaults(response)
        self._getWorkspacesAndProjects(workspace=self._defaultWorkspace, project=self._defaultProject)
        # NOTE(review): this sets self.inflated, while every other method uses
        # self._inflated (leading underscore) -- confirm which is intended
        self.inflated = 'minimal'
def _loadSubscription(self):
sub = self.agent.get('Subscription', fetch=True, _disableAugments=True)
if sub.errors:
raise Exception(sub.errors[0])
subscription = sub.next()
self._subs_name = subscription.Name
self._subs_workspaces = subscription.Workspaces
if subscription.Workspaces:
self._defaultWorkspace = subscription.Workspaces[0]
    def _getDefaults(self, response):
        """
        We have to circumvent the normal machinery as this is part of setting up the
        normal machinery. So, once having obtained the User object, we grab the
        User.UserProfile.OID value and issue a GET for that using _getResourceByOID
        and handling the response (wrapped in a RallyRESTResponse).

        Populates the default/current workspace and project attributes and,
        when still empty, primes the workspace/project name and ref caches,
        then builds the default RallyContext from those values.
        """
        user = response.next()
        self.user_oid = user.oid
        ##
        ##        print " RallyContextHelper._getDefaults calling _getResourceByOID to get UserProfile info..."
        ##        sys.stdout.flush()
        ##
        upraw = self.agent._getResourceByOID(self.context, 'UserProfile', user.UserProfile.oid, _disableAugments=True)
        ##
        ##        print " RallyContextHelper._getDefaults got the raw UserProfile info via _getResourceByOID..."
        ##        sys.stdout.flush()
        ##
        resp = RallyRESTResponse(self.agent, self.context, 'UserProfile', upraw, "full", 0)
        up = resp.data[u'QueryResult'][u'Results']['UserProfile']
        ##
        ##        print "got the UserProfile info..."
        ##        pprint(up)
        ##        print "+" * 80
        ##
        if up['DefaultWorkspace']:
            # the profile names a default workspace: take name and ref from it
            self._defaultWorkspace = up['DefaultWorkspace']['_refObjectName']
            ##
            ##            print " set _defaultWorkspace to: %s" % self._defaultWorkspace
            ##
            self._currentWorkspace = self._defaultWorkspace
            wkspace_ref = up['DefaultWorkspace']['_ref']
        else:
            # no profile default: _defaultWorkspace still holds the workspace
            # entity cached by _loadSubscription; reduce it to its Name/_ref
            self._currentWorkspace = self._defaultWorkspace.Name
            wkspace_ref = self._defaultWorkspace._ref
            self._defaultWorkspace = self._defaultWorkspace.Name
        if up['DefaultProject']:
            self._defaultProject = up['DefaultProject']['_refObjectName']
            self._currentProject = self._defaultProject
            proj_ref = up['DefaultProject']['_ref']
        else:
            # no profile default project: fall back to the first project found
            # in the default workspace (if any)
            self._defaultProject = ""
            self._currentProject = ""
            proj_ref = ""
            projects = self.agent.get('Project', fetch="Name", workspace=self._defaultWorkspace)
            if projects:
                proj = projects.next()
                proj_ref = proj._ref
                self._defaultProject = proj.Name
                self._currentProject = proj.Name
        ##
        ##        print " Default Workspace : %s" % self._defaultWorkspace
        ##        print " Default Project   : %s" % self._defaultProject
        ##
        # prime the caches only when they are still empty
        if not self._workspaces:
            self._workspaces = [self._defaultWorkspace]
        if not self._projects:
            self._projects = {self._defaultWorkspace : [self._defaultProject]}
        if not self._workspace_ref:
            if wkspace_ref.endswith('.js'):
                wkspace_ref = wkspace_ref[:-3]
            short_ref = "/".join(wkspace_ref.split('/')[-2:]) # we only need the 'workspace/<oid>' part to be a valid ref
            self._workspace_ref = {self._defaultWorkspace : short_ref}
        if not self._project_ref:
            if proj_ref.endswith('.js'):
                proj_ref = proj_ref[:-3]
            short_ref = "/".join(proj_ref.split('/')[-2:]) # we only need the 'project/<oid>' part to be a valid ref
            self._project_ref = {self._defaultWorkspace : {self._defaultProject : short_ref}}
        self.defaultContext = RallyContext(self.server,
                                           self.user,
                                           self.password,
                                           self.agent.serviceURL(),
                                           workspace=self._defaultWorkspace,
                                           project=self._defaultProject)
        self.context = self.defaultContext
        ##
        ##        print " completed _getDefaults processing..."
        ##
def currentContext(self):
return self.context
def setWorkspace(self, workspace_name):
if self.isAccessibleWorkspaceName(workspace_name):
if workspace_name not in self._workspaces:
self._getWorkspacesAndProjects(workspace=workspace_name)
self._currentWorkspace = workspace_name
self.context.workspace = workspace_name
else:
raise Exception("Attempt to set workspace to an invalid setting: %s" % workspace_name)
def getWorkspace(self):
"""
Return a 2 tuple of (name of the current workspace, ref for the current workspace)
"""
return (self._currentWorkspace, self.currentWorkspaceRef())
def isAccessibleWorkspaceName(self, workspace_name):
"""
"""
hits = [sub.Name for sub in self._subs_workspaces if workspace_name == sub.Name]
accessible = True if hits else False
return accessible
def getAccessibleWorkspaces(self):
"""
fill the instance cache items if not already done, then
return a list of (workspaceName, workspaceRef) tuples
"""
if self._inflated != 'wide':
self._inflated = 'wide' # to avoid recursion limits hell
self._getWorkspacesAndProjects(workspace='*')
workspaceInfo = []
for workspace in self._workspaces:
if workspace in self._workspace_ref:
workspaceInfo.append((workspace, self._workspace_ref[workspace]))
return workspaceInfo
def getCurrentWorkspace(self):
"""
Return the name of the current workspace
"""
return self._currentWorkspace
def currentWorkspaceRef(self):
"""
Return the ref associated with the current workspace if you can find one
"""
##
## print "default workspace: %s" % self._defaultWorkspace
## print "current workspace: %s" % self._currentWorkspace
##
if self._currentWorkspace:
return self._workspace_ref[self._currentWorkspace]
else:
return None
def setProject(self, project_name):
"""
Set the current context project with the given project_name
"""
projects = self.getAccessibleProjects(self._currentWorkspace)
hits = [name for name, ref in projects if project_name == name]
if hits and len(hits) == 1:
self._currentProject = project_name
self.context.project = project_name
else:
raise Exception("Attempt to set project to an invalid setting: %s" % project_name)
def getProject(self):
"""
Return a two tuple of (name of the current project, ref for the current project)
"""
return (self._currentProject, self.currentProjectRef())
    def getAccessibleProjects(self, workspace='default'):
        """
        Return a list of (projectName, projectRef) tuples for the named
        workspace.  The pseudo-names 'default' and 'current' (and falsy
        values) resolve to the default / current workspace respectively.
        Returns an empty list when the workspace cannot be found even after
        refreshing the cache.
        """
        ##
        ##        print "getAccessibleProjects(workspace=%s)" % workspace
        ##
        projectInfo = []
        # resolve the pseudo workspace names to actual names
        if workspace == 'default' or not workspace:
            workspace = self._defaultWorkspace
        elif workspace == 'current':
            workspace = self._currentWorkspace
        if workspace not in self._workspaces: # can't return anything meaningful then...
            if self._inflated == 'wide': # cache is complete, so the name truly doesn't exist
                return projectInfo
            ##
            ##            print " calling _getWorkspacesAndProjects(workspace='%s')..." % workspace
            ##
            # cache may be partial: try to pull info for this workspace now
            self._getWorkspacesAndProjects(workspace=workspace)
            # check self._workspaces again...
            if workspace not in self._workspaces:
                return projectInfo
        ##            else:
        ##                print " self._workspaces augmented, now has your target workspace"
        ##                sys.stdout.flush()
        ##
        for projName, projRef in self._project_ref[workspace].items():
            projectInfo.append((projName, projRef))
        return projectInfo
    def resetDefaultProject(self):
        """
        Get the set of current valid projects by calling
        getAccessibleProjects(self._currentWorkspace)
        If _currentProject and _defaultProject are in set of currently valid projects,
        then merely return (_currentProject, ref for _currentProject)
        Otherwise set _defaultProject to the first project name (sorted alphabetically)
        in the set of currently valid projects.
        if the _currentProject isn't valid at this point, reset it to the _defaultProject value
        Then return a 2 tuple of (_defaultProject, ref for the _defaultProject)
        """
        current_valid_projects = self.getAccessibleProjects(self._currentWorkspace)
        proj_names = sorted([name for name, ref in current_valid_projects])
        proj_refs = self._project_ref[self._currentWorkspace]
        # NOTE: unicode() coercion here marks this module as Python 2 code
        if unicode(self._defaultProject) in proj_names and unicode(self._currentProject) in proj_names:
            return (self._defaultProject, proj_refs[self._defaultProject])
        if unicode(self._defaultProject) not in proj_names:
            # NOTE(review): raises IndexError when the workspace has no
            # accessible projects at all -- confirm that cannot happen here
            self._defaultProject = proj_names[0]
        if unicode(self._currentProject) not in proj_names:
            self.setProject(self._defaultProject)
        return (self._defaultProject, proj_refs[self._defaultProject])
def currentProjectRef(self):
"""
Return the ref associated with the project in the currently selected workspace.
If there isn't a currently selected workspace, return an empty string.
"""
if not self._currentWorkspace:
return ""
if not self._currentProject:
return ""
##
## print " currentProjectRef() ... "
## print " _currentWorkspace: '%s'" % self._currentWorkspace
## print " _currentProject : '%s'" % self._currentProject
## print " _project_ref keys: %s" % repr(self._project_ref.keys())
##
#
# this next condition could be True in limited circumstances, like on initialization
# when info for the _currentProject hasn't yet been retrieved,
# which will be manifested by the _currentWorkspace not having an entry in _project_ref
#
if self._currentWorkspace not in self._project_ref:
return ""
proj_refs = self._project_ref[self._currentWorkspace]
if self._currentProject in proj_refs:
return proj_refs[self._currentProject]
else:
return ""
def _establishContext(self, kwargs):
workspace = None
project = None
if kwargs and 'workspace' in kwargs:
workspace = kwargs['workspace']
if kwargs and 'project' in kwargs:
project = kwargs['project']
##
## print "_establishContext calling _getWorkspacesAndProjects(workspace=%s, project=%s)" % (workspace, project)
##
self._getWorkspacesAndProjects(workspace=workspace, project=project)
if workspace:
self._inflated = 'minimal'
    def identifyContext(self, **kwargs):
        """
        Look for workspace, project, projectScopeUp, projectScopeDown entries in kwargs.
        If present, check cache for values to provide for hrefs.
        Return back a tuple of (RallyContext instance, augment list with hrefs)

        Raises RallyRESTAPIError when a named workspace or project is not
        accessible with the current credentials.
        """
        ##
        ##        print "... RallyContextHelper.identifyContext kwargs: %s" % repr(kwargs)
        ##        sys.stdout.flush()
        ##
        augments = []
        if '_disableAugments' in kwargs:
            # caller explicitly wants no workspace/project qualification
            return self.context, augments
        if not self._inflated:
            self._inflated = 'minimal' # to avoid recursion limits hell
            self._establishContext(kwargs)
        workspace = None
        if 'workspace' in kwargs and kwargs['workspace']:
            workspace = kwargs['workspace']
            eligible_workspace_names = [wksp.Name for wksp in self._subs_workspaces]
            if workspace not in eligible_workspace_names:
                problem = 'Workspace specified: "%s" not accessible with current credentials'
                raise RallyRESTAPIError(problem % workspace)
            if workspace not in self._workspaces and self._inflated != 'wide':
                # pull info for just this workspace on demand
                ec_kwargs = {'workspace' : workspace}
                self._establishContext(ec_kwargs)
                self._inflated = 'narrow'
            wks_ref = self._workspace_ref[workspace]
            if wks_ref.endswith('.js'):
                wks_ref = wks_ref[:-3]
            augments.append("workspace=%s" % wks_ref)
            self.context.workspace = workspace
        project = None
        if 'project' in kwargs:
            if not kwargs['project']:
                # an explicitly falsy project means "no project scoping"
                self.context.project = None
                return self.context, augments
            project = kwargs['project']
            # resolve against the named workspace, else whatever is current/default
            wks = workspace or self._currentWorkspace or self._defaultWorkspace
            if project not in self._projects[wks]:
                problem = 'Project specified: "%s" (in workspace: "%s") not accessible with current credentials' % \
                          (project, workspace)
                raise RallyRESTAPIError(problem)
            prj_ref = self._project_ref[wks][project]
            if prj_ref.endswith('.js'):
                prj_ref = prj_ref[:-3]
            augments.append("project=%s" % prj_ref)
            self.context.project = project
        if 'projectScopeUp' in kwargs:
            projectScopeUp = kwargs['projectScopeUp']
            if projectScopeUp not in [0, False, 'false', 'False']:
                augments.append("projectScopeUp=true")
            else:
                augments.append("projectScopeUp=false")
        if 'projectScopeDown' in kwargs:
            projectScopeDown = kwargs['projectScopeDown']
            # NOTE(review): the elif below can never fire -- it is only reached
            # when projectScopeDown IS in [1, True, 'true', 'True'], so no
            # augment is ever emitted for truthy values; confirm intent
            if projectScopeDown not in [1, True, 'true', 'True']:
                augments.append("projectScopeDown=false")
            elif projectScopeDown in [0, False, 'false', 'False']:
                augments.append("projectScopeDown=true")
        if not workspace and project:
            self.context = self.defaultContext
        return self.context, augments
    def _getWorkspacesAndProjects(self, **kwargs):
        """
        Fill out the _workspaces / _workspace_ref / _projects / _project_ref
        caches for either one named workspace (kwargs['workspace']), all
        workspaces ('*'), or the current/default workspace when no name is
        given.  Marks the cache inflation level ('narrow' or 'wide') when the
        target is not the default workspace.
        """
        target_workspace = self._currentWorkspace or self._defaultWorkspace
        if kwargs:
            if 'workspace' in kwargs and kwargs['workspace']:
                target_workspace = kwargs['workspace']
                if target_workspace == '*': # wild card value to specify all workspaces
                    target_workspace = None
        ##
        ##        print "in _getWorkspacesAndProjects(%s)" % repr(kwargs)
        ##        print "_getWorkspacesAndProjects, target_workspace: %s" % target_workspace
        ##        print "_getWorkspacesAndProjects, self._currentWorkspace: %s" % self._currentWorkspace
        ##        print "_getWorkspacesAndProjects, self._defaultWorkspace: %s" % self._defaultWorkspace
        ##
        # fill out self._workspaces and self._workspace_ref
        for workspace in self._subs_workspaces:
            if target_workspace and workspace.Name != target_workspace:
                # short-circuit issuing a WS call if we don't need to
                continue
            ##
            ##            print workspace.Name, workspace.oid
            ##
            if workspace.Name not in self._workspaces:
                self._workspaces.append(workspace.Name)
            #self._workspace_ref[workspace.Name] = workspace._ref
            # we only need the 'workspace/<oid>' fragment to qualify as a valid ref
            wksp_ref = workspace._ref[:-3] if workspace._ref.endswith('.js') else workspace._ref
            self._workspace_ref[workspace.Name] = '/'.join(wksp_ref.split('/')[-2:])
            if workspace.Name not in self._projects:
                self._projects[ workspace.Name] = []
                self._project_ref[workspace.Name] = {}
            # TODO: cache results of next WS call and bypass if we already have info for workspace.Name...
            resp = self.agent._getResourceByOID( self.context, 'workspace', workspace.oid, _disableAugments=True)
            response = json.loads(resp.content)
            # If SLM gave back consistent responses, we could use RallyRESTResponse, but no joy...
            # Carefully weasel into the response to get to the guts of what we need
            projects = response['Workspace']['Projects']
            for project in projects:
                projName = project['_refObjectName']
                #projRef = project['_ref']
                # we only need the project/123534 section to qualify as a valid ref
                projRef = '/'.join(project['_ref'][:-3].split('/')[-2:])
                if projName not in self._projects[workspace.Name]:
                    self._projects[ workspace.Name].append(projName)
                    self._project_ref[workspace.Name][projName] = projRef
        if target_workspace != self._defaultWorkspace:
            # record how much of the subscription has been cached so far
            if 'workspace' in kwargs and kwargs['workspace']:
                self._inflated = 'narrow'
                ##
                ##                print "setting _inflated to 'narrow'"
                ##
            else:
                self._inflated = 'wide'
                ##
                ##                print "setting _inflated to 'wide'"
                ##
def __repr__(self):
items = []
items.append('%s = %s' % ('server', self.server))
items.append('%s = %s' % ('defaultContext', self.defaultContext))
items.append('%s = %s' % ('_subs_name', self._subs_name))
items.append('%s = %s' % ('_workspaces', repr(self._workspaces)))
items.append('%s = %s' % ('_projects', repr(self._projects)))
items.append('%s = %s' % ('_workspace_ref', repr(self._workspace_ref)))
items.append('%s = %s' % ('_project_ref', repr(self._project_ref)))
items.append('%s = %s' % ('_defaultWorkspace', self._defaultWorkspace))
items.append('%s = %s' % ('_defaultProject', self._defaultProject))
items.append('%s = %s' % ('_currentWorkspace', self._currentWorkspace))
items.append('%s = %s' % ('_currentProject', self._currentProject))
representation = "\n".join(items)
return representation
##################################################################################################
class Pinger(object):
    """
    Attempts a single ping against a given target host.
    A response to the ping command results in the ping method returning True,
    otherwise False is returned.
    """
    # platform specific single-attempt ping command vectors (target appended later)
    PING_COMMAND = {'Darwin'  : ["ping", "-o", "-c", "1", "-t", "2"],
                    'Unix'    : ["ping",       "-c", "1", "-w", "2"],
                    'Linux'   : ["ping",       "-c", "1", "-w", "2"],
                    'Windows' : ["ping", "-n", "1", "-w", "2"]
                   }
    # where to dump the ping command's output on each platform
    BLACK_HOLE   = {'Darwin'  : "/dev/null",
                    'Unix'    : "/dev/null",
                    'Linux'   : "/dev/null",
                    'Windows' : "NUL"
                   }

    @classmethod
    def ping(cls, target):
        """
        Run one platform-appropriate ping against target, discarding its
        output.  Return True when the ping command exits with status 0.
        Raises KeyError for platforms not present in PING_COMMAND.
        """
        plat_ident = platform.system()
        vector = cls.PING_COMMAND[plat_ident][:]
        vector.append(target)
        # use a context manager so the sink file is always closed
        # (the original leaked the open file handle)
        with open(cls.BLACK_HOLE[plat_ident], "w") as abyss:
            rc = subprocess.call(vector, stdout=abyss)
        return rc == 0
##################################################################################################
| |
def workerid_to_nodename(workerid):
    """Return the node name portion of a worker id: the segment after the
    last '.' (the whole id when it contains no dots)."""
    return workerid.rpartition(".")[-1]
class WorkerInstance(object):
    """
    Describes one worker: its profiles (attribute lists plus name mappings),
    parameter metadata, optional block, GUI parameters and tooltip, together
    with the currently active profile and block values.
    """

    # For the special event-I/O attributes, maps the current profile name to
    # the profile this worker must morph into before that attribute can be
    # used in a connection.
    _morphing = {
        "evin": {"default": "default_evio", "plain": "plain_evio", "simplified": "default_evio"},
        "evout": {"default": "default_evio", "plain": "plain_evio", "simplified": "default_evio"},
        "everr": {"default": "default_evio", "plain": "plain_evio", "simplified": "default_evio"},
        "evexc": {"default": "default_evio", "plain": "plain_evio", "simplified": "default_evio"},
    }

    def __init__(self, type_, profiles, paramnames, paramtypelist, block, guiparams, tooltip):
        self.type = type_
        self.profiles = profiles
        self.paramnames = paramnames
        self.paramtypelist = paramtypelist
        self.block = block
        self.guiparams = guiparams
        self.tooltip = tooltip
        self.curr_profile = None      # name of the currently active profile
        self.curr_blockvalues = []    # names of the currently active blockvalues

    def _get_morph(self, attribute):
        """
        Return the name of the profile to morph into so that `attribute`
        becomes usable, or None when no morph would help.
        """
        if attribute not in self._morphing:
            # Non-event attributes: only the simplified profile can be widened
            # (to default); for other profiles there is nothing to offer.
            return "default" if self.curr_profile == "simplified" else None
        morph_targets = self._morphing[attribute]
        return morph_targets.get(self.curr_profile)

    def profile(self):
        """Return the (attributes, mapping) pair of the active profile."""
        return self.profiles[self.curr_profile]

    def check_antenna(self, attribute):
        """
        Verify that `attribute` can serve as a connection end point (antenna).
        Returns None when it already can, the name of a profile to morph into
        when morphing would make it usable, and raises KeyError otherwise.
        """
        return self._check_endpoint(attribute, "antenna", "inmap", "inhook")

    def check_output(self, attribute_name):
        """
        Verify that `attribute_name` can serve as a connection start point
        (output).  Returns None when it already can, the name of a profile to
        morph into when morphing would make it usable, and raises KeyError
        otherwise.
        """
        return self._check_endpoint(attribute_name, "output", "outmap", "outhook")

    def _check_endpoint(self, attribute_name, io, map_attr, hook_attr):
        """
        Shared implementation of check_antenna / check_output: `io` selects
        the block side, `map_attr` / `hook_attr` name the profile mapping and
        attribute hook to probe.
        """
        if self.block is not None and self.block.io == io:
            if attribute_name in self.curr_blockvalues:
                return None  # OK: active blockvalue attributes connect as-is
            attribs, mapping = self.profiles[self.curr_profile]
            mapped_name = getattr(mapping, map_attr)[attribute_name]
            for attrib in attribs:
                if attrib.name == mapped_name:
                    if getattr(attrib, hook_attr) is not None:
                        return None  # OK: attribute is hooked in this profile
                    break
        morph = self._get_morph(attribute_name)
        if morph is None:
            raise KeyError(attribute_name)
        return morph

    def update_blockvalues(self, blockvalues):
        """Record the currently active blockvalue names."""
        self.curr_blockvalues = blockvalues
from ..HGui import Node
class WorkerInstanceManager(object):
    """
    Mediates between worker instances (profiles, mappings, block values,
    parameters) and their visual representation as nodes and connections on
    a canvas.
    """

    def __init__(self, canvas):
        self._canvas = canvas
        # expose the canvas's observer hooks directly to our own clients
        self.observers_selection = canvas.observers_selection
        self.observers_remove = canvas.observers_remove
        self._workerinstances = {}   # workerid -> WorkerInstance
        self._workerparams = {}      # workerid -> last parameter dict set for it
        self._empties = set()        # workerids of placeholder (empty) nodes
        self.default_profile = "default"

    def add_workerinstance(self, workerid, workerinstance, x, y):
        """
        Register workerinstance under workerid and create its canvas node at
        (x, y), activating the manager-wide default profile (falling back to
        "default" when the instance does not provide it).
        """
        assert workerid not in self._workerinstances, workerid
        prof = self.default_profile
        if prof not in workerinstance.profiles:
            prof = "default"
        attribs, mapping = workerinstance.profiles[prof]
        nodename = workerid_to_nodename(workerid)
        node = Node(nodename, (x, y), attribs, workerinstance.tooltip)
        self._canvas.add_node(workerid, node)
        workerinstance.curr_profile = prof
        self._workerinstances[workerid] = workerinstance

    def select(self, workerids):
        """Forward a selection request for workerids to the canvas."""
        self._canvas.select(workerids)

    def _morph_worker(self, workerid, attributes, maps):
        """
        Replace the canvas node for workerid with a new node carrying
        `attributes`, remapping existing connections through `maps`, an
        (antenna-map, output-map) pair.
        """
        worker_instance = self._workerinstances[workerid]
        if worker_instance.block is not None:
            # fold the block's own name mapping into the appropriate side
            if worker_instance.block.io == "antenna":
                cmap = maps[0]
            elif worker_instance.block.io == "output":
                cmap = maps[1]
            else:
                raise Exception(worker_instance.block.io)
            if cmap is not None:
                cmap.update(worker_instance.block.blockmap)
        nodename = workerid_to_nodename(workerid)
        original_node = self._canvas.get_node(workerid)
        x, y = original_node.position
        new_node = Node(nodename, (x, y), attributes)
        self._canvas.morph_node(workerid, new_node, maps[0], maps[1])

    def morph_worker(self, workerid, morph):
        """
        Switch workerid's instance to the profile named `morph`, building the
        attribute-name translation maps between old and new profiles, and
        re-apply any previously stored parameters afterwards.
        """
        worker_instance = self._workerinstances[workerid]
        from_attributes, from_worker_mapping = worker_instance.profile()
        to_attributes, to_worker_mapping = worker_instance.profiles[morph]
        maps = []
        for at in "in", "out":
            cmap = {}
            #TODO fix the string mapr and map name convention
            from_map = getattr(from_worker_mapping, "_" + at + "mapr")
            to_map = getattr(to_worker_mapping, "_" + at + "map")
            # translate each old attribute name v to its new-profile name via
            # the shared canonical key k; entries unmapped on either side are
            # dropped
            for v, k in from_map.items():
                if k is None:
                    continue
                if to_map[k] is None:
                    continue
                vv = to_map[k]
                cmap[v] = vv
            maps.append(cmap)
        self._morph_worker(workerid, to_attributes, maps)
        worker_instance.curr_profile = morph
        if workerid in self._workerparams:
            self.set_parameters(workerid, self._workerparams[workerid])

    def worker_update_blockvalues(self, workerid, blockvalues):
        """
        Re-shape workerid's node so it carries the block attributes for the
        given blockvalues in addition to its profile attributes.
        """
        worker_instance = self._workerinstances[workerid]
        assert worker_instance.block is not None
        block_attributes = worker_instance.block.attributes
        attributes = list(worker_instance.profile()[0])
        for blockvalue in blockvalues:
            if not blockvalue:
                continue
            attribute = block_attributes[blockvalue]
            attributes.append(attribute)
        worker_instance.update_blockvalues(blockvalues)
        self._morph_worker(workerid, attributes, (None, None))

    def get_blockvalues(self, workerid):
        """Return workerid's active blockvalues, or None when it has no block."""
        worker_instance = self._workerinstances[workerid]
        if worker_instance.block is None:
            return None
        return worker_instance.curr_blockvalues

    def add_connection(self, id_, start, end, interpoints=None):
        """
        Create a connection between the (workerid, attribute) endpoints
        `start` and `end`, morphing either worker first when its endpoint
        attribute requires it, and translating attribute names through the
        workers' profile mappings before handing off to the canvas.
        """
        # avoid the shared-mutable-default pitfall of interpoints=[];
        # the canvas still receives a (fresh) list when none was supplied
        if interpoints is None:
            interpoints = []
        start_id, start_attr = start
        wi_start = self._workerinstances[start_id]
        morph = wi_start.check_output(start_attr)
        if morph is not None:
            self.morph_worker(start_id, morph)
        end_id, end_attr = end
        wi_end = self._workerinstances[end_id]
        morph = wi_end.check_antenna(end_attr)
        if morph is not None:
            self.morph_worker(end_id, morph)
        # attributes living in the worker's block tree are used verbatim;
        # everything else goes through the profile's out/in mapping
        do_map_start = True
        if wi_start.block is not None:
            if wi_start.block.io == "output":
                if start[1] in wi_start.block.tree:
                    do_map_start = False
        if do_map_start:
            map_start = wi_start.profile()[1].outmap
            m_start = map_start[start[1]]
        else:
            m_start = start[1]
        do_map_end = True
        if wi_end.block is not None:
            if wi_end.block.io == "antenna":
                if end[1] in wi_end.block.tree:
                    do_map_end = False
        if do_map_end:
            map_end = wi_end.profile()[1].inmap
            m_end = map_end[end[1]]
        else:
            m_end = end[1]
        startm = (start[0], m_start)
        endm = (end[0], m_end)
        self._canvas.add_connection(id_, startm, endm, interpoints)

    def get_node(self, workerid):
        """
        Return (canvas node, profile mapping) for workerid; the mapping is
        None for empty (placeholder) nodes.
        """
        node = self._canvas.get_node(workerid)
        try:
            wi = self._workerinstances[workerid]
            mapping = wi.profile()[1]
        except KeyError: # empty
            mapping = None
        return node, mapping

    def get_workerinstance(self, workerid):
        """Return the WorkerInstance registered under workerid."""
        return self._workerinstances[workerid]

    def get_workerinstances(self):
        """Return the ids of all registered worker instances."""
        return self._workerinstances.keys()

    def get_connections(self):
        """Return all connections known to the canvas."""
        return self._canvas.get_connections()

    def get_connection_ids(self):
        """Return the ids of all connections known to the canvas."""
        return self._canvas.get_connection_ids()

    def has_connection(self, workerid, io, segio):
        """
        Returns whether a worker has one or more connections to the specified segio
        workerid: the ID of the worker
        io: "antenna" or "output"
        segio: the name of the antenna or output
        """
        assert io in ("antenna", "output"), io
        for con in self._canvas.get_connections():
            if io == "antenna":
                w, seg = con.end_node, con.end_attribute
            else:
                w, seg = con.start_node, con.start_attribute
            if w == workerid and segio == seg: return True
        return False

    def add_empty(self, workerid, x, y):
        """Create a placeholder (empty) node for workerid at (x, y)."""
        nodename = workerid_to_nodename(workerid)
        node = Node(nodename, (x, y), [], empty=True)
        self._canvas.add_node(workerid, node)
        self._empties.add(workerid)

    def gui_removes_workerinstance(self, workerid):
        """
        Bookkeeping for a node the GUI has already removed from the canvas:
        drop it from the empties set or from the instance/parameter caches.
        """
        if workerid in self._empties:
            self._empties.remove(workerid)
        else:
            self._workerinstances.pop(workerid)
            self._workerparams.pop(workerid, None)

    def remove_workerinstance(self, workerid):
        """Remove workerid's instance, its canvas node and stored parameters."""
        self._workerinstances.pop(workerid)
        self._canvas.remove_node(workerid)
        self._workerparams.pop(workerid, None)

    def remove_empty(self, workerid):
        """Remove a placeholder node previously created with add_empty."""
        self._canvas.remove_node(workerid)
        self._empties.remove(workerid)

    def rename_workerinstance(self, old_workerid, new_workerid):
        """Re-key a worker instance and rename its canvas node accordingly."""
        new_nodename = workerid_to_nodename(new_workerid)
        self._canvas.rename_node(old_workerid, new_workerid, new_nodename)
        inst = self._workerinstances.pop(old_workerid)
        self._workerinstances[new_workerid] = inst
        # NOTE(review): entries in _workerparams (and _empties) are not
        # re-keyed here, so stored parameters do not survive a rename --
        # confirm whether that is intended

    def set_parameters(self, workerid, params):
        """
        Store params for workerid and push the profile-mapped subset of them
        onto the canvas node's attribute values.
        """
        self._workerparams[workerid] = params
        wi = self._workerinstances[workerid]
        pmapping = wi.profile()[1].pmap
        mparams = {}
        for p in params:
            mp = pmapping[p]
            if mp is None:
                continue
            if not mp in params:
                continue
            # NOTE(review): this keys mparams by the unmapped name p but reads
            # the value under the mapped name mp; verify this is the intended
            # direction of the translation
            mparams[p] = params[mp]
        for p, v in mparams.items():
            self._canvas.set_attribute_value(workerid, p, v)
| |
import unittest
import numpy as np
import numpy.testing as np_test
from pgmpy.inference import VariableElimination
from pgmpy.inference import BeliefPropagation
from pgmpy.models import BayesianModel, MarkovModel
from pgmpy.models import JunctionTree
from pgmpy.factors.discrete import TabularCPD
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.extern.six.moves import range
class TestVariableElimination(unittest.TestCase):
    """
    Exercises VariableElimination on a small 6-node Bayesian network
    (A, R -> J; J -> Q; J, G -> L): marginal/MAP queries, max-marginals,
    and induced graph/width.
    """

    def setUp(self):
        # network structure: J depends on A and R; Q on J; L on J and G
        self.bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
                                             ('J', 'L'), ('G', 'L')])
        cpd_a = TabularCPD('A', 2, values=[[0.2], [0.8]])
        cpd_r = TabularCPD('R', 2, values=[[0.4], [0.6]])
        cpd_j = TabularCPD('J', 2, values=[[0.9, 0.6, 0.7, 0.1],
                                           [0.1, 0.4, 0.3, 0.9]],
                           evidence=['A', 'R'], evidence_card=[2, 2])
        cpd_q = TabularCPD('Q', 2, values=[[0.9, 0.2], [0.1, 0.8]],
                           evidence=['J'], evidence_card=[2])
        cpd_l = TabularCPD('L', 2, values=[[0.9, 0.45, 0.8, 0.1],
                                           [0.1, 0.55, 0.2, 0.9]],
                           evidence=['J', 'G'], evidence_card=[2, 2])
        cpd_g = TabularCPD('G', 2, values=[[0.6], [0.4]])
        self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r)
        self.bayesian_inference = VariableElimination(self.bayesian_model)

    # All the values that are used for comparison in all the tests are
    # found using SAMIAM (assuming that it is correct ;))

    def test_query_single_variable(self):
        query_result = self.bayesian_inference.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))

    def test_query_multiple_variable(self):
        query_result = self.bayesian_inference.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))

    def test_query_single_variable_with_evidence(self):
        query_result = self.bayesian_inference.query(variables=['J'],
                                                     evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))

    def test_query_multiple_variable_with_evidence(self):
        query_result = self.bayesian_inference.query(variables=['J', 'Q'],
                                                     evidence={'A': 0, 'R': 0,
                                                               'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_query_multiple_times(self):
        # This just tests that the models are not getting modified while querying them
        query_result = self.bayesian_inference.query(['J'])
        query_result = self.bayesian_inference.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        query_result = self.bayesian_inference.query(['Q', 'J'])
        query_result = self.bayesian_inference.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))
        query_result = self.bayesian_inference.query(variables=['J'],
                                                     evidence={'A': 0, 'R': 1})
        query_result = self.bayesian_inference.query(variables=['J'],
                                                     evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))
        query_result = self.bayesian_inference.query(variables=['J', 'Q'],
                                                     evidence={'A': 0, 'R': 0,
                                                               'G': 0, 'L': 1})
        query_result = self.bayesian_inference.query(variables=['J', 'Q'],
                                                     evidence={'A': 0, 'R': 0,
                                                               'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_max_marginal(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(), 0.1659, decimal=4)

    def test_max_marginal_var(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G']), 0.5714, decimal=4)

    def test_max_marginal_var1(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G', 'R']),
                                    0.4055, decimal=4)

    def test_max_marginal_var2(self):
        np_test.assert_almost_equal(self.bayesian_inference.max_marginal(['G', 'R', 'A']),
                                    0.3260, decimal=4)

    def test_map_query(self):
        map_query = self.bayesian_inference.map_query()
        self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0,
                                         'L': 0})

    def test_map_query_with_evidence(self):
        map_query = self.bayesian_inference.map_query(['A', 'R', 'L'],
                                                      {'J': 0, 'Q': 1, 'G': 0})
        self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0})

    def test_induced_graph(self):
        # edges are sorted pairwise so the comparison is order-independent
        induced_graph = self.bayesian_inference.induced_graph(['G', 'Q', 'A', 'J', 'L', 'R'])
        result_edges = sorted([sorted(x) for x in induced_graph.edges()])
        self.assertEqual([['A', 'J'], ['A', 'R'], ['G', 'J'], ['G', 'L'],
                          ['J', 'L'], ['J', 'Q'], ['J', 'R'], ['L', 'R']],
                         result_edges)

    def test_induced_width(self):
        result_width = self.bayesian_inference.induced_width(['G', 'Q', 'A', 'J', 'L', 'R'])
        self.assertEqual(2, result_width)

    def tearDown(self):
        del self.bayesian_inference
        del self.bayesian_model
class TestVariableEliminationMarkov(unittest.TestCase):
    """Variable elimination on a MarkovModel.

    The model is the moralised version of the Bayesian network tested above,
    so every query is expected to produce the same numbers.
    """

    def setUp(self):
        # It is just a moralised version of the above Bayesian network so all the results are same. Only factors
        # are under consideration for inference so this should be fine.
        self.markov_model = MarkovModel([('A', 'J'), ('R', 'J'), ('J', 'Q'), ('J', 'L'),
                                         ('G', 'L'), ('A', 'R'), ('J', 'G')])
        factor_a = TabularCPD('A', 2, values=[[0.2], [0.8]]).to_factor()
        factor_r = TabularCPD('R', 2, values=[[0.4], [0.6]]).to_factor()
        factor_j = TabularCPD('J', 2, values=[[0.9, 0.6, 0.7, 0.1],
                                              [0.1, 0.4, 0.3, 0.9]],
                              evidence=['A', 'R'], evidence_card=[2, 2]).to_factor()
        factor_q = TabularCPD('Q', 2, values=[[0.9, 0.2], [0.1, 0.8]],
                              evidence=['J'], evidence_card=[2]).to_factor()
        factor_l = TabularCPD('L', 2, values=[[0.9, 0.45, 0.8, 0.1],
                                              [0.1, 0.55, 0.2, 0.9]],
                              evidence=['J', 'G'], evidence_card=[2, 2]).to_factor()
        factor_g = TabularCPD('G', 2, [[0.6], [0.4]]).to_factor()
        self.markov_model.add_factors(factor_a, factor_r, factor_j, factor_q, factor_l, factor_g)
        self.markov_inference = VariableElimination(self.markov_model)

    # All the values that are used for comparison in all the tests were
    # found using SAMIAM (assuming that it is correct ;))

    def test_query_single_variable(self):
        """Marginal P(J)."""
        query_result = self.markov_inference.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))

    def test_query_multiple_variable(self):
        """Joint query over J and Q returns both marginals."""
        query_result = self.markov_inference.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))

    def test_query_single_variable_with_evidence(self):
        """P(J | A=0, R=1)."""
        query_result = self.markov_inference.query(variables=['J'],
                                                   evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))

    def test_query_multiple_variable_with_evidence(self):
        """P(J, Q | A=0, R=0, G=0, L=1)."""
        query_result = self.markov_inference.query(variables=['J', 'Q'],
                                                   evidence={'A': 0, 'R': 0,
                                                             'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_query_multiple_times(self):
        # This just tests that the models are not getting modified while querying them
        query_result = self.markov_inference.query(['J'])
        query_result = self.markov_inference.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        query_result = self.markov_inference.query(['Q', 'J'])
        query_result = self.markov_inference.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))
        query_result = self.markov_inference.query(variables=['J'],
                                                   evidence={'A': 0, 'R': 1})
        query_result = self.markov_inference.query(variables=['J'],
                                                   evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))
        query_result = self.markov_inference.query(variables=['J', 'Q'],
                                                   evidence={'A': 0, 'R': 0,
                                                             'G': 0, 'L': 1})
        query_result = self.markov_inference.query(variables=['J', 'Q'],
                                                   evidence={'A': 0, 'R': 0,
                                                             'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_max_marginal(self):
        """Max-marginal over the full model."""
        np_test.assert_almost_equal(self.markov_inference.max_marginal(), 0.1659, decimal=4)

    def test_max_marginal_var(self):
        """Max-marginal restricted to one variable."""
        np_test.assert_almost_equal(self.markov_inference.max_marginal(['G']), 0.5714, decimal=4)

    def test_max_marginal_var1(self):
        """Max-marginal restricted to two variables."""
        np_test.assert_almost_equal(self.markov_inference.max_marginal(['G', 'R']),
                                    0.4055, decimal=4)

    def test_max_marginal_var2(self):
        """Max-marginal restricted to three variables."""
        np_test.assert_almost_equal(self.markov_inference.max_marginal(['G', 'R', 'A']),
                                    0.3260, decimal=4)

    def test_map_query(self):
        """MAP assignment over all variables."""
        map_query = self.markov_inference.map_query()
        self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0, 'L': 0})

    def test_map_query_with_evidence(self):
        """MAP assignment of a subset under fixed evidence."""
        map_query = self.markov_inference.map_query(['A', 'R', 'L'],
                                                    {'J': 0, 'Q': 1, 'G': 0})
        self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0})

    def test_induced_graph(self):
        """Induced graph for the given elimination order."""
        induced_graph = self.markov_inference.induced_graph(['G', 'Q', 'A', 'J', 'L', 'R'])
        result_edges = sorted([sorted(x) for x in induced_graph.edges()])
        self.assertEqual([['A', 'J'], ['A', 'R'], ['G', 'J'], ['G', 'L'],
                          ['J', 'L'], ['J', 'Q'], ['J', 'R'], ['L', 'R']],
                         result_edges)

    def test_induced_width(self):
        """Induced width for the given elimination order."""
        result_width = self.markov_inference.induced_width(['G', 'Q', 'A', 'J', 'L', 'R'])
        self.assertEqual(2, result_width)

    def tearDown(self):
        del self.markov_inference
        del self.markov_model
class TestBeliefPropagation(unittest.TestCase):
    """Belief propagation on a junction tree and on a Bayesian network."""

    def setUp(self):
        # A three-clique chain junction tree used by the calibration tests.
        self.junction_tree = JunctionTree([(('A', 'B'), ('B', 'C')),
                                           (('B', 'C'), ('C', 'D'))])
        phi1 = DiscreteFactor(['A', 'B'], [2, 3], range(6))
        phi2 = DiscreteFactor(['B', 'C'], [3, 2], range(6))
        phi3 = DiscreteFactor(['C', 'D'], [2, 2], range(4))
        self.junction_tree.add_factors(phi1, phi2, phi3)

        # The same Bayesian network as in the variable elimination tests,
        # used by the query/map_query tests below.
        self.bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
                                             ('J', 'L'), ('G', 'L')])
        cpd_a = TabularCPD('A', 2, values=[[0.2], [0.8]])
        cpd_r = TabularCPD('R', 2, values=[[0.4], [0.6]])
        cpd_j = TabularCPD('J', 2, values=[[0.9, 0.6, 0.7, 0.1],
                                           [0.1, 0.4, 0.3, 0.9]],
                           evidence=['A', 'R'], evidence_card=[2, 2])
        cpd_q = TabularCPD('Q', 2, values=[[0.9, 0.2], [0.1, 0.8]],
                           evidence=['J'], evidence_card=[2])
        cpd_l = TabularCPD('L', 2, values=[[0.9, 0.45, 0.8, 0.1],
                                           [0.1, 0.55, 0.2, 0.9]],
                           evidence=['J', 'G'], evidence_card=[2, 2])
        cpd_g = TabularCPD('G', 2, values=[[0.6], [0.4]])
        self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r)

    def test_calibrate_clique_belief(self):
        """Sum-calibration: clique beliefs match hand-computed products."""
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.calibrate()
        clique_belief = belief_propagation.get_clique_beliefs()

        phi1 = DiscreteFactor(['A', 'B'], [2, 3], range(6))
        phi2 = DiscreteFactor(['B', 'C'], [3, 2], range(6))
        phi3 = DiscreteFactor(['C', 'D'], [2, 2], range(4))

        b_A_B = phi1 * (phi3.marginalize(['D'], inplace=False) * phi2).marginalize(['C'], inplace=False)
        b_B_C = phi2 * (phi1.marginalize(['A'], inplace=False) * phi3.marginalize(['D'], inplace=False))
        b_C_D = phi3 * (phi1.marginalize(['A'], inplace=False) * phi2).marginalize(['B'], inplace=False)

        np_test.assert_array_almost_equal(clique_belief[('A', 'B')].values, b_A_B.values)
        np_test.assert_array_almost_equal(clique_belief[('B', 'C')].values, b_B_C.values)
        np_test.assert_array_almost_equal(clique_belief[('C', 'D')].values, b_C_D.values)

    def test_calibrate_sepset_belief(self):
        """Sum-calibration: sepset beliefs match hand-computed marginals."""
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.calibrate()
        sepset_belief = belief_propagation.get_sepset_beliefs()

        phi1 = DiscreteFactor(['A', 'B'], [2, 3], range(6))
        phi2 = DiscreteFactor(['B', 'C'], [3, 2], range(6))
        phi3 = DiscreteFactor(['C', 'D'], [2, 2], range(4))

        b_B = (phi1 * (phi3.marginalize(['D'], inplace=False) *
                       phi2).marginalize(['C'], inplace=False)).marginalize(['A'], inplace=False)
        b_C = (phi2 * (phi1.marginalize(['A'], inplace=False) *
                       phi3.marginalize(['D'], inplace=False))).marginalize(['B'], inplace=False)

        np_test.assert_array_almost_equal(sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values, b_B.values)
        np_test.assert_array_almost_equal(sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values, b_C.values)

    def test_max_calibrate_clique_belief(self):
        """Max-calibration: clique beliefs use maximize instead of marginalize."""
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.max_calibrate()
        clique_belief = belief_propagation.get_clique_beliefs()

        phi1 = DiscreteFactor(['A', 'B'], [2, 3], range(6))
        phi2 = DiscreteFactor(['B', 'C'], [3, 2], range(6))
        phi3 = DiscreteFactor(['C', 'D'], [2, 2], range(4))

        b_A_B = phi1 * (phi3.maximize(['D'], inplace=False) * phi2).maximize(['C'], inplace=False)
        b_B_C = phi2 * (phi1.maximize(['A'], inplace=False) * phi3.maximize(['D'], inplace=False))
        b_C_D = phi3 * (phi1.maximize(['A'], inplace=False) * phi2).maximize(['B'], inplace=False)

        np_test.assert_array_almost_equal(clique_belief[('A', 'B')].values, b_A_B.values)
        np_test.assert_array_almost_equal(clique_belief[('B', 'C')].values, b_B_C.values)
        np_test.assert_array_almost_equal(clique_belief[('C', 'D')].values, b_C_D.values)

    def test_max_calibrate_sepset_belief(self):
        """Max-calibration: sepset beliefs use maximize instead of marginalize."""
        belief_propagation = BeliefPropagation(self.junction_tree)
        belief_propagation.max_calibrate()
        sepset_belief = belief_propagation.get_sepset_beliefs()

        phi1 = DiscreteFactor(['A', 'B'], [2, 3], range(6))
        phi2 = DiscreteFactor(['B', 'C'], [3, 2], range(6))
        phi3 = DiscreteFactor(['C', 'D'], [2, 2], range(4))

        b_B = (phi1 * (phi3.maximize(['D'], inplace=False) *
                       phi2).maximize(['C'], inplace=False)).maximize(['A'], inplace=False)
        b_C = (phi2 * (phi1.maximize(['A'], inplace=False) *
                       phi3.maximize(['D'], inplace=False))).maximize(['B'], inplace=False)

        np_test.assert_array_almost_equal(sepset_belief[frozenset((('A', 'B'), ('B', 'C')))].values, b_B.values)
        np_test.assert_array_almost_equal(sepset_belief[frozenset((('B', 'C'), ('C', 'D')))].values, b_C.values)

    # All the values that are used for comparison in all the tests were
    # found using SAMIAM (assuming that it is correct ;))

    def test_query_single_variable(self):
        """Marginal P(J)."""
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(['J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))

    def test_query_multiple_variable(self):
        """Joint query over J and Q returns both marginals."""
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(['Q', 'J'])
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.416, 0.584]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.4912, 0.5088]))

    def test_query_single_variable_with_evidence(self):
        """P(J | A=0, R=1)."""
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(variables=['J'],
                                                evidence={'A': 0, 'R': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.60, 0.40]))

    def test_query_multiple_variable_with_evidence(self):
        """P(J, Q | A=0, R=0, G=0, L=1)."""
        belief_propagation = BeliefPropagation(self.bayesian_model)
        query_result = belief_propagation.query(variables=['J', 'Q'],
                                                evidence={'A': 0, 'R': 0,
                                                          'G': 0, 'L': 1})
        np_test.assert_array_almost_equal(query_result['J'].values,
                                          np.array([0.818182, 0.181818]))
        np_test.assert_array_almost_equal(query_result['Q'].values,
                                          np.array([0.772727, 0.227273]))

    def test_map_query(self):
        """MAP assignment over all variables."""
        belief_propagation = BeliefPropagation(self.bayesian_model)
        map_query = belief_propagation.map_query()
        self.assertDictEqual(map_query, {'A': 1, 'R': 1, 'J': 1, 'Q': 1, 'G': 0,
                                         'L': 0})

    def test_map_query_with_evidence(self):
        """MAP assignment of a subset under fixed evidence."""
        belief_propagation = BeliefPropagation(self.bayesian_model)
        map_query = belief_propagation.map_query(['A', 'R', 'L'],
                                                 {'J': 0, 'Q': 1, 'G': 0})
        self.assertDictEqual(map_query, {'A': 1, 'R': 0, 'L': 0})

    def tearDown(self):
        del self.junction_tree
        del self.bayesian_model
| |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Multi-engine Test Daemon in Python
Original concept by Jeff Winkler in:
http://jeffwinkler.net/nosy-run-python-unit-tests-automatically/
The present code is published under the terms of the MIT License. See LICENSE
file for more details.
"""
import sys
import os
import optparse
from time import sleep
import hashlib
import subprocess
import datetime
import re
import subprocess
SPECIAL_CHARS_REGEX_PATTERN = r'[#&;`|*?~<>^()\[\]{}$\\]+'
IGNORE_EXTENSIONS = ('pyc', 'pyo')
IGNORE_DIRS = ('.bzr', '.git', '.hg', '.darcs', '.svn', '.tox')
IMPLEMENTED_TEST_PROGRAMS = ('nose', 'nosetests', 'django', 'py', 'symfony',
'jelix', 'phpunit', 'sphinx', 'tox'
)
# -------- Exceptions
class InvalidTestProgram(Exception):
    """Signals that the requested test program is unknown or unimplemented."""
class InvalidFilePath(Exception):
    """Signals that the project/module path is unknown or missing."""
class CancelDueToUserRequest(Exception):
    """Signals that the user chose to abort execution."""
# -------- Utils
def ask(message='Are you sure? [y/N]'):
    """Prompts the user; returns True when the answer starts with 'y'."""
    reply = raw_input(message)
    return reply.lower().startswith('y')
def escapearg(args):
    """Strips shell-special characters from arguments (preventing shell
    injection)."""
    cleaner = re.compile(SPECIAL_CHARS_REGEX_PATTERN)
    return cleaner.sub('', args)
class Watcher(object):
"""
Watcher class. This is the daemon that is watching every file in the
directory and subdirectories, and that runs the test process.
"""
file_list = {}
debug = False
def __init__(self, file_path, test_program, debug=False, custom_args='',
ignore_dirs=None, quiet=False):
# Safe filter
custom_args = escapearg(custom_args)
self.file_path = file_path
self.ignore_dirs = list(IGNORE_DIRS)
if ignore_dirs:
self.ignore_dirs.extend([d for d in ignore_dirs.split(',')])
self.file_list = self.walk(file_path)
self.test_program = test_program
self.custom_args = custom_args
self.quiet = quiet
# check configuration
self.check_configuration(file_path, test_program, custom_args)
self.check_dependencies()
self.debug = debug
self.cmd = self.get_cmd()
def check_configuration(self, file_path, test_program, custom_args):
"""Checks if configuration is ok."""
# checking filepath
if not os.path.isdir(file_path):
raise InvalidFilePath("INVALID CONFIGURATION: file path %s is not a directory" %
os.path.abspath(file_path)
)
if not test_program in IMPLEMENTED_TEST_PROGRAMS:
raise InvalidTestProgram('The `%s` is unknown, or not yet implemented. Please chose another one.' % test_program)
if custom_args:
if not self.quiet and not ask("WARNING!!!\nYou are about to run the following command\n\n $ %s\n\nAre you sure you still want to proceed [y/N]? " % self.get_cmd()):
raise CancelDueToUserRequest('Test cancelled...')
def check_dependencies(self):
"Checks if the test program is available in the python environnement"
if self.test_program == 'nose':
try:
import nose
except ImportError:
sys.exit('Nosetests is not available on your system. Please install it and try to run it again')
if self.test_program == 'py':
try:
import py
except:
sys.exit('py.test is not available on your system. Please install it and try to run it again')
if self.test_program == 'django':
try:
import django
except:
sys.exit('django is not available on your system. Please install it and try to run it again')
if self.test_program == 'phpunit':
try:
process = subprocess.check_call(['phpunit','--version'])
except:
sys.exit('phpunit is not available on your system. Please install it and try to run it again')
if self.test_program == 'tox':
try:
import tox
except ImportError:
sys.exit('tox is not available on your system. Please install it and try to run it again')
def get_cmd(self):
"""Returns the full command to be executed at runtime"""
cmd = None
if self.test_program in ('nose', 'nosetests'):
cmd = "nosetests %s" % self.file_path
elif self.test_program == 'django':
executable = "%s/manage.py" % self.file_path
if os.path.exists(executable):
cmd = "python %s/manage.py test" % self.file_path
else:
cmd = "django-admin.py test"
elif self.test_program == 'py':
cmd = 'py.test %s' % self.file_path
elif self.test_program == 'symfony':
cmd = 'symfony test-all'
elif self.test_program == 'jelix':
# as seen on http://jelix.org/articles/fr/manuel-1.1/tests_unitaires
cmd = 'php tests.php'
elif self.test_program == 'phpunit':
cmd = 'phpunit'
elif self.test_program == 'sphinx':
cmd = 'make html'
elif self.test_program == 'tox':
cmd = 'tox'
if not cmd:
raise InvalidTestProgram("The test program %s is unknown. Valid options are: `nose`, `django` and `py`" % self.test_program)
# adding custom args
if self.custom_args:
cmd = '%s %s' % (cmd, self.custom_args)
return cmd
# Path manipulation
def include(self, path):
"""Returns `True` if the file is not ignored"""
for extension in IGNORE_EXTENSIONS:
if path.endswith(extension):
return False
parts = path.split(os.path.sep)
for part in parts:
if part in self.ignore_dirs:
return False
return True
def walk(self, top, file_list={}):
"""Walks the walk. nah, seriously: reads the file and stores a hashkey
corresponding to its content."""
for root, dirs, files in os.walk(top, topdown=False):
if os.path.basename(root) in self.ignore_dirs:
# Do not dig in ignored dirs
continue
for name in files:
full_path = os.path.join(root, name)
if self.include(full_path):
if os.path.isfile(full_path):
# preventing fail if the file vanishes
content = open(full_path).read()
hashcode = hashlib.sha224(content).hexdigest()
file_list[full_path] = hashcode
for name in dirs:
if name not in self.ignore_dirs:
self.walk(os.path.join(root, name), file_list)
return file_list
def file_sizes(self):
"""Returns total filesize (in MB)"""
size = sum(map(os.path.getsize, self.file_list))
return size / 1024 / 1024
def diff_list(self, list1, list2):
"""Extracts differences between lists. For debug purposes"""
for key in list1:
if key in list2 and list2[key] != list1[key]:
print key
elif key not in list2:
print key
def run(self, cmd):
"""Runs the appropriate command"""
print datetime.datetime.now()
output = subprocess.Popen(cmd, shell=True)
output = output.communicate()[0]
print output
def run_tests(self):
"""Execute tests"""
self.run(self.cmd)
def loop(self):
"""Main loop daemon."""
while True:
sleep(1)
new_file_list = self.walk(self.file_path, {})
if new_file_list != self.file_list:
if self.debug:
self.diff_list(new_file_list, self.file_list)
self.run_tests()
self.file_list = new_file_list
def main(prog_args=None):
"""
What do you expect?
"""
if prog_args is None:
prog_args = sys.argv
parser = optparse.OptionParser()
parser.usage = """Usage: %[prog] [options] [<path>]"""
parser.add_option("-t", "--test-program", dest="test_program",
default="nose", help="specifies the test-program to use. Valid values"
" include `nose` (or `nosetests`), `django`, `py` (for `py.test`), "
'`symfony`, `jelix` `phpunit` and `tox`')
parser.add_option("-d", "--debug", dest="debug", action="store_true",
default=False)
parser.add_option('-s', '--size-max', dest='size_max', default=25,
type="int", help="Sets the maximum size (in MB) of files.")
parser.add_option('--custom-args', dest='custom_args', default='',
type="str",
help="Defines custom arguments to pass after the test program command")
parser.add_option('--ignore-dirs', dest='ignore_dirs', default='',
type="str",
help="Defines directories to ignore. Use a comma-separated list.")
parser.add_option('-y', '--quiet', dest='quiet', action="store_true",
default=False,
help="Don't ask for any input.")
opt, args = parser.parse_args(prog_args)
if args[1:]:
path = args[1]
else:
path = '.'
try:
watcher = Watcher(path, opt.test_program, opt.debug, opt.custom_args,
opt.ignore_dirs, opt.quiet)
watcher_file_size = watcher.file_sizes()
if watcher_file_size > opt.size_max:
message = "It looks like the total file size (%dMb) is larger than the `max size` option (%dMb).\nThis may slow down the file comparison process, and thus the daemon performances.\nDo you wish to continue? [y/N] " % (watcher_file_size, opt.size_max)
if not opt.quiet and not ask(message):
raise CancelDueToUserRequest('Ok, thx, bye...')
print "Ready to watch file changes..."
watcher.loop()
except (KeyboardInterrupt, SystemExit):
# Ignore when you exit via Crtl-C
pass
except Exception, msg:
print msg
print "Bye"
# Script entry point: run the daemon with CLI options (see `main`).
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating instances locally.
You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
import random
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova.objects import instance_group as instance_group_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.pci import pci_request
from nova import rpc
from nova.scheduler import driver
from nova.scheduler import scheduler_options
from nova.scheduler import utils as scheduler_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
filter_scheduler_opts = [
cfg.IntOpt('scheduler_host_subset_size',
default=1,
help='New instances will be scheduled on a host chosen '
'randomly from a subset of the N best hosts. This '
'property defines the subset size that a host is '
'chosen from. A value of 1 chooses the '
'first host returned by the weighing functions. '
'This value must be at least 1. Any value less than 1 '
'will be ignored, and 1 will be used instead')
]
CONF.register_opts(filter_scheduler_opts)
class FilterScheduler(driver.Scheduler):
    """Scheduler that can be used for filtering and weighing."""
    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        self.options = scheduler_options.SchedulerOptions()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.notifier = rpc.get_notifier('scheduler')

    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, legacy_bdm_in_spec):
        """Provisions instances that need to be scheduled.

        Applies filters and weighers on request properties to get a list of
        compute hosts and calls them to spawn instance(s).
        """
        payload = dict(request_spec=request_spec)
        self.notifier.info(context, 'scheduler.run_instance.start', payload)

        instance_uuids = request_spec.get('instance_uuids')
        LOG.info(_("Attempting to build %(num_instances)d instance(s) "
                   "uuids: %(instance_uuids)s"),
                 {'num_instances': len(instance_uuids),
                  'instance_uuids': instance_uuids})
        # Lazy %-args so the spec is only formatted when debug is enabled.
        LOG.debug("Request Spec: %s", request_spec)

        weighed_hosts = self._schedule(context, request_spec,
                                       filter_properties, instance_uuids)

        # NOTE: Pop instance_uuids as individual creates do not need the
        # set of uuids. Do not pop before here as the upper exception
        # handler for NoValidHost needs the uuid to set error state
        instance_uuids = request_spec.pop('instance_uuids')

        # NOTE(comstud): Make sure we do not pass this through. It
        # contains an instance of RpcContext that cannot be serialized.
        filter_properties.pop('context', None)

        for num, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = num

            try:
                try:
                    weighed_host = weighed_hosts.pop(0)
                    LOG.info(_("Choosing host %(weighed_host)s "
                               "for instance %(instance_uuid)s"),
                             {'weighed_host': weighed_host,
                              'instance_uuid': instance_uuid})
                except IndexError:
                    raise exception.NoValidHost(reason="")

                self._provision_resource(context, weighed_host,
                                         request_spec,
                                         filter_properties,
                                         requested_networks,
                                         injected_files, admin_password,
                                         is_first_time,
                                         instance_uuid=instance_uuid,
                                         legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here to make sure
                #             that all instances in the request get set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)
            # scrub retry host list in case we're scheduling multiple
            # instances:
            retry = filter_properties.get('retry', {})
            retry['hosts'] = []

        self.notifier.info(context, 'scheduler.run_instance.end', payload)

    def select_destinations(self, context, request_spec, filter_properties):
        """Selects a filtered set of hosts and nodes.

        :returns: list of dicts with host, nodename and limits keys.
        :raises: NoValidHost if fewer hosts than requested instances remain.
        """
        num_instances = request_spec['num_instances']
        instance_uuids = request_spec.get('instance_uuids')
        selected_hosts = self._schedule(context, request_spec,
                                        filter_properties, instance_uuids)

        # Couldn't fulfill the request_spec
        if len(selected_hosts) < num_instances:
            raise exception.NoValidHost(reason='')

        dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
                      limits=host.obj.limits) for host in selected_hosts]
        return dests

    def _provision_resource(self, context, weighed_host, request_spec,
                            filter_properties, requested_networks,
                            injected_files, admin_password, is_first_time,
                            instance_uuid=None, legacy_bdm_in_spec=True):
        """Create the requested resource in this Zone."""
        # NOTE(vish): add our current instance back into the request spec
        request_spec['instance_uuids'] = [instance_uuid]
        payload = dict(request_spec=request_spec,
                       weighted_host=weighed_host.to_dict(),
                       instance_id=instance_uuid)
        self.notifier.info(context,
                           'scheduler.run_instance.scheduled', payload)

        # (Removed an unused `scheduler_hints` local that was assigned here
        # but never read; group handling lives in _setup_instance_group.)
        try:
            updated_instance = driver.instance_update_db(context,
                                                         instance_uuid)
        except exception.InstanceNotFound:
            LOG.warning(_("Instance disappeared during scheduling"),
                        context=context, instance_uuid=instance_uuid)
        else:
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       weighed_host.obj)

            self.compute_rpcapi.run_instance(context,
                    instance=updated_instance,
                    host=weighed_host.obj.host,
                    request_spec=request_spec,
                    filter_properties=filter_properties,
                    requested_networks=requested_networks,
                    injected_files=injected_files,
                    admin_password=admin_password, is_first_time=is_first_time,
                    node=weighed_host.obj.nodename,
                    legacy_bdm_in_spec=legacy_bdm_in_spec)

    def _get_configuration_options(self):
        """Fetch options dictionary. Broken out for testing."""
        return self.options.get_configuration()

    def populate_filter_properties(self, request_spec, filter_properties):
        """Stuff things into filter_properties. Can be overridden in a
        subclass to add more data.
        """
        # Save useful information from the request spec for filter processing:
        project_id = request_spec['instance_properties']['project_id']
        os_type = request_spec['instance_properties']['os_type']
        filter_properties['project_id'] = project_id
        filter_properties['os_type'] = os_type
        pci_requests = pci_request.get_pci_requests_from_flavor(
            request_spec.get('instance_type') or {})
        if pci_requests:
            filter_properties['pci_requests'] = pci_requests

    def _max_attempts(self):
        """Return the configured scheduler_max_attempts (must be >= 1)."""
        max_attempts = CONF.scheduler_max_attempts
        if max_attempts < 1:
            raise exception.NovaException(_("Invalid value for "
                "'scheduler_max_attempts', must be >= 1"))
        return max_attempts

    def _log_compute_error(self, instance_uuid, retry):
        """If the request contained an exception from a previous compute
        build/resize operation, log it to aid debugging
        """
        exc = retry.pop('exc', None)  # string-ified exception from compute
        if not exc:
            return  # no exception info from a previous attempt, skip

        hosts = retry.get('hosts', None)
        if not hosts:
            return  # no previously attempted hosts, skip

        last_host, last_node = hosts[-1]
        LOG.error(_('Error from last host: %(last_host)s (node %(last_node)s):'
                    ' %(exc)s'),
                  {'last_host': last_host,
                   'last_node': last_node,
                   'exc': exc},
                  instance_uuid=instance_uuid)

    def _populate_retry(self, filter_properties, instance_properties):
        """Populate filter properties with history of retries for this
        request. If maximum retries is exceeded, raise NoValidHost.
        """
        max_attempts = self._max_attempts()
        force_hosts = filter_properties.get('force_hosts', [])
        force_nodes = filter_properties.get('force_nodes', [])

        if max_attempts == 1 or force_hosts or force_nodes:
            # re-scheduling is disabled.
            return

        retry = filter_properties.pop('retry', {})
        # retry is enabled, update attempt count:
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {
                'num_attempts': 1,
                'hosts': []  # list of compute hosts tried
            }
        filter_properties['retry'] = retry

        instance_uuid = instance_properties.get('uuid')
        self._log_compute_error(instance_uuid, retry)

        if retry['num_attempts'] > max_attempts:
            msg = (_('Exceeded max scheduling attempts %(max_attempts)d for '
                     'instance %(instance_uuid)s')
                   % {'max_attempts': max_attempts,
                      'instance_uuid': instance_uuid})
            raise exception.NoValidHost(reason=msg)

    @staticmethod
    def _setup_instance_group(context, filter_properties):
        """Merge group host info into filter_properties when a 'group'
        scheduler hint names a group with an (anti-)affinity policy.

        :returns: True when group_hosts should be updated per chosen host.
        """
        update_group_hosts = False
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group_hint = scheduler_hints.get('group', None)
        if group_hint:
            group = instance_group_obj.InstanceGroup.get_by_hint(context,
                                                                 group_hint)
            policies = set(('anti-affinity', 'affinity'))
            if any((policy in policies) for policy in group.policies):
                update_group_hosts = True
                filter_properties.setdefault('group_hosts', set())
                user_hosts = set(filter_properties['group_hosts'])
                group_hosts = set(group.get_hosts(context))
                filter_properties['group_hosts'] = user_hosts | group_hosts
                filter_properties['group_policies'] = group.policies
        return update_group_hosts

    def _schedule(self, context, request_spec, filter_properties,
                  instance_uuids=None):
        """Returns a list of hosts that meet the required specs,
        ordered by their fitness.
        """
        elevated = context.elevated()
        instance_properties = request_spec['instance_properties']
        instance_type = request_spec.get("instance_type", None)

        update_group_hosts = self._setup_instance_group(context,
                                                        filter_properties)

        config_options = self._get_configuration_options()

        # check retry policy. Rather ugly use of instance_uuids[0]...
        # but if we've exceeded max retries... then we really only
        # have a single instance.
        properties = instance_properties.copy()
        if instance_uuids:
            properties['uuid'] = instance_uuids[0]
        self._populate_retry(filter_properties, properties)

        filter_properties.update({'context': context,
                                  'request_spec': request_spec,
                                  'config_options': config_options,
                                  'instance_type': instance_type})

        self.populate_filter_properties(request_spec,
                                        filter_properties)

        # Find our local list of acceptable hosts by repeatedly
        # filtering and weighing our options. Each time we choose a
        # host, we virtually consume resources on it so subsequent
        # selections can adjust accordingly.

        # Note: remember, we are using an iterator here. So only
        # traverse this list once. This can bite you if the hosts
        # are being scanned in a filter or weighing function.
        hosts = self._get_all_host_states(elevated)

        selected_hosts = []
        if instance_uuids:
            num_instances = len(instance_uuids)
        else:
            num_instances = request_spec.get('num_instances', 1)
        for num in xrange(num_instances):
            # Filter local hosts based on requirements ...
            hosts = self.host_manager.get_filtered_hosts(hosts,
                    filter_properties, index=num)
            if not hosts:
                # Can't get any more locally.
                break

            LOG.debug("Filtered %(hosts)s", {'hosts': hosts})

            weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                    filter_properties)

            LOG.debug("Weighed %(hosts)s", {'hosts': weighed_hosts})

            # Clamp the subset size into [1, len(weighed_hosts)] before
            # picking a host at random from the best N.
            scheduler_host_subset_size = CONF.scheduler_host_subset_size
            if scheduler_host_subset_size > len(weighed_hosts):
                scheduler_host_subset_size = len(weighed_hosts)
            if scheduler_host_subset_size < 1:
                scheduler_host_subset_size = 1

            chosen_host = random.choice(
                weighed_hosts[0:scheduler_host_subset_size])
            selected_hosts.append(chosen_host)

            # Now consume the resources so the filter/weights
            # will change for the next instance.
            chosen_host.obj.consume_from_instance(instance_properties)
            if update_group_hosts is True:
                filter_properties['group_hosts'].add(chosen_host.obj.host)
        return selected_hosts

    def _get_all_host_states(self, context):
        """Template method, so a subclass can implement caching."""
        return self.host_manager.get_all_host_states(context)
| |
#!/usr/bin/env python
# Copyright 2012 NEC Corporation.
# Based on ryu/openvswitch agents.
#
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
# @author: Akihiro MOTOKI
import socket
import time
import eventlet
eventlet.monkey_patch()
from neutron.agent.linux import ovs_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as logging_config
from neutron.common import constants as q_const
from neutron.common import rpc_compat
from neutron.common import topics
from neutron import context as q_context
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.rpc import dispatcher
from neutron.plugins.nec.common import config
LOG = logging.getLogger(__name__)
class NECPluginApi(agent_rpc.PluginApi):
    """Agent-side RPC proxy towards the NEC plugin on the Neutron server."""

    BASE_RPC_API_VERSION = '1.0'

    def update_ports(self, context, agent_id, datapath_id,
                     port_added, port_removed):
        """RPC to update information of ports on Neutron Server."""
        LOG.info(_("Update ports: added=%(added)s, "
                   "removed=%(removed)s"),
                 {'added': port_added, 'removed': port_removed})
        msg = self.make_msg('update_ports',
                            topic=topics.AGENT,
                            agent_id=agent_id,
                            datapath_id=datapath_id,
                            port_added=port_added,
                            port_removed=port_removed)
        self.call(context, msg)
class NECAgentRpcCallback(object):
    """Handles plugin-originated RPC notifications for the NEC agent."""

    RPC_API_VERSION = '1.0'

    def __init__(self, context, agent, sg_agent):
        self.context = context
        self.agent = agent
        self.sg_agent = sg_agent

    def port_update(self, context, **kwargs):
        """Refresh firewall rules when an OVS-resident port is updated."""
        LOG.debug(_("port_update received: %s"), kwargs)
        port = kwargs.get('port')
        # Only act on ports actually plugged into our integration bridge,
        # and only when the update carries security-group information.
        known_port = self.agent.int_br.get_vif_port_by_id(port['id'])
        if known_port and ext_sg.SECURITYGROUPS in port:
            self.sg_agent.refresh_firewall()
class SecurityGroupServerRpcApi(rpc_compat.RpcProxy,
                                sg_rpc.SecurityGroupServerRpcApiMixin):
    """Agent-side RPC proxy for security-group calls to the Neutron server.

    All RPC methods are inherited from SecurityGroupServerRpcApiMixin.
    """
    def __init__(self, topic):
        """Create the proxy on *topic*, pinned to the SG RPC API version."""
        super(SecurityGroupServerRpcApi, self).__init__(
            topic=topic, default_version=sg_rpc.SG_RPC_VERSION)
class SecurityGroupAgentRpcCallback(
    sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """Receives security-group update notifications from the server.

    The handler methods come from SecurityGroupAgentRpcCallbackMixin and
    dispatch to ``self.sg_agent``.
    """
    RPC_API_VERSION = sg_rpc.SG_RPC_VERSION
    def __init__(self, context, sg_agent):
        # sg_agent: SecurityGroupAgentRpc instance that applies the changes.
        self.context = context
        self.sg_agent = sg_agent
class SecurityGroupAgentRpc(sg_rpc.SecurityGroupAgentRpcMixin):
    """Applies security-group filtering on the agent side.

    Behavior comes from SecurityGroupAgentRpcMixin; the mixin presumably
    relies on ``self.context`` and ``self.plugin_rpc`` being set before
    ``init_firewall()`` runs — TODO confirm against the mixin's source.
    """
    def __init__(self, context):
        self.context = context
        self.plugin_rpc = SecurityGroupServerRpcApi(topics.PLUGIN)
        # Must come after plugin_rpc is assigned (see class docstring).
        self.init_firewall()
class NECNeutronAgent(object):
    """L2 agent for the NEC plugin.

    Polls the OVS integration bridge for port additions/removals, reports
    them to the Neutron server over RPC, and keeps security-group filters
    in sync.
    """
    def __init__(self, integ_br, root_helper, polling_interval):
        '''Constructor.
        :param integ_br: name of the integration bridge.
        :param root_helper: utility to use when running shell cmds.
        :param polling_interval: interval (secs) to check the bridge.
        '''
        self.int_br = ovs_lib.OVSBridge(integ_br, root_helper)
        self.polling_interval = polling_interval
        # Port ids seen during the last successful loop iteration.
        self.cur_ports = []
        # When True, the next loop reports all ports, not just the delta.
        self.need_sync = True
        self.datapath_id = "0x%s" % self.int_br.get_datapath_id()
        # State blob periodically pushed to the server via report_state.
        self.agent_state = {
            'binary': 'neutron-nec-agent',
            'host': config.CONF.host,
            'topic': q_const.L2_AGENT_TOPIC,
            'configurations': {},
            'agent_type': q_const.AGENT_TYPE_NEC,
            'start_flag': True}
        self.setup_rpc()
    def setup_rpc(self):
        """Wire up all RPC plumbing: proxies, callbacks, consumers, heartbeat."""
        self.host = socket.gethostname()
        self.agent_id = 'nec-q-agent.%s' % self.host
        LOG.info(_("RPC agent_id: %s"), self.agent_id)
        self.topic = topics.AGENT
        self.context = q_context.get_admin_context_without_session()
        self.plugin_rpc = NECPluginApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        self.sg_agent = SecurityGroupAgentRpc(self.context)
        # RPC network init
        # Handle updates from service
        self.callback_nec = NECAgentRpcCallback(self.context,
                                                self, self.sg_agent)
        self.callback_sg = SecurityGroupAgentRpcCallback(self.context,
                                                         self.sg_agent)
        self.dispatcher = dispatcher.RpcDispatcher([self.callback_nec,
                                                    self.callback_sg])
        # Define the listening consumer for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.dispatcher,
                                                     self.topic,
                                                     consumers)
        # A report_interval of 0/None disables the state heartbeat.
        report_interval = config.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
    def _report_state(self):
        """Push agent_state to the server; never raises (heartbeat-safe)."""
        try:
            # How many devices are likely used by a VM
            num_devices = len(self.cur_ports)
            self.agent_state['configurations']['devices'] = num_devices
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # start_flag is only meaningful on the first report.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_("Failed reporting state!"))
    def _vif_port_to_port_info(self, vif_port):
        """Convert an OVS VifPort into the dict shape the server expects."""
        return dict(id=vif_port.vif_id, port_no=vif_port.ofport,
                    mac=vif_port.vif_mac)
    def _process_security_group(self, port_added, port_removed):
        """Apply/remove security-group filters for changed ports."""
        if port_added:
            devices_added = [p['id'] for p in port_added]
            self.sg_agent.prepare_devices_filter(devices_added)
        if port_removed:
            self.sg_agent.remove_devices_filter(port_removed)
    def loop_handler(self):
        """One polling iteration: diff bridge ports and notify the server.

        On any failure, need_sync is set so the next iteration re-reports
        every port instead of just the delta.
        """
        try:
            # self.cur_ports will be kept until loop_handler succeeds.
            cur_ports = [] if self.need_sync else self.cur_ports
            new_ports = []
            port_added = []
            for vif_port in self.int_br.get_vif_ports():
                port_id = vif_port.vif_id
                new_ports.append(port_id)
                if port_id not in cur_ports:
                    port_info = self._vif_port_to_port_info(vif_port)
                    port_added.append(port_info)
            port_removed = []
            for port_id in cur_ports:
                if port_id not in new_ports:
                    port_removed.append(port_id)
            if port_added or port_removed:
                self.plugin_rpc.update_ports(self.context,
                                             self.agent_id, self.datapath_id,
                                             port_added, port_removed)
                self._process_security_group(port_added, port_removed)
            else:
                LOG.debug(_("No port changed."))
            self.cur_ports = new_ports
            self.need_sync = False
        except Exception:
            LOG.exception(_("Error in agent event loop"))
            self.need_sync = True
    def daemon_loop(self):
        """Main processing loop for NEC Plugin Agent."""
        while True:
            self.loop_handler()
            time.sleep(self.polling_interval)
def main():
    """Entry point: load config, set up logging, run the agent forever."""
    config.CONF(project='neutron')
    logging_config.setup_logging(config.CONF)
    # Build the agent straight from the parsed configuration.
    agent = NECNeutronAgent(config.OVS.integration_bridge,
                            config.AGENT.root_helper,
                            config.AGENT.polling_interval)
    agent.daemon_loop()


if __name__ == "__main__":
    main()
| |
import json
import mock
from nose.tools import eq_, ok_, raises
from rest_framework.test import APIClient
from rest_framework.exceptions import APIException
from kitsune.sumo.tests import TestCase
from kitsune.questions import api
from kitsune.questions.models import Question, Answer
from kitsune.questions.tests import question, answer, questionvote, answervote
from kitsune.users.tests import profile, user
from kitsune.products.tests import product, topic
from kitsune.sumo.urlresolvers import reverse
class TestQuestionSerializerDeserialization(TestCase):
    """Write-side (validation/deserialization) behavior of QuestionSerializer."""
    def setUp(self):
        # Fixture objects needed by a valid question payload.
        self.profile = profile()
        self.user = self.profile.user
        self.product = product(save=True)
        self.topic = topic(product=self.product, save=True)
        self.request = mock.Mock()
        self.request.user = self.user
        self.context = {
            'request': self.request,
        }
        # A complete, valid payload; tests delete keys to probe validation.
        self.data = {
            'creator': self.user,
            'title': 'How do I test programs?',
            'content': "Help, I don't know what to do.",
            'product': self.product.slug,
            'topic': self.topic.slug,
        }
    def test_it_works(self):
        """The full payload validates cleanly."""
        serializer = api.QuestionSerializer(
            context=self.context, data=self.data)
        eq_(serializer.errors, {})
        ok_(serializer.is_valid())
    def test_automatic_creator(self):
        """Omitting creator defaults it to the requesting user."""
        del self.data['creator']
        serializer = api.QuestionSerializer(
            context=self.context, data=self.data)
        eq_(serializer.errors, {})
        ok_(serializer.is_valid())
        eq_(serializer.object.creator, self.user)
    def test_product_required(self):
        """A missing product also invalidates the topic selection."""
        del self.data['product']
        serializer = api.QuestionSerializer(
            context=self.context, data=self.data)
        eq_(serializer.errors, {
            'product': [u'This field is required.'],
            'topic': [u'A product must be specified to select a topic.'],
        })
        ok_(not serializer.is_valid())
    def test_topic_required(self):
        del self.data['topic']
        serializer = api.QuestionSerializer(
            context=self.context, data=self.data)
        eq_(serializer.errors, {
            'topic': [u'This field is required.'],
        })
        ok_(not serializer.is_valid())
    def test_topic_disambiguation(self):
        # First make another product, and a colliding topic.
        # It has the same slug, but a different product.
        new_product = product(save=True)
        topic(product=new_product, slug=self.topic.slug, save=True)
        serializer = api.QuestionSerializer(
            context=self.context, data=self.data)
        eq_(serializer.errors, {})
        ok_(serializer.is_valid())
        # The topic belonging to the payload's product must win.
        eq_(serializer.object.topic, self.topic)
    def test_solution_is_readonly(self):
        """A client-supplied solution must be ignored on update."""
        q = question(save=True)
        a = answer(question=q, save=True)
        self.data['solution'] = a.id
        serializer = api.QuestionSerializer(context=self.context, data=self.data, instance=q)
        serializer.save()
        eq_(q.solution, None)
class TestQuestionSerializerSerialization(TestCase):
    """Read-side (output) behavior of QuestionSerializer."""

    def setUp(self):
        self.asker = user(save=True)
        self.helper1 = user(save=True)
        self.helper2 = user(save=True)
        self.question = question(creator=self.asker, save=True)

    def _names(self, *users):
        # Sorted usernames, for comparing against the 'involved' field.
        return sorted([person.username for person in users])

    def _answer(self, who):
        # Attach an answer by `who` to the test question.
        return answer(question=self.question, creator=who, save=True)

    def test_no_votes(self):
        data = api.QuestionSerializer(instance=self.question).data
        eq_(data['num_votes'], 0)

    def test_with_votes(self):
        # Two votes on this question plus one unrelated vote.
        questionvote(question=self.question, save=True)
        questionvote(question=self.question, save=True)
        questionvote(save=True)
        data = api.QuestionSerializer(instance=self.question).data
        eq_(data['num_votes'], 2)

    def test_just_asker(self):
        data = api.QuestionSerializer(instance=self.question).data
        eq_(data['involved'], self._names(self.asker))

    def test_one_answer(self):
        self._answer(self.helper1)
        data = api.QuestionSerializer(instance=self.question).data
        eq_(sorted(data['involved']),
            self._names(self.asker, self.helper1))

    def test_asker_and_response(self):
        # An asker answering their own question is not listed twice.
        self._answer(self.helper1)
        self._answer(self.asker)
        data = api.QuestionSerializer(instance=self.question).data
        eq_(sorted(data['involved']),
            self._names(self.asker, self.helper1))

    def test_asker_and_two_answers(self):
        self._answer(self.helper1)
        self._answer(self.asker)
        self._answer(self.helper2)
        data = api.QuestionSerializer(instance=self.question).data
        eq_(sorted(data['involved']),
            self._names(self.asker, self.helper1, self.helper2))

    def test_solution_is_id(self):
        solution = self._answer(self.helper1)
        self.question.solution = solution
        self.question.save()
        data = api.QuestionSerializer(instance=self.question).data
        eq_(data['solution'], solution.id)
class TestQuestionViewSet(TestCase):
    """End-to-end API tests for the question endpoints."""
    def setUp(self):
        self.client = APIClient()
    def test_create(self):
        """POSTing a valid payload creates a question owned by the poster."""
        u = user(save=True)
        p = product(save=True)
        t = topic(product=p, save=True)
        self.client.force_authenticate(user=u)
        data = {
            'title': 'How do I start Firefox?',
            'content': 'Seriously, what do I do?',
            'product': p.slug,
            'topic': t.slug,
        }
        eq_(Question.objects.count(), 0)
        res = self.client.post(reverse('question-list'), data)
        eq_(res.status_code, 201)
        eq_(Question.objects.count(), 1)
        q = Question.objects.all()[0]
        eq_(q.title, data['title'])
        eq_(q.content, data['content'])
    def test_delete_permissions(self):
        """Only the question's creator may delete it."""
        u1 = user(save=True)
        u2 = user(save=True)
        q = question(creator=u1, save=True)
        # Anonymous user can't delete
        self.client.force_authenticate(user=None)
        res = self.client.delete(reverse('question-detail', args=[q.id]))
        eq_(res.status_code, 401)  # Unauthorized
        # Non-owner can't delete
        self.client.force_authenticate(user=u2)
        res = self.client.delete(reverse('question-detail', args=[q.id]))
        eq_(res.status_code, 403)  # Forbidden
        # Owner can delete
        self.client.force_authenticate(user=u1)
        res = self.client.delete(reverse('question-detail', args=[q.id]))
        eq_(res.status_code, 204)  # No content
    def test_solve(self):
        """The solve action marks the given answer as the solution."""
        q = question(save=True)
        a = answer(question=q, save=True)
        self.client.force_authenticate(user=q.creator)
        res = self.client.post(reverse('question-solve', args=[q.id]),
                               data={'answer': a.id})
        eq_(res.status_code, 204)
        # Re-fetch to see the persisted change.
        q = Question.objects.get(id=q.id)
        eq_(q.solution, a)
    def test_ordering(self):
        """Default order is newest first; ?ordering=id/-id override it."""
        q1 = question(save=True)
        q2 = question(save=True)
        res = self.client.get(reverse('question-list'))
        eq_(res.data['results'][0]['id'], q2.id)
        eq_(res.data['results'][1]['id'], q1.id)
        res = self.client.get(reverse('question-list') + '?ordering=id')
        eq_(res.data['results'][0]['id'], q1.id)
        eq_(res.data['results'][1]['id'], q2.id)
        res = self.client.get(reverse('question-list') + '?ordering=-id')
        eq_(res.data['results'][0]['id'], q2.id)
        eq_(res.data['results'][1]['id'], q1.id)
    def test_filter_product_with_slug(self):
        """?product=<slug> returns only questions for that product."""
        p1 = product(save=True)
        p2 = product(save=True)
        q1 = question(product=p1, save=True)
        question(product=p2, save=True)
        querystring = '?product={0}'.format(p1.slug)
        res = self.client.get(reverse('question-list') + querystring)
        eq_(len(res.data['results']), 1)
        eq_(res.data['results'][0]['id'], q1.id)
    def test_filter_creator_with_username(self):
        """?creator=<username> returns only that user's questions."""
        q1 = question(save=True)
        question(save=True)
        querystring = '?creator={0}'.format(q1.creator.username)
        res = self.client.get(reverse('question-list') + querystring)
        eq_(res.status_code, 200)
        eq_(len(res.data['results']), 1)
        eq_(res.data['results'][0]['id'], q1.id)
    def test_filter_involved(self):
        """?involved matches both askers and answerers."""
        q1 = question(save=True)
        a1 = answer(question=q1, save=True)
        q2 = question(creator=a1.creator, save=True)
        querystring = '?involved={0}'.format(q1.creator.username)
        res = self.client.get(reverse('question-list') + querystring)
        eq_(res.status_code, 200)
        eq_(len(res.data['results']), 1)
        eq_(res.data['results'][0]['id'], q1.id)
        # a1's creator asked q2 and answered q1, so both match.
        querystring = '?involved={0}'.format(q2.creator.username)
        res = self.client.get(reverse('question-list') + querystring)
        eq_(res.status_code, 200)
        eq_(len(res.data['results']), 2)
        # The API has a default sort, so ordering will be consistent.
        eq_(res.data['results'][0]['id'], q2.id)
        eq_(res.data['results'][1]['id'], q1.id)
class TestAnswerSerializerDeserialization(TestCase):
    """Vote counting in AnswerSerializer output."""

    def test_no_votes(self):
        ans = answer(save=True)
        data = api.AnswerSerializer(instance=ans).data
        eq_(data['num_helpful_votes'], 0)
        eq_(data['num_unhelpful_votes'], 0)

    def test_with_votes(self):
        ans = answer(save=True)
        # Two helpful and one unhelpful vote on this answer...
        for helpful in (True, True, False):
            answervote(answer=ans, helpful=helpful, save=True)
        # ...plus an unrelated vote that must not be counted.
        answervote(save=True)
        data = api.AnswerSerializer(instance=ans).data
        eq_(data['num_helpful_votes'], 2)
        eq_(data['num_unhelpful_votes'], 1)
class TestAnswerViewSet(TestCase):
    """End-to-end API tests for the answer endpoints."""

    def setUp(self):
        self.client = APIClient()

    def test_create(self):
        q = question(save=True)
        poster = user(save=True)
        self.client.force_authenticate(user=poster)
        payload = {
            'question': q.id,
            'content': 'You just need to click the fox.',
        }
        eq_(Answer.objects.count(), 0)
        res = self.client.post(reverse('answer-list'), payload)
        eq_(res.status_code, 201)
        eq_(Answer.objects.count(), 1)
        created = Answer.objects.all()[0]
        eq_(created.content, payload['content'])
        eq_(created.question, q)

    def test_delete_permissions(self):
        owner = user(save=True)
        other = user(save=True)
        ans = answer(creator=owner, save=True)
        url = reverse('answer-detail', args=[ans.id])
        # Anonymous user can't delete.
        self.client.force_authenticate(user=None)
        res = self.client.delete(url)
        eq_(res.status_code, 401)  # Unauthorized
        # Non-owner can't delete.
        self.client.force_authenticate(user=other)
        res = self.client.delete(url)
        eq_(res.status_code, 403)  # Forbidden
        # Owner can delete.
        self.client.force_authenticate(user=owner)
        res = self.client.delete(url)
        eq_(res.status_code, 204)  # No content

    def test_ordering(self):
        first = answer(save=True)
        second = answer(save=True)
        # Default ordering is newest first.
        res = self.client.get(reverse('answer-list'))
        eq_(res.data['results'][0]['id'], second.id)
        eq_(res.data['results'][1]['id'], first.id)
        res = self.client.get(reverse('answer-list') + '?ordering=id')
        eq_(res.data['results'][0]['id'], first.id)
        eq_(res.data['results'][1]['id'], second.id)
        res = self.client.get(reverse('answer-list') + '?ordering=-id')
        eq_(res.data['results'][0]['id'], second.id)
        eq_(res.data['results'][1]['id'], first.id)
class TestQuestionFilter(TestCase):
    """Unit tests for QuestionFilter's custom filter methods."""
    def setUp(self):
        self.filter_instance = api.QuestionFilter()
        self.queryset = Question.objects.all()
    def filter(self, filter_data):
        # Helper: run the metadata filter on a JSON-encoded dict.
        return self.filter_instance.filter_metadata(self.queryset, json.dumps(filter_data))
    def test_filter_involved(self):
        q1 = question(save=True)
        a1 = answer(question=q1, save=True)
        q2 = question(creator=a1.creator, save=True)
        res = self.filter_instance.filter_involved(self.queryset, q1.creator.username)
        eq_(list(res), [q1])
        # a1's creator asked q2 and answered q1, so both should match.
        res = self.filter_instance.filter_involved(self.queryset, q2.creator.username)
        # The filter does not have a strong order.
        res = sorted(res, key=lambda q: q.id)
        eq_(res, [q1, q2])
    def test_filter_is_solved(self):
        q1 = question(save=True)
        a1 = answer(question=q1, save=True)
        q1.solution = a1
        q1.save()
        q2 = question(save=True)
        res = self.filter_instance.filter_is_solved(self.queryset, True)
        eq_(list(res), [q1])
        res = self.filter_instance.filter_is_solved(self.queryset, False)
        eq_(list(res), [q2])
    @raises(APIException)
    def test_metadata_not_json(self):
        # Non-JSON input must surface as an APIException, not a crash.
        self.filter_instance.filter_metadata(self.queryset, 'not json')
    @raises(APIException)
    def test_metadata_bad_json(self):
        # Valid JSON with an unsupported value shape (list) also raises.
        self.filter({'foo': []})
    def test_single_filter_match(self):
        q1 = question(metadata={'os': 'Linux'}, save=True)
        question(metadata={'os': 'OSX'}, save=True)
        res = self.filter({'os': 'Linux'})
        eq_(list(res), [q1])
    def test_single_filter_no_match(self):
        question(metadata={'os': 'Linux'}, save=True)
        question(metadata={'os': 'OSX'}, save=True)
        res = self.filter({"os": "Windows 8"})
        eq_(list(res), [])
    def test_multi_filter_is_and(self):
        # Multiple metadata keys must all match (AND semantics).
        q1 = question(metadata={'os': 'Linux', 'category': 'troubleshooting'}, save=True)
        question(metadata={'os': 'OSX', 'category': 'troubleshooting'}, save=True)
        res = self.filter({'os': 'Linux', 'category': 'troubleshooting'})
        eq_(list(res), [q1])
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes for calculating the ewald sum of a structure.
"""
__author__ = "Shyue Ping Ong, William Davidson Richard"
__copyright__ = "Copyright 2011, The Materials Project"
__credits__ = "Christopher Fischer"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Aug 1 2012"
from math import pi, sqrt, log
from datetime import datetime
from copy import deepcopy, copy
from warnings import warn
import bisect
import numpy as np
from scipy.special import erfc
from scipy.misc import comb
import scipy.constants as constants
class EwaldSummation(object):
    """
    Calculates the electrostatic energy of a periodic array of charges using
    the Ewald technique.
    Ref: http://www.ee.duke.edu/~ayt/ewaldpaper/ewaldpaper.html
    This matrix can be used to do fast calculations of ewald sums after species
    removal.
    E = E_recip + E_real + E_point
    Atomic units used in the code, then converted to eV.
    """
    # Converts unit of q*q/r into eV
    CONV_FACT = 1e10 * constants.e / (4 * pi * constants.epsilon_0)
    def __init__(self, structure, real_space_cut=None, recip_space_cut=None,
                 eta=None, acc_factor=12.0, w=1 / sqrt(2), compute_forces=False):
        """
        Initializes and calculates the Ewald sum. Default convergence
        parameters have been specified, but you can override them if you wish.
        Args:
            structure (Structure): Input structure that must have proper
                Specie on all sites, i.e. Element with oxidation state. Use
                Structure.add_oxidation_state... for example.
            real_space_cut (float): Real space cutoff radius dictating how
                many terms are used in the real space sum. Defaults to None,
                which means determine automagically using the formula given
                in gulp 3.1 documentation.
            recip_space_cut (float): Reciprocal space cutoff radius.
                Defaults to None, which means determine automagically using
                the formula given in gulp 3.1 documentation.
            eta (float): The screening parameter. Defaults to None, which means
                determine automatically.
            acc_factor (float): No. of significant figures each sum is
                converged to.
            w (float): Weight parameter, w, has been included that represents
                the relative computational expense of calculating a term in
                real and reciprocal space. Default of 0.7 reproduces result
                similar to GULP 4.2. This has little effect on the total
                energy, but may influence speed of computation in large
                systems. Note that this parameter is used only when the
                cutoffs are set to None.
            compute_forces (bool): Whether to compute forces. False by
                default since it is usually not needed.
        """
        self._s = structure
        # Non-neutral cells are tolerated but total_energy warns about them.
        self._charged = abs(structure.charge) > 1e-8
        self._vol = structure.volume
        self._compute_forces = compute_forces
        self._acc_factor = acc_factor
        # set screening length
        self._eta = eta if eta \
            else (len(structure) * w / (self._vol ** 2)) ** (1 / 3) * pi
        self._sqrt_eta = sqrt(self._eta)
        # acc factor used to automatically determine the optimal real and
        # reciprocal space cutoff radii
        self._accf = sqrt(log(10 ** acc_factor))
        self._rmax = real_space_cut if real_space_cut \
            else self._accf / self._sqrt_eta
        self._gmax = recip_space_cut if recip_space_cut \
            else 2 * self._sqrt_eta * self._accf
        # The next few lines pre-compute certain quantities and store them.
        # Ewald summation is rather expensive, and these shortcuts are
        # necessary to obtain several factors of improvement in speedup.
        self._oxi_states = [compute_average_oxidation_state(site)
                            for site in structure]
        self._coords = np.array(self._s.cart_coords)
        # Now we call the relevant private methods to calculate the reciprocal
        # and real space terms.
        (self._recip, recip_forces) = self._calc_recip()
        (self._real, self._point, real_point_forces) = \
            self._calc_real_and_point()
        if self._compute_forces:
            self._forces = recip_forces + real_point_forces
    def compute_partial_energy(self, removed_indices):
        """
        Gives total ewald energy for certain sites being removed, i.e. zeroed
        out.
        """
        total_energy_matrix = self.total_energy_matrix.copy()
        for i in removed_indices:
            total_energy_matrix[i, :] = 0
            total_energy_matrix[:, i] = 0
        return sum(sum(total_energy_matrix))
    def compute_sub_structure(self, sub_structure, tol=1e-3):
        """
        Gives total ewald energy for an sub structure in the same
        lattice. The sub_structure must be a subset of the original
        structure, with possible different charges.
        Args:
            substructure (Structure): Substructure to compute Ewald sum for.
            tol (float): Tolerance for site matching in fractional coordinates.
        Returns:
            Ewald sum of substructure.
        """
        total_energy_matrix = self.total_energy_matrix.copy()
        def find_match(site):
            # Match by fractional coordinates modulo 1, within tol of either
            # 0 or 1 on every axis (to handle wrap-around).
            for test_site in sub_structure:
                frac_diff = abs(np.array(site.frac_coords)
                                - np.array(test_site.frac_coords)) % 1
                frac_diff = [abs(a) < tol or abs(a) > 1 - tol
                             for a in frac_diff]
                if all(frac_diff):
                    return test_site
            return None
        matches = []
        for i, site in enumerate(self._s):
            matching_site = find_match(site)
            if matching_site:
                # Rescale row/column i by the charge ratio of the matched site.
                new_charge = compute_average_oxidation_state(matching_site)
                old_charge = self._oxi_states[i]
                scaling_factor = new_charge / old_charge
                matches.append(matching_site)
            else:
                # Unmatched sites are zeroed out of the energy matrix.
                scaling_factor = 0
            total_energy_matrix[i, :] *= scaling_factor
            total_energy_matrix[:, i] *= scaling_factor
        if len(matches) != len(sub_structure):
            output = ["Missing sites."]
            for site in sub_structure:
                if site not in matches:
                    output.append("unmatched = {}".format(site))
            raise ValueError("\n".join(output))
        return sum(sum(total_energy_matrix))
    @property
    def reciprocal_space_energy(self):
        """
        The reciprocal space energy.
        """
        return sum(sum(self._recip))
    @property
    def reciprocal_space_energy_matrix(self):
        """
        The reciprocal space energy matrix. Each matrix element (i, j)
        corresponds to the interaction energy between site i and site j in
        reciprocal space.
        """
        return self._recip
    @property
    def real_space_energy(self):
        """
        The real space space energy.
        """
        return sum(sum(self._real))
    @property
    def real_space_energy_matrix(self):
        """
        The real space energy matrix. Each matrix element (i, j) corresponds to
        the interaction energy between site i and site j in real space.
        """
        return self._real
    @property
    def point_energy(self):
        """
        The point energy.
        """
        return sum(self._point)
    @property
    def point_energy_matrix(self):
        """
        The point space matrix. A diagonal matrix with the point terms for each
        site in the diagonal elements.
        """
        return self._point
    @property
    def total_energy(self):
        """
        The total energy.
        """
        if self._charged:
            warn('Charged structures not supported in EwaldSummation, but '
                 'charged input structures can be used for '
                 'EwaldSummation.compute_sub_structure')
        return sum(sum(self._recip)) + sum(sum(self._real)) + sum(self._point)
    @property
    def total_energy_matrix(self):
        """
        The total energy matrix. Each matrix element (i, j) corresponds to the
        total interaction energy between site i and site j.
        """
        totalenergy = self._recip + self._real
        for i in range(len(self._point)):
            totalenergy[i, i] += self._point[i]
        return totalenergy
    @property
    def forces(self):
        """
        The forces on each site as a Nx3 matrix. Each row corresponds to a
        site.
        """
        if not self._compute_forces:
            raise AttributeError(
                "Forces are available only if compute_forces is True!")
        return self._forces
    def _calc_recip(self):
        """
        Perform the reciprocal space summation. Calculates the quantity
        E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G)
        where
        S(G) = sum_{k=1,N} q_k exp(-i G.r_k)
        S(G)S(-G) = |S(G)|**2
        This method is heavily vectorized to utilize numpy's C backend for
        speed.
        """
        numsites = self._s.num_sites
        prefactor = 2 * pi / self._vol
        # NOTE: builtin ``float`` is used as the dtype; the ``np.float``
        # alias was deprecated in NumPy 1.20 and removed in 1.24.
        erecip = np.zeros((numsites, numsites), dtype=float)
        forces = np.zeros((numsites, 3), dtype=float)
        coords = self._coords
        rcp_latt = self._s.lattice.reciprocal_lattice
        recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0],
                                                 self._gmax)
        # Skip the G = 0 term.
        frac_coords = [fcoords for (fcoords, dist, i) in recip_nn if dist != 0]
        gs = rcp_latt.get_cartesian_coords(frac_coords)
        g2s = np.sum(gs ** 2, 1)
        expvals = np.exp(-g2s / (4 * self._eta))
        grs = np.sum(gs[:, None] * coords[None, :], 2)
        oxistates = np.array(self._oxi_states)
        # create array where q_2[i,j] is qi * qj
        qiqj = oxistates[None, :] * oxistates[:, None]
        # calculate the structure factor
        sreals = np.sum(oxistates[None, :] * np.cos(grs), 1)
        simags = np.sum(oxistates[None, :] * np.sin(grs), 1)
        for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals,
                                                   sreals, simags):
            # Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4)
            m = (gr[None, :] + pi / 4) - gr[:, None]
            np.sin(m, m)
            m *= expval / g2
            erecip += m
            if self._compute_forces:
                pref = 2 * expval / g2 * oxistates
                factor = prefactor * pref * (
                    sreal * np.sin(gr) - simag * np.cos(gr))
                forces += factor[:, None] * g[None, :]
        forces *= EwaldSummation.CONV_FACT
        erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5
        return erecip, forces
    def _calc_real_and_point(self):
        """
        Determines the self energy -(eta/pi)**(1/2) * sum_{i=1}^{N} q_i**2
        If cell is charged a compensating background is added (i.e. a G=0 term)
        """
        fcoords = self._s.frac_coords
        forcepf = 2.0 * self._sqrt_eta / sqrt(pi)
        coords = self._coords
        numsites = self._s.num_sites
        # NOTE: builtin ``float`` dtype (np.float removed in NumPy 1.24).
        ereal = np.empty((numsites, numsites), dtype=float)
        forces = np.zeros((numsites, 3), dtype=float)
        qs = np.array(self._oxi_states)
        epoint = - qs ** 2 * sqrt(self._eta / pi)
        for i in range(numsites):
            nfcoords, rij, js = self._s.lattice.get_points_in_sphere(fcoords,
                coords[i], self._rmax, zip_results=False)
            # remove the rii term
            inds = rij > 1e-8
            js = js[inds]
            rij = rij[inds]
            nfcoords = nfcoords[inds]
            qi = qs[i]
            qj = qs[js]
            erfcval = erfc(self._sqrt_eta * rij)
            new_ereals = erfcval * qi * qj / rij
            # insert new_ereals
            for k in range(numsites):
                ereal[k, i] = np.sum(new_ereals[js == k])
            if self._compute_forces:
                nccoords = self._s.lattice.get_cartesian_coords(nfcoords)
                fijpf = qj / rij ** 3 * (erfcval + forcepf * rij *
                                         np.exp(-self._eta * rij ** 2))
                forces[i] += np.sum(np.expand_dims(fijpf, 1) *
                                    (np.array([coords[i]]) - nccoords) *
                                    qi * EwaldSummation.CONV_FACT, axis=0)
        ereal *= 0.5 * EwaldSummation.CONV_FACT
        epoint *= EwaldSummation.CONV_FACT
        return ereal, epoint, forces
    @property
    def eta(self):
        """The screening parameter used for this summation."""
        return self._eta
    def __str__(self):
        output = ["Real = " + str(self.real_space_energy),
                  "Reciprocal = " + str(self.reciprocal_space_energy),
                  "Point = " + str(self.point_energy),
                  "Total = " + str(self.total_energy),
                  "Forces:\n" + str(self.forces)]
        return "\n".join(output)
class EwaldMinimizer:
"""
This class determines the manipulations that will minimize an ewald matrix,
given a list of possible manipulations. This class does not perform the
manipulations on a structure, but will return the list of manipulations
that should be done on one to produce the minimal structure. It returns the
manipulations for the n lowest energy orderings. This class should be used
to perform fractional species substitution or fractional species removal to
produce a new structure. These manipulations create large numbers of
candidate structures, and this class can be used to pick out those with the
lowest ewald sum.
An alternative (possibly more intuitive) interface to this class is the
order disordered structure transformation.
Author - Will Richards
Args:
matrix: A matrix of the ewald sum interaction energies. This is stored
in the class as a diagonally symmetric array and so
self._matrix will not be the same as the input matrix.
m_list: list of manipulations. each item is of the form
(multiplication fraction, number_of_indices, indices, species)
These are sorted such that the first manipulation contains the
most permutations. this is actually evaluated last in the
recursion since I'm using pop.
num_to_return: The minimizer will find the number_returned lowest
energy structures. This is likely to return a number of duplicate
structures so it may be necessary to overestimate and then
remove the duplicates later. (duplicate checking in this
process is extremely expensive)
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
"""
ALGO_TIME_LIMIT: Slowly increases the speed (with the cost of decreasing
accuracy) as the minimizer runs. Attempts to limit the run time to
approximately 30 minutes.
"""
ALGO_TIME_LIMIT = 3
    def __init__(self, matrix, m_list, num_to_return=1, algo=ALGO_FAST):
        """Symmetrize the matrix, sort manipulations, and run the minimizer.

        Raises ValueError for fractions > 1 and NotImplementedError for
        ALGO_COMPLETE.
        """
        # Setup and checking of inputs
        self._matrix = copy(matrix)
        # Make the matrix diagonally symmetric (so matrix[i,:] == matrix[:,j])
        for i in range(len(self._matrix)):
            for j in range(i, len(self._matrix)):
                value = (self._matrix[i, j] + self._matrix[j, i]) / 2
                self._matrix[i, j] = value
                self._matrix[j, i] = value
        # sort the m_list based on number of permutations
        # NOTE(review): `comb` comes from the module-level
        # `from scipy.misc import comb`, which was removed in SciPy 1.0
        # (use scipy.special.comb) — confirm the pinned SciPy version.
        self._m_list = sorted(m_list, key=lambda x: comb(len(x[2]), x[1]),
                              reverse=True)
        for mlist in self._m_list:
            if mlist[0] > 1:
                raise ValueError('multiplication fractions must be <= 1')
        self._current_minimum = float('inf')
        self._num_to_return = num_to_return
        self._algo = algo
        if algo == EwaldMinimizer.ALGO_COMPLETE:
            raise NotImplementedError('Complete algo not yet implemented for '
                                      'EwaldMinimizer')
        self._output_lists = []
        # Tag that the recurse function looks at at each level. If a method
        # sets this to true it breaks the recursion and stops the search.
        self._finished = False
        self._start_time = datetime.utcnow()
        self.minimize_matrix()
        # Best (lowest-sum) candidate found; IndexError here means the
        # search produced no output at all.
        self._best_m_list = self._output_lists[0][1]
        self._minimized_sum = self._output_lists[0][0]
def minimize_matrix(self):
"""
This method finds and returns the permutations that produce the lowest
ewald sum calls recursive function to iterate through permutations
"""
if self._algo == EwaldMinimizer.ALGO_FAST or \
self._algo == EwaldMinimizer.ALGO_BEST_FIRST:
return self._recurse(self._matrix, self._m_list,
set(range(len(self._matrix))))
def add_m_list(self, matrix_sum, m_list):
"""
This adds an m_list to the output_lists and updates the current
minimum if the list is full.
"""
if self._output_lists is None:
self._output_lists = [[matrix_sum, m_list]]
else:
bisect.insort(self._output_lists, [matrix_sum, m_list])
if self._algo == EwaldMinimizer.ALGO_BEST_FIRST and \
len(self._output_lists) == self._num_to_return:
self._finished = True
if len(self._output_lists) > self._num_to_return:
self._output_lists.pop()
if len(self._output_lists) == self._num_to_return:
self._current_minimum = self._output_lists[-1][0]
def best_case(self, matrix, m_list, indices_left):
    """
    Computes a best case given a matrix and manipulation list.

    Args:
        matrix: the current matrix (with some permutations already
            performed)
        m_list: [(multiplication fraction, number_of_indices, indices,
            species)] describing the manipulation
        indices_left: Set of indices which haven't had a permutation
            performed on them.

    Returns:
        An optimistic estimate of the lowest matrix sum reachable from
        this state; the recursion uses it to prune branches that cannot
        beat the current minimum.
    """
    m_indices = []
    fraction_list = []
    for m in m_list:
        m_indices.extend(m[2])
        fraction_list.extend([m[0]] * m[1])
    # Only indices that are both still available and mentioned by some
    # manipulation can contribute to the bound.
    indices = list(indices_left.intersection(m_indices))
    interaction_matrix = matrix[indices, :][:, indices]
    # Pad with 1s (no-op fractions) so there is one fraction per index.
    fractions = np.zeros(len(interaction_matrix)) + 1
    fractions[:len(fraction_list)] = fraction_list
    fractions = np.sort(fractions)
    # Sum associated with each index (disregarding interactions between
    # indices)
    sums = 2 * np.sum(matrix[indices], axis=1)
    sums = np.sort(sums)
    # Interaction corrections. Can be reduced to (1-x)(1-y) for x,y in
    # fractions each element in a column gets multiplied by (1-x), and then
    # the sum of the columns gets multiplied by (1-y) since fractions are
    # less than 1, there is no effect of one choice on the other
    step1 = np.sort(interaction_matrix) * (1 - fractions)
    step2 = np.sort(np.sum(step1, axis=1))
    step3 = step2 * (1 - fractions)
    interaction_correction = np.sum(step3)
    if self._algo == self.ALGO_TIME_LIMIT:
        # Blend toward a cheaper average-based correction as the 30-minute
        # (1800 s) budget is consumed, so pruning gets coarser over time.
        elapsed_time = datetime.utcnow() - self._start_time
        speedup_parameter = elapsed_time.total_seconds() / 1800
        avg_int = np.sum(interaction_matrix, axis=None)
        avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))
        average_correction = avg_int * avg_frac
        interaction_correction = average_correction * speedup_parameter \
            + interaction_correction * (1 - speedup_parameter)
    # Pair the largest per-index sums with the smallest fractions
    # (sums[::-1] vs sorted fractions) for the most optimistic pairing.
    best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) \
        + interaction_correction
    return best_case
def get_next_index(self, matrix, manipulation, indices_left):
    """
    Return the still-available index of this manipulation expected to
    have the most negative effect on the matrix sum: for a reducing
    fraction (< 1) the index with the largest row sum, otherwise the
    index with the smallest row sum.
    """
    fraction = manipulation[0]
    candidates = list(indices_left.intersection(manipulation[2]))
    row_sums = np.sum(matrix[candidates], axis=1)
    if fraction < 1:
        pick = row_sums.argmax(axis=0)
    else:
        pick = row_sums.argmin(axis=0)
    return candidates[pick]
def _recurse(self, matrix, m_list, indices, output_m_list=None):
    """
    This method recursively finds the minimal permutations using a binary
    tree search strategy.

    Args:
        matrix: The current matrix (with some permutations already
            performed).
        m_list: The list of permutations still to be performed
        indices: Set of indices which haven't had a permutation
            performed on them.
        output_m_list: [index, species] manipulations accumulated along
            the current branch of the search (new list per top-level call).
    """
    # Fix: a mutable default argument ([]) is shared across calls; use a
    # None sentinel and create a fresh list instead.
    if output_m_list is None:
        output_m_list = []
    # check to see if we've found all the solutions that we need
    if self._finished:
        return
    # if we're done with the current manipulation, pop it off.
    while m_list[-1][1] == 0:
        m_list = copy(m_list)
        m_list.pop()
        # if there are no more manipulations left to do check the value
        if not m_list:
            matrix_sum = np.sum(matrix)
            if matrix_sum < self._current_minimum:
                self.add_m_list(matrix_sum, output_m_list)
            return
    # if we won't have enough indices left, return
    if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
        return
    # Prune this branch if even its best case cannot beat the current
    # minimum (only worth computing when choices remain).
    if len(m_list) == 1 or m_list[-1][1] > 1:
        if self.best_case(matrix, m_list, indices) > self._current_minimum:
            return
    index = self.get_next_index(matrix, m_list[-1], indices)
    m_list[-1][2].remove(index)
    # Make the matrix and new m_list where we do the manipulation to the
    # index that we just got
    matrix2 = np.copy(matrix)
    m_list2 = deepcopy(m_list)
    output_m_list2 = copy(output_m_list)
    matrix2[index, :] *= m_list[-1][0]
    matrix2[:, index] *= m_list[-1][0]
    output_m_list2.append([index, m_list[-1][3]])
    indices2 = copy(indices)
    indices2.remove(index)
    m_list2[-1][1] -= 1
    # recurse through both the modified and unmodified matrices
    self._recurse(matrix2, m_list2, indices2, output_m_list2)
    self._recurse(matrix, m_list, indices, output_m_list)
@property
def best_m_list(self):
    """The manipulation list that produced the lowest Ewald sum found."""
    return self._best_m_list
@property
def minimized_sum(self):
    """The lowest matrix sum found by the minimization."""
    return self._minimized_sum
@property
def output_lists(self):
    """Sorted [matrix_sum, m_list] results, lowest sum first."""
    return self._output_lists
def compute_average_oxidation_state(site):
    """
    Calculates the average oxidation state of a site

    Args:
        site: Site to compute average oxidation state

    Returns:
        Average oxidation state of site.
    """
    # Oxidation-state-decorated species: occupancy-weighted average.
    try:
        return sum(spec.oxi_state * amount
                   for spec, amount in site.species_and_occu.items()
                   if spec is not None)
    except AttributeError:
        pass
    # Fall back to an explicit site charge, if present.
    try:
        return site.charge
    except AttributeError:
        raise ValueError("Ewald summation can only be performed on structures "
                         "that are either oxidation state decorated or have "
                         "site charges.")
| |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_parad_k_stc_imodem
@file marine-integrations/mi/dataset/parser/test/test_parad_k_stc_imodem.py
@author Mike Nicoletti, Steve Myerson (recovered)
@brief Test code for a Parad_k_stc_imodem data parser
"""
import struct, ntplib
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.parad_k_stc_imodem import \
Parad_k_stc_imodemParser,\
Parad_k_stc_imodemRecoveredParser, \
Parad_k_stc_imodemDataParticle, \
Parad_k_stc_imodemRecoveredDataParticle
from mi.dataset.parser.WFP_E_file_common import StateKey
@attr('UNIT', group='mi')
class Parad_k_stc_imodemParserUnitTestCase(ParserUnitTestCase):
    """
    Parad_k_stc_imodem Parser unit test suite
    """
    # Short raw byte stream containing four engineering records; the
    # tests below expect parser positions 50/76/102/128 after each record.
    TEST_DATA_SHORT = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac" \
        "\x1d\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00" \
        "\x00\x00\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^" \
        "\x00OR\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
    # Longer stream; test_long_stream expects 32 records, final position 856.
    TEST_DATA = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d\x00" \
        "\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00" \
        "\x00\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^" \
        "\x00OR\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00OR\x9d\xac/C\xb8COA6\xde" \
        "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9d\x00p\x00QR\x9d\xac3C\x98\xe5TA733\x00\x00\x00\x00\x00\x00\x00\x00" \
        "\x00\xa4\x00u\x00OR\x9d\xac8C\x9566A7!-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9a\x00o\x00OR\x9d\xac?C\xa1\xd7\xc3" \
        "A6\xa6LB\x8bG\xae\x00\x00\x00\x00\x00\xb6\x00v\x00PR\x9d\xacECsS\xfeA7e\xfeB\x88\x00\x00\x00\x00\x00\x00\x00" \
        "\x98\x00s\x00QR\x9d\xacKC\x89\x17\x8cA6\xe2\xecB\x84\x99\x9a\x00\x00\x00\x00\x00\xa4\x00\x81\x00PR\x9d\xacQC}\n" \
        "\xbfA7\x00hB\x81G\xae\x00\x00\x00\x00\x00\xa2\x00|\x00NR\x9d\xacWCyW\xc7A6\x97\x8dB{\xe1H\x00\x00\x00\x00\x00\x9a" \
        "\x00m\x00NR\x9d\xac]C\x8c!#A6\x9f\xbeBuQ\xec\x00\x00\x00\x00\x00\x97\x00s\x00QR\x9d\xaccC\x84!9A6h\nBn\x8f\\\x00" \
        "\x00\x00\x00\x00\x9f\x00v\x00NR\x9d\xaciCE\xa5UA6a|Bh=q\x00\x00\x00\x00\x00\x97\x00l\x00PR\x9d\xacoC\xa5\xa5\xad" \
        "A5\x94\xafBa\\)\x00\x00\x00\x00\x00\x9b\x00n\x00RR\x9d\xacuC\\\r\x08A6\x14{B[\n=\x00\x00\x00\x00\x00\x9a\x00s\x00" \
        "OR\x9d\xac{C\xa3\x0b\xb8A5F\nBT33\x00\x00\x00\x00\x00\x98\x00q\x00NR\x9d\xac\x81CO\xc0+A5\xd7\xdcBM\xd7\n\x00\x00" \
        "\x00\x00\x00\x97\x00n\x00PR\x9d\xac\x87Cxp\xd0A5#\xa3BGG\xae\x00\x00\x00\x00\x00\x9b\x00n\x00PR\x9d\xac\x8dC\x84" \
        "\xdd\xd9A5X\x10B@\xae\x14\x00\x00\x00\x00\x00\xa5\x00v\x00OR\x9d\xac\x93C\xa0\x85\x01A4j\x7fB:\x14{\x00\x00\x00\x00" \
        "\x00\x9c\x00t\x00QR\x9d\xac\x99Cq\xa4\xdbA5:\x92B3\xc2\x8f\x00\x00\x00\x00\x00\x9c\x00x\x00PR\x9d\xac\x9fCg\x07#A5" \
        "\x18+B-\x00\x00\x00\x00\x00\x00\x00\x9e\x00m\x00QR\x9d\xac\xa5C\x9bw\x96A4FtB&z\xe1\x00\x00\x00\x00\x00\xd7\x00s" \
        "\x00OR\x9d\xac\xabCmP5A4\x9dJB\x1f\xd7\n\x00\x00\x00\x00\x00\x99\x00s\x00PR\x9d\xac\xb1C\xad\x960A3\x8a\tB\x19" \
        "(\xf6\x00\x00\x00\x00\x00\x95\x00n\x00OR\x9d\xac\xb7C\x0c\xce]A5\x0f\xfaB\x12\xe1H\x00\x00\x00\x00\x00\x9c\x00u" \
        "\x00PR\x9d\xac\xbdC\xa1\xeb\x02A3Z\x85B\x0c=q\x00\x00\x00\x00\x00\x95\x00u\x00OR\x9d\xac\xc3C$\xafOA4\xa23B\x05" \
        "\xe1H\x00\x00\x00\x00\x00\x99\x00r\x00PR\x9d\xac\xc9C\xae\xddeA3\x0f(A\xfe(\xf6\x00\x00\x00\x00\x00\x9a\x00o\x00O" \
        "R\x9d\xac\xcfA\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P\xff\xff\xff\xff\x00\x00\x00\rR\x9d" \
        "\xac\xd4R\x9d\xadQ"
    # all flags set to zero
    TEST_DATA_BAD_FLAGS = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d" \
        "\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00\x00" \
        "\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00OR\x9d\xac" \
        "*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
    # took 5 bytes out of second engineering sample
    TEST_DATA_BAD_ENG = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d" \
        "\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t!\x00\x00\x00\x00\x00" \
        "\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00OR\x9d\xac" \
        "*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
    # Has a NaN for par_value
    TEST_DATA_NAN = \
        '\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00' \
        '\x52\x9D\xAB\xA2\x52\x9D\xAC\x19' \
        '\x52\x9D\xAC\x1D' \
        '\x00\x00\x00\x00\x41\x3A\x36\xE3\x00\x00\x00\x00' \
        '\xFF\xC0\x00\x00' \
        '\x01\x03\x00\x68\x00\x4E'

    def create_rec_parser(self, new_state, file_handle):
        """
        This function creates a Parad_k_stc parser for recovered data.
        A new_state of None falls back to self.state.
        """
        if new_state is None:
            new_state = self.state
        parser = Parad_k_stc_imodemRecoveredParser(self.rec_config, new_state, file_handle,
                                                   self.state_callback, self.pub_callback)
        return parser

    def state_callback(self, state, file_ingested):
        """ Call back method to watch what comes in via the position callback """
        self.file_ingested = file_ingested
        self.state_callback_value = state

    def pub_callback(self, pub):
        """ Call back method to watch what comes in via the publish callback """
        self.publish_callback_value = pub

    def setUp(self):
        """Build parser configs, expected particles and callback capture slots."""
        ParserUnitTestCase.setUp(self)
        self.config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE:
                'mi.dataset.parser.parad_k_stc_imodem',
            DataSetDriverConfigKeys.PARTICLE_CLASS:
                ['Parad_k_stc_imodem_statusParserDataParticle',
                 'Parad_k_stc_imodem_startParserDataParticle',
                 'Parad_k_stc_imodem_engineeringParserDataParticle']
        }
        self.rec_config = {
            DataSetDriverConfigKeys.PARTICLE_MODULE:
                'mi.dataset.parser.parad_k_stc_imodem',
            DataSetDriverConfigKeys.PARTICLE_CLASS:
                ['Parad_k_stc_imodemRecoveredDataParticle']
        }
        self.start_state = {StateKey.POSITION: 0}
        # Define test data particles and their associated timestamps which will be
        # compared with returned results
        self.timestamp1_eng = self.timestamp_to_ntp('R\x9d\xac\x1d')
        log.debug("Converted timestamp #1: %s",self.timestamp1_eng)
        self.particle_a_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac\x1d' \
            '\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00N',
            internal_timestamp=self.timestamp1_eng)
        self.timestamp2_eng = self.timestamp_to_ntp('R\x9d\xac!')
        self.particle_b_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac!C\t' \
            '\xf2\xf7A9A!\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00c\x00O',
            internal_timestamp=self.timestamp2_eng)
        self.timestamp3_eng = self.timestamp_to_ntp('R\x9d\xac&')
        self.particle_c_eng = Parad_k_stc_imodemDataParticle(b"R\x9d\xac&C\xbc" \
            "\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00O",
            internal_timestamp=self.timestamp3_eng)
        self.timestamp4_eng = self.timestamp_to_ntp('R\x9d\xac*')
        self.particle_d_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac' \
            '*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O',
            internal_timestamp=self.timestamp4_eng)
        self.timestamp_last_eng = self.timestamp_to_ntp('R\x9d\xac\xcf')
        self.particle_last_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac\xcfA' \
            '\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P',
            internal_timestamp=self.timestamp_last_eng)
        # Recovered expected particles
        self.particle_a_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac\x1d' \
            '\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00N',
            internal_timestamp=self.timestamp1_eng)
        self.particle_b_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac!C\t' \
            '\xf2\xf7A9A!\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00c\x00O',
            internal_timestamp=self.timestamp2_eng)
        self.particle_c_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b"R\x9d\xac&C\xbc" \
            "\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00O",
            internal_timestamp=self.timestamp3_eng)
        self.particle_d_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac' \
            '*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O',
            internal_timestamp=self.timestamp4_eng)
        self.particle_last_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac\xcfA' \
            '\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P',
            internal_timestamp=self.timestamp_last_eng)
        # uncomment the following to generate particles in yml format for driver testing results files
        #self.particle_to_yml(self.particle_a_eng)
        #self.particle_to_yml(self.particle_b_eng)
        #self.particle_to_yml(self.particle_c_eng)
        #self.particle_to_yml(self.particle_d_eng)
        self.file_ingested = False
        self.state_callback_value = None
        self.publish_callback_value = None
        self.state = None

    def particle_to_yml(self, particle):
        """
        This is added as a testing helper, not actually as part of the parser tests. Since the same particles
        will be used for the driver test it is helpful to write them to .yml in the same form they need in the
        results.yml files here.
        """
        particle_dict = particle.generate_dict()
        # open write append, if you want to start from scratch manually delete this file
        fid = open('particle.yml', 'a')
        fid.write('  - _index: 0\n')
        fid.write('    internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
        fid.write('    particle_object: %s\n' % particle.__class__.__name__)
        fid.write('    particle_type: %s\n' % particle_dict.get('stream_name'))
        for val in particle_dict.get('values'):
            if isinstance(val.get('value'), float):
                fid.write('    %s: %16.16f\n' % (val.get('value_id'), val.get('value')))
            else:
                fid.write('    %s: %s\n' % (val.get('value_id'), val.get('value')))
        fid.close()

    def test_simple(self):
        """
        Read test data and pull out data particles one at a time.
        Assert that the results are those we expected.
        """
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT) #turn into a data stream to look like file ingestion
        self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
                                               self.state_callback, self.pub_callback) # last one is the link to the data source
        # next get engineering records
        result = self.parser.get_records(1)
        self.assert_result(result, 50, self.particle_a_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 76, self.particle_b_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng, True)
        # no data left, dont move the position
        result = self.parser.get_records(1)
        self.assertEqual(result, [])
        self.assertEqual(self.parser._state[StateKey.POSITION], 128)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], self.particle_d_eng)

    def test_simple_recovered(self):
        """
        Read recovered test data and pull out data particles one at a time.
        Assert that the results are those we expected.
        """
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT) #turn into a data stream to look like file ingestion
        self.parser = self.create_rec_parser(None, stream_handle)
        # next get engineering records
        result = self.parser.get_records(1)
        self.assert_result(result, 50, self.particle_a_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 76, self.particle_b_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng_rec, True)
        # no data left, don't move the position
        result = self.parser.get_records(1)
        self.assertEqual(result, [])
        self.assertEqual(self.parser._state[StateKey.POSITION], 128)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], self.particle_d_eng_rec)

    def timestamp_to_ntp(self, hex_timestamp):
        """Convert a 4-byte big-endian seconds field into an NTP timestamp."""
        fields = struct.unpack('>I', hex_timestamp)
        timestamp = int(fields[0])
        return ntplib.system_to_ntp_time(timestamp)

    def assert_result(self, result, position, particle, ingested):
        """Check one returned particle plus parser position and callback state."""
        self.assertEqual(result, [particle])
        self.assertEqual(self.file_ingested, ingested)
        self.assertEqual(self.parser._state[StateKey.POSITION], position)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], position)
        self.assert_(isinstance(self.publish_callback_value, list))
        self.assertEqual(self.publish_callback_value[0], particle)

    def test_get_many(self):
        """
        Read test data and pull out multiple data particles at one time.
        Assert that the results are those we expected.
        """
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
                                               self.state_callback, self.pub_callback)
        # start with the start time record
        result = self.parser.get_records(4)
        self.assertEqual(result, [self.particle_a_eng, self.particle_b_eng, self.particle_c_eng, self.particle_d_eng])
        self.assertEqual(self.parser._state[StateKey.POSITION], 128)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
        self.assertEqual(self.publish_callback_value[0], self.particle_a_eng)
        self.assertEqual(self.publish_callback_value[1], self.particle_b_eng)
        self.assertEqual(self.publish_callback_value[2], self.particle_c_eng)
        self.assertEqual(self.publish_callback_value[3], self.particle_d_eng)
        self.assertEqual(self.file_ingested, True)

    def test_get_many_recovered(self):
        """
        Read recovered test data and pull out multiple data particles at one time.
        Assert that the results are those we expected.
        """
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = self.create_rec_parser(None, stream_handle)
        # start with the start time record
        result = self.parser.get_records(4)
        self.assertEqual(result, [self.particle_a_eng_rec, self.particle_b_eng_rec,
                                  self.particle_c_eng_rec, self.particle_d_eng_rec])
        self.assertEqual(self.parser._state[StateKey.POSITION], 128)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
        self.assertEqual(self.publish_callback_value[0], self.particle_a_eng_rec)
        self.assertEqual(self.publish_callback_value[1], self.particle_b_eng_rec)
        self.assertEqual(self.publish_callback_value[2], self.particle_c_eng_rec)
        self.assertEqual(self.publish_callback_value[3], self.particle_d_eng_rec)
        self.assertEqual(self.file_ingested, True)

    def test_long_stream(self):
        """
        Test a long stream of data
        """
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA)
        self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
                                               self.state_callback, self.pub_callback)
        result = self.parser.get_records(32)
        self.assertEqual(result[0], self.particle_a_eng)
        self.assertEqual(result[-1], self.particle_last_eng)
        self.assertEqual(self.parser._state[StateKey.POSITION], 856)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], 856)
        self.assertEqual(self.publish_callback_value[-1], self.particle_last_eng)

    def test_long_stream_recovered(self):
        """
        Test a long stream of recovered data
        """
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA)
        self.parser = self.create_rec_parser(None, stream_handle)
        result = self.parser.get_records(32)
        self.assertEqual(result[0], self.particle_a_eng_rec)
        self.assertEqual(result[-1], self.particle_last_eng_rec)
        self.assertEqual(self.parser._state[StateKey.POSITION], 856)
        self.assertEqual(self.state_callback_value[StateKey.POSITION], 856)
        self.assertEqual(self.publish_callback_value[-1], self.particle_last_eng_rec)

    def test_after_header(self):
        """
        Test starting the parser in a state in the middle of processing
        """
        new_state = {StateKey.POSITION:24}
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = Parad_k_stc_imodemParser(self.config, new_state, self.stream_handle,
                                               self.state_callback, self.pub_callback)
        # get engineering records
        result = self.parser.get_records(1)
        self.assert_result(result, 50, self.particle_a_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 76, self.particle_b_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng, True)

    def test_after_header_recovered(self):
        """
        Test starting the parser in a state in the middle of processing
        """
        new_state = {StateKey.POSITION:24}
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = self.create_rec_parser(new_state, stream_handle)
        # get engineering records
        result = self.parser.get_records(1)
        self.assert_result(result, 50, self.particle_a_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 76, self.particle_b_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng_rec, True)

    def test_mid_state_start(self):
        """
        Test starting the parser in a state in the middle of processing
        """
        new_state = {StateKey.POSITION:76}
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = Parad_k_stc_imodemParser(self.config, new_state, self.stream_handle,
                                               self.state_callback, self.pub_callback)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng, True)

    def test_mid_state_start_recovered(self):
        """
        Test starting the parser in a state in the middle of processing
        """
        new_state = {StateKey.POSITION:76}
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = self.create_rec_parser(new_state, stream_handle)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng_rec, True)

    def test_set_state(self):
        """
        Test changing to a new state after initializing the parser and
        reading data, as if new data has been found and the state has
        changed
        """
        new_state = {StateKey.POSITION:76}
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
                                               self.state_callback, self.pub_callback)
        # set the new state, the essentially skips engineering a and b
        self.parser.set_state(new_state)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng, True)

    def test_set_state_recovered(self):
        """
        Test changing to a new state after initializing the parser and
        reading data, as if new data has been found and the state has
        changed
        """
        new_state = {StateKey.POSITION:76}
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
        self.parser = self.create_rec_parser(None, stream_handle)
        # set the new state, the essentially skips engineering a and b
        self.parser.set_state(new_state)
        result = self.parser.get_records(1)
        self.assert_result(result, 102, self.particle_c_eng_rec, False)
        result = self.parser.get_records(1)
        self.assert_result(result, 128, self.particle_d_eng_rec, True)

    def test_bad_flags(self):
        """
        test that we don't parse any records when the flags are not what we expect
        """
        with self.assertRaises(SampleException):
            self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_FLAGS)
            self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
                                                   self.state_callback, self.pub_callback)

    def test_bad_flags_recovered(self):
        """
        test that we don't parse any records when the flags are not what we expect
        """
        with self.assertRaises(SampleException):
            stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_FLAGS)
            self.parser = self.create_rec_parser(None, stream_handle)

    def test_bad_data(self):
        """
        Ensure that missing data causes us to miss records
        TODO: This test should be improved if we come up with a more accurate regex for the data sample
        """
        self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_ENG)
        self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
                                               self.state_callback, self.pub_callback)
        # next get engineering records
        result = self.parser.get_records(4)
        if len(result) == 4:
            self.fail("We got 4 records, the bad data should only make 3")

    def test_bad_data_recovered(self):
        """
        Ensure that missing data causes us to miss records
        """
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_ENG)
        self.parser = self.create_rec_parser(None, stream_handle)
        # next get engineering records
        result = self.parser.get_records(4)
        if len(result) == 4:
            self.fail("We got 4 records, the bad data should only make 3")

    def test_nan(self):
        """
        Verify that an exception occurs when the par_value has a value of NaN.
        """
        stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_NAN)
        self.parser = self.create_rec_parser(None, stream_handle)
        with self.assertRaises(SampleException):
            self.parser.get_records(1)
| |
"""
A simple VTK widget for PyQt v5, the Qt v5 bindings for Python.
See http://www.trolltech.com for Qt documentation, and
http://www.riverbankcomputing.co.uk for PyQt.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
Changes by Phil Thompson, Nov. 2006
Ported to PyQt v4.
Added support for wheel events.
Changes by Phil Thompson, Oct. 2007
Bug fixes.
Changes by Phil Thompson, Mar. 2008
Added cursor support.
Changes by Rodrigo Mologni, Sep. 2013 (Credit to Daniele Esposti)
Bug fix to PySide: Converts PyCObject to void pointer.
Changes by Alex Tsui, Apr. 2015
Port from PyQt4 widget.
"""
try:
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QTimer
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QSize
from PyQt5.QtCore import QEvent
except ImportError:
raise ImportError("Cannot load PyQt5")
import vtk
class QVTKRenderWindowInteractor(QWidget):
""" A QVTKRenderWindowInteractor for Python and Qt. Uses a
vtkGenericRenderWindowInteractor to handle the interactions. Use
GetRenderWindow() to get the vtkRenderWindow. Create with the
keyword stereo=1 in order to generate a stereo-capable window.
The user interface is summarized in vtkInteractorStyle.h:
- Keypress j / Keypress t: toggle between joystick (position
sensitive) and trackball (motion sensitive) styles. In joystick
style, motion occurs continuously as long as a mouse button is
pressed. In trackball style, motion occurs when the mouse button
is pressed and the mouse pointer moves.
- Keypress c / Keypress o: toggle between camera and object
(actor) modes. In camera mode, mouse events affect the camera
position and focal point. In object mode, mouse events affect
the actor that is under the mouse pointer.
- Button 1: rotate the camera around its focal point (if camera
mode) or rotate the actor around its origin (if actor mode). The
rotation is in the direction defined from the center of the
renderer's viewport towards the mouse position. In joystick mode,
the magnitude of the rotation is determined by the distance the
mouse is from the center of the render window.
- Button 2: pan the camera (if camera mode) or translate the actor
(if object mode). In joystick mode, the direction of pan or
translation is from the center of the viewport towards the mouse
position. In trackball mode, the direction of motion is the
direction the mouse moves. (Note: with 2-button mice, pan is
defined as <Shift>-Button 1.)
- Button 3: zoom the camera (if camera mode) or scale the actor
(if object mode). Zoom in/increase scale if the mouse position is
in the top half of the viewport; zoom out/decrease scale if the
mouse position is in the bottom half. In joystick mode, the amount
of zoom is controlled by the distance of the mouse pointer from
the horizontal centerline of the window.
- Keypress 3: toggle the render window into and out of stereo
mode. By default, red-blue stereo pairs are created. Some systems
support Crystal Eyes LCD stereo glasses; you have to invoke
SetStereoTypeToCrystalEyes() on the rendering window. Note: to
use stereo you also need to pass a stereo=1 keyword argument to
the constructor.
- Keypress e: exit the application.
- Keypress f: fly to the picked point
- Keypress p: perform a pick operation. The render window interactor
has an internal instance of vtkCellPicker that it uses to pick.
- Keypress r: reset the camera view along the current view
direction. Centers the actors and moves the camera so that all actors
are visible.
- Keypress s: modify the representation of all actors so that they
are surfaces.
- Keypress u: invoke the user-defined function. Typically, this
keypress will bring up an interactor that you can type commands in.
- Keypress w: modify the representation of all actors so that they
are wireframe.
"""
# Map between VTK and Qt cursors.
_CURSOR_MAP = {
0: Qt.ArrowCursor, # VTK_CURSOR_DEFAULT
1: Qt.ArrowCursor, # VTK_CURSOR_ARROW
2: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZENE
3: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZENWSE
4: Qt.SizeBDiagCursor, # VTK_CURSOR_SIZESW
5: Qt.SizeFDiagCursor, # VTK_CURSOR_SIZESE
6: Qt.SizeVerCursor, # VTK_CURSOR_SIZENS
7: Qt.SizeHorCursor, # VTK_CURSOR_SIZEWE
8: Qt.SizeAllCursor, # VTK_CURSOR_SIZEALL
9: Qt.PointingHandCursor, # VTK_CURSOR_HAND
10: Qt.CrossCursor, # VTK_CURSOR_CROSSHAIR
}
    def __init__(self, parent=None, wflags=Qt.WindowFlags(), **kw):
        """
        Create the widget and wire a vtkRenderWindow/interactor to it.

        Recognized keyword arguments:
            stereo: if truthy, make the render window stereo-capable
                (CrystalEyes).
            rw: a user-supplied vtkRenderWindow to use instead of
                creating a new one.
            iren: a user-supplied interactor to use instead of a new
                vtkGenericRenderWindowInteractor.
        """
        # the current button
        self._ActiveButton = Qt.NoButton
        # private attributes
        self.__saveX = 0
        self.__saveY = 0
        self.__saveModifiers = Qt.NoModifier
        self.__saveButtons = Qt.NoButton
        # do special handling of some keywords:
        # stereo, rw
        stereo = 0
        if 'stereo' in kw:
            if kw['stereo']:
                stereo = 1
        rw = None
        if 'rw' in kw:
            rw = kw['rw']
        # create qt-level widget
        QWidget.__init__(self, parent, wflags|Qt.MSWindowsOwnDC)
        if rw: # user-supplied render window
            self._RenderWindow = rw
        else:
            self._RenderWindow = vtk.vtkRenderWindow()
        WId = self.winId()
        # PySide returns a PyCObject for winId(); convert it to a raw
        # pointer value before handing it to VTK.
        if type(WId).__name__ == 'PyCObject':
            from ctypes import pythonapi, c_void_p, py_object
            pythonapi.PyCObject_AsVoidPtr.restype = c_void_p
            pythonapi.PyCObject_AsVoidPtr.argtypes = [py_object]
            WId = pythonapi.PyCObject_AsVoidPtr(WId)
        self._RenderWindow.SetWindowInfo(str(int(WId)))
        if stereo: # stereo mode
            self._RenderWindow.StereoCapableWindowOn()
            self._RenderWindow.SetStereoTypeToCrystalEyes()
        if 'iren' in kw:
            self._Iren = kw['iren']
        else:
            self._Iren = vtk.vtkGenericRenderWindowInteractor()
            self._Iren.SetRenderWindow(self._RenderWindow)
        # do all the necessary qt setup
        self.setAttribute(Qt.WA_OpaquePaintEvent)
        self.setAttribute(Qt.WA_PaintOnScreen)
        self.setMouseTracking(True) # get all mouse events
        self.setFocusPolicy(Qt.WheelFocus)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        # Qt timer drives the VTK interactor's timer events.
        self._Timer = QTimer(self)
        self._Timer.timeout.connect(self.TimerEvent)
        self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
        self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)
        self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent',
                                                 self.CursorChangedEvent)
        # Create a hidden child widget and connect its destroyed signal to its
        # parent ``Finalize`` slot. The hidden children will be destroyed before
        # its parent thus allowing cleanup of VTK elements.
        self._hidden = QWidget(self)
        self._hidden.hide()
        self._hidden.destroyed.connect(self.Finalize)
def __getattr__(self, attr):
"""Makes the object behave like a vtkGenericRenderWindowInteractor"""
if attr == '__vtk__':
return lambda t=self._Iren: t
elif hasattr(self._Iren, attr):
return getattr(self._Iren, attr)
else:
raise AttributeError(self.__class__.__name__ + \
" has no attribute named " + attr)
    def Finalize(self):
        '''
        Call internal cleanup method on VTK objects (releases the render
        window's resources).
        '''
        self._RenderWindow.Finalize()
    def CreateTimer(self, obj, evt):
        """VTK observer callback: start the Qt timer with a 10 ms interval."""
        self._Timer.start(10)
    def DestroyTimer(self, obj, evt):
        """VTK observer callback: stop the Qt timer."""
        self._Timer.stop()
        # Non-zero return presumably signals VTK that the timer was
        # destroyed successfully -- TODO confirm against VTK docs.
        return 1
    def TimerEvent(self):
        """Qt timer tick: forward to the VTK interactor."""
        self._Iren.TimerEvent()
    def CursorChangedEvent(self, obj, evt):
        """Called when the CursorChangedEvent fires on the render window."""
        # This indirection is needed because when the event fires the current
        # cursor is not yet set, so we defer the cursor update to the next
        # event-loop pass, by which time the cursor should have been set.
        QTimer.singleShot(0, self.ShowCursor)
    def HideCursor(self):
        """Hide the mouse cursor by switching to Qt's blank cursor."""
        self.setCursor(Qt.BlankCursor)
def ShowCursor(self):
"""Shows the cursor."""
vtk_cursor = self._Iren.GetRenderWindow().GetCurrentCursor()
qt_cursor = self._CURSOR_MAP.get(vtk_cursor, Qt.ArrowCursor)
self.setCursor(qt_cursor)
    def closeEvent(self, evt):
        """Release the VTK render window's resources when the widget closes."""
        self.Finalize()
    def sizeHint(self):
        """Suggest a default widget size of 400x400 pixels to Qt layouts."""
        return QSize(400, 400)
    def paintEngine(self):
        """Return None so Qt supplies no paint engine; the surface is drawn
        by VTK instead (see WA_PaintOnScreen in __init__)."""
        return None
    def paintEvent(self, ev):
        """Let VTK render the scene whenever Qt requests a repaint."""
        self._Iren.Render()
    def resizeEvent(self, ev):
        """Keep the VTK render window and interactor sized to the widget."""
        w = self.width()
        h = self.height()
        # Calls the base vtkRenderWindow.SetSize unbound -- presumably to
        # bypass any override on the concrete render-window subclass; verify.
        vtk.vtkRenderWindow.SetSize(self._RenderWindow, w, h)
        self._Iren.SetSize(w, h)
        self._Iren.ConfigureEvent()
        self.update()
def _GetCtrlShift(self, ev):
ctrl = shift = False
if hasattr(ev, 'modifiers'):
if ev.modifiers() & Qt.ShiftModifier:
shift = True
if ev.modifiers() & Qt.ControlModifier:
ctrl = True
else:
if self.__saveModifiers & Qt.ShiftModifier:
shift = True
if self.__saveModifiers & Qt.ControlModifier:
ctrl = True
return ctrl, shift
    def enterEvent(self, ev):
        """Forward pointer entry to VTK, using the last-saved position
        because Qt enter events carry no coordinates here."""
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.EnterEvent()
    def leaveEvent(self, ev):
        """Forward pointer exit to VTK, using the last-saved position."""
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.LeaveEvent()
def mousePressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
repeat = 0
if ev.type() == QEvent.MouseButtonDblClick:
repeat = 1
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), repeat, None)
self._ActiveButton = ev.button()
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonPressEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonPressEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonPressEvent()
def mouseReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
if self._ActiveButton == Qt.LeftButton:
self._Iren.LeftButtonReleaseEvent()
elif self._ActiveButton == Qt.RightButton:
self._Iren.RightButtonReleaseEvent()
elif self._ActiveButton == Qt.MidButton:
self._Iren.MiddleButtonReleaseEvent()
def mouseMoveEvent(self, ev):
self.__saveModifiers = ev.modifiers()
self.__saveButtons = ev.buttons()
self.__saveX = ev.x()
self.__saveY = ev.y()
ctrl, shift = self._GetCtrlShift(ev)
self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
ctrl, shift, chr(0), 0, None)
self._Iren.MouseMoveEvent()
def keyPressEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = str(ev.text())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyPressEvent()
self._Iren.CharEvent()
def keyReleaseEvent(self, ev):
ctrl, shift = self._GetCtrlShift(ev)
if ev.key() < 256:
key = chr(ev.key())
else:
key = chr(0)
self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
ctrl, shift, key, 0, None)
self._Iren.KeyReleaseEvent()
def wheelEvent(self, ev):
if ev.delta() >= 0:
self._Iren.MouseWheelForwardEvent()
else:
self._Iren.MouseWheelBackwardEvent()
    def GetRenderWindow(self):
        """Return the underlying vtkRenderWindow."""
        return self._RenderWindow
    def Render(self):
        """Schedule a repaint; the actual VTK render happens in paintEvent."""
        self.update()
def QVTKRenderWidgetConeExample():
    """A simple example that uses the QVTKRenderWindowInteractor class."""
    # Qt requires exactly one QApplication instance per process.
    app = QApplication(['QVTKRenderWindowInteractor'])
    # Create and start the interactor widget.
    interactor = QVTKRenderWindowInteractor()
    interactor.Initialize()
    interactor.Start()
    # if you dont want the 'q' key to exit comment this.
    interactor.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())
    # Build a trivial scene: one cone actor in one renderer.
    renderer = vtk.vtkRenderer()
    interactor.GetRenderWindow().AddRenderer(renderer)
    source = vtk.vtkConeSource()
    source.SetResolution(8)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    renderer.AddActor(actor)
    # show the widget and start event processing
    interactor.show()
    app.exec_()
# Run the cone example when executed as a script.
if __name__ == "__main__":
    QVTKRenderWidgetConeExample()
| |
#!/usr/bin/env python
"""
cts_lattice_grain_silo.py:
Continuous-time stochastic CA lattice grain model configured as a silo.
GT Oct 2014
"""
_DEBUG = False
import time
import random
import numpy
from landlab import HexModelGrid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.oriented_hex_cts import OrientedHexCTS
from pylab import savefig
def setup_transition_list(g=0.0, f=0.0, d=0.0, w=0.0):
    """
    Creates and returns a list of Transition() objects to represent state
    transitions for simple granular mechanics model.
    Parameters
    ----------
    g : float, optional
        Rate for the gravity transition rules.
    f : float, optional
        Probability of a frictional (dissipative) collision; elastic
        collisions then occur with probability 1 - f.
    d : float, optional
        Unused here; kept for interface compatibility.
    w : float, optional
        Unused here; kept for interface compatibility.
    Returns
    -------
    xn_list : list of Transition objects
        List of objects that encode information about the link-state transitions.
    Notes
    -----
    The transitions for this version of lattice gas have 11 pair-transition
    rules. The shorthand for the states is as follows:
        AR = air/empty
        IN = incoming particle (moving toward its neighbor)
        OU = outgoing particle (moving away from its neighbor)
        IO = incoming at an oblique angle
        OO = outgoing at an oblique angle
        RE = rest particle
        WA = wall particle
        op = oblique pair moving in opposite perpendicular direction
        sm = oblique pair moving in same perpendicular direction
    The 11 pairs with transitions are:
        1. AR-IN => IN-AR (move to empty particle)
        2. IN-IN => OO-OO-op (1/3 each dir), OU-OU (1/3) (head-on collision)
        3. IN-IO => OO-OU (oblique collision)
        4. IN-OO => IO-OU (oblique collision from behind)
        5. IN-OU => IO-OO (1/4 each of 2 directions) (collision from behind)
        6. IN-RE => RE-OU (1/3) RE-OO (1/3 each dir) (collision with rest)
        7. IN-WA => OU-WA (1/3) OO-WA (1/3 each dir) (wall collision)
        8. IO-IO-op => OO-OO-op (1/2 each dir) (glancing collision)
        9. IO-IO-sm => OO-OO-sm (30-degree collision)
        10. IO-RE => RE-OU (oblique collision with rest particle)
        11. IO-WA => OO-WA (oblique collision with wall)
    """
    # Numeric state codes used in the tuples below: 0 empty, 1-6 moving
    # (up, right-up, right-down, down, left-down, left-up), 7 rest, 8 wall.
    # NOTE(review): inferred from ns_dict in main() -- verify.
    xn_list = []
    p_elast = 1.0-f # probability of elastic (non-dissipative) collision
    # Rule 1: Transitions for particle movement into an empty cell
    xn_list.append( Transition((1,0,0), (0,1,0), 1., 'motion') )
    xn_list.append( Transition((2,0,1), (0,2,1), 1., 'motion') )
    xn_list.append( Transition((3,0,2), (0,3,2), 1., 'motion') )
    xn_list.append( Transition((0,4,0), (4,0,0), 1., 'motion') )
    xn_list.append( Transition((0,5,1), (5,0,1), 1., 'motion') )
    xn_list.append( Transition((0,6,2), (6,0,2), 1., 'motion') )
    # Rule 2: Transitions for head-on collision: elastic
    xn_list.append( Transition((1,4,0), (4,1,0), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((1,4,0), (3,6,0), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((1,4,0), (5,2,0), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((2,5,1), (5,2,1), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((2,5,1), (4,1,1), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((2,5,1), (6,3,1), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((3,6,2), (6,3,2), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((3,6,2), (1,4,2), p_elast/3, 'head-on collision') )
    xn_list.append( Transition((3,6,2), (5,2,2), p_elast/3, 'head-on collision') )
    # Rule 2: Transitions for head-on collision: frictional dissipation
    xn_list.append( Transition((1,4,0), (7,7,0), f, 'head-on collision') )
    xn_list.append( Transition((2,5,1), (7,7,1), f, 'head-on collision') )
    xn_list.append( Transition((3,6,2), (7,7,2), f, 'head-on collision') )
    # Rule 3: Transitions for oblique collision: elastic
    xn_list.append( Transition((1,3,0), (3,1,0), p_elast, 'oblique collision') )
    xn_list.append( Transition((1,5,0), (5,1,0), p_elast, 'oblique collision') )
    xn_list.append( Transition((2,4,0), (4,2,0), p_elast, 'oblique collision') )
    xn_list.append( Transition((6,4,0), (4,6,0), p_elast, 'oblique collision') )
    xn_list.append( Transition((2,4,1), (4,2,1), p_elast, 'oblique collision') )
    xn_list.append( Transition((2,6,1), (6,2,1), p_elast, 'oblique collision') )
    xn_list.append( Transition((1,5,1), (5,1,1), p_elast, 'oblique collision') )
    xn_list.append( Transition((3,5,1), (5,3,1), p_elast, 'oblique collision') )
    xn_list.append( Transition((3,1,2), (1,3,2), p_elast, 'oblique collision') )
    xn_list.append( Transition((3,5,2), (5,3,2), p_elast, 'oblique collision') )
    xn_list.append( Transition((2,6,2), (6,2,2), p_elast, 'oblique collision') )
    xn_list.append( Transition((4,6,2), (6,4,2), p_elast, 'oblique collision') )
    # Rule 3 frictional
    xn_list.append( Transition((1,3,0), (7,7,0), f, 'oblique collision') )
    xn_list.append( Transition((1,5,0), (7,7,0), f, 'oblique collision') )
    xn_list.append( Transition((2,4,0), (7,7,0), f, 'oblique collision') )
    xn_list.append( Transition((6,4,0), (7,7,0), f, 'oblique collision') )
    xn_list.append( Transition((2,4,1), (7,7,1), f, 'oblique collision') )
    xn_list.append( Transition((2,6,1), (7,7,1), f, 'oblique collision') )
    xn_list.append( Transition((1,5,1), (7,7,1), f, 'oblique collision') )
    xn_list.append( Transition((3,5,1), (7,7,1), f, 'oblique collision') )
    xn_list.append( Transition((3,1,2), (7,7,2), f, 'oblique collision') )
    xn_list.append( Transition((3,5,2), (7,7,2), f, 'oblique collision') )
    xn_list.append( Transition((2,6,2), (7,7,2), f, 'oblique collision') )
    xn_list.append( Transition((4,6,2), (7,7,2), f, 'oblique collision') )
    # Rule 4: Transitions for oblique-from-behind collisions
    xn_list.append( Transition((1,2,0), (2,1,0), p_elast, 'oblique') )
    xn_list.append( Transition((1,6,0), (6,1,0), p_elast, 'oblique') )
    xn_list.append( Transition((3,4,0), (4,3,0), p_elast, 'oblique') )
    xn_list.append( Transition((5,4,0), (4,5,0), p_elast, 'oblique') )
    xn_list.append( Transition((2,1,1), (1,2,1), p_elast, 'oblique') )
    xn_list.append( Transition((2,3,1), (3,2,1), p_elast, 'oblique') )
    xn_list.append( Transition((4,5,1), (5,4,1), p_elast, 'oblique') )
    xn_list.append( Transition((6,5,1), (5,6,1), p_elast, 'oblique') )
    xn_list.append( Transition((3,2,2), (2,3,2), p_elast, 'oblique') )
    xn_list.append( Transition((3,4,2), (4,3,2), p_elast, 'oblique') )
    xn_list.append( Transition((1,6,2), (6,1,2), p_elast, 'oblique') )
    xn_list.append( Transition((5,6,2), (6,5,2), p_elast, 'oblique') )
    # Rule 4 frictional
    xn_list.append( Transition((1,2,0), (7,1,0), f, 'oblique') )
    xn_list.append( Transition((1,6,0), (7,1,0), f, 'oblique') )
    xn_list.append( Transition((3,4,0), (4,7,0), f, 'oblique') )
    xn_list.append( Transition((5,4,0), (4,7,0), f, 'oblique') )
    xn_list.append( Transition((2,1,1), (7,2,1), f, 'oblique') )
    xn_list.append( Transition((2,3,1), (7,2,1), f, 'oblique') )
    xn_list.append( Transition((4,5,1), (5,7,1), f, 'oblique') )
    xn_list.append( Transition((6,5,1), (5,7,1), f, 'oblique') )
    xn_list.append( Transition((3,2,2), (7,3,2), f, 'oblique') )
    xn_list.append( Transition((3,4,2), (7,3,2), f, 'oblique') )
    xn_list.append( Transition((1,6,2), (6,7,2), f, 'oblique') )
    xn_list.append( Transition((5,6,2), (6,7,2), f, 'oblique') )
    # Rule 5: Transitions for direct-from-behind collisions
    xn_list.append( Transition((1,1,0), (2,6,0), p_elast/4, 'behind') )
    xn_list.append( Transition((1,1,0), (6,2,0), p_elast/4, 'behind') )
    xn_list.append( Transition((4,4,0), (3,5,0), p_elast/4, 'behind') )
    xn_list.append( Transition((4,4,0), (5,3,0), p_elast/4, 'behind') )
    xn_list.append( Transition((2,2,1), (1,3,1), p_elast/4, 'behind') )
    xn_list.append( Transition((2,2,1), (3,1,1), p_elast/4, 'behind') )
    xn_list.append( Transition((5,5,1), (4,6,1), p_elast/4, 'behind') )
    xn_list.append( Transition((5,5,1), (6,4,1), p_elast/4, 'behind') )
    xn_list.append( Transition((3,3,2), (2,4,2), p_elast/4, 'behind') )
    xn_list.append( Transition((3,3,2), (4,2,2), p_elast/4, 'behind') )
    xn_list.append( Transition((6,6,2), (1,5,2), p_elast/4, 'behind') )
    xn_list.append( Transition((6,6,2), (5,1,2), p_elast/4, 'behind') )
    # Rule 5 frictional
    xn_list.append( Transition((1,1,0), (7,1,0), f/4, 'behind') )
    xn_list.append( Transition((4,4,0), (4,7,0), f/4, 'behind') )
    xn_list.append( Transition((2,2,1), (7,2,1), f/4, 'behind') )
    xn_list.append( Transition((5,5,1), (5,7,1), f/4, 'behind') )
    xn_list.append( Transition((3,3,2), (7,3,2), f/4, 'behind') )
    xn_list.append( Transition((6,6,2), (6,7,2), f/4, 'behind') )
    # Rule 6: Transitions for direct collision with stationary (resting) particle
    xn_list.append( Transition((1,7,0), (7,1,0), p_elast/3., 'rest') )
    xn_list.append( Transition((1,7,0), (7,2,0), p_elast/3., 'rest') )
    xn_list.append( Transition((1,7,0), (7,6,0), p_elast/3., 'rest') )
    xn_list.append( Transition((7,4,0), (4,7,0), p_elast/3., 'rest') )
    xn_list.append( Transition((7,4,0), (3,7,0), p_elast/3., 'rest') )
    xn_list.append( Transition((7,4,0), (5,7,0), p_elast/3., 'rest') )
    xn_list.append( Transition((2,7,1), (7,2,1), p_elast/3., 'rest') )
    xn_list.append( Transition((2,7,1), (7,1,1), p_elast/3., 'rest') )
    xn_list.append( Transition((2,7,1), (7,3,1), p_elast/3., 'rest') )
    xn_list.append( Transition((7,5,1), (5,7,1), p_elast/3., 'rest') )
    xn_list.append( Transition((7,5,1), (4,7,1), p_elast/3., 'rest') )
    xn_list.append( Transition((7,5,1), (6,7,1), p_elast/3., 'rest') )
    xn_list.append( Transition((3,7,2), (7,3,2), p_elast/3., 'rest') )
    xn_list.append( Transition((3,7,2), (7,2,2), p_elast/3., 'rest') )
    xn_list.append( Transition((3,7,2), (7,4,2), p_elast/3., 'rest') )
    xn_list.append( Transition((7,6,2), (6,7,2), p_elast/3., 'rest') )
    xn_list.append( Transition((7,6,2), (1,7,2), p_elast/3., 'rest') )
    xn_list.append( Transition((7,6,2), (5,7,2), p_elast/3., 'rest') )
    # Rule 6 frictional
    xn_list.append( Transition((1,7,0), (7,7,0), f, 'rest') )
    xn_list.append( Transition((7,4,0), (7,7,0), f, 'rest') )
    xn_list.append( Transition((2,7,1), (7,7,1), f, 'rest') )
    xn_list.append( Transition((7,5,1), (7,7,1), f, 'rest') )
    xn_list.append( Transition((3,7,2), (7,7,2), f, 'rest') )
    xn_list.append( Transition((7,6,2), (7,7,2), f, 'rest') )
    # Rule 7: Transitions for wall impact
    xn_list.append( Transition((1,8,0), (4,8,0), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((1,8,0), (3,8,0), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((1,8,0), (5,8,0), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((2,8,1), (5,8,1), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((2,8,1), (4,8,1), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((2,8,1), (6,8,1), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((3,8,2), (6,8,2), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((3,8,2), (5,8,2), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((3,8,2), (1,8,2), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,4,0), (8,1,0), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,4,0), (8,6,0), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,4,0), (8,2,0), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,5,1), (8,1,1), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,5,1), (8,2,1), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,5,1), (8,3,1), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,6,2), (8,2,2), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,6,2), (8,3,2), p_elast/3, 'wall rebound') )
    xn_list.append( Transition((8,6,2), (8,4,2), p_elast/3, 'wall rebound') )
    # Rule 7 frictional
    xn_list.append( Transition((1,8,0), (7,8,0), f, 'wall rebound') )
    xn_list.append( Transition((2,8,1), (7,8,1), f, 'wall rebound') )
    xn_list.append( Transition((3,8,2), (7,8,2), f, 'wall rebound') )
    xn_list.append( Transition((8,4,0), (8,7,0), f, 'wall rebound') )
    xn_list.append( Transition((8,5,1), (8,7,1), f, 'wall rebound') )
    xn_list.append( Transition((8,6,2), (8,7,2), f, 'wall rebound') )
    # Rule 8: Transitions for glancing oblique collision
    xn_list.append( Transition((2,5,0), (3,6,0), p_elast, 'glancing') )
    xn_list.append( Transition((6,3,0), (5,2,0), p_elast, 'glancing') )
    xn_list.append( Transition((3,6,1), (4,1,1), p_elast, 'glancing') )
    xn_list.append( Transition((1,4,1), (6,3,1), p_elast, 'glancing') )
    xn_list.append( Transition((4,1,2), (5,2,2), p_elast, 'glancing') )
    xn_list.append( Transition((2,5,2), (1,4,2), p_elast, 'glancing') )
    # Rule 8 frictional
    xn_list.append( Transition((2,5,0), (7,7,0), f, 'glancing') )
    xn_list.append( Transition((6,3,0), (7,7,0), f, 'glancing') )
    xn_list.append( Transition((3,6,1), (7,7,1), f, 'glancing') )
    xn_list.append( Transition((1,4,1), (7,7,1), f, 'glancing') )
    xn_list.append( Transition((4,1,2), (7,7,2), f, 'glancing') )
    xn_list.append( Transition((2,5,2), (7,7,2), f, 'glancing') )
    # Rule 9: Transitions for "near-on" collisions
    xn_list.append( Transition((6,5,0), (5,6,0), p_elast, 'near-on') )
    xn_list.append( Transition((2,3,0), (3,2,0), p_elast, 'near-on') )
    xn_list.append( Transition((1,6,1), (6,1,1), p_elast, 'near-on') )
    xn_list.append( Transition((3,4,1), (4,3,1), p_elast, 'near-on') )
    xn_list.append( Transition((2,1,2), (1,2,2), p_elast, 'near-on') )
    xn_list.append( Transition((4,5,2), (5,4,2), p_elast, 'near-on') )
    # Rule 9 frictional
    xn_list.append( Transition((6,5,0), (7,6,0), f/2, 'near-on') )
    xn_list.append( Transition((6,5,0), (5,7,0), f/2, 'near-on') )
    xn_list.append( Transition((2,3,0), (7,2,0), f/2, 'near-on') )
    xn_list.append( Transition((2,3,0), (3,7,0), f/2, 'near-on') )
    xn_list.append( Transition((1,6,1), (7,1,1), f/2, 'near-on') )
    xn_list.append( Transition((1,6,1), (6,7,1), f/2, 'near-on') )
    xn_list.append( Transition((3,4,1), (7,3,1), f/2, 'near-on') )
    xn_list.append( Transition((3,4,1), (4,7,1), f/2, 'near-on') )
    xn_list.append( Transition((2,1,2), (7,2,2), f/2, 'near-on') )
    xn_list.append( Transition((2,1,2), (1,7,2), f/2, 'near-on') )
    xn_list.append( Transition((4,5,2), (7,4,2), f/2, 'near-on') )
    xn_list.append( Transition((4,5,2), (5,7,2), f/2, 'near-on') )
    # Rule 10: Transitions for oblique collision with rest particle
    xn_list.append( Transition((2,7,0), (7,1,0), p_elast, 'oblique with rest') )
    xn_list.append( Transition((6,7,0), (7,1,0), p_elast, 'oblique with rest') )
    xn_list.append( Transition((7,3,0), (4,7,0), p_elast, 'oblique with rest') )
    xn_list.append( Transition((7,5,0), (4,7,0), p_elast, 'oblique with rest') )
    xn_list.append( Transition((3,7,1), (7,2,1), p_elast, 'oblique with rest') )
    xn_list.append( Transition((1,7,1), (7,2,1), p_elast, 'oblique with rest') )
    xn_list.append( Transition((7,6,1), (5,7,1), p_elast, 'oblique with rest') )
    xn_list.append( Transition((7,4,1), (5,7,1), p_elast, 'oblique with rest') )
    xn_list.append( Transition((4,7,2), (7,3,2), p_elast, 'oblique with rest') )
    xn_list.append( Transition((2,7,2), (7,3,2), p_elast, 'oblique with rest') )
    xn_list.append( Transition((7,5,2), (6,7,2), p_elast, 'oblique with rest') )
    xn_list.append( Transition((7,1,2), (6,7,2), p_elast, 'oblique with rest') )
    # Rule 10 frictional
    xn_list.append( Transition((2,7,0), (7,7,0), f, 'oblique with rest') )
    xn_list.append( Transition((6,7,0), (7,7,0), f, 'oblique with rest') )
    xn_list.append( Transition((7,3,0), (7,7,0), f, 'oblique with rest') )
    xn_list.append( Transition((7,5,0), (7,7,0), f, 'oblique with rest') )
    xn_list.append( Transition((3,7,1), (7,7,1), f, 'oblique with rest') )
    xn_list.append( Transition((1,7,1), (7,7,1), f, 'oblique with rest') )
    xn_list.append( Transition((7,6,1), (7,7,1), f, 'oblique with rest') )
    xn_list.append( Transition((7,4,1), (7,7,1), f, 'oblique with rest') )
    xn_list.append( Transition((4,7,2), (7,7,2), f, 'oblique with rest') )
    xn_list.append( Transition((2,7,2), (7,7,2), f, 'oblique with rest') )
    xn_list.append( Transition((7,5,2), (7,7,2), f, 'oblique with rest') )
    xn_list.append( Transition((7,1,2), (7,7,2), f, 'oblique with rest') )
    # Rule 11: Transitions for oblique collision with wall particle
    xn_list.append( Transition((2,8,0), (3,8,0), p_elast, 'oblique with wall') )
    xn_list.append( Transition((6,8,0), (5,8,0), p_elast, 'oblique with wall') )
    xn_list.append( Transition((8,3,0), (8,2,0), p_elast, 'oblique with wall') )
    xn_list.append( Transition((8,5,0), (8,6,0), p_elast, 'oblique with wall') )
    xn_list.append( Transition((1,8,1), (6,8,1), p_elast, 'oblique with wall') )
    xn_list.append( Transition((3,8,1), (4,8,1), p_elast, 'oblique with wall') )
    xn_list.append( Transition((8,4,1), (8,3,1), p_elast, 'oblique with wall') )
    xn_list.append( Transition((8,6,1), (8,1,1), p_elast, 'oblique with wall') )
    xn_list.append( Transition((4,8,2), (5,8,2), p_elast, 'oblique with wall') )
    xn_list.append( Transition((2,8,2), (1,8,2), p_elast, 'oblique with wall') )
    xn_list.append( Transition((8,1,2), (8,2,2), p_elast, 'oblique with wall') )
    xn_list.append( Transition((8,5,2), (8,4,2), p_elast, 'oblique with wall') )
    # Rule 11 frictional
    xn_list.append( Transition((2,8,0), (7,8,0), f, 'oblique with wall') )
    xn_list.append( Transition((6,8,0), (7,8,0), f, 'oblique with wall') )
    xn_list.append( Transition((8,3,0), (8,7,0), f, 'oblique with wall') )
    xn_list.append( Transition((8,5,0), (8,7,0), f, 'oblique with wall') )
    xn_list.append( Transition((1,8,1), (7,8,1), f, 'oblique with wall') )
    xn_list.append( Transition((3,8,1), (7,8,1), f, 'oblique with wall') )
    xn_list.append( Transition((8,4,1), (8,7,1), f, 'oblique with wall') )
    xn_list.append( Transition((8,6,1), (8,7,1), f, 'oblique with wall') )
    xn_list.append( Transition((4,8,2), (7,8,2), f, 'oblique with wall') )
    xn_list.append( Transition((2,8,2), (7,8,2), f, 'oblique with wall') )
    xn_list.append( Transition((8,1,2), (8,7,2), f, 'oblique with wall') )
    xn_list.append( Transition((8,5,2), (8,7,2), f, 'oblique with wall') )
    # Gravity rule 1: rising particles become rest particles
    xn_list.append( Transition((0,1,0), (0,7,0), g, 'gravity 1') )
    xn_list.append( Transition((1,1,0), (1,7,0), g, 'gravity 1') )
    xn_list.append( Transition((2,1,0), (2,7,0), g, 'gravity 1') )
    xn_list.append( Transition((3,1,0), (3,7,0), g, 'gravity 1') )
    xn_list.append( Transition((4,1,0), (4,7,0), g, 'gravity 1') )
    xn_list.append( Transition((5,1,0), (5,7,0), g, 'gravity 1') )
    xn_list.append( Transition((6,1,0), (6,7,0), g, 'gravity 1') )
    xn_list.append( Transition((7,1,0), (7,7,0), g, 'gravity 1') )
    xn_list.append( Transition((8,1,0), (8,7,0), g, 'gravity 1') )
    # Gravity rule 2: resting particles become falling particles (if not above
    # rest or wall?)
    xn_list.append( Transition((0,7,0), (0,4,0), g, 'gravity 2') )
    xn_list.append( Transition((1,7,0), (1,4,0), g, 'gravity 2') )
    xn_list.append( Transition((2,7,0), (2,4,0), g, 'gravity 2') )
    xn_list.append( Transition((3,7,0), (3,4,0), g, 'gravity 2') )
    xn_list.append( Transition((4,7,0), (4,4,0), g, 'gravity 2') )
    xn_list.append( Transition((5,7,0), (5,4,0), g, 'gravity 2') )
    xn_list.append( Transition((6,7,0), (6,4,0), g, 'gravity 2') )
    # Gravity rule 3: up/sideways particles become down/sideways particles
    xn_list.append( Transition((0,2,0), (0,3,0), g, 'gravity 3') )
    xn_list.append( Transition((1,2,0), (1,3,0), g, 'gravity 3') )
    xn_list.append( Transition((2,2,0), (2,3,0), g, 'gravity 3') )
    xn_list.append( Transition((3,2,0), (3,3,0), g, 'gravity 3') )
    xn_list.append( Transition((4,2,0), (4,3,0), g, 'gravity 3') )
    xn_list.append( Transition((5,2,0), (5,3,0), g, 'gravity 3') )
    xn_list.append( Transition((6,2,0), (6,3,0), g, 'gravity 3') )
    xn_list.append( Transition((7,2,0), (7,3,0), g, 'gravity 3') )
    xn_list.append( Transition((8,2,0), (8,3,0), g, 'gravity 3') )
    xn_list.append( Transition((0,6,0), (0,5,0), g, 'gravity 3') )
    xn_list.append( Transition((1,6,0), (1,5,0), g, 'gravity 3') )
    xn_list.append( Transition((2,6,0), (2,5,0), g, 'gravity 3') )
    xn_list.append( Transition((3,6,0), (3,5,0), g, 'gravity 3') )
    xn_list.append( Transition((4,6,0), (4,5,0), g, 'gravity 3') )
    xn_list.append( Transition((5,6,0), (5,5,0), g, 'gravity 3') )
    xn_list.append( Transition((6,6,0), (6,5,0), g, 'gravity 3') )
    xn_list.append( Transition((7,6,0), (7,5,0), g, 'gravity 3') )
    xn_list.append( Transition((8,6,0), (8,5,0), g, 'gravity 3') )
    # Gravity rule 4: down/side to straight down
    xn_list.append( Transition((0,3,0), (0,4,0), g, 'gravity 4') )
    xn_list.append( Transition((1,3,0), (1,4,0), g, 'gravity 4') )
    xn_list.append( Transition((2,3,0), (2,4,0), g, 'gravity 4') )
    xn_list.append( Transition((3,3,0), (3,4,0), g, 'gravity 4') )
    xn_list.append( Transition((4,3,0), (4,4,0), g, 'gravity 4') )
    xn_list.append( Transition((5,3,0), (5,4,0), g, 'gravity 4') )
    xn_list.append( Transition((6,3,0), (6,4,0), g, 'gravity 4') )
    xn_list.append( Transition((7,3,0), (7,4,0), g, 'gravity 4') )
    xn_list.append( Transition((8,3,0), (8,4,0), g, 'gravity 4') )
    xn_list.append( Transition((0,5,0), (0,4,0), g, 'gravity 4') )
    xn_list.append( Transition((1,5,0), (1,4,0), g, 'gravity 4') )
    xn_list.append( Transition((2,5,0), (2,4,0), g, 'gravity 4') )
    xn_list.append( Transition((3,5,0), (3,4,0), g, 'gravity 4') )
    xn_list.append( Transition((4,5,0), (4,4,0), g, 'gravity 4') )
    xn_list.append( Transition((5,5,0), (5,4,0), g, 'gravity 4') )
    xn_list.append( Transition((6,5,0), (6,4,0), g, 'gravity 4') )
    xn_list.append( Transition((7,5,0), (7,4,0), g, 'gravity 4') )
    xn_list.append( Transition((8,5,0), (8,4,0), g, 'gravity 4') )
    # Uncertain gravity rule!  (flagged by the original author)
    xn_list.append( Transition((7,0,2), (3,0,2), g/2.0, 'gravity??') )
    xn_list.append( Transition((0,7,1), (0,5,1), g/2.0, 'gravity??') )
    # NOTE(review): Python 2 print statements below; this module predates
    # Python 3.
    if _DEBUG:
        print
        print 'setup_transition_list(): list has',len(xn_list),'transitions:'
        for t in xn_list:
            print ' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name
    return xn_list
def main():
    """Run the lattice-grain silo model: a closed-silo filling phase
    followed by an open-silo draining phase, saving a PNG after each."""
    # INITIALIZE
    # User-defined parameters
    nr = 41                      # number of node rows
    nc = 61                      # number of node columns
    g = 1.0                      # gravity transition rate
    f = 0.7                      # friction (dissipation) probability
    silo_y0 = 30.0               # y-intercept of the sloping silo base
    silo_opening_half_width = 6
    plot_interval = 10.0
    run_duration = 240.0
    report_interval = 300.0  # report interval, in real-time seconds
    p_init = 0.4  # probability that a cell is occupied at start
    plot_every_transition = False
    # Remember the clock time, and calculate when we next want to report
    # progress.
    current_real_time = time.time()
    next_report = current_real_time + report_interval
    # Create a grid
    hmg = HexModelGrid(nr, nc, 1.0, orientation='vertical', shape='rect', reorient_links=True)
    # Set up the states and pair transitions.
    # Transition data here represent particles moving on a lattice: one state
    # per direction (for 6 directions), plus an empty state, a stationary
    # state, and a wall state.
    ns_dict = { 0 : 'empty',
                1 : 'moving up',
                2 : 'moving right and up',
                3 : 'moving right and down',
                4 : 'moving down',
                5 : 'moving left and down',
                6 : 'moving left and up',
                7 : 'rest',
                8 : 'wall'}
    xn_list = setup_transition_list(g, f)
    # Create data and initialize values.
    node_state_grid = hmg.add_zeros('node', 'node_state_grid')
    # Make the grid boundary all wall particles
    node_state_grid[hmg.boundary_nodes] = 8
    # Place wall particles to form the base of the silo, initially closed.
    # The base is the upper envelope of two 30-degree ramps.
    tan30deg = numpy.tan(numpy.pi/6.)
    rampy1 = silo_y0-hmg.node_x*tan30deg
    rampy2 = silo_y0-((nc*0.866-1.)-hmg.node_x)*tan30deg
    rampy = numpy.maximum(rampy1, rampy2)
    (ramp_nodes, ) = numpy.where(numpy.logical_and(hmg.node_y>rampy-0.5, \
                                                   hmg.node_y<rampy+0.5))
    node_state_grid[ramp_nodes] = 8
    # Seed the grid interior with randomly oriented particles
    for i in hmg.core_nodes:
        if hmg.node_y[i]>rampy[i] and random.random()<p_init:
            node_state_grid[i] = random.randint(1, 7)
    # Create the CA model
    ca = OrientedHexCTS(hmg, ns_dict, xn_list, node_state_grid)
    import matplotlib
    # Greyscale palette: sky (empty), mobile grains, settled grains, rock.
    rock = (0.0, 0.0, 0.0) #'#5F594D'
    sed = (0.6, 0.6, 0.6) #'#A4874B'
    #sky = '#CBD5E1'
    #sky = '#85A5CC'
    sky = (1.0, 1.0, 1.0) #'#D0E4F2'
    mob = (0.3, 0.3, 0.3) #'#D98859'
    #mob = '#DB764F'
    #mob = '#FFFF00'
    #sed = '#CAAE98'
    #clist = [(0.5, 0.9, 0.9),mob, mob, mob, mob, mob, mob,'#CD6839',(0.3,0.3,0.3)]
    clist = [sky,mob, mob, mob, mob, mob, mob,sed,rock]
    my_cmap = matplotlib.colors.ListedColormap(clist)
    # Create a CAPlotter object for handling screen display
    ca_plotter = CAPlotter(ca, cmap=my_cmap)
    k=0  # counter used to number the saved PNG frames
    # Plot the initial grid
    ca_plotter.update_plot()
    # RUN
    # Run with closed silo
    current_time = 0.0
    while current_time < run_duration:
        # Once in a while, print out simulation and real time to let the user
        # know that the sim is running ok
        current_real_time = time.time()
        if current_real_time >= next_report:
            print 'Current sim time',current_time,'(',100*current_time/run_duration,'%)'
            next_report = current_real_time + report_interval
        # Run the model forward in time until the next output step
        ca.run(current_time+plot_interval, ca.node_state,
               plot_each_transition=plot_every_transition, plotter=ca_plotter)
        current_time += plot_interval
        # Plot the current grid
        ca_plotter.update_plot()
    # Open the silo: clear wall particles in a band around the centerline
    xmid = nc*0.866*0.5
    for i in range(hmg.number_of_nodes):
        if node_state_grid[i]==8 and hmg.node_x[i]>(xmid-silo_opening_half_width) \
                and hmg.node_x[i]<(xmid+silo_opening_half_width) \
                and hmg.node_y[i]>0 and hmg.node_y[i]<38.0:
            node_state_grid[i]=0
    # Create the CA model
    ca = OrientedHexCTS(hmg, ns_dict, xn_list, node_state_grid)
    # Create a CAPlotter object for handling screen display
    # NOTE(review): this CAPlotter is built without the custom cmap used
    # above -- presumably unintentional; verify.
    ca_plotter = CAPlotter(ca)
    # Plot the initial grid
    ca_plotter.update_plot()
    # Re-run with open silo
    savefig('silo'+str(k)+'.png')
    k+=1
    current_time = 0.0
    while current_time < 5*run_duration:
        # Once in a while, print out simulation and real time to let the user
        # know that the sim is running ok
        current_real_time = time.time()
        if current_real_time >= next_report:
            print 'Current sim time',current_time,'(',100*current_time/run_duration,'%)'
            next_report = current_real_time + report_interval
        # Run the model forward in time until the next output step
        ca.run(current_time+plot_interval, ca.node_state,
               plot_each_transition=plot_every_transition, plotter=ca_plotter)
        current_time += plot_interval
        # Plot the current grid
        ca_plotter.update_plot()
        savefig('silo'+str(k)+'.png')
        k+=1
    # FINALIZE
    # Plot
    ca_plotter.finalize()
# Run the silo simulation when executed as a script.
if __name__=='__main__':
    main()
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.webrisk_v1.types import webrisk
from .base import WebRiskServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import WebRiskServiceGrpcTransport
class WebRiskServiceGrpcAsyncIOTransport(WebRiskServiceTransport):
    """gRPC AsyncIO backend transport for WebRiskService.
    Web Risk API defines an interface to detect malicious URLs on
    your website and in client applications.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Channel created lazily in __init__ unless one is passed in; _stubs
    # caches one stub callable per RPC method (class-level default is
    # shadowed by a per-instance dict in __init__).
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "webrisk.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "webrisk.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[aio.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel supplied: work out which SSL credentials (if any)
            # should be used when we create one below.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def compute_threat_list_diff(
        self,
    ) -> Callable[
        [webrisk.ComputeThreatListDiffRequest],
        Awaitable[webrisk.ComputeThreatListDiffResponse],
    ]:
        r"""Return a callable for the compute threat list diff method over gRPC.
        Gets the most recent threat list diffs. These diffs
        should be applied to a local database of hashes to keep
        it up-to-date. If the local database is empty or
        excessively out-of-date, a complete snapshot of the
        database will be returned. This Method only updates a
        single ThreatList at a time. To update multiple
        ThreatList databases, this method needs to be called
        once for each list.
        Returns:
            Callable[[~.ComputeThreatListDiffRequest],
                    Awaitable[~.ComputeThreatListDiffResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "compute_threat_list_diff" not in self._stubs:
            self._stubs["compute_threat_list_diff"] = self.grpc_channel.unary_unary(
                "/google.cloud.webrisk.v1.WebRiskService/ComputeThreatListDiff",
                request_serializer=webrisk.ComputeThreatListDiffRequest.serialize,
                response_deserializer=webrisk.ComputeThreatListDiffResponse.deserialize,
            )
        return self._stubs["compute_threat_list_diff"]
    @property
    def search_uris(
        self,
    ) -> Callable[[webrisk.SearchUrisRequest], Awaitable[webrisk.SearchUrisResponse]]:
        r"""Return a callable for the search uris method over gRPC.
        This method is used to check whether a URI is on a
        given threatList. Multiple threatLists may be searched
        in a single query. The response will list all requested
        threatLists the URI was found to match. If the URI is
        not found on any of the requested ThreatList an empty
        response will be returned.
        Returns:
            Callable[[~.SearchUrisRequest],
                    Awaitable[~.SearchUrisResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "search_uris" not in self._stubs:
            self._stubs["search_uris"] = self.grpc_channel.unary_unary(
                "/google.cloud.webrisk.v1.WebRiskService/SearchUris",
                request_serializer=webrisk.SearchUrisRequest.serialize,
                response_deserializer=webrisk.SearchUrisResponse.deserialize,
            )
        return self._stubs["search_uris"]
    @property
    def search_hashes(
        self,
    ) -> Callable[
        [webrisk.SearchHashesRequest], Awaitable[webrisk.SearchHashesResponse]
    ]:
        r"""Return a callable for the search hashes method over gRPC.
        Gets the full hashes that match the requested hash
        prefix. This is used after a hash prefix is looked up in
        a threatList and there is a match. The client side
        threatList only holds partial hashes so the client must
        query this method to determine if there is a full hash
        match of a threat.
        Returns:
            Callable[[~.SearchHashesRequest],
                    Awaitable[~.SearchHashesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "search_hashes" not in self._stubs:
            self._stubs["search_hashes"] = self.grpc_channel.unary_unary(
                "/google.cloud.webrisk.v1.WebRiskService/SearchHashes",
                request_serializer=webrisk.SearchHashesRequest.serialize,
                response_deserializer=webrisk.SearchHashesResponse.deserialize,
            )
        return self._stubs["search_hashes"]
    @property
    def create_submission(
        self,
    ) -> Callable[[webrisk.CreateSubmissionRequest], Awaitable[webrisk.Submission]]:
        r"""Return a callable for the create submission method over gRPC.
        Creates a Submission of a URI suspected of containing phishing
        content to be reviewed. If the result verifies the existence of
        malicious phishing content, the site will be added to the
        `Google's Social Engineering
        lists <https://support.google.com/webmasters/answer/6350487/>`__
        in order to protect users that could get exposed to this threat
        in the future. Only projects with CREATE_SUBMISSION_USERS
        visibility can use this method.
        Returns:
            Callable[[~.CreateSubmissionRequest],
                    Awaitable[~.Submission]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_submission" not in self._stubs:
            self._stubs["create_submission"] = self.grpc_channel.unary_unary(
                "/google.cloud.webrisk.v1.WebRiskService/CreateSubmission",
                request_serializer=webrisk.CreateSubmissionRequest.serialize,
                response_deserializer=webrisk.Submission.deserialize,
            )
        return self._stubs["create_submission"]
    def close(self):
        """Close the underlying gRPC channel.
        Returns the value of ``aio.Channel.close()`` -- presumably an
        awaitable on the AsyncIO channel; callers should await it (TODO
        confirm against the grpc.aio API).
        """
        return self.grpc_channel.close()
| |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the BSD License.
"""The way that Flask is designed, we have to read our configuration and
initialize many things on module import, which is a bit lame. There are
probably ways to work around that but things work well enough as is.
"""
import logging
import sys
from pkg_resources import get_distribution, DistributionNotFound, parse_version
# Pull the package version from the installed distribution's metadata; the
# version is recorded under the hera_librarian package name.
try:
    __version__ = get_distribution("hera_librarian").version
except DistributionNotFound:
    # Not installed (e.g. running straight from a source tree): leave
    # __version__ unset.
    pass
# Map config-file log-level names onto the stdlib logging constants.
_log_level_names = {
    level: getattr(logging, level.upper())
    for level in ('debug', 'info', 'warning', 'error')
}
def _initialize():
    """Load the server configuration and build the core Flask objects.

    Reads the JSON configuration file named by the ``LIBRARIAN_CONFIG_PATH``
    environment variable, configures root logging (UTC, ISO-8601-ish
    timestamps), and constructs the Flask application and its SQLAlchemy
    database handle.

    Returns
    -------
    logger : logging.Logger
        The ``librarian`` logger.
    app : flask.Flask
        The configured Flask application.
    db : flask_sqlalchemy.SQLAlchemy
        The database wrapper bound to *app*.

    Raises
    ------
    ValueError
        If ``LIBRARIAN_CONFIG_PATH`` is unset or names a missing file.
    SystemExit
        If the configuration lacks a ``SECRET_KEY`` item.
    """
    import json
    import os
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy
    if "LIBRARIAN_CONFIG_PATH" not in os.environ:
        raise ValueError(
            "The `LIBRARIAN_CONFIG_PATH` environment variable must be set "
            "before starting the librarian server. Run `export "
            "LIBRARIAN_CONFIG_PATH=/path/to/config.json` and try again."
        )
    config_path = os.environ["LIBRARIAN_CONFIG_PATH"]
    try:
        with open(config_path) as f:
            config = json.load(f)
    except FileNotFoundError as e:
        # Chain the original exception so tracebacks show the root cause.
        raise ValueError(f"Librarian configuration file {config_path} not found.") from e
    if 'SECRET_KEY' not in config:
        # Fix: the old message pointed at a hard-coded "server-config.json",
        # but the config actually comes from $LIBRARIAN_CONFIG_PATH.
        print('cannot start server: must define the Flask "secret key" as the item '
              f'"SECRET_KEY" in "{config_path}"', file=sys.stderr)
        sys.exit(1)
    # TODO: configurable logging parameters will likely be helpful. We use UTC
    # for timestamps using standard ISO-8601 formatting. The Python docs claim
    # that 8601 is the default format but this does not appear to be true.
    loglevel_cfg = config.get('log_level', 'info')
    loglevel = _log_level_names.get(loglevel_cfg)
    warn_loglevel = (loglevel is None)
    if warn_loglevel:
        loglevel = logging.INFO  # sane default; warn once the logger exists
    logging.basicConfig(
        level=loglevel,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%Y-%m-%dT%H:%M:%SZ'
    )
    import time
    # Force UTC timestamps on the root handler installed by basicConfig.
    logging.getLogger('').handlers[0].formatter.converter = time.gmtime
    logger = logging.getLogger('librarian')
    if warn_loglevel:
        # Fix: Logger.warn() is a deprecated alias for Logger.warning().
        logger.warning('unrecognized value %r for "log_level" config item', loglevel_cfg)
    tf = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
    app = Flask('librarian', template_folder=tf)
    app.config.update(config)
    db = SQLAlchemy(app)
    return logger, app, db
# Module-level singletons created once at import time: the librarian logger,
# the Flask application, and its SQLAlchemy database handle.
logger, app, db = _initialize()
def is_primary_server():
    """Ugh, need to figure out new model to deal with all of this.
    Under the Tornado server, only the process with task id 0 counts as
    primary; any other server type is treated as primary unconditionally.
    """
    server_kind = app.config.get('server', 'flask')
    if server_kind == 'tornado':
        import tornado.process
        return tornado.process.task_id() == 0
    return True
# We have to manually import the modules that implement services. It's not
# crazy to worry about circular dependency issues, but everything will be all
# right.
from . import webutil
from . import observation
from . import store
from . import file
from . import bgtasks
from . import search
from . import misc
# Finally ...
def get_version_info():
    """
    Extract version info from version tag.
    We're using setuptools_scm, so the git information is in the version tag.
    The one exception is when we're running from a tagged release. In that case,
    we get the git hash of the corresponding release from GitHub directly.
    Parameters
    ----------
    None
    Returns
    -------
    tag : str
        The semantic version of the installed librarian server.
    git_hash : str
        The git hash of the installed librarian server (or "???" if it
        could not be determined).
    """
    parsed_version = parse_version(__version__)
    tag = parsed_version.base_version
    local = parsed_version.local
    if local is None:
        # we're running from a "clean" (tagged/released) repo
        # get the git info from GitHub directly
        from subprocess import CalledProcessError, check_output
        gitcmd = [
            "git",
            "ls-remote",
            "https://github.com/HERA-Team/librarian.git",
            f"v{tag}",
        ]
        try:
            output = check_output(gitcmd).decode("utf-8")
            git_hash = output.split()[0]
        except (CalledProcessError, OSError, IndexError):
            # Robustness fix: also handle a missing git executable (OSError)
            # and an empty ls-remote result (IndexError from split()[0]),
            # not just a non-zero git exit status.
            git_hash = "???"
    else:
        # check if version has "dirty" tag
        split_local = local.split(".")
        if len(split_local) > 1:
            # Fix: Logger.warn() is deprecated; also fix the "uncommited" typo.
            logger.warning("running from a codebase with uncommitted changes")
        # get git info from the tag--the hash has a leading "g" we ignore
        git_hash = split_local[0][1:]
    return tag, git_hash
def commandline(argv):
    """Start the Librarian server and block until it shuts down.
    ``argv`` is accepted for command-line compatibility but is not read;
    all settings come from the Flask app configuration.  Depending on the
    "server" config item this runs either the Flask development server or
    a (possibly multi-process) Tornado server.
    """
    from . import bgtasks
    # Record version information in the app config for handlers to report.
    version_string, git_hash = get_version_info()
    logger.info('starting up Librarian %s (%s)', version_string, git_hash)
    app.config['_version_string'] = version_string
    app.config['_git_hash'] = git_hash
    server = app.config.get('server', 'flask')
    host = app.config.get('host', None)
    port = app.config.get('port', 21106)
    debug = app.config.get('flask_debug', False)
    n_server_processes = app.config.get('n_server_processes', 1)
    if host is None:
        print('note: no "host" set in configuration; server will not be remotely accessible',
              file=sys.stderr)
    # Register any stores listed in the config that aren't in the DB yet.
    maybe_add_stores()
    if n_server_processes > 1:
        if server != 'tornado':
            print('error: can only use multiple processes with Tornado server', file=sys.stderr)
            sys.exit(1)
    if server == 'tornado':
        # Need to set up HTTP server and fork subprocesses before doing
        # anything with the IOLoop.
        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado import web
        from .webutil import StreamFile
        # Wrap the Flask app; /stream/ URLs are handled natively by Tornado,
        # everything else falls back to Flask.
        flask_app = WSGIContainer(app)
        tornado_app = web.Application([
            (r'/stream/.*', StreamFile),
            (r'.*', web.FallbackHandler, {'fallback': flask_app}),
        ])
        http_server = HTTPServer(tornado_app)
        http_server.bind(port, address=host)
        http_server.start(n_server_processes)
        db.engine.dispose()  # force new connection after potentially forking
    do_mandc = app.config.get('report_to_mandc', False)
    if do_mandc:
        from . import mc_integration
        mc_integration.register_callbacks(version_string, git_hash)
    if server == 'tornado':
        # Set up periodic report on background task status; also reminds us
        # that the server is alive.
        bgtasks.register_background_task_reporter()
        if is_primary_server():
            # Primary server is also in charge of checking out whether there's
            # anything to do with our standing orders.
            from tornado.ioloop import IOLoop
            from . import search
            IOLoop.current().add_callback(search.queue_standing_order_copies)
            search.register_standing_order_checkin()
        # Hack the logger to indicate which server we are.
        import tornado.process
        taskid = tornado.process.task_id()
        if taskid is not None:
            fmtr = logging.getLogger('').handlers[0].formatter
            fmtr._fmt = fmtr._fmt.replace(': ', ' #%d: ' % taskid)
    # Both branches below block until the server exits.
    if server == 'flask':
        print('note: using "flask" server, so background operations will not work',
              file=sys.stderr)
        app.run(host=host, port=port, debug=debug)
    elif server == 'tornado':
        from tornado.ioloop import IOLoop
        IOLoop.current().start()
    else:
        print('error: unknown server type %r' % server, file=sys.stderr)
        sys.exit(1)
    # NOTE(review): this Globus config validation only executes after the
    # blocking server loop above returns (i.e. at shutdown).  It looks like
    # it was meant to run before startup -- confirm intent.
    use_globus = app.config.get("use_globus", False)
    if use_globus:
        have_all_info = True
        # make sure we have the other information that we need
        if "globus_client_id" not in app.config.keys():
            print(
                "error: globus_client_id must be in the config file to use "
                "globus.",
                file=sys.stderr,
            )
            have_all_info = False
        if "globus_transfer_token" not in app.config.keys():
            print(
                "error: globus_transfer_token must be in the config file to use "
                "globus.",
                file=sys.stderr,
            )
            have_all_info = False
        if not have_all_info:
            app.config["use_globus"] = False
    else:
        # add the key just in case it wasn't there
        app.config["use_globus"] = False
    # Let in-flight background tasks drain before the process exits.
    bgtasks.maybe_wait_for_threads_to_finish()
def maybe_add_stores():
    """Add any stores specified in the configuration file that we didn't already
    know about.

    Iterates the ``add-stores`` mapping in the app config; for each named
    store not already present in the database, creates a Store row and
    commits once at the end.  Raises on commit failure (startup-time only).
    """
    from .dbutil import SQLAlchemyError
    from .store import Store
    for name, cfg in app.config.get('add-stores', {}).items():
        prev = Store.query.filter(Store.name == name).first()
        if prev is None:
            store = Store(name, cfg['path_prefix'], cfg['ssh_host'])
            store.http_prefix = cfg.get('http_prefix')
            store.available = cfg.get('available', True)
            db.session.add(store)
    try:
        db.session.commit()
    except SQLAlchemyError:
        # Bug fix: the flask_sqlalchemy ``db`` object has no rollback()
        # method -- rollbacks go through the session.
        db.session.rollback()
        raise  # this only happens on startup, so just refuse to start
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''OpenGL and GLU interface.
This package imports all OpenGL, GLU and registered OpenGL extension
functions. Functions have identical signatures to their C counterparts. For
example::
from pyglet.gl import *
# [...omitted: set up a GL context and framebuffer]
glBegin(GL_QUADS)
glVertex3f(0, 0, 0)
glVertex3f(0.1, 0.2, 0.3)
glVertex3f(0.1, 0.2, 0.3)
glEnd()
OpenGL is documented in full at the `OpenGL Reference Pages`_.
The `OpenGL Programming Guide`_ is a popular reference manual organised by
topic. The free online version documents only OpenGL 1.1. `Later editions`_
cover more recent versions of the API and can be purchased from a book store.
.. _OpenGL Reference Pages: http://www.opengl.org/documentation/red_book/
.. _OpenGL Programming Guide: http://fly.cc.fer.hr/~unreal/theredbook/
.. _Later editions: http://www.opengl.org/documentation/red_book/
The following subpackages are imported into this "mega" package already (and
so are available by importing ``pyglet.gl``):
``pyglet.gl.gl``
OpenGL
``pyglet.gl.glu``
GLU
``pyglet.gl.gl.glext_arb``
ARB registered OpenGL extension functions
``pyglet.gl.gl.glext_missing``
ARB registered OpenGL extension functions not included in the ARB C header
These subpackages are also available, but are not imported into this namespace
by default:
``pyglet.gl.glext_nv``
nVidia OpenGL extension functions
``pyglet.gl.agl``
AGL (Mac OS X OpenGL context functions)
``pyglet.gl.glx``
GLX (Linux OpenGL context functions)
``pyglet.gl.glxext_arb``
ARB registered GLX extension functions
``pyglet.gl.glxext_nv``
nvidia GLX extension functions
``pyglet.gl.wgl``
WGL (Windows OpenGL context functions)
``pyglet.gl.wglext_arb``
ARB registered WGL extension functions
``pyglet.gl.wglext_nv``
nvidia WGL extension functions
The information modules are provided for convenience, and are documented
below.
'''
# Module metadata: docstring markup format and legacy SVN keyword version id.
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 2541 2009-12-31 04:31:11Z benjamin.coder.smith@gmail.com $'
from pyglet.gl.lib import GLException
from pyglet.gl.gl import *
from pyglet.gl.glu import *
from pyglet.gl.glext_arb import *
from pyglet.gl.glext_missing import *
from pyglet.gl import gl_info
import sys as _sys
_is_epydoc = hasattr(_sys, 'is_epydoc') and _sys.is_epydoc
# List of contexts currently in use, so that new contexts can be created
# that share objects with existing ones.  A context must remove itself from
# this list when it is destroyed (see Context.destroy).
_contexts = []
#: The active OpenGL context.
#:
#: You can change the current context by calling `Context.set_current`; do not
#: modify this global.
#:
#: :type: `Context`
#:
#: :since: pyglet 1.1
current_context = None
def get_current_context():
    '''Fetch the context that OpenGL commands are currently directed to.
    To change the active context, call `Context.set_current`.
    :deprecated: Read the module-level `current_context` instead.
    :rtype: `Context`
    :return: the active context, or None when no context is selected.
    '''
    return current_context
class Config(object):
    '''Graphics configuration.
    A GLConfig records the desired OpenGL framebuffer attributes: sizes of
    the colour and depth buffers, double buffering, stenciling, multi- and
    super-sampling, auxiliary buffers, and so on.
    Platforms honour different subsets of these attributes, so each one is
    stored by name with an integer or boolean value.
    See also `pyglet.window.Screen.get_best_config` and
    `pyglet.window.Screen.get_matching_configs`.
    :Ivariables:
        `double_buffer` : bool
            Specify the presence of a back-buffer for every color buffer.
        `stereo` : bool
            Specify the presence of separate left and right buffer sets.
        `buffer_size` : int
            Total bits per sample per color buffer.
        `aux_buffers` : int
            The number of auxiliary color buffers.
        `sample_buffers` : int
            The number of multisample buffers.
        `samples` : int
            The number of samples per pixel, or 0 if there are no multisample
            buffers.
        `red_size` : int
            Bits per sample per buffer devoted to the red component.
        `green_size` : int
            Bits per sample per buffer devoted to the green component.
        `blue_size` : int
            Bits per sample per buffer devoted to the blue component.
        `alpha_size` : int
            Bits per sample per buffer devoted to the alpha component.
        `depth_size` : int
            Bits per sample in the depth buffer.
        `stencil_size` : int
            Bits per sample in the stencil buffer.
        `accum_red_size` : int
            Bits per pixel devoted to the red component in the accumulation
            buffer.
        `accum_green_size` : int
            Bits per pixel devoted to the green component in the accumulation
            buffer.
        `accum_blue_size` : int
            Bits per pixel devoted to the blue component in the accumulation
            buffer.
        `accum_alpha_size` : int
            Bits per pixel devoted to the alpha component in the accumulation
            buffer.
    '''
    _attribute_names = [
        'double_buffer',
        'stereo',
        'buffer_size',
        'aux_buffers',
        'sample_buffers',
        'samples',
        'red_size',
        'green_size',
        'blue_size',
        'alpha_size',
        'depth_size',
        'stencil_size',
        'accum_red_size',
        'accum_green_size',
        'accum_blue_size',
        'accum_alpha_size',
    ]
    def __init__(self, **kwargs):
        '''Build a template config from keyword attributes.
        For example::
            template = Config(double_buffer=True)
        Attributes not given as keywords are set to ``None``.
        '''
        for attr in self._attribute_names:
            # kwargs.get() yields None for any attribute not supplied.
            setattr(self, attr, kwargs.get(attr))
    def get_gl_attributes(self):
        '''List every attribute along with its current value.
        :rtype: list of tuple (name, value)
        :return: all attributes, unset ones carrying a value of ``None``.
        '''
        return [(attr, getattr(self, attr)) for attr in self._attribute_names]
    def create_context(self, share):
        '''Create a GL context satisfying this configuration.
        :Parameters:
            `share` : `Context`
                If not None, a context with which to share objects with.
        :rtype: `Context`
        :return: The new context.
        '''
        # Template configs cannot create contexts directly; a complete
        # config must be obtained from the screen first.
        raise ConfigException(
            'This config is not complete. Use Screen.get_matching_configs')
    def is_complete(self):
        '''Determine if this config is complete and able to create a context.
        Directly-constructed configs are mere templates for querying the
        system for a supported config; only configs returned by e.g.
        `pyglet.window.Screen.get_matching_configs` are complete.
        :rtype: bool
        :return: True if the config is complete and can create a context.
        '''
        return False
    def __repr__(self):
        import pprint
        formatted = pprint.pformat(self.get_gl_attributes())
        return '%s(%s)' % (self.__class__.__name__, formatted)
class ObjectSpace(object):
    '''Holds per-share-group state for contexts that share GL objects.'''
    def __init__(self):
        # Names of textures and buffers queued for deletion the next time a
        # context belonging to this object space becomes active.
        self._doomed_textures, self._doomed_buffers = [], []
class Context(object):
'''OpenGL context for drawing.
Windows in pyglet each have their own GL context. This class boxes
the context in a platform-independent manner. Applications will have
no need to deal with contexts directly.
:Ivariables:
`object_space` : `ObjectSpace`
An object which is shared between all contexts that share
GL objects.
'''
#: Context share behaviour indicating that objects should not be
#: shared with existing contexts.
CONTEXT_SHARE_NONE = None
#: Context share behaviour indicating that objects are shared with
#: the most recently created context (the default).
CONTEXT_SHARE_EXISTING = 1
# Used for error checking, True if currently within a glBegin/End block.
# Ignored if error checking is disabled.
_gl_begin = False
# gl_info.GLInfo instance, filled in on first set_current
_info = None
# List of (attr, check) for each driver/device-specific workaround that is
# implemented. The `attr` attribute on this context is set to the result
# of evaluating `check(gl_info)` the first time this context is used.
_workaround_checks = [
# GDI Generic renderer on Windows does not implement
# GL_UNPACK_ROW_LENGTH correctly.
('_workaround_unpack_row_length',
lambda info: info.get_renderer() == 'GDI Generic'),
# Reportedly segfaults in text_input.py example.
# "ATI Radeon X1600 OpenGL Engine"
# glGenBuffers not exported by
# "ATI Radeon X1270 x86/MMX/3DNow!/SSE2"
('_workaround_vbo',
lambda info: info.get_renderer().startswith('ATI Radeon X')),
# Some ATI cards on OS X start drawing from a VBO before it's written
# to. In these cases pyglet needs to call glFinish() to flush the
# pipeline after updating a buffer but before rendering.
('_workaround_vbo_finish',
lambda info: ('ATI' in info.get_renderer() and
info.have_version(1, 5) and
_sys.platform == 'darwin')),
]
def __init__(self, context_share=None):
self.window = None
_contexts.append(self)
if context_share:
assert context_share in _contexts
self.object_space = context_share.object_space
else:
self.object_space = ObjectSpace()
def __repr__(self):
return '%s()' % self.__class__.__name__
def set_current(self):
global current_context
assert self in _contexts
current_context = self
# Implement workarounds
if not self._info:
self._info = gl_info.GLInfo()
self._info.set_active_context()
for attr, check in self._workaround_checks:
setattr(self, attr, check(self._info))
# Release textures on this context scheduled for deletion
if self.object_space._doomed_textures:
textures = self.object_space._doomed_textures
textures = (GLuint * len(textures))(*textures)
glDeleteTextures(len(textures), textures)
self.object_space._doomed_textures = []
# Release buffers on this context scheduled for deletion
if self.object_space._doomed_buffers:
buffers = self.object_space._doomed_buffers
buffers = (GLuint * len(buffers))(*buffers)
glDeleteBuffers(len(buffers), buffers)
self.object_space._doomed_buffers = []
    def destroy(self):
        '''Release the context.

        The context will not be usable after being destroyed.  Each platform
        has its own convention for releasing the context and the buffer(s)
        that depend on it in the correct order; this should never be called
        by an application.
        '''
        global current_context
        if current_context is self:
            current_context = None
            gl_info.remove_active_context()

            # Switch back to shadow context so that some context stays
            # current (GL calls made during cleanup still need one).
            if _shadow_window is not None:
                _shadow_window.switch_to()

        # Unregister; the assert in set_current() will now reject this context.
        _contexts.remove(self)
def delete_texture(self, texture_id):
'''Safely delete a texture belonging to this context.
Usually, the texture is released immediately using
``glDeleteTextures``, however if another context that does not share
this context's object space is currently active, the deletion will
be deferred until an appropriate context is activated.
:Parameters:
`texture_id` : int
The OpenGL name of the texture to delete.
'''
if self.object_space is current_context.object_space:
id = GLuint(texture_id)
glDeleteTextures(1, id)
else:
self.object_space._doomed_textures.append(texture_id)
    def delete_buffer(self, buffer_id):
        '''Safely delete a buffer object belonging to this context.

        This method behaves similarly to `delete_texture`, though for
        ``glDeleteBuffers`` instead of ``glDeleteTextures``.

        :Parameters:
            `buffer_id` : int
                The OpenGL name of the buffer to delete.

        :since: pyglet 1.1
        '''
        # NOTE(review): the trailing `and False` makes this condition always
        # false, so buffer deletion is *always* deferred to the doomed list
        # (flushed by set_current) and never performed immediately -- unlike
        # delete_texture above.  This looks deliberate (driver workaround?)
        # but is undocumented; confirm intent before "fixing" it.
        if self.object_space is current_context.object_space and False:
            id = GLuint(buffer_id)
            glDeleteBuffers(1, id)
        else:
            self.object_space._doomed_buffers.append(buffer_id)
class ContextException(Exception):
    '''Raised for errors related to an OpenGL context.'''
class ConfigException(Exception):
    '''Raised for errors related to an OpenGL configuration.'''
import pyglet as _pyglet

# Optional texture-memory accounting: when the `debug_texture` option is on,
# the module-level glBindTexture/glTexImage2D/glDeleteTextures entry points
# are wrapped so every texture allocation and release prints a running total
# of estimated texture memory.  (Python 2 `print` statements.)
if _pyglet.options['debug_texture']:
    _debug_texture_total = 0          # estimated bytes currently allocated
    _debug_texture_sizes = {}         # texture name -> estimated size
    _debug_texture = None             # texture name most recently bound

    def _debug_texture_alloc(texture, size):
        # Record `size` bytes against `texture` and print the new total.
        global _debug_texture_total
        _debug_texture_sizes[texture] = size
        _debug_texture_total += size
        print '%d (+%d)' % (_debug_texture_total, size)

    def _debug_texture_dealloc(texture):
        # Forget the recorded size of `texture` and print the new total.
        global _debug_texture_total
        size = _debug_texture_sizes[texture]
        del _debug_texture_sizes[texture]
        _debug_texture_total -= size
        print '%d (-%d)' % (_debug_texture_total, size)

    _glBindTexture = glBindTexture
    def glBindTexture(target, texture):
        # Track which texture is bound so glTexImage2D can charge it.
        global _debug_texture
        _debug_texture = texture
        return _glBindTexture(target, texture)

    _glTexImage2D = glTexImage2D
    def glTexImage2D(target, level, internalformat, width, height, border,
                     format, type, pixels):
        # Re-specifying an existing texture first releases its old size.
        try:
            _debug_texture_dealloc(_debug_texture)
        except KeyError:
            pass
        # Rough bytes-per-texel estimate from the internal format.
        if internalformat in (1, GL_ALPHA, GL_INTENSITY, GL_LUMINANCE):
            depth = 1
        elif internalformat in (2, GL_RGB16, GL_RGBA16):
            depth = 2
        elif internalformat in (3, GL_RGB):
            depth = 3
        else:
            depth = 4 # Pretty crap assumption
        size = (width + 2 * border) * (height + 2 * border) * depth
        _debug_texture_alloc(_debug_texture, size)
        return _glTexImage2D(target, level, internalformat, width, height,
                             border, format, type, pixels)

    _glDeleteTextures = glDeleteTextures
    def glDeleteTextures(n, textures):
        # `textures` is either a single GLuint or a ctypes array of them.
        if not hasattr(textures, '__len__'):
            _debug_texture_dealloc(textures.value)
        else:
            for i in range(n):
                _debug_texture_dealloc(textures[i].value)
        return _glDeleteTextures(n, textures)
def _create_shadow_window():
    '''Create the invisible 1x1 "shadow window" and make it current.

    The shadow window keeps a GL context alive (and current) even when no
    application window exists; destroy() switches back to it.  It is removed
    from ``app.windows`` so it does not keep the event loop running.

    Bug fix: the original reset ``_shadow_window`` to ``None`` right after
    creating it, which dropped the only reference to the window and made the
    ``_shadow_window.switch_to()`` fallback in destroy() unreachable.  The
    module-level reference is now retained.
    '''
    global _shadow_window

    import pyglet
    if not pyglet.options['shadow_window'] or _is_epydoc:
        return

    from pyglet.window import Window
    _shadow_window = Window(width=1, height=1, visible=False)
    _shadow_window.switch_to()

    from pyglet import app
    app.windows.remove(_shadow_window)
# Import pyglet.window now if it isn't currently being imported (this creates
# the shadow window).
if (not _is_epydoc and
    'pyglet.window' not in _sys.modules and
    _pyglet.options['shadow_window']):
    # trickery is for circular import: pyglet.window imports this module, so
    # expose this partially-initialized module as `pyglet.gl` before the
    # import statement below triggers it.
    _pyglet.gl = _sys.modules[__name__]
    import pyglet.window
| |
# Copyright 2017 Balazs Nemeth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides functions to build a large network modeling a carrier topology.
This is the target parameter optimization topology for the mapping algorithm.
The topology is based on WP3 - Service Provider Scenario for Optimization.ppt
by Telecom Italia (UNIFY SVN repo)
Parameter names are also based on the .ppt file.
"""
import logging
import math
import random
import string
import networkx as nx
from generator import NFFG
# Aggregation links (100Gbps) Connecting Distribution nodes to Aggregation Nodes
# NOTE(review): the numeric value is 1000 while the comment says 100Gbps --
# bandwidth units appear to be an internal convention; confirm before changing.
aggr_link = {'bandwidth': 1000, 'delay': 0.2}
log = logging.getLogger("TopoConstruct")
logging.basicConfig(level=logging.WARN,
                    format='%(levelname)s:%(name)s:%(message)s')
# Highest explicit port id handed out so far, keyed by node object
# (used only by add_port when increment_port_ids is requested).
max_portids = {}
def add_port(obj, increment_port_ids=False):
  """
  Add a new port to ``obj`` and return it.

  When ``increment_port_ids`` is set, ports get explicit integer ids counting
  upwards (1, 2, ...) per node object; otherwise the node chooses the id.
  WARNING! this function is not thread safe!!
  """
  global max_portids
  if increment_port_ids:
    next_id = max_portids.get(obj, 0) + 1
    max_portids[obj] = next_id
    return obj.add_port(id=next_id)
  return obj.add_port()
def getGenForName(prefix):
  """Yield the names ``prefix0``, ``prefix1``, ... indefinitely."""
  number = 0
  while True:
    yield prefix + str(number)
    number += 1

# One name generator per prefix, shared by all getName() callers.
prefixes = {}

def getName(prefix):
  """
  Return the next unused name for ``prefix`` ("prefix0", "prefix1", ...).

  Fixed to use the ``next()`` builtin instead of the Python-2-only
  ``generator.next()`` method, so this works on Python 2.6+ and Python 3.
  The pointless ``while True`` wrapper (both branches returned immediately)
  was also removed.
  WARNING! this function is not thread safe!!
  """
  global prefixes
  if prefix not in prefixes:
    prefixes[prefix] = getGenForName(prefix)
  return next(prefixes[prefix])
def addRedundantPairedConnection(nffg, an0, an1, bn0, bn1, p, linkres):
  """
  Connects A-s to B-s and B-s to A-s with undirected links with linkres.
  """
  # Full bipartite wiring between the two A nodes and the two B nodes,
  # in the order (an0,bn0), (an0,bn1), (an1,bn0), (an1,bn1).
  for a_node in (an0, an1):
    for b_node in (bn0, bn1):
      nffg.add_undirected_link(add_port(a_node, p), add_port(b_node, p),
                               **linkres)
def index_gen():
  """Yield pseudo-random integer indices in [0, 100000) forever."""
  while True:
    yield int(math.floor(random.random() * 100000))

def gen_params(l):
  """Yield pseudo-randomly chosen elements of the sequence ``l`` forever."""
  while True:
    idx = next(index_gen()) % len(l)
    yield l[idx]
def addRetailOrBusinessPart(nffg, an0, an1, p, popn, BNAS_PE,
                            Cpb, access_bw, part="R"):
  """
  Retail and Business part inside one PoP is structurally the same.

  :param nffg: NFFG under construction
  :param an0, an1: the PoP's two Aggregation Nodes to attach to
  :param p: forwarded to add_port() as increment_port_ids
  :param popn: PoP name (logging only)
  :param BNAS_PE: number of BNAS ("R") or PE ("B") switches to add
  :param Cpb: number of client SAPs per BNAS/PE switch
  :param access_bw: bandwidth of each client access link
  :param part: "R" for Retail, "B" for Business
  """
  log.debug("Adding %s part for %s..."%(part, popn))
  # add Distribution Nodes (100Gbps switching capacity)
  dnres = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
           'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  # dn0/dn1 remain None when BNAS_PE == 0; safe, because the loop below is
  # then empty and they are never dereferenced.
  dn0 = None
  dn1 = None
  if BNAS_PE > 0:
    dn0 = nffg.add_infra(id=getName("dn"),
                         **dnres)
    dn1 = nffg.add_infra(id=getName("dn"),
                         **dnres)
    # redundantly connect both Distribution Nodes to both Aggregation Nodes
    addRedundantPairedConnection(nffg, an0, an1, dn0, dn1, p, aggr_link)
  # add BNAS or PE (10Gbps switching capacity)
  # and connecting SAPs towards Retail Clients (links with BCT bandwidth)
  for i in range(0, BNAS_PE):
    log.debug("Adding switch %s for %s part..."%(i, part))
    bnas_pe_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
                   'bandwidth': 100, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
    if part == "R":
      bnas_pe = nffg.add_infra(id=getName("bnas"),
                               **bnas_pe_res)
    elif part == "B":
      bnas_pe = nffg.add_infra(id=getName("pe"),
                               **bnas_pe_res)
    else:
      raise Exception("Invalid part identifier given for CarrierTopoBuilder"
                      ".addRetailOrBusinessPart")
    distr_link = {'bandwidth': 100, 'delay': 0.2}
    #add Distribution Links towards Distribution Nodes
    nffg.add_undirected_link(add_port(dn0, p), add_port(bnas_pe, p),
                             **distr_link)
    nffg.add_undirected_link(add_port(dn1, p), add_port(bnas_pe, p),
                             **distr_link)
    # add clients to current BNAS or PE
    log.debug("Connecting %s SAPs to switch %s of %s part."%(Cpb, i, part))
    for j in range(0, Cpb):
      nameid = getName("sap")
      sap = nffg.add_sap(id=nameid, name=nameid)
      access_link = {'bandwidth': access_bw, 'delay': 0.5}
      nffg.add_undirected_link(add_port(bnas_pe, p), add_port(sap, p),
                               **access_link)
def addCassis(nffg, fi0, fi1, p, cluster_id, chassis_id, popn,
              SE, NF_types, SE_cores, SE_mem, SE_storage, CL_bw, CH_links):
  """
  Add one chassis of SE servers, attached to the cluster's two Fabric
  Interconnects (fi0, fi1) through a pair of Fabric Extenders.

  Server resources are drawn pseudo-randomly from the SE_cores / SE_mem /
  SE_storage lists; each server supports a random non-empty subset of
  NF_types.  (Name kept as "addCassis" for compatibility, although
  "chassis" is meant.)
  """
  log.debug("Adding Chassis no.%s with %s Servers for Cluster no.%s of %s."%
            (chassis_id,SE,cluster_id,popn))
  fabricext_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
                   'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  fe0 = nffg.add_infra(id=getName("fe"), **fabricext_res)
  fe1 = nffg.add_infra(id=getName("fe"), **fabricext_res)
  # add links connecting the Fabric Interconnects and Fabric Extenders
  # NOTE(review): Python 2 integer division -- CH_links is expected to be
  # even (half of the uplinks go to each Fabric Extender).
  for i in range(0, CH_links/2):
    nffg.add_undirected_link(add_port(fi0, p), add_port(fe0, p),
                             bandwidth=float(CL_bw)/CH_links, delay=0.2)
    nffg.add_undirected_link(add_port(fi1, p), add_port(fe1, p),
                             bandwidth=float(CL_bw)/CH_links, delay=0.2)
  # add servers and connect them to Fabric Extenders
  for s in range(0, SE):
    server_res = {'cpu': next(gen_params(SE_cores)),
                  'mem': next(gen_params(SE_mem)),
                  'storage': next(gen_params(SE_storage)),
                  'delay': 0.5, 'bandwidth': 1000,
                  'infra_type': NFFG.TYPE_INFRA_EE}
    server = nffg.add_infra(id=getName("host"),
                            **server_res)
    # add supported types: a random non-empty subset of NF_types
    server.add_supported_type(random.sample(NF_types,
                              (next(index_gen()) % len(NF_types)) + 1))
    # connect servers to Fabric Extenders with 10Gbps links
    server_link = {'bandwidth': 100, 'delay': 0.2}
    nffg.add_undirected_link(add_port(server, p), add_port(fe0, p), **server_link)
    nffg.add_undirected_link(add_port(server, p), add_port(fe1, p), **server_link)
def addCloudNFVPart(nffg, an0, an1, p, popn, CL, CH, SE, SAN_bw, SAN_storage,
                    NF_types, SE_cores, SE_mem, SE_storage, CL_bw, CH_links):
  """
  Add the Cloud/NFV part of one PoP: a pair of Distribution Nodes plus CL
  server clusters, each cluster consisting of two Fabric Interconnects, a
  SAN and CH chassis of servers (see addCassis).
  """
  log.debug("Adding Cloud/NFV part for %s."%popn)
  dnres = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
           'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  dn0 = nffg.add_infra(id=getName("dn"),
                       **dnres)
  dn1 = nffg.add_infra(id=getName("dn"),
                       **dnres)
  # redundantly connect the Distribution Nodes to the Aggregation Nodes
  addRedundantPairedConnection(nffg, an0, an1, dn0, dn1, p, aggr_link)
  # add Server Clusters
  for i in range(0, CL):
    log.debug("Adding Cluster no.%s to Could/NFV part of %s"%(i, popn))
    fi_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
              'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
    fabric_interconnect0 = nffg.add_infra(id=getName("fi"), **fi_res)
    fabric_interconnect1 = nffg.add_infra(id=getName("fi"), **fi_res)
    addRedundantPairedConnection(nffg, an0, an1, fabric_interconnect0,
                                 fabric_interconnect1, p, aggr_link)
    # NOTE: SAN can't host any VNFs now!!
    # SAN is an Infra with big storage (internal bw should be big: e.g. 1Tbps)
    san_res = {'cpu': 0, 'mem': 0, 'storage': SAN_storage, 'delay': 0.1,
               'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_EE}
    san = nffg.add_infra(id=getName("san"), **san_res)
    # connect SAN to Fabric Interconnects
    nffg.add_undirected_link(add_port(san, p), add_port(fabric_interconnect0, p),
                             bandwidth=SAN_bw, delay=0.1)
    nffg.add_undirected_link(add_port(san, p), add_port(fabric_interconnect1, p),
                             bandwidth=SAN_bw, delay=0.1)
    # add Chassis
    for j in range(0, CH):
      addCassis(nffg, fabric_interconnect0, fabric_interconnect1, p, i, j, popn,
                SE, NF_types, SE_cores, SE_mem, SE_storage, CL_bw, CH_links)
def addPoP(nffg, popcnt, backbonenode0, backbonenode1, p,
           BNAS, RCpb, RCT,
           PE, BCpb, BCT,
           CL, CH, SE, SAN_bw, SAN_storage, NF_types,
           SE_cores, SE_mem, SE_storage, CL_bw, CH_links):
  """
  Create one PoP which consists of three domains:
  - Cloud/NFV services
  - Retail Edge
  - Business Edge

  Backbone nodes where the Aggregation Nodes should be connected.
  BNAS: number of BNAS nodes (~2-10)
  RCpb: number of Retail Clients per BNAS box (~40k)
  RCT: traffic per Retail Clients (0.1-0.2 Mbps)
  PE: number of PE nodes per PoP (~2-8)
  BCpb: number of business clients oer PE box (~4k)
  BCT: traffic per Business Clients (0.1-0.2 Mbps)
  CL: number of clusters in Cloud/NFV part (?)
  CH: number of Chassis per cluster (~8-40)
  SE: number of Servers per chassis (~8)
  SAN_bw: Cluster bandwith to SAN (160Gbps - 1.6Tbps)
  SAN_storage: storage of one SAN (?)
  NF_types: list of supported NF types on the servers
  SE_cores: list of numbers of cores per server (~8-16)
  SE_mem: list of memory capacities per server (~32000MB - 64000MB)
  SE_storage: list of storage capacities per server (~300GB - 1500GB)
  CL_bw: cluster bandwidth to servers per Chassis (~40Gbps - 160Gbps)
  CH_links: number of uplinks per Chassis (~4-16)
  NOTE: Link bw from Fabric Extender to Fabric Interc. equals
        CL_bw/CH_links (~10Gbps).
  """
  popn = "PoP"+str(popcnt)
  log.debug("Adding PoP %s..."%popcnt)
  # add Aggregation Nodes (1Tbps switching capacity)
  anres = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
           'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  an0 = nffg.add_infra(id=getName("an"), **anres)
  an1 = nffg.add_infra(id=getName("an"), **anres)
  # add uplinks to the Backbone (one per Aggregation Node)
  nffg.add_undirected_link(add_port(an0, p), add_port(backbonenode0, p),
                           bandwidth=1000, delay=0.1)
  nffg.add_undirected_link(add_port(an1, p), add_port(backbonenode1, p),
                           bandwidth=1000, delay=0.1)
  # Retail edge (default part="R"), then Business edge (part="B")
  addRetailOrBusinessPart(nffg, an0, an1, p, popn, BNAS, RCpb, RCT)
  addRetailOrBusinessPart(nffg, an0, an1, p, popn, PE, BCpb, BCT,
                          part="B")
  # Cloud/NFV part is optional (skipped when no clusters are requested)
  if CL > 0:
    addCloudNFVPart(nffg, an0, an1, p, popn, CL, CH, SE, SAN_bw, SAN_storage,
                    NF_types, SE_cores, SE_mem, SE_storage, CL_bw, CH_links)
  return
def getCarrierTopo(params, increment_port_ids=False):
  """
  Construct the core network and add PoPs with their parameters.

  params is a list of dictionaries with PoP data:
    'Retail': (BNAS, RCpb, RCT)
    'Business': (PE, BCpb, BCT)
    'CloudNFV': (CL,CH,SE,SAN_bw,SAN_sto,NF_types,SE_cores,SE_mem,SE_sto,
                 CL_bw, CH_links)

  WARNING: using this function with increment_port_ids=True this function is not
  thread safe, because it uses global variable then!
  """
  # This initializes the random generator always to the same value, so the
  # returned index sequence, and thus the network parameters will be generated
  # always the same (we want a fixed network environment)
  # The generated identifiers are still different between genereations, but
  # those does not influence the mapping process
  random.seed(0)
  popcnt = 0
  nffg = NFFG(id="CarrierTopo")
  p = increment_port_ids
  # backbone: a ring of four high-capacity switches
  backbone_res = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
                  'bandwidth': 10000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  bn0 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn1 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn2 = nffg.add_infra(id=getName("bn"), **backbone_res)
  bn3 = nffg.add_infra(id=getName("bn"), **backbone_res)
  nffg.add_undirected_link(add_port(bn0, p), add_port(bn1, p), bandwidth=1000,
                           delay=10)
  nffg.add_undirected_link(add_port(bn1, p), add_port(bn2, p), bandwidth=1000,
                           delay=10)
  nffg.add_undirected_link(add_port(bn2, p), add_port(bn3, p), bandwidth=1000,
                           delay=10)
  nffg.add_undirected_link(add_port(bn3, p), add_port(bn0, p), bandwidth=1000,
                           delay=10)
  backbones = (bn0, bn1, bn2, bn3)
  bnlen = len(backbones)
  # Attach each PoP to two adjacent backbone nodes, rotating around the ring.
  for popdata in params:
    tmp = []
    tmp.extend(popdata['Retail'])
    tmp.extend(popdata['Business'])
    tmp.extend(popdata['CloudNFV'])
    addPoP(nffg, popcnt, backbones[popcnt%bnlen], backbones[(popcnt+1)%bnlen],
           p, *tmp)
    popcnt += 1
  """
  #           BNAS,RCpb, RCT, PE,BCpb, BCT, CL,CH,SE, SAN_bw,
  addPoP(nffg, bn2, bn3, 2, 10000, 0.2,  2, 4000, 0.2,  2, 8, 8,  160000,
         # SAN_sto,NF_types, SE_cores, SE_mem, SE_sto, CL_bw, CH_links
         100000, ['A','B'], [8,12,16], [32000,64000], [150], 40000, 4)
  #          BNAS, RCpb, RCT, PE,BCpb, BCT, CL,CH, SE, SAN_bw,
  addPoP(nffg, bn1, bn2, 10, 40000, 0.2,  8, 4000, 0.2,  4, 40, 8, 160000,
         # SAN_sto,NF_types,           SE_cores,  SE_mem,        SE_sto,
         100000, ['A','B','C','D','E'],[8,12,16], [32000,64000], [150,200],
         # CL_bw, CH_links
         80000, 8)
  """
  log.debug("Carrier topology construction finished!")
  return nffg
def getMediumTopo():
  """
  Constructs a medium sized topology for worst case presentation, if the bigger
  would take too long to finish. Its size is around 12 460 nodes
  (~4500 with SAP cutting)
  """
  # Per-PoP parameters:
  #   'Retail':   (BNAS, RCpb, RCT)
  #   'Business': (PE, BCpb, BCT)
  #   'CloudNFV': (CL, CH, SE, SAN_bw, SAN_sto, NF_types, SE_cores, SE_mem,
  #                SE_sto, CL_bw, CH_links)
  topoparams = [
    {'Retail': (6, 250, 0.2), 'Business': (6, 200, 0.2),
     'CloudNFV': (4, 40, 8, 160000, 100000, ['A', 'B', 'C'],
                  [4, 8, 16], [32000, 64000], [100, 150], 40000, 4)},
    {'Retail': (6, 100, 0.2), 'Business': (4, 200, 0.2),
     'CloudNFV': (4, 20, 8, 160000, 100000, ['A', 'B'],
                  [8, 12, 16], [32000, 64000], [150], 40000, 4)},
    {'Retail': (3, 400, 0.2), 'Business': (8, 100, 0.2),
     'CloudNFV': (4, 30, 8, 160000, 100000, ['B', 'C'],
                  [4, 8, 12, 16], [32000, 64000], [200], 40000, 4)},
    {'Retail': (10, 100, 0.2), 'Business': (8, 150, 0.2),
     'CloudNFV': (4, 40, 8, 160000, 100000, ['B', 'C'],
                  [4, 8, 12, 16], [32000, 64000], [200], 40000, 4)},
  ]
  return getCarrierTopo(topoparams), topoparams
def getSmallTopo():
  """
  Constructs a small topology which is structurally similar to carrier topology,
  but could be executed fast enough for testing.
  """
  # Per-PoP parameters:
  #   'Retail':   (BNAS, RCpb, RCT)
  #   'Business': (PE, BCpb, BCT)
  #   'CloudNFV': (CL, CH, SE, SAN_bw, SAN_sto, NF_types, SE_cores, SE_mem,
  #                SE_sto, CL_bw, CH_links)
  pop0 = {'Retail': (2, 250, 0.2), 'Business': (2, 100, 0.2),
          'CloudNFV': (2, 4, 8, 160000, 100000, ['A', 'B', 'C'],
                       [4, 8, 16], [32000], [100, 150], 40000, 4)}
  pop1 = {'Retail': (2, 250, 0.2), 'Business': (2, 150, 0.2),
          'CloudNFV': (2, 2, 8, 160000, 100000, ['A', 'B'],
                       [8, 12, 16], [32000, 64000], [150], 40000, 4)}
  topoparams = [pop0, pop1]
  return getCarrierTopo(topoparams), topoparams
def getMicroTopo():
  """Two very small PoPs for quick experiments."""
  pop0 = {'Retail': (2, 50, 0.2), 'Business': (2, 30, 0.2),
          'CloudNFV': (2, 2, 4, 160000, 100000, ['A', 'B'],
                       [8, 12, 16], [32000, 64000], [150], 40000, 4)}
  pop1 = {'Retail': (2, 50, 0.2), 'Business': (2, 30, 0.2),
          'CloudNFV': (2, 2, 4, 160000, 100000, ['A', 'B', 'C'],
                       [8, 12, 16], [32000, 64000], [150], 40000, 4)}
  topoparams = [pop0, pop1]
  return getCarrierTopo(topoparams), topoparams
def getNanoTopo():
  """One tiny PoP; port ids are explicitly incremented on every node."""
  pop = {'Retail': (1, 2, 10), 'Business': (1, 2, 10),
         'CloudNFV': (1, 2, 4, 1000, 100000, ['A', 'B', 'C'],
                      [8, 12, 16], [32000, 64000], [150], 4000, 4)}
  topoparams = [pop]
  return getCarrierTopo(topoparams, increment_port_ids=True), topoparams
def getPicoTopo():
  """
  Not carrier style topo. Few nodes with big resources.

  Builds a ring of 4 infra nodes around a central switch, with one SAP
  hanging off each infra node.
  """
  random.seed(0)
  nffg = NFFG(id="SmallExampleTopo")
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 100, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  sw = nffg.add_infra(id = getName("sw"), **switch)
  infra = {'cpu': 90, 'mem': 32000, 'storage': 200, 'delay': 1.0,
           'bandwidth': 1000, 'infra_type': NFFG.TYPE_INFRA_EE}
  linkres = {'bandwidth': 100, 'delay': 0.5}
  inf1 = nffg.add_infra(id = getName("infra"), **infra)
  # remember the first infra so the last loop iteration can close the ring
  inf0 = inf1
  # each infra supports NF types 'A'..'J'
  inf1.add_supported_type(list(string.ascii_uppercase)[:10])
  for i in range(0,4):
    if i == 3:
      # close the ring back to the first infra instead of adding a new one
      inf2 = inf0
    else:
      inf2 = nffg.add_infra(id = getName("infra"), **infra)
      inf2.add_supported_type(list(string.ascii_uppercase)[:10])
    nameid = getName("sap")
    sap = nffg.add_sap(id = nameid, name = nameid)
    # add links: star to the central switch, ring edge, and SAP access link
    nffg.add_undirected_link(sw.add_port(), inf2.add_port(), **linkres)
    nffg.add_undirected_link(inf1.add_port(), inf2.add_port(), **linkres)
    nffg.add_undirected_link(inf2.add_port(), sap.add_port(id=1), **linkres)
    inf1 = inf2
  return nffg
def getSNDlib_dfn_gwin(gwin_path = "dfn-gwin.gml", save_to_file=False,
                       gen_sap_names=False,
                       abc_nf_type_num=10, edge_computing=False,
                       edge_and_core_computing=False):
  """
  Topology taken from SNDlib, dfn-gwin.

  Reads the GML core topology, then attaches access switches with SAPs and
  compute hosts according to the flags:
  - edge_computing=False: 6 hosts attached directly to random core nodes
  - edge_computing=True, edge_and_core_computing=True: 2 core hosts plus
    smaller hosts attached to the access switches

  :type edge_and_core_computing: only has effect when edge_computing is True
  """
  random.seed(0)
  # NOTE(review): uses the pre-2.0 networkx iterator API (nodes_iter,
  # edges_iter) and Python 2 xrange; requires networkx < 2.0 / Python 2.
  gwin = nx.read_gml(gwin_path)
  nffg = NFFG(id="dfn-gwin")
  nf_types = list(string.ascii_uppercase)[:abc_nf_type_num]
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  infrares = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
              'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_EE}
  corelinkres = {'bandwidth': 10000, 'delay': 1.0}
  aggrlinkres = {'bandwidth': 1000, 'delay': 5.0}
  acclinkres = {'bandwidth': 100, 'delay': 1.0}
  gwinnodes = []
  # strip trailing dots from GML node names to get clean infra ids
  for n in gwin.nodes_iter():
    gwinnodes.append(n.rstrip('.'))
  # get topology from dfn-gwin
  for n in gwinnodes:
    nffg.add_infra(id=n, **switch)
  for i,j in gwin.edges_iter():
    nffg.add_undirected_link(nffg.network.node[i.rstrip('.')].add_port(),
                             nffg.network.node[j.rstrip('.')].add_port(),
                             **corelinkres)
  if not edge_computing:
    # add cloud nodes to 6 random nodes.
    # (two independent samples of 3 -- the same node may be drawn twice)
    nodeset1 = random.sample(gwinnodes, 3)
    nodeset1.extend(random.sample(gwinnodes, 3))
    for n in nodeset1:
      infra = nffg.add_infra(id=getName(n+"Host"), **infrares)
      infra.add_supported_type(random.sample(nf_types, 6))
      nffg.add_undirected_link(nffg.network.node[n].add_port(), infra.add_port(),
                               **corelinkres)
  elif edge_and_core_computing:
    # add only 2 hosts to the core and the rest distributed in the
    # access switches
    for n in random.sample(gwinnodes, 2):
      infra = nffg.add_infra(id=getName(n+"Host"), **infrares)
      infra.add_supported_type(random.sample(nf_types, 6))
      nffg.add_undirected_link(nffg.network.node[n].add_port(), infra.add_port(),
                               **corelinkres)
    # distribute the other 4 data center's computing power among 6 nodes
    # connected to the access switches (smaller per-host resources)
    infrares = {'cpu': 267, 'mem': 213333, 'storage': 1000, 'delay': 1.0,
                'bandwidth': 26667, 'infra_type': NFFG.TYPE_INFRA_EE}
  nodeset2 = random.sample(gwinnodes, 3)
  nodeset2.extend(random.sample(gwinnodes, 3))
  # add access switches to 6 random nodes
  for n in nodeset2:
    sw = nffg.add_infra(id=getName(n+"Sw"), **switch)
    nffg.add_undirected_link(nffg.network.node[n].add_port(), sw.add_port(),
                             **aggrlinkres)
    # 3-4 SAPs per access switch
    for i in xrange(0,random.randint(3,4)):
      if gen_sap_names:
        nameid = getName("sap")
      else:
        nameid = getName(n+"SAP")
      sap = nffg.add_sap(id=nameid, name=nameid)
      nffg.add_undirected_link(sap.add_port(id=1), sw.add_port(), **acclinkres)
  if edge_computing:
    number_of_added_hosts = 0
    # add hosts to the 6 access switches available
    for n in filter(lambda i: "Sw" in i.id, [i for i in nffg.infras]):
      infra = nffg.add_infra(id=getName(n.id + "Host"), **infrares)
      infra.add_supported_type(random.sample(nf_types, 6))
      nffg.add_undirected_link(nffg.network.node[n.id].add_port(),
                               infra.add_port(),
                               **corelinkres)
  # save it to file
  if save_to_file:
    augmented_gwin = nx.MultiDiGraph()
    augmented_gwin.add_nodes_from(nffg.network.nodes_iter())
    augmented_gwin.add_edges_from(nffg.network.edges_iter())
    nx.write_gml(augmented_gwin, "augmented-dfn-gwin.gml")

  return nffg
def getFatTreeTopo(abc_nf_type_num=10, save_to_file=False):
  """
  Constructs a data center fat tree topology, with similar values as
  SNDLib gwin.

  Layers: 4 fully-meshed core switches, 6 aggregation switches, and 19
  SAPs each behind its own access switch; hosts hang off both the core
  and the aggregation layer.

  :param abc_nf_type_num: number of NF types ('A', 'B', ...) hosts support
  :param save_to_file: also dump the graph to fat-tree-data-center.gml
  :return: the constructed NFFG
  """
  random.seed(1)
  nf_types = list(string.ascii_uppercase)[:abc_nf_type_num]
  nffg = NFFG(id="FatTree")
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  infrares = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
              'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_EE}
  corelinkres = {'bandwidth': 10000, 'delay': 1.0}
  aggrlinkres = {'bandwidth': 1000, 'delay': 5.0}
  acclinkres = {'bandwidth': 100, 'delay': 1.0}
  # add core switches (complete_graph(4) is used only for its node/edge lists)
  core_sw = []
  for i in nx.complete_graph(4):
    core_sw.append(nffg.add_infra(id='CoreSw'+str(i), **switch))
  for i,j in nx.complete_graph(4).edges():
    nffg.add_undirected_link(core_sw[i].add_port(id=getName('p'+core_sw[i].id)),
                             core_sw[j].add_port(id=getName('p'+core_sw[j].id)),
                             **corelinkres)
  # add aggregation switches, each core connects to 4 random ones
  aggr_sw = []
  for i in xrange(0,6):
    aggr_sw.append(nffg.add_infra(id='AggrSw'+str(i), **switch))
  for sw_c in core_sw:
    for sw_a in random.sample(aggr_sw, 4):
      nffg.add_undirected_link(sw_c.add_port(id=getName('p'+sw_c.id)),
                               sw_a.add_port(id=getName('p'+sw_a.id)),
                               **aggrlinkres)
  # add sap nodes with one connecting switch each
  for i in xrange(0,19):
    nameid = getName('SAP')
    sap = nffg.add_sap(id=nameid, name=nameid)
    access_sw = nffg.add_infra(id='AccessSw'+str(i), **switch)
    # connect the SAP to its Switch to eliminate SAP link bottleneck.
    # WARNING: in some NFFG environments SAPs should only have one port, in
    # others this port must have ID 1.
    nffg.add_undirected_link(sap.add_port(id=1),
                             access_sw.add_port(id=getName('p'+access_sw.id)),
                             **corelinkres)
    for sw_a in random.sample(aggr_sw, 6):
      nffg.add_undirected_link(access_sw.add_port(id=getName('p'+access_sw.id)),
                               sw_a.add_port(id=getName('p'+sw_a.id)),
                               **acclinkres)
  # add hosts: 2 connected to core and 4 connected to aggregation switches
  for number, sw_list in zip((2, 4), (core_sw, aggr_sw)):
    for i, sw in zip(xrange(0,number), random.sample(sw_list, number)):
      infra = nffg.add_infra(id=getName('Host'), **infrares)
      infra.add_supported_type(random.sample(nf_types, 6))
      nffg.add_undirected_link(sw.add_port(id=getName('p'+sw.id)),
                               infra.add_port(id=getName('p'+infra.id)),
                               **corelinkres)
  if save_to_file:
    fat_tree = nx.MultiDiGraph()
    fat_tree.add_nodes_from(nffg.network.nodes_iter())
    fat_tree.add_edges_from(nffg.network.edges_iter())
    nx.write_gml(fat_tree, "fat-tree-data-center.gml")

  return nffg
def getSpineLeafTopology(abc_nf_type_num=10, save_to_file=False):
  """
  Constructs a data center spine-leaf topology, with similar values as
  SNDLib gwin.

  Two core switches front the fabric; two spine switches connect to two
  core-leaf switches; 19 SAPs attach to the cores via helper switches and
  6 hosts attach to both spines via per-host access switches.

  :param abc_nf_type_num: number of NF types ('A', 'B', ...) hosts support
  :param save_to_file: also dump the graph to spine-leaf-data-center.gml
  :return: the constructed NFFG
  """
  random.seed(1)
  nf_types = list(string.ascii_uppercase)[:abc_nf_type_num]
  nffg = NFFG(id="SpineLeaf")
  switch = {'cpu': 0, 'mem': 0, 'storage': 0, 'delay': 0.5,
            'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_SDN_SW}
  infrares = {'cpu': 400, 'mem': 320000, 'storage': 1500, 'delay': 1.0,
              'bandwidth': 40000, 'infra_type': NFFG.TYPE_INFRA_EE}
  corelinkres = {'bandwidth': 10000, 'delay': 1.0}
  # add core switches (directly interconnected)
  core_sw = []
  for i in range(0,2):
    core_sw.append(nffg.add_infra(id='CoreSw'+str(i), **switch))
  nffg.add_undirected_link(core_sw[0].add_port(id=getName('p' + core_sw[0].id)),
                           core_sw[1].add_port(id=getName('p' + core_sw[1].id)),
                           **corelinkres)
  spine_sw = []
  for i in range(0,2):
    spine_sw.append(nffg.add_infra(id='SpineSw'+str(i), **switch))
  cleaf_sw = []
  for i in range(0, 2):
    cleaf_sw.append(nffg.add_infra(id='CoreLeafSw' + str(i), **switch))
  # each core-leaf connects to both spines and both cores
  for cleaf in cleaf_sw:
    for spine in spine_sw:
      nffg.add_undirected_link(spine.add_port(id=getName('p' + spine.id)),
                               cleaf.add_port(id=getName('p' + cleaf.id)),
                               **corelinkres)
    for core in core_sw:
      nffg.add_undirected_link(core.add_port(id=getName('p' + core.id)),
                               cleaf.add_port(id=getName('p' + cleaf.id)),
                               **corelinkres)
  # add sap nodes with one connecting switch each
  for i in xrange(0,19):
    nameid = getName('SAP')
    sap = nffg.add_sap(id=nameid, name=nameid)
    sap_helper = nffg.add_infra(id='SAPHelperSw'+str(i), **switch)
    # connect the SAP to its Switch to eliminate SAP link bottleneck.
    # WARNING: in some NFFG environments SAPs should only have one port, in
    # others this port must have ID 1.
    nffg.add_undirected_link(sap.add_port(id=1),
                             sap_helper.add_port(id=getName('p'+sap_helper.id)),
                             **corelinkres)
    # ~half of the SAPs are connected to one core, half to the other core.
    if i < 10:
      spine_idx = 0
    else:
      spine_idx = 1
    nffg.add_undirected_link(sap_helper.add_port(id=getName('p' + sap_helper.id)),
                             core_sw[spine_idx].add_port(
                               id=getName('p' + core_sw[spine_idx].id)),
                             **corelinkres)
  # connect all the hosts to both SpineSw through an AccessSw
  for i in range(0,6):
    infra = nffg.add_infra(id=getName('Host'), **infrares)
    infra.add_supported_type(random.sample(nf_types, 6))
    sw = nffg.add_infra(id='AccessSw'+str(i), **switch)
    nffg.add_undirected_link(sw.add_port(id=getName('p' + sw.id)),
                             infra.add_port(id=getName('p' + infra.id)),
                             **corelinkres)
    for spine in spine_sw:
      nffg.add_undirected_link(sw.add_port(id=getName('p'+sw.id)),
                               spine.add_port(id=getName('p'+spine.id)),
                               **corelinkres)
  if save_to_file:
    spine_leaf = nx.MultiDiGraph()
    spine_leaf.add_nodes_from(nffg.network.nodes_iter())
    spine_leaf.add_edges_from(nffg.network.edges_iter())
    nx.write_gml(spine_leaf, "spine-leaf-data-center.gml")

  return nffg
if __name__ == '__main__':
  # Demo/driver code: builds example parameter sets and dumps one of the
  # generator outputs to a file.  Most alternatives are kept commented out.
  topoparams = []
  # params of one PoP
  # 'Retail': (BNAS, RCpb, RCT)
  # 'Business': (PE, BCpb, BCT)
  # 'CloudNFV': (CL,CH,SE,SAN_bw,SAN_sto,NF_types,SE_cores,SE_mem,SE_sto,
  #              CL_bw, CH_links)
  # print getSNDlib_dfn_gwin().dump()   NOT WORKING.
  topoparams.append({'Retail': (2, 10000, 0.2), 'Business': (2, 8000, 0.2),
                     'CloudNFV': (2, 8, 8, 160000, 100000, ['A','B','C'],
                                  [4,8,16], [32000], [100,150], 40000, 4)})
  topoparams.append({'Retail': (2, 10000, 0.2), 'Business': (4, 4000, 0.2),
                     'CloudNFV': (2, 8, 8, 160000, 100000, ['A','B'],
                                  [8,12,16], [32000,64000], [150], 40000, 4)})
  # topoparams.append({'Retail': (2, 20000, 0.2), 'Business': (8, 4000, 0.2),
  #                    'CloudNFV': (2, 40, 8, 160000, 100000, ['B', 'C'],
  #                                 [4,8,12,16], [32000,64000], [200], 40000, 4)})
  # topo = getCarrierTopo(topoparams)
  # print topo.dump()
  # Dump the augmented dfn-gwin topology (edge + core computing variant).
  with open("augmented-dfn-gwin.nffg", "w") as f:
    f.write(getSNDlib_dfn_gwin(abc_nf_type_num=10, gen_sap_names=False,
                               save_to_file=True, edge_computing=True,
                               edge_and_core_computing=True).dump())
  #
  # with open("fat-tree-data-center.nffg", "w") as f:
  #   f.write(getFatTreeTopo(save_to_file=True).dump())
  # with open("spine-leaf-data-center.nffg", "w") as f:
  #   f.write(getSpineLeafTopology(save_to_file=True).dump())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/letsencrypt.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import logging
import os
import re
from king_phisher import startup
from king_phisher.server.database import storage as db_storage
logger = logging.getLogger('KingPhisher.LetsEncrypt')
LETS_ENCRYPT_DEFAULT_DATA_PATH = '/etc/letsencrypt'
"""The default path at which Let's Encrypt data is stored."""
_HOSTNAME_DIRECTORY_REGEX = re.compile(r'^(?P<hostname>[a-z0-9][a-z0-9-]*(\.[a-z0-9-]+)*\.[a-z]+)(-(?P<index>\d+))?$', re.IGNORECASE)
_sni_hostnames = db_storage.KeyValueStorage(namespace='server.ssl.sni.hostnames', order_by='key')
SNIHostnameConfiguration = collections.namedtuple('SNIHostnameConfiguration', ('certfile', 'keyfile', 'enabled'))
"""
The information for a certificate used by the server's SSL Server Name Indicator
(SNI) extension.
.. py:attribute:: certfile
The path to the SSL certificate file on disk to use for the hostname.
.. py:attribute:: keyfile
The path to the SSL key file on disk to use for the hostname.
.. py:attribute:: enabled
Whether or not this configuration is set to be loaded by the server.
"""
def _check_files(*file_paths):
    """Return ``True`` when every path in *file_paths* is a readable regular file."""
    for file_path in file_paths:
        if not os.path.isfile(file_path):
            return False
        if not os.access(file_path, os.R_OK):
            return False
    return True
def _get_files(directory, hostname):
    """
    Locate the full-chain certificate and private key files for *hostname*
    beneath *directory*.

    :param str directory: The directory to search (certbot's ``live`` dir).
    :param str hostname: The hostname whose files should be found.
    :return: A ``(cert_path, key_path)`` tuple; either member is ``None`` when
        the corresponding file is missing or unreadable.
    """
    exact_path = os.path.join(directory, hostname)
    if os.path.isdir(exact_path):
        directory = exact_path
    else:
        # certbot will append digits to the end of a directory to avoid naming
        # conflicts, so find the entry with the highest numeric suffix
        best_index = None
        for entry in os.listdir(directory):
            match = _HOSTNAME_DIRECTORY_REGEX.match(entry)
            if match is None:
                continue
            if match.group('hostname') != hostname or not match.group('index'):
                continue
            candidate = match.group('index')
            if best_index is None or int(candidate) > int(best_index):
                best_index = candidate
        if best_index is None:
            return None, None
        directory = os.path.join(directory, hostname + '-' + best_index)
    cert_path = os.path.join(directory, 'fullchain.pem')
    if not _check_files(cert_path):
        cert_path = None
    key_path = os.path.join(directory, 'privkey.pem')
    if not _check_files(key_path):
        key_path = None
    return cert_path, key_path
def _run_certbot(args, bin_path=None):
    """
    Execute the ``certbot`` utility with the specified arguments.

    :param args: The arguments to pass to ``certbot``.
    :param str bin_path: The optional path to the ``certbot`` binary. If not
        specified, it is resolved via :py:func:`get_certbot_bin_path`.
    :return: The process handle from :py:func:`~king_phisher.startup.run_process`.
    :raises FileNotFoundError: If the certbot binary could not be found.
    """
    bin_path = bin_path or get_certbot_bin_path()
    if bin_path is None:
        # fix: this exception was previously *returned* instead of raised,
        # leaving callers (e.g. certbot_issue) to access .status on an
        # exception instance instead of a process handle
        raise FileNotFoundError('the certbot binary could not be found')
    args = (bin_path,) + tuple(args)
    return startup.run_process(args)
def _sync_hostnames(unified_directory):
    """
    Scan the Let's Encrypt ``live`` directory beneath *unified_directory* and
    register every hostname found there that is not already present in the
    SNI hostname store.
    """
    directory = os.path.join(unified_directory, 'etc', 'live')
    if not os.path.isdir(directory):
        logger.warning('can not enumerate available letsencrypt data (directory not found)')
        return
    if not os.access(directory, os.R_OK | os.X_OK):
        logger.warning('can not enumerate available letsencrypt data (invalid permissions)')
        return
    for entry in os.listdir(directory):
        match = _HOSTNAME_DIRECTORY_REGEX.match(entry)
        if match is None:
            continue
        hostname = match.group('hostname')
        if hostname in _sni_hostnames:
            continue  # already registered
        certfile, keyfile = _get_files(directory, hostname)
        if certfile and keyfile:
            set_sni_hostname(hostname, certfile, keyfile)
def certbot_issue(webroot, hostname, bin_path=None, unified_directory=None):
    """
    Issue a certificate using Let's Encrypt's ``certbot`` utility. This function
    wraps the ``certbot`` binary and configures the parameters as appropriate.
    By default, the resulting certificate will be placed under
    :py:data:`.LETS_ENCRYPT_DEFAULT_DATA_PATH`, however if *unified_directory*
    is used then it will be under ``$unified_directory/etc``.

    :param str webroot: The webroot to use while requesting the certificate.
    :param str hostname: The hostname of the certificate to request.
    :param str bin_path: The optional path to the ``certbot`` binary. If not
        specified, then it will be searched for utilizing
        :py:func:`~king_phisher.startup.which`.
    :param str unified_directory: A single directory under which all the Let's
        Encrypt data should be stored. This is useful when not running the
        utility as root.
    :return: The exit status of the ``certbot`` utility.
    :rtype: int
    """
    args = ['certonly']
    if unified_directory:
        # keep config, log and runtime data together for non-root operation
        etc_dir = os.path.join(unified_directory, 'etc')
        log_dir = os.path.join(unified_directory, 'log')
        lib_dir = os.path.join(unified_directory, 'lib')
        args.extend(['--config-dir', etc_dir])
        args.extend(['--logs-dir', log_dir])
        args.extend(['--work-dir', lib_dir])
    args.extend(['--webroot', '--webroot-path', webroot, '-d', hostname])
    proc = _run_certbot(args, bin_path=bin_path)
    return proc.status
def get_certbot_bin_path(config=None):
    """
    Get the path to Let's Encrypt's ``certbot`` command line utility. If the
    path is found, it is verified to be both a file and executable. If the
    path verification fails, ``None`` is returned.

    .. versionadded:: 1.14.0

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: The path to the certbot binary.
    :rtype: str
    """
    letsencrypt_config = config.get_if_exists('server.letsencrypt', {}) if config else {}
    bin_path = letsencrypt_config.get('certbot_path') or startup.which('certbot')
    # the path must exist, be a regular file, and be readable and executable
    if bin_path is None:
        return None
    if os.path.isfile(bin_path) and os.access(bin_path, os.R_OK | os.X_OK):
        return bin_path
    return None
def get_sni_hostname_config(hostname, config=None):
    """
    Search for and return the SNI configuration for the specified *hostname*.
    This method will first check to see if the entry exists in the database
    before searching the Let's Encrypt data directory (if ``data_path`` is
    present in the server configuration). If no configuration data is found, or
    the data file paths appear invalid, ``None`` is returned.

    :param str hostname: The hostname to retrieve the configuration for.
    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :return: The SNI configuration for the hostname if it was found.
    :rtype: :py:class:`.SNIHostnameConfiguration`
    """
    unified_directory = None
    if config:
        unified_directory = config.get_if_exists('server.letsencrypt.data_path')
    if unified_directory:
        # pick up any certificates certbot created since the last call
        _sync_hostnames(unified_directory)
    sni_config = _sni_hostnames.get(hostname)
    if sni_config and _check_files(sni_config['certfile'], sni_config['keyfile']):
        return SNIHostnameConfiguration(**sni_config)
    return None
def get_sni_hostnames(config=None, check_files=True):
    """
    Retrieve all the hostnames for which a valid SNI configuration can be
    retrieved. These are the hostnames for which SNI can be enabled. If
    *check_files* is enabled, the data files will be checked to ensure that they
    exist and are readable, else the configuration will be omitted.

    :param config: Configuration to retrieve settings from.
    :type config: :py:class:`smoke_zephyr.configuration.Configuration`
    :param bool check_files: Whether or not to check the referenced data files.
    :return: A dictionary, keyed by hostnames with values of :py:class:`.SNIHostnameConfiguration` instances.
    :rtype: dict
    """
    unified_directory = None
    if config:
        unified_directory = config.get_if_exists('server.letsencrypt.data_path')
    if unified_directory:
        _sync_hostnames(unified_directory)
    hostnames = collections.OrderedDict()
    for hostname, sni_config in _sni_hostnames.items():
        usable = not check_files or _check_files(sni_config['certfile'], sni_config['keyfile'])
        if usable:
            hostnames[hostname] = SNIHostnameConfiguration(**sni_config)
    return hostnames
def set_sni_hostname(hostname, certfile, keyfile, enabled=False):
    """
    Set the SNI configuration for the specified *hostname*. This information can
    then later be retrieved with either :py:func:`get_sni_hostname_config` or
    :py:func:`get_sni_hostnames`.

    :param str hostname: The hostname associated with the configuration data.
    :param str certfile: The path to the certificate file on disk.
    :param str keyfile: The path to the key file on disk.
    :param bool enabled: Whether or not this SNI configuration is loaded in the server.
    """
    # normalize both paths before persisting the entry
    certfile = os.path.abspath(certfile)
    keyfile = os.path.abspath(keyfile)
    _sni_hostnames[hostname] = {
        'certfile': certfile,
        'keyfile': keyfile,
        'enabled': enabled
    }
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import re
import time
from google.appengine.api import urlfetch
import webapp2
from base import bigquery
from base import constants
from common import buildbot
class Builds(webapp2.RequestHandler):
  """Request handler that records buildbot build trace events into BigQuery.

  For every configured master it computes which available builds have not yet
  been recorded, converts them to trace events, then inserts completed builds
  into the builds table and in-progress builds into the current-builds table
  (which is truncated on each run).
  """

  def get(self):
    # BigQuery/buildbot fetches can be slow; raise the urlfetch deadline.
    urlfetch.set_default_fetch_deadline(120)
    bq = bigquery.BigQuery()
    current_events = []  # events for builds still in progress
    events = []  # events for completed builds
    for master_name in constants.MASTER_NAMES:
      builders = buildbot.Builders(master_name)
      available_builds = _AvailableBuilds(builders)
      recorded_builds = _RecordedBuilds(bq, builders, available_builds)
      for builder in builders:
        # Filter out recorded builds from available builds.
        build_numbers = (available_builds[builder.name] -
                         recorded_builds[builder.name])
        builder_current_events, builder_events = _TraceEventsForBuilder(
            builder, build_numbers)
        current_events += builder_current_events
        events += builder_events
    jobs = []
    if current_events:
      # truncate=True: the current-builds table always reflects only the
      # builds that are in progress right now.
      jobs += bq.InsertRowsAsync(
          constants.DATASET, constants.CURRENT_BUILDS_TABLE,
          current_events, truncate=True)
    if events:
      jobs += bq.InsertRowsAsync(constants.DATASET, constants.BUILDS_TABLE,
                                 events)
    for job in jobs:
      bq.PollJob(job, 60 * 20)  # 20 minutes.
def _AvailableBuilds(builders):
  """Maps each builder name to a frozenset of its recent cached build numbers.

  Buildbot on tryserver.chromium.perf occasionally includes build 0 in its
  list of cached builds, which yields more builds than wanted; additionally the
  urlfetch URL limit is 2048 bytes ("&select=100000" * 100 is 1400 bytes), so
  only roughly the last 100 builds are kept.
  """
  available = {}
  for builder in builders:
    cached = builder.cached_builds
    if cached:
      cutoff = max(cached) - 100
      available[builder.name] = frozenset(b for b in cached if b >= cutoff)
    else:
      available[builder.name] = frozenset()
  return available
def _RecordedBuilds(bq, builders, available_builds):
  """Queries BigQuery for the build numbers already recorded per builder.

  Returns a dict mapping builder name to a set of recorded build numbers.
  """
  # 105 days / 15 weeks. Must be some number greater than 100 days, because
  # we request up to 100 builds (see _AvailableBuilds), and the slowest cron
  # bots run one job every day.
  start_time_ms = -1000 * 60 * 60 * 24 * 105
  table = '%s.%s@%d-' % (constants.DATASET, constants.BUILDS_TABLE,
                         start_time_ms)
  conditions = []
  for builder in builders:
    builds = available_builds[builder.name]
    if not builds:
      continue
    conditions.append('WHEN builder = "%s" THEN build >= %d AND build <= %d' %
                      (builder.name, min(builds), max(builds)))
  query = (
      'SELECT builder, build '
      'FROM [%s] ' % table +
      'WHERE CASE %s END ' % ' '.join(conditions) +
      'GROUP BY builder, build'
  )
  recorded = collections.defaultdict(set)
  for row in bq.QuerySync(query, 600):
    # rows come back in BigQuery's {'f': [{'v': ...}, ...]} field format
    recorded[row['f'][0]['v']].add(int(row['f'][1]['v']))
  return recorded
def _TraceEventsForBuilder(builder, build_numbers):
  """Fetches the given builds and converts them to trace events.

  Returns a (current_events, events) pair: events for in-progress builds and
  events for completed builds, respectively.
  """
  if not build_numbers:
    return (), ()
  logging.info('Getting %s: %s', builder.name,
               ', '.join(map(str, sorted(build_numbers))))
  # Fetch build information and generate trace events.
  current_events = []
  events = []
  builder_builds = builder.builds.Fetch(build_numbers)
  query_time = time.time()
  for build in builder_builds:
    # route each build's events to the matching bucket
    bucket = events if build.complete else current_events
    bucket += _TraceEventsFromBuild(builder, build, query_time)
  return current_events, events
def _TraceEventsFromBuild(builder, build, query_time):
  """Yields trace event dicts: one for the build, one for each of its steps."""
  # Builder names like "Foo (3)" encode a configuration plus a host shard.
  shard_match = re.match(r'(.+) \(([0-9]+)\)', builder.name)
  if shard_match:
    configuration = shard_match.group(1)
    host_shard = int(shard_match.group(2))
  else:
    configuration = builder.name
    host_shard = 0
  os, os_version, role = _ParseBuilderName(builder.master_name, builder.name)
  # Fields shared by the build event and every step event.
  common = {
      'build': build.number,
      'builder': builder.name,
      'configuration': configuration,
      'host_shard': host_shard,
      'hostname': build.slave_name,
      'master': builder.master_name,
      'os': os,
      'os_version': os_version,
      'role': role,
  }
  # Build trace event. An in-progress build has no end time yet, so stamp it
  # with the query time.
  build_event = dict(common)
  build_event.update({
      'name': 'Build %d' % build.number,
      'start_time': build.start_time,
      'end_time': build.end_time or query_time,
      'status': build.status,
      'url': build.url,
  })
  yield build_event
  # Step trace events.
  for step in build.steps:
    if not step.start_time or step.name == 'steps':
      continue
    step_event = dict(common)
    step_event.update({
        'name': step.name,
        'start_time': step.start_time,
        'end_time': step.end_time or query_time,
        'benchmark': step.name,  # TODO(dtu): This isn't always right.
        'status': step.status,
        'url': step.url,
    })
    yield step_event
def _ParseBuilderName(master_name, builder_name):
  """Extracts (os, os_version, role) from a buildbot builder name.

  Raises NotImplementedError for unknown masters or unrecognized
  chromium.perf roles; raises AttributeError when a name fails to match.
  """
  if master_name == 'chromium.perf':
    groups = re.match(r'^([A-Za-z]+)(?: ([0-9\.]+|XP))?([A-Za-z0-9-\. ]+)? '
                      r'(Builder|Perf)(?: \([0-9]+\))?$', builder_name).groups()
    os = groups[0]
    os_version = groups[1] if groups[1] else None
    if groups[3] == 'Builder':
      role = 'builder'
    elif groups[3] == 'Perf':
      role = 'tester'
    else:
      raise NotImplementedError()
  elif master_name == 'client.catapult':
    # NOTE(review): '([A-Za-z])+' captures only the final letter of the
    # optional word; '([A-Za-z]+)' may have been intended -- confirm before
    # changing, behavior kept as-is here.
    groups = re.match(r'^Catapult(?: ([A-Za-z])+)? ([A-Za-z]+)$',
                      builder_name).groups()
    os = groups[1]
    os_version = None
    role = groups[0] or 'tester'
  elif master_name == 'tryserver.chromium.perf':
    groups = re.match(r'^(android|linux|mac|win).*_([a-z]+)$',
                      builder_name).groups()
    os, role = groups
    os_version = None
  elif master_name == 'tryserver.client.catapult':
    groups = re.match(r'^Catapult(?: (Android|Linux|Mac|Windows))? ([A-Za-z]+)$',
                      builder_name).groups()
    os, role = groups
    os_version = None
  else:
    raise NotImplementedError()
  # Normalize casing; 'windows' is canonically reported as 'win'.
  if os:
    os = os.lower()
    if os == 'windows':
      os = 'win'
  if os_version:
    os_version = os_version.lower()
  return (os, os_version, role.lower())
| |
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from sahara.plugins.mapr.domain import node_process as np
from sahara.plugins.mapr.services.management import management
from sahara.plugins.mapr.services.maprfs import maprfs
from sahara.plugins.mapr.services.oozie import oozie
from sahara.plugins.mapr.services.swift import swift
from sahara.plugins.mapr.services.yarn import yarn
import sahara.plugins.mapr.versions.v4_0_1_mrv2.context as cc
import sahara.plugins.mapr.versions.v4_0_1_mrv2.version_handler as handler
from sahara.plugins import provisioning as p
from sahara.tests.unit import base as b
from sahara.tests.unit import testutils as tu
class TestClusterContext(b.SaharaTestCase):
    """Unit tests for the MapR v4.0.1 MRv2 cluster context.

    Each test builds a single-node cluster (via :py:meth:`_get_context`) whose
    one instance at 1.1.1.1 runs all the master node processes.
    """

    def __init__(self, *args, **kwds):
        super(TestClusterContext, self).__init__(*args, **kwds)
        # A node process that is never part of the test cluster; used to
        # exercise the "not found" code paths.
        self.fake_np = np.NodeProcess('fake', 'foo', 'bar')

    def _get_context(self):
        """Build a cluster context with one master node hosting all services."""
        i1 = tu.make_inst_dict('id_1', 'instance_1', '1.1.1.1')
        master_proc = [yarn.RESOURCE_MANAGER.ui_name,
                       yarn.NODE_MANAGER.ui_name,
                       yarn.HISTORY_SERVER.ui_name,
                       maprfs.CLDB.ui_name,
                       maprfs.FILE_SERVER.ui_name,
                       oozie.OOZIE.ui_name,
                       management.ZOOKEEPER.ui_name]
        master_ng = tu.make_ng_dict('master', 'large', master_proc,
                                    1, [i1])
        cluster_configs = {
            'Service': {
                'key': 'value',
                'Service Version': '1.1'}
        }
        cluster = tu.create_cluster(name='test_cluster', tenant='large',
                                    plugin='mapr', version='4.0.1.mrv1',
                                    node_groups=[master_ng],
                                    cluster_configs=cluster_configs)
        self.ng = cluster.node_groups[0]
        self.instance = self.ng.instances[0]
        return cc.Context(cluster, handler.VersionHandler().get_services())

    def test_get_oozie_server_uri(self):
        ctx = self._get_context()
        self.assertEqual('http://1.1.1.1:11000/oozie', ctx.oozie_server_uri)

    def test_oozie_server(self):
        ctx = self._get_context()
        self.assertIn(oozie.OOZIE.ui_name,
                      ctx.oozie_server.node_group.node_processes)

    def test_oozie_http(self):
        ctx = self._get_context()
        self.assertEqual('1.1.1.1:11000', ctx.oozie_http)

    def test_configure_sh(self):
        ctx = self._get_context()
        conf_sh = ctx.configure_sh
        # configure.sh -N <name> -C <cldb> -Z <zk> -no-autostart -f -RM <rm> -HS <hs>
        pattern = (r'^(\S+)\s+(-N (\S+))\s+(-C (\S+))\s+(-Z (\S+))\s+'
                   r'(-no-autostart)\s+(-f)\s+(-RM (\S+))\s(-HS (\S+))')
        self.assertRegex(conf_sh, pattern)
        self.assertIn('/opt/mapr/server/configure.sh', conf_sh)
        self.assertIn('-C 1.1.1.1', conf_sh)
        self.assertIn('-Z 1.1.1.1', conf_sh)
        self.assertIn('-RM 1.1.1.1', conf_sh)
        self.assertIn('-HS 1.1.1.1', conf_sh)
        self.assertIn('-no-autostart', conf_sh)
        self.assertIn('-N ' + ctx.cluster.name, conf_sh)

    def test_get_cluster_config_value(self):
        ctx = self._get_context()
        conf = p.Config('key', 'Service', 'cluster')
        self.assertEqual('value', ctx._get_cluster_config_value(conf))
        not_set = p.Config('nonset', 'Service', 'cluster')
        self.assertIsNone(ctx._get_cluster_config_value(not_set))

    def test_get_instances(self):
        ctx = self._get_context()
        instances = ctx.get_instances()
        self.assertEqual(1, len(instances))
        # lookups work both by node process object and by its ui_name
        rms1 = ctx.get_instances(yarn.RESOURCE_MANAGER)
        self.assertEqual(1, len(rms1))
        rms2 = ctx.get_instances(yarn.RESOURCE_MANAGER.ui_name)
        self.assertEqual(1, len(rms2))
        not_existing_1 = ctx.get_instances(self.fake_np)
        self.assertEqual(0, len(not_existing_1))
        not_existing_2 = ctx.get_instances(self.fake_np.ui_name)
        self.assertEqual(0, len(not_existing_2))

    def test_get_instance(self):
        ctx = self._get_context()
        instance_1 = ctx.get_instance(yarn.RESOURCE_MANAGER)
        self.assertIn(yarn.RESOURCE_MANAGER.ui_name,
                      instance_1.node_group.node_processes)
        instance_2 = ctx.get_instance(yarn.RESOURCE_MANAGER)
        self.assertIn(yarn.RESOURCE_MANAGER.ui_name,
                      instance_2.node_group.node_processes)
        self.assertIsNone(ctx.get_instance(self.fake_np))

    def test_get_instances_ip(self):
        ctx = self._get_context()
        ip_list_1 = ctx.get_instances_ip(yarn.RESOURCE_MANAGER)
        self.assertEqual(1, len(ip_list_1))
        self.assertIn('1.1.1.1', ip_list_1)
        ip_list_2 = ctx.get_instances_ip(yarn.RESOURCE_MANAGER.ui_name)
        self.assertEqual(1, len(ip_list_2))
        self.assertIn('1.1.1.1', ip_list_2)
        empty_list = ctx.get_instances_ip(self.fake_np)
        self.assertEqual(0, len(empty_list))

    def test_get_instance_ip(self):
        ctx = self._get_context()
        ip_1 = ctx.get_instance_ip(yarn.RESOURCE_MANAGER)
        self.assertEqual('1.1.1.1', ip_1)
        ip_2 = ctx.get_instance_ip(yarn.RESOURCE_MANAGER.ui_name)
        self.assertEqual('1.1.1.1', ip_2)
        none_ip = ctx.get_instance_ip(self.fake_np)
        self.assertIsNone(none_ip)

    def test_get_zookeeper_nodes_ip_with_port(self):
        ctx = self._get_context()
        self.assertEqual('1.1.1.1:5181',
                         ctx.get_zookeeper_nodes_ip_with_port())
        # the port comes from the management service module constant
        management.ZK_CLIENT_PORT = '0000'
        self.assertEqual('1.1.1.1:0000',
                         ctx.get_zookeeper_nodes_ip_with_port())

    def test_filter_instances(self):
        ctx = self._get_context()
        instances = ctx.get_instances()
        rsmngs = ctx.filter_instances(instances, yarn.RESOURCE_MANAGER)
        self.assertEqual(1, len(rsmngs))
        not_existing_i = ctx.filter_instances(instances, self.fake_np)
        self.assertEqual(0, len(not_existing_i))

    def test_check_for_process(self):
        ctx = self._get_context()
        instance = ctx.get_instance(yarn.RESOURCE_MANAGER)
        self.assertTrue(ctx.check_for_process(instance, yarn.RESOURCE_MANAGER))
        self.assertTrue(ctx.check_for_process(instance,
                                              yarn.RESOURCE_MANAGER.ui_name))
        self.assertFalse(ctx.check_for_process(instance, maprfs.NFS))
        self.assertFalse(ctx.check_for_process(instance, maprfs.NFS.ui_name))

    def test_get_chosen_service_version(self):
        ctx = self._get_context()
        version = ctx.get_chosen_service_version('Service')
        self.assertEqual('1.1', version)

    def test_get_cluster_services(self):
        # fix: removed a stray dead 'pass' statement that preceded the body
        ctx = self._get_context()
        actual_services = ctx.get_cluster_services()
        actual_services_names = map(lambda s: s.ui_name, actual_services)
        expected_services_names = [yarn.YARN().ui_name,
                                   management.Management().ui_name,
                                   maprfs.MapRFS().ui_name,
                                   oozie.Oozie().ui_name,
                                   swift.Swift().ui_name]
        self.assertListEqual(sorted(actual_services_names),
                             sorted(expected_services_names))

    def test_get_service(self):
        ctx = self._get_context()
        service = ctx.get_service(yarn.HISTORY_SERVER)
        self.assertEqual(yarn.YARN().ui_name, service.ui_name)
        with testtools.ExpectedException(ValueError):
            ctx.get_service(self.fake_np)

    def test_get_service_name_by_node_process(self):
        ctx = self._get_context()
        s_name_1 = ctx.get_service_name_by_node_process(yarn.RESOURCE_MANAGER)
        self.assertEqual(s_name_1, yarn.YARN().ui_name)
        s_name_2 = ctx.get_service_name_by_node_process(
            yarn.RESOURCE_MANAGER.ui_name)
        self.assertEqual(s_name_2, yarn.YARN().ui_name)
        not_existing_np = np.NodeProcess('not_existing', 'NotExisting', 'foo')
        self.assertIsNone(ctx.get_service_name_by_node_process(
            not_existing_np))
        self.assertIsNone(ctx.get_service_name_by_node_process(
            not_existing_np.ui_name))

    def test_get_instances_count(self):
        ctx = self._get_context()
        self.assertEqual(1, ctx.get_instances_count())
        self.assertEqual(1, ctx.get_instances_count(yarn.RESOURCE_MANAGER))
        self.assertEqual(1, ctx.get_instances_count(
            yarn.RESOURCE_MANAGER.ui_name))
        self.assertEqual(0, ctx.get_instances_count(self.fake_np))
        self.assertEqual(0, ctx.get_instances_count(
            self.fake_np.ui_name))

    def test_get_node_groups(self):
        ctx = self._get_context()
        all_ngs = ctx.get_node_groups()
        self.assertEqual(1, len(all_ngs))
        self.assertEqual([self.ng], all_ngs)
        rm_ngs_1 = ctx.get_node_groups(yarn.RESOURCE_MANAGER)
        self.assertEqual(1, len(rm_ngs_1))
        self.assertEqual([self.ng], rm_ngs_1)
        rm_ngs_2 = ctx.get_node_groups(yarn.RESOURCE_MANAGER.ui_name)
        self.assertEqual(1, len(rm_ngs_2))
        self.assertEqual([self.ng], rm_ngs_2)
        empty_ngs = ctx.get_node_groups(self.fake_np)
        self.assertEqual(0, len(empty_ngs))

    def test_get_cldb_nodes_ip(self):
        ctx = self._get_context()
        cldb_list_1 = ctx.get_cldb_nodes_ip()
        self.assertEqual(1, len(cldb_list_1.split(',')))
        self.assertIn('1.1.1.1', cldb_list_1)
        cldb_list_2 = ctx.get_cldb_nodes_ip()
        self.assertEqual(1, len(cldb_list_2.split(',')))
        self.assertIn('1.1.1.1', cldb_list_2)
        sep = ':'
        cldb_list_3 = ctx.get_cldb_nodes_ip(sep)
        self.assertEqual(1, len(cldb_list_3.split(sep)))
        self.assertIn('1.1.1.1', cldb_list_3)

    def test_get_zookeeper_nodes_ip(self):
        ctx = self._get_context()
        zk_list_1 = ctx.get_zookeeper_nodes_ip()
        self.assertEqual(1, len(zk_list_1.split(',')))
        self.assertIn('1.1.1.1', zk_list_1)
        zk_list_2 = ctx.get_zookeeper_nodes_ip()
        self.assertEqual(1, len(zk_list_2.split(',')))
        self.assertIn('1.1.1.1', zk_list_2)
        sep = ':'
        zk_list_3 = ctx.get_zookeeper_nodes_ip(sep)
        self.assertEqual(1, len(zk_list_3.split(sep)))
        self.assertIn('1.1.1.1', zk_list_3)

    def test_get_resourcemanager_ip(self):
        ctx = self._get_context()
        ip = ctx.get_resourcemanager_ip()
        self.assertEqual('1.1.1.1', ip)

    def test_get_historyserver_ip(self):
        # NOTE(review): this test's body exercises
        # check_for_cldb_or_zookeeper_service, not the history server IP --
        # looks like two tests were merged; confirm against upstream.
        ctx = self._get_context()
        self.assertTrue(
            ctx.check_for_cldb_or_zookeeper_service([self.instance]))
| |
import matplotlib
matplotlib.use('Agg') # Forces matplotlib not to use any xwindows calls
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
import model.ksgroup
from functions import moneyfmt
from matplotlib.patches import Rectangle
from model.users import protected
import model.users
import web
import os
from config import config
"""
Graphs available:
general URL: /graph/<year>/<ksgroup>/<type of graph>/<name of the graph>
Note: Graph() is not a child of Controller-superclass as it doesn't render
a page but returns directly an image to the browser.
Note2: matplotlib's pyplot state machine is not thread-safe - its state is global,
so graphs get mixed together and errors occur. Solution: use matplotlib's OO interface.
"""
class Graph:
    """web.py controller that renders financial graphs as cached PNG images.

    URL scheme: /graph/<year>/<ksgroup>/<type of graph>/<name of the graph>.
    Rendered images are cached on disk under config['graphs']['path'] and
    served from there on subsequent requests. Note: this file uses Python 2
    syntax (octal literals such as 0777, dict.iteritems()).
    """
    def __init__(self):
        self.order_allowed = True  # TODO security
        self.path = None         # on-disk cache path of the rendered PNG
        self.year = None
        self.graph_type = None   # e.g. 'realisatie'
        self.name = None         # order number or encoded ordergroup name
        self.orderOrGroup = None
        self.ksGroup = None
        self.plt = None
        self.colormap = None     # {'baten'|'lasten': {ksgroup descr: rgba}}
        self.ksmap = None        # kostensoort -> (tiepe, ksgroup descr)

    @protected()
    def GET(self, year, ksGroup, graph_type, name):
        """Serve the requested graph, rendering it first when not yet cached."""
        self.year = year
        self.graph_type = graph_type
        self.name = name
        self.ksGroup = ksGroup
        self.path = os.path.join(config['graphs']['path'], year, ksGroup, graph_type, name+'.png')
        if not self.authorized():
            raise web.notfound()
        if os.path.isfile(self.path):
            # cache hit: serve the previously rendered image
            return self.serve_graph()
        else:
            self.create_graph()
            if os.path.isfile(self.path):
                return self.serve_graph()
        raise web.notfound()

    def authorized(self):
        """Return True when the current user may view this graph.

        Checks, in order: graph type, view/report permission, order or
        ordergroup access, year validity and ksgroup availability.
        """
        types_allowed = ['realisatie']
        if not self.graph_type in types_allowed:
            return False
        # If user has no view or report perm. he has no business viewing graphs
        if not model.users.check_permission(['view']) and not model.users.check_permission(['report']):
            return False
        try:
            # a purely numeric name is a single order number
            order_allowed = int(self.name) in model.users.orders_allowed()
        except:
            order_allowed = False
        if not order_allowed:  # Could still be an ordergroup instead of order...
            try:
                og_allowed = model.users.check_ordergroup(*model.ordergroup.decode(self.name))
            except:
                og_allowed = False
            if not og_allowed:
                return False
        try:
            year_allowed = int(self.year) in model.regels.years()
        except:
            year_allowed = False
        if not year_allowed:
            return False
        try:
            ksgroup_allowed = self.ksGroup in model.ksgroup.available()
        except:
            ksgroup_allowed = False
        if not ksgroup_allowed:
            return False
        return True

    def serve_graph(self):
        """Return the cached PNG bytes with an image content type header."""
        web.header("Content-Type", "images/png")
        graph = open(self.path, "rb").read()
        # Debug: remove graph after serving so it gets rebuild every time
        # os.remove(self.path)
        return graph

    def create_graph(self):
        """Load colour/ks mappings and data, then render the requested graph."""
        self.load_maps()
        self.load_data()
        if self.graph_type == 'realisatie':
            self.graph_realisatie()

    def load_maps(self):
        """Build self.ksmap (kostensoort lookup) and self.colormap (colours
        per ksgroup) from the configured ksgroup tree."""
        graph_ks_group = self.ksGroup
        ksgroup_root = model.ksgroup.load(graph_ks_group)
        self.ksmap = {}
        self.colormap = {'baten': {}, 'lasten': {}}
        for tiepe in ['baten', 'lasten']:
            for ks_groups in config['ksgroup']['ksgroups'][graph_ks_group][tiepe]:
                for child in ksgroup_root.find(ks_groups).children:
                    self.colormap[tiepe][child.descr] = {}
                    for ks in child.get_ks_recursive():
                        self.ksmap[ks] = (tiepe, child.descr)
            colors_amount = max(len(self.colormap[tiepe]), 3)  # prevent white colors
            colors = {}
            # distinct sequential colormaps per side: purple=baten, green=lasten
            colors['baten'] = plt.cm.BuPu(np.linspace(0.75, 0.1, colors_amount))
            colors['lasten'] = plt.cm.BuGn(np.linspace(0.75, 0.1, colors_amount))
            for i, key in enumerate(self.colormap[tiepe]):
                self.colormap[tiepe][key] = colors[tiepe][i]

    def format_table_row(self, row):
        """Format a row of numbers for the data table; near-zero values
        become empty strings so the table stays readable."""
        str_row = []
        for value in row:
            if value == 0 or np.abs(value) < 0.5:
                str_row.append('')
            else:
                str_row.append(moneyfmt(value))
        return str_row

    def load_data(self):
        """Load plan and result regels for the requested order(group)/year and
        aggregate them per ksgroup per month into self.data."""
        ordergroup = None
        try:
            # numeric name: a single order number
            orders = [ int(self.name) ]
        except:
            # otherwise the name encodes an ordergroup; collect its orders
            og_file, og_group = model.ordergroup.decode(self.name)
            ordergroup_top = model.ordergroup.load(og_file)
            ordergroup = ordergroup_top.find(og_group)
            orders = ordergroup.list_orders_recursive().keys()
        if ordergroup:
            descr = ordergroup.descr
        else:
            descr = model.orders.load(orders_load=[orders[0]]).orders[0].ordernaam
        regels = {}
        regels['plan'] = model.regels.load(['plan'], years_load=[self.year], orders_load=orders)
        regels['resultaat'] = model.regels.load(['geboekt', 'obligo'], years_load=[self.year], orders_load=orders)
        resultaat_ks_periode = regels['resultaat'].split(['kostensoort', 'periode'])
        data = {}
        data['title'] = '%s-%s-%s' % (self.name, descr, self.year) #TODO replace self.name with order descr if it is a single ordr
        try:
            data['begroting'] = float(regels['plan'].total())
        except:
            data['begroting'] = 0
        data['baten'] = {}
        data['lasten'] = {}
        data['resultaat'] = np.zeros(12)
        #prebuild ksgroup data structure to force fixed order (baten/lasten..)
        keys = ['baten', 'lasten']
        for key in keys:
            for ksgroup in self.colormap[key].keys():
                data[key][ksgroup] = np.zeros(12)
        for ks, resultaat_periode in resultaat_ks_periode.iteritems():
            key = self.ksmap[ks][0]
            name = self.ksmap[ks][1]
            # NOTE(review): the loop variable below shadows the 'regels' dict
            # above; harmless today because the dict is not used afterwards.
            for periode, regels in resultaat_periode.iteritems():
                if periode > 12:
                    periode = 12  # clamp late bookkeeping periods into December
                total = float(regels.total())
                data[key][name][periode - 1] += total
                data['resultaat'][periode - 1] += total
        # cumulative spend over the year
        data['resultaat'] = np.cumsum(data['resultaat'])
        # remove empty ksgroups
        keys = ['baten', 'lasten']
        empty_ksgroups = []
        for key in keys:
            for ksgroup, values in data[key].iteritems():
                if not np.any(values):
                    empty_ksgroups.append((key, ksgroup))
        for key, ksgroup in empty_ksgroups:
            del data[key][ksgroup]
        self.data = data

    def graph_realisatie(self):
        """Render the 'realisatie' graph: cumulative spend vs. budget line,
        monthly baten/lasten bars and a data table, saved to self.path."""
        data_x = np.arange(1, 13)
        data_x_begroting = np.array([0, 12])
        # amounts are plotted in keur (thousands of euros)
        data_y_begroting = np.array([0, self.data['begroting'] / 1000])
        data_y_resultaat = self.data['resultaat'] / 1000
        fig, ax = plt.subplots(figsize=(12,9))
        # Set layout
        ax.set_title(self.data['title'], loc='right', fontsize=12)
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.set_xlim(0.5, 12.51)
        ax.set_xticks([])
        ax.hlines(0, 0, 13, color='black')
        ax.tick_params(axis='y', labelsize='14', left=True)
        ax.yaxis.set_label_text('Spent (keur)', size=18)
        legend = {}
        legend['data'] = []
        legend['keys'] = []
        # Plot data
        plot_resultaat = ax.plot(data_x, data_y_resultaat, 'ro-', lw=2)
        plot_begroting = ax.plot(data_x_begroting, data_y_begroting, 'k--')
        # setup legend
        legend['data'].append(plot_resultaat[0])
        legend['keys'].append("Realisatie (%s keur)" % moneyfmt(data_y_resultaat[-1]))
        legend['data'].append(plot_begroting[0])
        legend['keys'].append("Begroting (%s keur)" % moneyfmt(self.data['begroting'], keur=True))
        # invisible rectangle acts as a spacer handle for the third legend line
        legend['data'].append(Rectangle((0, 0), 0, 0, alpha=0.0))
        overschot = self.data['begroting'] / 1000 - data_y_resultaat[-1]
        if overschot > 0:
            legend['keys'].append("Te besteden (%s keur)" % moneyfmt(overschot))
        else:
            legend['keys'].append("Overbesteed: (%s keur)" % moneyfmt(overschot))
        # legend upper-left normally, lower-left when the result line is negative
        leg = ax.legend(tuple(legend['data']), tuple(legend['keys']), fontsize=16, loc=2)
        if data_y_resultaat[-1] < 0:
            leg = ax.legend(tuple(legend['data']), tuple(legend['keys']), fontsize=16, loc=3)
        leg.get_frame().set_linewidth(0.0)
        # Plot bars of baten/lasten!
        totaalbars = len(self.data['baten']) + len(self.data['lasten'])
        width = 1. / (totaalbars + 1)
        offset = (1 - totaalbars * width) / 2
        bar_nr = 0
        for name, data_y in self.data['baten'].iteritems():
            plot_baten_bars = ax.bar(data_x + width * bar_nr - 0.5 + offset, data_y / 1000, width,
                                     color=self.colormap['baten'][name])
            bar_nr += 1
        for name, data_y in self.data['lasten'].iteritems():
            plot_lasten_bars = ax.bar(data_x + width * bar_nr - 0.5 + offset, data_y / 1000, width,
                                      color=self.colormap['lasten'][name])
            bar_nr += 1
        # add table below the graph
        values = []
        values.append(self.format_table_row(data_y_resultaat)) # totaal
        begroting_per_maand = self.data['begroting'] / 12000
        # monthly deviation from a linear budget burn-down
        residue_begroting_per_maand = data_y_resultaat - np.linspace(begroting_per_maand, 12 * begroting_per_maand,
                                                                     num=12)
        values.append(self.format_table_row(residue_begroting_per_maand))
        for data_key in ['baten', 'lasten']:
            for key, row in self.data[data_key].iteritems():
                self.data[data_key][key] = row / 1000
                values.append(self.format_table_row(self.data[data_key][key]))
        label_columns = (["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"])
        label_rows = []
        label_rows.extend(["Totaal"])
        label_rows.extend(["+/- Begroting"])
        label_rows.extend(self.data['baten'].keys())
        label_rows.extend(self.data['lasten'].keys())
        colors = []
        for key in self.data['baten'].keys():
            colors.extend([self.colormap['baten'][key]])
        for key in self.data['lasten'].keys():
            colors.extend([self.colormap['lasten'][key]])
        # NOTE(review): this third loop re-appends the baten colours, making
        # len(colors) exceed len(label_rows); looks like a copy/paste leftover
        # (extra rowColours appear to be ignored by ax.table) -- confirm
        # before removing.
        for key in self.data['baten'].keys():
            colors.extend([self.colormap['baten'][key]])
        if colors:
            colors = np.insert(colors, 0, [1, 1, 1, 1], 0) # Hack for making sure color realisatie
            colors = np.insert(colors, 0, [1, 1, 1, 1], 0) # Hack for making sure color realisatie
        else:
            colors = [[1, 1, 1, 1], [1, 1, 1, 1]]
        the_table = ax.table(cellText=values, rowLabels=label_rows, rowColours=colors,
                             colLabels=label_columns, loc='bottom', rowLoc='right')
        the_table.set_fontsize(14)
        the_table.scale(1, 2)
        # Add y-lines:
        for i in range(0, 15):
            ax.axvline(i + 0.5, color='grey', ls=':')
        # Save the graph in dir with proper perm.
        dir_graph = os.path.split(self.path)[0]
        if not os.path.isdir(dir_graph):
            os.makedirs(dir_graph)
        for path in os.walk(config['graphs']['path']):
            os.chmod(path[0], 0777)  # Python 2 octal literal (0o777 in Python 3)
        fig.savefig(self.path, bbox_inches='tight')
        os.chmod(self.path, 0666)  # Python 2 octal literal (0o666 in Python 3)
        plt.close(fig)
| |
import logging
import wx
from wx.lib.wordwrap import wordwrap
import threading
from updater import AutoUpdate
from tooling.instance import SingleInstance
import os
import traceback
# Custom wx event type IDs used to marshal worker-thread results (complete,
# progress, failure) back to the UI thread via PostEvent/Connect.
EVT_COMPLETE_ID = wx.NewId()
EVT_PROGRESS_ID = wx.NewId()
EVT_FAIL_ID = wx.NewId()
def EVT_COMPLETE(win, func):
    """Bind *func* on *win* to the custom 'complete' event type."""
    connect = win.Connect
    connect(-1, -1, EVT_COMPLETE_ID, func)
def EVT_PROGRESS(win, func):
    """Bind *func* on *win* to the custom 'progress' event type."""
    connect = win.Connect
    connect(-1, -1, EVT_PROGRESS_ID, func)
def EVT_FAIL(win, func):
    """Bind *func* on *win* to the custom 'fail' event type."""
    connect = win.Connect
    connect(-1, -1, EVT_FAIL_ID, func)
class CompleteEvent(wx.PyEvent):
    """Event posted to the UI thread when the background operation completes."""
    def __init__(self):
        wx.PyEvent.__init__(self)
        self.SetEventType(EVT_COMPLETE_ID)
class ProgressEvent(wx.PyEvent):
    """Event carrying download progress (bytes read vs. expected total)."""
    def __init__(self, bytesRead, expectedBytes):
        wx.PyEvent.__init__(self)
        self.SetEventType(EVT_PROGRESS_ID)
        # progress counters consumed by the UI's progress handler
        self.bytesRead = bytesRead
        self.expectedBytes = expectedBytes
class FailEvent(wx.PyEvent):
    """Event posted to the GUI thread when the install fails."""
    def __init__(self):
        wx.PyEvent.__init__(self)
        self.SetEventType(EVT_FAIL_ID)
class wxImagePanel(wx.Panel):
    """Panel that paints a background bitmap with a word-wrapped status
    label drawn over it (the body of the auto-update window).

    Fixes: removed the dead, commented-out ``OnEraseBackground`` block
    (which contained a ``wx.ClintDC`` typo), dropped the unused
    ``textWidth`` local, and replaced the manual newline-counting loop
    with ``str.count``.
    """
    def __init__(self, parent, bitmap):
        size = (bitmap.GetWidth(), bitmap.GetHeight())
        wx.Panel.__init__(self, parent, size=size)
        # BG_STYLE_CUSTOM: we repaint everything ourselves in OnPaint,
        # avoiding flicker from the default background erase.
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.bitmap = bitmap
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.label = ''
    def OnPaint(self, evt):
        """Repaint through a buffered DC (flicker-free double buffering)."""
        bpdc = wx.BufferedPaintDC(self)
        dc = wx.GCDC(bpdc)
        self.Draw(dc)
    def Draw(self, dc):
        """Draw the bitmap and the wrapped label, vertically centred."""
        width, height = self.GetClientSize()
        if not width or not height:
            return
        dc.Clear()
        dc.DrawBitmap(self.bitmap, 0, 0)
        font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetPointSize(15)
        font.SetWeight(wx.FONTWEIGHT_NORMAL)
        font.SetFaceName('Segoe UI')
        dc.SetFont(font)
        textColour = wx.Colour(255, 255, 255)
        dc.SetTextForeground(textColour)
        # Only the line height is needed to centre the text block.
        _, textHeight = dc.GetTextExtent(self.label)
        # Keep the text clear of the panda artwork on the left edge.
        pandaWidth = 90
        x = pandaWidth
        text = wordwrap(self.label,
                        width - pandaWidth - 50,
                        wx.ClientDC(self))
        # One visual line per newline emitted by the word wrapper.
        lines = text.count('\n') + 1
        y = (height / 2) - textHeight * lines / 2
        dc.DrawText(text, x, y)
    def SetText(self, text):
        """Set the label text and schedule a repaint."""
        self.label = text
        self.Invalidate()
    def Invalidate(self):
        self.Refresh()
class wxUpdateFrame(wx.Frame):
    """Top-level window shown while the Digital Panda downloads/installs.

    Listens for the custom events posted by the PandaInstaller worker
    thread and updates the status text accordingly.
    """
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, "Digital Panda",
                          wx.DefaultPosition,
                          style=wx.CLOSE_BOX | wx.SYSTEM_MENU | wx.CAPTION)
        icon = wx.Icon('gfx/digital-panda-icon.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(icon)
        # panel
        image = wx.Image('gfx/digitalpanda_autoupdate.png', wx.BITMAP_TYPE_ANY)
        bitmap = image.ConvertToBitmap()
        self.framePanel = wxImagePanel(self, bitmap)
        # Route the installer-thread events to the handlers below.
        EVT_COMPLETE(self, self.OnResult)
        EVT_PROGRESS(self, self.OnProgress)
        EVT_FAIL(self, self.OnFail)
    def SetText(self, text):
        """Show *text* on the embedded image panel."""
        self.framePanel.SetText(text)
    def OnResult(self, event):
        """Install finished: launch the installed app and close this window."""
        self.framePanel.SetText('Starting the Digital Panda...')
        StartPanda()
        self.Close()
    def OnProgress(self, event):
        """Show download progress (bytes converted to MB for display)."""
        megabytesRead = 0
        if event.bytesRead > 0:
            megabytesRead = event.bytesRead / 1024.0 / 1024.0
        expectedMegaBytes = event.expectedBytes / 1024.0 / 1024.0
        self.framePanel.SetText(('Downloading the Digital Panda\n'
                                 '%s MB / %s MB') %
                                (format(megabytesRead, '.2f'),
                                 format(expectedMegaBytes, '.2f')))
    def OnFail(self, event):
        """Install failed: show an apologetic retry message."""
        self.framePanel.SetText(('Digital Panda installation failed!\n'
                                 ':(\nPlease try again.'))
class PandaInstaller(threading.Thread):
    """Worker thread that installs the app via AutoUpdate and reports
    progress back to *notify_window* through posted wx events.

    AutoUpdate calls SignalDownloadProgress on this object during the
    download, which is why the thread passes itself to AutoUpdate.
    """
    def __init__(self, notify_window, upgradeHost):
        threading.Thread.__init__(self)
        self._notify_window = notify_window
        self.upgradeHost = upgradeHost
    def run(self):
        try:
            autoUpdate = AutoUpdate(self, self.upgradeHost)
            if autoUpdate.Install():
                self.SignalComplete()
            else:
                self.SignalFail()
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception and keep the traceback.
            logging.error(traceback.format_exc())
            self.SignalFail()
    def SignalComplete(self):
        """Post a CompleteEvent to the GUI thread."""
        wx.PostEvent(self._notify_window, CompleteEvent())
    def SignalDownloadProgress(self, bytesRead, expectedBytes):
        """Post a ProgressEvent (invoked by AutoUpdate during download)."""
        wx.PostEvent(self._notify_window,
                     ProgressEvent(bytesRead, expectedBytes))
    def SignalFail(self):
        """Post a FailEvent to the GUI thread."""
        wx.PostEvent(self._notify_window,
                     FailEvent())
def InstallPanda(upgradeHost = None):
    """Show the install window and run the installer on a worker thread.

    Blocks in the wx main loop until the update window closes.
    Fix: removed the duplicated `frame.SetText('Installing...')` call that
    was issued both before `Show` and again after the thread start.
    """
    # the panda isn't installed at all!
    app = wx.PySimpleApp()
    frame = wxUpdateFrame()
    frame.Fit()
    frame.Centre()
    frame.SetText('Installing the Digital Panda...')
    frame.Show(True)
    # start up a thread for the update
    installerThread = PandaInstaller(frame, upgradeHost)
    installerThread.start()
    app.MainLoop()
def StartPanda(upgradeHost = None):
    """Launch the installed Digital Panda via its shortcut.

    NOTE: ``os.startfile`` is Windows-only.
    """
    logging.info("starting the panda")
    autoUpdate = AutoUpdate(None, upgradeHost)
    os.startfile(autoUpdate.GetShortcutPath())
def main(args):
    """Entry point: enforce a single instance, then start or install the app.

    args[0], when present, overrides the default upgrade host.
    """
    logging.basicConfig(level=logging.DEBUG)
    instanceName = '{5A475CB1-CDB5-46b5-B221-4E36602FC47E}'
    myapp = SingleInstance(instanceName)
    try:
        if myapp.alreadyRunning():
            logging.info('another instance of sync tool already running')
            # Fix: bail out here. Previously the check only logged and fell
            # through, so a second copy was launched anyway.
            return
        if len(args) > 0:
            upgradeHost = args[0]
        else:
            upgradeHost = "www.digitalpanda.co.za"
        logging.debug('upgradeHost is: %s' % upgradeHost)
        autoUpdate = AutoUpdate(None, upgradeHost)
        if autoUpdate.IsInstalled():
            logging.debug('panda already installed - start...')
            os.startfile(autoUpdate.GetShortcutPath())
        else:
            logging.debug('panda not installed - starting install')
            InstallPanda(upgradeHost)
    finally:
        # Release the single-instance lock.
        del myapp
| |
"""
pygments.lexers.dylan
~~~~~~~~~~~~~~~~~~~~~
Lexers for the Dylan language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, do_insertions, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Literal, Whitespace
__all__ = ['DylanLexer', 'DylanConsoleLexer', 'DylanLidLexer']
class DylanLexer(RegexLexer):
    """
    For the `Dylan <http://www.opendylan.org/>`_ language.
    .. versionadded:: 0.7
    """
    name = 'Dylan'
    aliases = ['dylan']
    filenames = ['*.dylan', '*.dyl', '*.intr']
    mimetypes = ['text/x-dylan']
    flags = re.IGNORECASE
    # Word sets used by get_tokens_unprocessed below to re-tag generic Name
    # tokens produced by the regex rules (lookup is case-insensitive).
    builtins = {
        'subclass', 'abstract', 'block', 'concrete', 'constant', 'class',
        'compiler-open', 'compiler-sideways', 'domain', 'dynamic',
        'each-subclass', 'exception', 'exclude', 'function', 'generic',
        'handler', 'inherited', 'inline', 'inline-only', 'instance',
        'interface', 'import', 'keyword', 'library', 'macro', 'method',
        'module', 'open', 'primary', 'required', 'sealed', 'sideways',
        'singleton', 'slot', 'thread', 'variable', 'virtual'}
    keywords = {
        'above', 'afterwards', 'begin', 'below', 'by', 'case', 'cleanup',
        'create', 'define', 'else', 'elseif', 'end', 'export', 'finally',
        'for', 'from', 'if', 'in', 'let', 'local', 'otherwise', 'rename',
        'select', 'signal', 'then', 'to', 'unless', 'until', 'use', 'when',
        'while'}
    operators = {
        '~', '+', '-', '*', '|', '^', '=', '==', '~=', '~==', '<', '<=',
        '>', '>=', '&', '|'}
    functions = {
        'abort', 'abs', 'add', 'add!', 'add-method', 'add-new', 'add-new!',
        'all-superclasses', 'always', 'any?', 'applicable-method?', 'apply',
        'aref', 'aref-setter', 'as', 'as-lowercase', 'as-lowercase!',
        'as-uppercase', 'as-uppercase!', 'ash', 'backward-iteration-protocol',
        'break', 'ceiling', 'ceiling/', 'cerror', 'check-type', 'choose',
        'choose-by', 'complement', 'compose', 'concatenate', 'concatenate-as',
        'condition-format-arguments', 'condition-format-string', 'conjoin',
        'copy-sequence', 'curry', 'default-handler', 'dimension', 'dimensions',
        'direct-subclasses', 'direct-superclasses', 'disjoin', 'do',
        'do-handlers', 'element', 'element-setter', 'empty?', 'error', 'even?',
        'every?', 'false-or', 'fill!', 'find-key', 'find-method', 'first',
        'first-setter', 'floor', 'floor/', 'forward-iteration-protocol',
        'function-arguments', 'function-return-values',
        'function-specializers', 'gcd', 'generic-function-mandatory-keywords',
        'generic-function-methods', 'head', 'head-setter', 'identity',
        'initialize', 'instance?', 'integral?', 'intersection',
        'key-sequence', 'key-test', 'last', 'last-setter', 'lcm', 'limited',
        'list', 'logand', 'logbit?', 'logior', 'lognot', 'logxor', 'make',
        'map', 'map-as', 'map-into', 'max', 'member?', 'merge-hash-codes',
        'min', 'modulo', 'negative', 'negative?', 'next-method',
        'object-class', 'object-hash', 'odd?', 'one-of', 'pair', 'pop',
        'pop-last', 'positive?', 'push', 'push-last', 'range', 'rank',
        'rcurry', 'reduce', 'reduce1', 'remainder', 'remove', 'remove!',
        'remove-duplicates', 'remove-duplicates!', 'remove-key!',
        'remove-method', 'replace-elements!', 'replace-subsequence!',
        'restart-query', 'return-allowed?', 'return-description',
        'return-query', 'reverse', 'reverse!', 'round', 'round/',
        'row-major-index', 'second', 'second-setter', 'shallow-copy',
        'signal', 'singleton', 'size', 'size-setter', 'slot-initialized?',
        'sort', 'sort!', 'sorted-applicable-methods', 'subsequence-position',
        'subtype?', 'table-protocol', 'tail', 'tail-setter', 'third',
        'third-setter', 'truncate', 'truncate/', 'type-error-expected-type',
        'type-error-value', 'type-for-copy', 'type-union', 'union', 'values',
        'vector', 'zero?'}
    # Dylan identifiers may contain many operator-like characters; this is
    # why operators are first lexed as Name and re-tagged afterwards.
    valid_name = '\\\\?[\\w!&*<>|^$%@\\-+~?/=]+'
    def get_tokens_unprocessed(self, text):
        """Re-tag generic Name tokens as builtins, keywords, functions or
        operators based on the case-folded word sets above."""
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                lowercase_value = value.lower()
                if lowercase_value in self.builtins:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.keywords:
                    yield index, Keyword, value
                    continue
                if lowercase_value in self.functions:
                    yield index, Name.Builtin, value
                    continue
                if lowercase_value in self.operators:
                    yield index, Operator, value
                    continue
            yield index, token, value
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Whitespace),
            # single line comment
            (r'//.*?\n', Comment.Single),
            # lid header
            (r'([a-z0-9-]+)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Whitespace, String)),
            default('code') # no header match, switch to code
        ],
        'code': [
            # Whitespace
            (r'\s+', Whitespace),
            # single line comment
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            # multi-line comment
            (r'/\*', Comment.Multiline, 'comment'),
            # strings and characters
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-f0-9]{1,2}|[^\\\'\n])'", String.Char),
            # binary integer
            (r'#b[01]+', Number.Bin),
            # octal integer
            (r'#o[0-7]+', Number.Oct),
            # floating point
            (r'[-+]?(\d*\.\d+(e[-+]?\d+)?|\d+(\.\d*)?e[-+]?\d+)', Number.Float),
            # decimal integer
            (r'[-+]?\d+', Number.Integer),
            # hex integer
            (r'#x[0-9a-f]+', Number.Hex),
            # Macro parameters
            (r'(\?' + valid_name + ')(:)'
             r'(token|name|variable|expression|body|case-body|\*)',
             bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'(\?)(:)(token|name|variable|expression|body|case-body|\*)',
             bygroups(Name.Tag, Operator, Name.Builtin)),
            (r'\?' + valid_name, Name.Tag),
            # Punctuation
            (r'(=>|::|#\(|#\[|##|\?\?|\?=|\?|[(){}\[\],.;])', Punctuation),
            # Most operators are picked up as names and then re-flagged.
            # This one isn't valid in a name though, so we pick it up now.
            (r':=', Operator),
            # Pick up #t / #f before we match other stuff with #.
            (r'#[tf]', Literal),
            # #"foo" style keywords
            (r'#"', String.Symbol, 'keyword'),
            # #rest, #key, #all-keys, etc.
            (r'#[a-z0-9-]+', Keyword),
            # required-init-keyword: style keywords.
            (valid_name + ':', Keyword),
            # class names
            ('<' + valid_name + '>', Name.Class),
            # define variable forms.
            (r'\*' + valid_name + r'\*', Name.Variable.Global),
            # define constant forms.
            (r'\$' + valid_name, Name.Constant),
            # everything else. We re-flag some of these in the method above.
            (valid_name, Name),
        ],
        'comment': [
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'keyword': [
            (r'"', String.Symbol, '#pop'),
            (r'[^\\"]+', String.Symbol), # all other characters
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-f0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ]
    }
class DylanLidLexer(RegexLexer):
    """
    For Dylan LID (Library Interchange Definition) files.
    .. versionadded:: 1.6
    """
    name = 'DylanLID'
    aliases = ['dylan-lid', 'lid']
    filenames = ['*.lid', '*.hdp']
    mimetypes = ['text/x-dylan-lid']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            # Whitespace
            (r'\s+', Whitespace),
            # single line comment
            (r'(//.*?)(\n)', bygroups(Comment.Single, Whitespace)),
            # lid header: "key: value" with optional indented continuations
            (r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
             bygroups(Name.Attribute, Operator, Whitespace, String)),
        ]
    }
class DylanConsoleLexer(Lexer):
    """
    For Dylan interactive console output like:
    .. sourcecode:: dylan-console
        ? let a = 1;
        => 1
        ? a
        => 1
    This is based on a copy of the RubyConsoleLexer.
    .. versionadded:: 1.6
    """
    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']
    _line_re = re.compile('.*?\n')
    # A line starting with '?' is a prompt; a leading space marks wrapped
    # prompt input.
    _prompt_re = re.compile(r'\?| ')
    def get_tokens_unprocessed(self, text):
        """Split prompts/output, delegating prompted code to DylanLexer.

        Prompt markers are yielded as Generic.Prompt via do_insertions;
        consecutive code lines are buffered and lexed together so that
        multi-line constructs highlight correctly.
        """
        dylexer = DylanLexer(**self.options)
        curcode = ''
        insertions = []
        for match in self._line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                # Non-prompt line: flush any buffered code first, then emit
                # this line as plain console output.
                if curcode:
                    yield from do_insertions(insertions,
                                             dylexer.get_tokens_unprocessed(curcode))
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        # Flush trailing code that was not followed by an output line.
        if curcode:
            yield from do_insertions(insertions,
                                     dylexer.get_tokens_unprocessed(curcode))
| |
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse
)
from ..utils import (
extract_attributes,
float_or_none,
int_or_none,
try_get,
url_or_none,
)
class TEDIE(InfoExtractor):
    """Extractor for TED talk, playlist and watch pages.

    Fix: ``resources_`` in ``_talk_info`` may be ``None`` (e.g. for talks
    hosted externally on YouTube); iterating ``None.items()`` raised
    AttributeError before the external-service fallback could run, so the
    lookup is now guarded with ``or {}``.
    """
    IE_NAME = 'ted'
    _VALID_URL = r'''(?x)
        (?P<proto>https?://)
        (?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/
        (
            (?P<type_playlist>playlists(?:/(?P<playlist_id>\d+))?) # We have a playlist
            |
            ((?P<type_talk>talks)) # We have a simple talk
            |
            (?P<type_watch>watch)/[^/]+/[^/]+
        )
        (/lang/(.*?))? # The url may contain the language
        /(?P<name>[\w-]+) # Here goes the name and then ".html"
        .*)$
        '''
    _TESTS = [{
        'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
        'md5': 'b0ce2b05ca215042124fbc9e3886493a',
        'info_dict': {
            'id': '102',
            'ext': 'mp4',
            'title': 'The illusion of consciousness',
            'description': ('Philosopher Dan Dennett makes a compelling '
                            'argument that not only don\'t we understand our own '
                            'consciousness, but that half the time our brains are '
                            'actively fooling us.'),
            'uploader': 'Dan Dennett',
            'width': 853,
            'duration': 1308,
            'view_count': int,
            'comment_count': int,
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # missing HTTP bitrates
        'url': 'https://www.ted.com/talks/vishal_sikka_the_beauty_and_power_of_algorithms',
        'info_dict': {
            'id': '6069',
            'ext': 'mp4',
            'title': 'The beauty and power of algorithms',
            'thumbnail': r're:^https?://.+\.jpg',
            'description': 'md5:734e352710fb00d840ab87ae31aaf688',
            'uploader': 'Vishal Sikka',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
        'md5': 'e6b9617c01a7970ceac8bb2c92c346c0',
        'info_dict': {
            'id': '1972',
            'ext': 'mp4',
            'title': 'Be passionate. Be courageous. Be your best.',
            'uploader': 'Gabby Giffords and Mark Kelly',
            'description': 'md5:5174aed4d0f16021b704120360f72b92',
            'duration': 1128,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ted.com/playlists/who_are_the_hackers',
        'info_dict': {
            'id': '10',
            'title': 'Who are the hackers?',
            'description': 'md5:49a0dbe8fb76d81a0e64b4a80af7f15a'
        },
        'playlist_mincount': 6,
    }, {
        # contains a youtube video
        'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
        'add_ie': ['Youtube'],
        'info_dict': {
            'id': '_ZG8HBuDjgc',
            'ext': 'webm',
            'title': 'Douglas Adams: Parrots the Universe and Everything',
            'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
            'uploader': 'University of California Television (UCTV)',
            'uploader_id': 'UCtelevision',
            'upload_date': '20080522',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # no nativeDownloads
        'url': 'https://www.ted.com/talks/tom_thum_the_orchestra_in_my_mouth',
        'info_dict': {
            'id': '1792',
            'ext': 'mp4',
            'title': 'The orchestra in my mouth',
            'description': 'md5:5d1d78650e2f8dfcbb8ebee2951ac29a',
            'uploader': 'Tom Thum',
            'view_count': int,
            'comment_count': int,
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # with own formats and private Youtube external
        'url': 'https://www.ted.com/talks/spencer_wells_a_family_tree_for_humanity',
        'only_matching': True,
    }]
    # Dimensions for TED's own named quality levels.
    _NATIVE_FORMATS = {
        'low': {'width': 320, 'height': 180},
        'medium': {'width': 512, 'height': 288},
        'high': {'width': 854, 'height': 480},
    }
    def _extract_info(self, webpage):
        """Parse the embedded JSON blob that TED pages pass to q(...init)."""
        info_json = self._search_regex(
            r'(?s)q\(\s*"\w+.init"\s*,\s*({.+?})\)\s*</script>',
            webpage, 'info json')
        return json.loads(info_json)
    def _real_extract(self, url):
        """Dispatch on URL type: embed -> desktop, talk/watch/playlist."""
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        if m.group('type').startswith('embed'):
            desktop_url = m.group('proto') + 'www' + m.group('urlmain')
            return self.url_result(desktop_url, 'TED')
        name = m.group('name')
        if m.group('type_talk'):
            return self._talk_info(url, name)
        elif m.group('type_watch'):
            return self._watch_info(url, name)
        else:
            return self._playlist_videos_info(url, name)
    def _playlist_videos_info(self, url, name):
        '''Returns the videos of the playlist'''
        webpage = self._download_webpage(url, name,
                                         'Downloading playlist webpage')
        playlist_entries = []
        for entry in re.findall(r'(?s)<[^>]+data-ga-context=["\']playlist["\'][^>]*>', webpage):
            attrs = extract_attributes(entry)
            entry_url = compat_urlparse.urljoin(url, attrs['href'])
            playlist_entries.append(self.url_result(entry_url, self.ie_key()))
        final_url = self._og_search_url(webpage, fatal=False)
        playlist_id = (
            re.match(self._VALID_URL, final_url).group('playlist_id')
            if final_url else None)
        return self.playlist_result(
            playlist_entries, playlist_id=playlist_id,
            playlist_title=self._og_search_title(webpage, fatal=False),
            playlist_description=self._og_search_description(webpage))
    def _talk_info(self, url, video_name):
        """Extract formats, subtitles and metadata for a single talk."""
        webpage = self._download_webpage(url, video_name)
        info = self._extract_info(webpage)
        data = try_get(info, lambda x: x['__INITIAL_DATA__'], dict) or info
        talk_info = data['talks'][0]
        title = talk_info['title'].strip()
        downloads = talk_info.get('downloads') or {}
        native_downloads = downloads.get('nativeDownloads') or talk_info.get('nativeDownloads') or {}
        formats = [{
            'url': format_url,
            'format_id': format_id,
        } for (format_id, format_url) in native_downloads.items() if format_url is not None]
        subtitled_downloads = downloads.get('subtitledDownloads') or {}
        for lang, subtitled_download in subtitled_downloads.items():
            for q in self._NATIVE_FORMATS:
                q_url = subtitled_download.get(q)
                if not q_url:
                    continue
                formats.append({
                    'url': q_url,
                    'format_id': '%s-%s' % (q, lang),
                    'language': lang,
                })
        if formats:
            for f in formats:
                finfo = self._NATIVE_FORMATS.get(f['format_id'].split('-')[0])
                if finfo:
                    f.update(finfo)
        player_talk = talk_info['player_talks'][0]
        # May be None for externally hosted talks; guard so iteration is
        # skipped and the external-service fallback below can run.
        resources_ = player_talk.get('resources') or talk_info.get('resources') or {}
        http_url = None
        for format_id, resources in resources_.items():
            if format_id == 'hls':
                if not isinstance(resources, dict):
                    continue
                stream_url = url_or_none(resources.get('stream'))
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_name, 'mp4', m3u8_id=format_id,
                    fatal=False))
            else:
                if not isinstance(resources, list):
                    continue
                if format_id == 'h264':
                    for resource in resources:
                        h264_url = resource.get('file')
                        if not h264_url:
                            continue
                        bitrate = int_or_none(resource.get('bitrate'))
                        formats.append({
                            'url': h264_url,
                            'format_id': '%s-%sk' % (format_id, bitrate),
                            'tbr': bitrate,
                        })
                        if re.search(r'\d+k', h264_url):
                            http_url = h264_url
                elif format_id == 'rtmp':
                    streamer = talk_info.get('streamer')
                    if not streamer:
                        continue
                    for resource in resources:
                        formats.append({
                            'format_id': '%s-%s' % (format_id, resource.get('name')),
                            'url': streamer,
                            'play_path': resource['file'],
                            'ext': 'flv',
                            'width': int_or_none(resource.get('width')),
                            'height': int_or_none(resource.get('height')),
                            'tbr': int_or_none(resource.get('bitrate')),
                        })
        m3u8_formats = list(filter(
            lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none',
            formats))
        if http_url:
            # Synthesize progressive HTTP variants from the HLS ladder by
            # substituting each m3u8 bitrate into the known HTTP URL pattern.
            for m3u8_format in m3u8_formats:
                bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None)
                if not bitrate:
                    continue
                bitrate_url = re.sub(r'\d+k', bitrate, http_url)
                if not self._is_valid_url(
                        bitrate_url, video_name, '%s bitrate' % bitrate):
                    continue
                f = m3u8_format.copy()
                f.update({
                    'url': bitrate_url,
                    'format_id': m3u8_format['format_id'].replace('hls', 'http'),
                    'protocol': 'http',
                })
                if f.get('acodec') == 'none':
                    del f['acodec']
                formats.append(f)
        audio_download = talk_info.get('audioDownload')
        if audio_download:
            formats.append({
                'url': audio_download,
                'format_id': 'audio',
                'vcodec': 'none',
            })
        if not formats:
            # No TED-hosted media: delegate to the external service (YouTube).
            external = player_talk.get('external')
            if isinstance(external, dict):
                service = external.get('service')
                if isinstance(service, compat_str):
                    ext_url = None
                    if service.lower() == 'youtube':
                        ext_url = external.get('code')
                    return self.url_result(ext_url or external['uri'])
        self._sort_formats(formats)
        video_id = compat_str(talk_info['id'])
        return {
            'id': video_id,
            'title': title,
            'uploader': player_talk.get('speaker') or talk_info.get('speaker'),
            'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'),
            'description': self._og_search_description(webpage),
            'subtitles': self._get_subtitles(video_id, talk_info),
            'formats': formats,
            'duration': float_or_none(talk_info.get('duration')),
            'view_count': int_or_none(data.get('viewed_count')),
            'comment_count': int_or_none(
                try_get(data, lambda x: x['comments']['count'])),
            'tags': try_get(talk_info, lambda x: x['tags'], list),
        }
    def _get_subtitles(self, video_id, talk_info):
        """Build the subtitles dict ({lang: [{url, ext}, ...]})."""
        sub_lang_list = {}
        for language in try_get(
                talk_info,
                (lambda x: x['downloads']['languages'],
                 lambda x: x['languages']), list):
            lang_code = language.get('languageCode') or language.get('ianaCode')
            if not lang_code:
                continue
            sub_lang_list[lang_code] = [
                {
                    'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, lang_code, ext),
                    'ext': ext,
                }
                for ext in ['ted', 'srt']
            ]
        return sub_lang_list
    def _watch_info(self, url, name):
        """Extract a /watch page (jwplayer config or embedded iframe)."""
        webpage = self._download_webpage(url, name)
        config_json = self._html_search_regex(
            r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>',
            webpage, 'config', default=None)
        if not config_json:
            embed_url = self._search_regex(
                r"<iframe[^>]+class='pages-video-embed__video__object'[^>]+src='([^']+)'", webpage, 'embed url')
            return self.url_result(self._proto_relative_url(embed_url))
        config = json.loads(config_json)['config']
        video_url = config['video']['url']
        thumbnail = config.get('image', {}).get('url')
        title = self._html_search_regex(
            r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title')
        description = self._html_search_regex(
            [
                r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>',
                r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>',
            ],
            webpage, 'description', fatal=False)
        return {
            'id': name,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
        }
| |
from ordereddict import OrderedDict
from django.conf import settings
from tower import ugettext_lazy as _lazy
# WARNING: When adding a new app feature here also include a migration.
#
# WARNING: Order matters here. Don't re-order these or alphabetize them. If you
# add new ones put them on the end.
#
# These are used to dynamically generate the field list for the AppFeatures
# django model in mkt.webapps.models.
# Each entry maps FEATURE_CONSTANT -> {
#   'name':        localized display label,
#   'description': localized help text shown to developers,
#   'apis':        tuple of JS API names whose use implies the feature,
# }
APP_FEATURES = OrderedDict([
    ('APPS', {
        'name': _lazy(u'App Management API'),
        'description': _lazy(u'The app requires the `navigator.mozApps` API '
                             u'to install and manage other apps.'),
        'apis': ('navigator.mozApps',),
    }),
    ('PACKAGED_APPS', {
        'name': _lazy(u'Packaged Apps Install API'),
        'description': _lazy(
            u'The app requires the `navigator.mozApps.installPackage` API '
            u'to install other packaged apps.'),
        'apis': ('navigator.mozApps.installPackage',),
    }),
    ('PAY', {
        'name': _lazy(u'Web Payment'),
        # NOTE(review): description mentions `navigator.mozApps` but the apis
        # listed are navigator.pay/mozPay — looks copy-pasted; confirm and fix
        # together with the localization catalogs.
        'description': _lazy(u'The app requires the `navigator.mozApps` API.'),
        'apis': ('navigator.pay', 'navigator.mozPay',),
    }),
    ('ACTIVITY', {
        'name': _lazy(u'Web Activities'),
        'description': _lazy(u'The app requires Web Activities '
                             u'(the `MozActivity` API).'),
        'apis': ('MozActivity',),
    }),
    ('LIGHT_EVENTS', {
        'name': _lazy(u'Ambient Light Sensor'),
        'description': _lazy(u'The app requires an ambient light sensor '
                             u'(the `ondevicelight` API).'),
        'apis': ('window.ondevicelight',),
    }),
    ('ARCHIVE', {
        'name': _lazy(u'Archive'),
        'description': u'',
        'apis': (),
    }),
    ('BATTERY', {
        'name': _lazy(u'Battery'),
        'description': _lazy(u'The app requires the `navigator.battery` API.'),
        'apis': ('navigator.battery',),
    }),
    ('BLUETOOTH', {
        'name': u'Bluetooth',
        'description': _lazy(u'The app requires the `navigator.mozBluetooth` '
                             u'API.'),
        'apis': ('navigator.bluetooth', 'navigator.mozBluetooth'),
    }),
    ('CONTACTS', {
        'name': _lazy(u'Contacts'),
        'description': _lazy(u'The app requires the `navigator.mozContacts` '
                             u'API.'),
        'apis': ('navigator.contacts', 'navigator.mozContacts'),
    }),
    ('DEVICE_STORAGE', {
        'name': _lazy(u'Device Storage'),
        'description': _lazy(u'The app requires the Device Storage API to '
                             u'access files on the filesystem.'),
        'apis': ('navigator.getDeviceStorage',),
    }),
    ('INDEXEDDB', {
        'name': u'IndexedDB',
        'description': _lazy(u'The app requires the platform to support '
                             u'IndexedDB.'),
        'apis': ('navigator.indexedDB', 'navigator.mozIndexedDB'),
    }),
    ('GEOLOCATION', {
        'name': _lazy(u'Geolocation'),
        'description': _lazy(u'The app requires the platform to support the '
                             u'`navigator.geolocation` API.'),
        'apis': ('navigator.geolocation',),
    }),
    ('IDLE', {
        'name': _lazy(u'Idle'),
        'description': u'',
        'apis': ('addIdleObserver', 'removeIdleObserver'),
    }),
    ('NETWORK_INFO', {
        'name': _lazy(u'Network Information'),
        'description': _lazy(u'The app requires the ability to get '
                             u'information about the network connection (the '
                             u'`navigator.mozConnection` API).'),
        'apis': ('navigator.mozConnection', 'navigator.mozMobileConnection'),
    }),
    ('NETWORK_STATS', {
        'name': _lazy(u'Network Stats'),
        'description': _lazy(u'The app requires the '
                             u'`navigator.mozNetworkStats` API.'),
        'apis': ('navigator.networkStats', 'navigator.mozNetworkStats'),
    }),
    ('PROXIMITY', {
        'name': _lazy(u'Proximity'),
        'description': _lazy(u'The app requires a proximity sensor (the '
                             u'`ondeviceproximity` API).'),
        'apis': ('navigator.ondeviceproximity',),
    }),
    ('PUSH', {
        'name': _lazy(u'Simple Push'),
        'description': _lazy(u'The app requires the `navigator.mozPush` API.'),
        'apis': ('navigator.push', 'navigator.mozPush'),
    }),
    ('ORIENTATION', {
        'name': _lazy(u'Screen Orientation'),
        'description': _lazy(u'The app requires the platform to support the '
                             u'`ondeviceorientation` API.'),
        'apis': ('ondeviceorientation',),
    }),
    ('TIME_CLOCK', {
        'name': _lazy(u'Time/Clock'),
        'description': _lazy(u'The app requires the `navigator.mozTime` API.'),
        'apis': ('navigator.time', 'navigator.mozTime'),
    }),
    ('VIBRATE', {
        'name': _lazy(u'Vibration'),
        'description': _lazy(u'The app requires the device to support '
                             u'vibration (the `navigator.vibrate` API).'),
        'apis': ('navigator.vibrate',),
    }),
    ('FM', {
        'name': u'WebFM',
        'description': _lazy(u'The app requires the `navigator.mozFM` or '
                             u'`navigator.mozFMRadio` APIs.'),
        'apis': ('navigator.mozFM', 'navigator.mozFMRadio'),
    }),
    ('SMS', {
        'name': u'WebSMS',
        'description': _lazy(u'The app requires the `navigator.mozSms` API.'),
        'apis': ('navigator.mozSms', 'navigator.mozSMS'),
    }),
    ('TOUCH', {
        'name': _lazy(u'Touch'),
        'description': _lazy(u'The app requires the platform to support touch '
                             u'events. This option indicates that the app '
                             u'will not function when used with a mouse.'),
        'apis': ('window.ontouchstart',),
    }),
    ('QHD', {
        'name': _lazy(u'Smartphone-Sized Displays (qHD)'),
        'description': _lazy(u'The app requires the platform to have a '
                             u'smartphone-sized display (having qHD '
                             u'resolution). This option indicates that the '
                             u'app will be unusable on larger displays '
                             u'(e.g., tablets, desktop, large or high-DPI '
                             u'phones).'),
        'apis': (),
    }),
    ('MP3', {
        'name': u'MP3',
        'description': _lazy(u'The app requires that the platform can decode '
                             u'and play MP3 files.'),
        'apis': (),
    }),
    ('AUDIO', {
        'name': _lazy(u'Audio'),
        'description': _lazy(u'The app requires that the platform supports '
                             u'the HTML5 audio API.'),
        'apis': ('Audio',),
    }),
    ('WEBAUDIO', {
        'name': _lazy(u'Web Audio'),
        'description': _lazy(u'The app requires that the platform supports '
                             u'the Web Audio API (`window.AudioContext`).'),
        'apis': ('AudioContext', 'mozAudioContext', 'webkitAudioContext'),
    }),
    ('VIDEO_H264', {
        'name': u'H.264',
        'description': _lazy(u'The app requires that the platform can decode '
                             u'and play H.264 video files.'),
        'apis': (),
    }),
    ('VIDEO_WEBM', {
        'name': u'WebM',
        'description': _lazy(u'The app requires that the platform can decode '
                             u'and play WebM video files (VP8).'),
        'apis': (),
    }),
    ('FULLSCREEN', {
        'name': _lazy(u'Full Screen'),
        'description': _lazy(u'The app requires the Full Screen API '
                             u'(`requestFullScreen` or '
                             u'`mozRequestFullScreen`).'),
        'apis': ('document.documentElement.requestFullScreen',),
    }),
    ('GAMEPAD', {
        'name': _lazy(u'Gamepad'),
        'description': _lazy(u'The app requires the platform to support the '
                             u'gamepad API (`navigator.getGamepads`).'),
        'apis': ('navigator.getGamepad', 'navigator.mozGetGamepad'),
    }),
    ('QUOTA', {
        'name': _lazy(u'Quota Management'),
        'description': _lazy(u'The app requires the platform to allow '
                             u'persistent storage limit increases above the '
                             u'normally allowed limits for an app '
                             u'(`window.StorageInfo` or '
                             u'`window.persistentStorage`).'),
        'apis': ('navigator.persistentStorage', 'navigator.temporaryStorage'),
    }),
    ('CAMERA', {
        'name': _lazy(u'Camera'),
        'description': _lazy(u'The app requires the platform to allow access '
                             u'to video from the device camera via a '
                             u'LocalMediaStream object.'),
        'apis': ('navigator.getUserMedia({video: true, picture: true})',),
    }),
    ('MIC', {
        'name': _lazy(u'Microphone'),
        'description': _lazy(u'The app requires the platform to allow access '
                             u'to audio from the device microphone.'),
        'apis': ('navigator.getUserMedia({audio: true})',),
    }),
    ('SCREEN_CAPTURE', {
        'name': _lazy(u'Screen Capture'),
        'description': _lazy(u'The app requires the platform to allow access '
                             u'to the device screen for capture.'),
        'apis': ('navigator.getUserMedia({video: {mandatory: '
                 '{chromeMediaSource: "screen"}}})',),
    }),
    ('WEBRTC_MEDIA', {
        'name': _lazy(u'WebRTC MediaStream'),
        'description': _lazy(u'The app requires the platform to allow web '
                             u'real-time communication browser-to-browser '
                             u'inbound media streams.'),
        'apis': ('MediaStream',),
    }),
    ('WEBRTC_DATA', {
        'name': _lazy(u'WebRTC DataChannel'),
        'description': _lazy(u'The app requires the platform to allow '
                             u'peer-to-peer exchange of data other than audio '
                             u'and video.'),
        'apis': ('DataChannel',),
    }),
    ('WEBRTC_PEER', {
        'name': _lazy(u'WebRTC PeerConnection'),
        'description': _lazy(u'The app requires the platform to allow '
                             u'communication of streaming data between '
                             u'peers.'),
        'apis': ('RTCPeerConnection',),
    }),
    ('SPEECH_SYN', {
        'name': _lazy(u'Web Speech Synthesis'),
        'description': _lazy(u'The app requires the platform to allow the use '
                             u'of text-to-speech.'),
        'apis': ('SpeechSynthesis',)
    }),
    ('SPEECH_REC', {
        'name': _lazy(u'Web Speech Recognition'),
        'description': _lazy(u'The app requires the platform to allow '
                             u'the use of speech-to-text.'),
        'apis': ('SpeechRecognition',)
    }),
    ('POINTER_LOCK', {
        'name': _lazy(u'Pointer Lock'),
        'description': _lazy(u'The app requires the platform to provide '
                             u'additional information and control about the '
                             u'pointer.'),
        'apis': ('document.documentElement.requestPointerLock',)
    }),
    ('NOTIFICATION', {
        'name': _lazy(u'Notifications'),
        'description': _lazy(u'The app requires the platform to allow the '
                             u'displaying phone and desktop notifications to '
                             u'the user.'),
        'apis': ('Notification', 'navigator.mozNotification')
    }),
    ('ALARM', {
        'name': _lazy(u'Alarms'),
        'description': _lazy(u'The app requires the platform to provide '
                             u'access to the device alarm settings to '
                             u'schedule notifications and events at specific '
                             u'time.'),
        'apis': ('navigator.mozAlarms',)
    }),
    ('SYSTEMXHR', {
        'name': _lazy(u'SystemXHR'),
        'description': _lazy(u'The app requires the platform to allow the '
                             u'sending of asynchronous HTTP requests without '
                             u'the restrictions of the same-origin policy.'),
        'apis': ('XMLHttpRequest({mozSystem: true})',)
    }),
    ('TCPSOCKET', {
        'name': _lazy(u'TCP Sockets'),
        'description': _lazy(u'The app requires the platform to allow opening '
                             u'raw TCP sockets.'),
        'apis': ('TCPSocket', 'navigator.mozTCPSocket',
                 'navigator.mozTCPServerSocket')
    }),
    ('THIRDPARTY_KEYBOARD_SUPPORT', {
        'name': _lazy(u'Third-Party Keyboard Support'),
        'description': _lazy(u'The app requires the platform to support '
                             u'third-party keyboards.'),
        'apis': ('navigator.mozInputMethod',),
    }),
])
class FeatureProfile(OrderedDict):
    """
    Convenience class for performing conversion operations on feature profile
    representations.

    Maps each feature name in APP_FEATURES (lower-cased) to a boolean and
    converts between keyword arguments, an integer bitfield and the dotted
    hex "signature" string (e.g. '40000000.32.1').
    """
    def __init__(self, **kwargs):
        """
        Creates a FeatureProfile object.

        Takes kwargs to the features to enable or disable. Features not
        specified but that are in APP_FEATURES will be False by default.

        E.g.:

        >>> FeatureProfile(sms=True).to_signature()
        '400.32.1'
        """
        super(FeatureProfile, self).__init__()
        # Insert every known feature in APP_FEATURES order so each feature's
        # bitfield position stays stable across instances.
        for af in APP_FEATURES:
            key = af.lower()
            self[key] = kwargs.get(key, False)

    @classmethod
    def from_int(cls, features):
        """
        Construct a FeatureProfile object from a integer bitfield.

        >>> FeatureProfile.from_int(0x42)
        FeatureProfile([('apps', False), ('packaged_apps', True), ...)
        """
        instance = cls()
        # The last feature in APP_FEATURES occupies the least-significant
        # bit, hence the reversed() iteration.
        for i, k in enumerate(reversed(APP_FEATURES)):
            instance[k.lower()] = bool(features & 1 << i)
        return instance

    @classmethod
    def from_signature(cls, signature):
        """
        Construct a FeatureProfile object from a decimal signature.

        >>> FeatureProfile.from_signature('40000000.32.1')
        FeatureProfile([('apps', False), ('packaged_apps', True), ...)
        """
        # Only the first (hex) segment encodes the features; the remaining
        # segments are the feature count and APP_FEATURES_VERSION.
        dehexed = int(signature.split('.')[0], 16)
        return cls.from_int(dehexed)

    def to_int(self):
        """
        Convert a FeatureProfile object to an integer bitfield.

        >>> profile.to_int()
        66
        """
        features = 0
        # Mirror image of from_int(): the last stored value maps to bit 0.
        for i, v in enumerate(reversed(self.values())):
            features |= bool(v) << i
        return features

    def to_signature(self):
        """
        Convert a FeatureProfile object to its decimal signature.

        >>> profile.to_signature()
        '40000000.32.1'
        """
        return '%x.%s.%s' % (self.to_int(), len(self),
                             settings.APP_FEATURES_VERSION)

    def to_list(self):
        """
        Returns a list representing the true values of this profile.
        """
        # iteritems: this module targets Python 2.
        return [k for k, v in self.iteritems() if v]

    def to_kwargs(self, prefix=''):
        """
        Returns a dict representing the false values of this profile.

        Parameters:

        - `prefix` - a string prepended to the key name. Helpful if being used
                     to traverse relations

        This only includes keys for which the profile is False, which is useful
        for querying apps where we want to filter by apps which do not require
        a feature.

        >>> profile = FeatureProject.from_signature(request.get('pro'))
        >>> Webapp.objects.filter(**profile.to_kwargs())
        """
        return dict((prefix + k, False) for k, v in self.iteritems() if not v)
| |
#python standard library
from time import time, sleep
import threading
from abc import ABCMeta, abstractproperty
# third-party module
import numpy
#apetools
from basepollster import BasePollster
from apetools.parsers import oatbran
class BaseProcPollster(BasePollster):
    """
    A base-class for polling proc-files.

    Subclasses provide `expression_keys` (the regex groupdict keys holding
    integer counters) and `header` (the first CSV line); `run` then samples
    the proc-file every `interval` seconds and writes the per-interval
    deltas of those counters to `output`.
    """
    __metaclass__ = ABCMeta

    def __init__(self, *args, **kwargs):
        """
        :param:

         - `output`: A writeable file-like object
         - `interval`: seconds between samples
         - `expression`: a regular expression to match the output
         - `device`: the to the device to watch
         - `name`: the name of the file to watch
         - `timestamp_format`: format for timestamps
         - `use_header`: If True, prepend header to output
        """
        super(BaseProcPollster, self).__init__(*args, **kwargs)
        self._logger = None
        self._header = None
        self._expression_keys = None
        self._connection = None
        # polled by run(); set to True by stop() to end the main loop
        self.stopped = False
        return

    @property
    def connection(self):
        """
        :return: the node's connection
        """
        if self._connection is None:
            self._connection = self.device.connection
        return self._connection

    @abstractproperty
    def expression_keys(self):
        """
        :return: the keys to the expression groupdict
        """
        return self._expression_keys

    @abstractproperty
    def header(self):
        """
        :return: first line of output file
        """
        return self._header

    def stop(self):
        """
        :postcondition: `self.stopped` is True (run() exits at next check)
        """
        self.stopped = True
        return

    def run(self):
        """
        The main loop: take a baseline sample, then write per-interval
        deltas of the matched counters until stop() is called.
        """
        if self.use_header:
            # BUG FIX: previously logged the literal string "self.header"
            # instead of the header's contents.
            self.logger.info(self.header)
            self.output.write(self.header)
        start = time()
        output, error = self.connection.cat(self.name)
        start_array = numpy.zeros(len(self.expression_keys), dtype=object)
        next_array = numpy.zeros(len(self.expression_keys), dtype=object)
        # baseline sample: fill start_array but emit nothing
        for line in output:
            match = self.regex.search(line)
            if match:
                self.logger.debug(line)
                groups = match.groupdict()
                for value_index, expression_key in enumerate(self.expression_keys):
                    start_array[value_index] = int(groups[expression_key])
        try:
            sleep(self.interval - (time() - start))
        except IOError:
            # negative sleep argument: the sample overran the interval
            pass
        while not self.stopped:
            start = time()
            output, error = self.connection.cat(self.name)
            for line in output:
                match = self.regex.search(line)
                if match:
                    tstamp = self.timestamp.now
                    self.logger.debug(line)
                    groups = match.groupdict()
                    for value_index, expression_key in enumerate(self.expression_keys):
                        next_array[value_index] = int(groups[expression_key])
                    # emit per-interval deltas, then roll the baseline forward
                    self.output.write("{0},{1}\n".format(
                        tstamp,
                        ",".join(str(value) for value in (next_array - start_array))))
                    start_array = numpy.copy(next_array)
            try:
                sleep(self.interval - (time() - start))
            except IOError:
                self.logger.debug("cat {0} took more than one second".format(self.name))
        return

    def start(self):
        """
        :postcondition: run is running in a thread (self.thread)
        """
        self.stopped = False
        # strip '/' so the thread name doesn't look like a path
        name = self.name.replace('/', '')
        # NOTE(review): targets `self.run_thread`, which is not defined in
        # this class -- presumably provided by BasePollster as a wrapper
        # around run(); confirm, otherwise this should target self.run.
        self.thread = threading.Thread(target=self.run_thread, name=name)
        self.thread.daemon = True
        self.thread.start()
        return
# end class BaseProcPollster
class ProcnetdevPollsterEnum(object):
    """
    Holds the groupdict key-name constants for /proc/net/dev fields.
    """
    __slots__ = ()
    interface = 'interface'
    # receive-side field names
    (receive_bytes, receive_packets, receive_errs,
     receive_drop, receive_fifo, receive_frame) = (
        'receive_bytes', 'receive_packets', 'receive_errs',
        'receive_drop', 'receive_fifo', 'receive_frame')
    # transmit-side field names
    (transmit_bytes, transmit_packets, transmit_errs, transmit_drop,
     transmit_fifo, transmit_colls, transmit_carrier) = (
        'transmit_bytes', 'transmit_packets', 'transmit_errs',
        'transmit_drop', 'transmit_fifo', 'transmit_colls',
        'transmit_carrier')
# end class ProcnetdevPollsterEnum
class ProcnetdevPollsterIndices(object):
    """
    Holds indices used to place the /proc/net/dev values in order.
    """
    __slots__ = ()
    # receive fields occupy slots 0-5
    rbytes = 0
    rpackets = 1
    rerrs = 2
    rdrop = 3
    rfifo = 4
    rframe = 5
    # transmit fields occupy slots 6-12
    tbytes = 6
    tpackets = 7
    terrs = 8
    tdrop = 9
    tfifo = 10
    tcolls = 11
    tcarrier = 12
# end class ProcnetdevPollsterIndices
class ProcnetdevPollster(BaseProcPollster):
    """
    A class to grab the bytes and packets received at timed intervals.
    """
    def __init__(self, interface, *args, **kwargs):
        """
        :param:

         - `output`: A writeable file-like object
         - `interface`: The name of the interface to watch
         - `interval`: seconds between samples
         - `connection`: the connection to the device to watch
         - `name`: the name of the file to watch
        """
        super(ProcnetdevPollster, self).__init__(*args, **kwargs)
        self.interface = interface
        self._rexpression_keys = None
        self._texpression_keys = None
        return

    @property
    def name(self):
        """
        :return: the name for logging (or the name of the file)
        """
        if self._name is None:
            self._name = "/proc/net/dev"
        return self._name

    @property
    def expression_keys(self):
        """
        :return: keys for the regex groupdict (receive fields, then transmit)
        """
        if self._expression_keys is None:
            # this is explicitly stated to preserve the ordering
            self._expression_keys = ('receive_bytes receive_packets receive_errs '
                                     'receive_drop receive_fifo receive_frame '
                                     'transmit_bytes transmit_packets transmit_errs '
                                     'transmit_drop transmit_fifo transmit_colls '
                                     'transmit_carrier').split()
        return self._expression_keys

    @property
    def rexpression_keys(self):
        """
        :return: subset of keys needed for receiving
        """
        if self._rexpression_keys is None:
            self._rexpression_keys = [v for v in self.expression_keys if v.startswith('r')]
        return self._rexpression_keys

    @property
    def texpression_keys(self):
        """
        :return: subset of keys needed for transmitting
        """
        if self._texpression_keys is None:
            # BUG FIX: this previously read `self._expression_keys`, which is
            # None until the `expression_keys` property has been accessed --
            # go through the property so the key list is always built.
            self._texpression_keys = [v for v in self.expression_keys if v.startswith('t')]
        return self._texpression_keys

    @property
    def header(self):
        """
        :return: the first line for the output file
        """
        if self._header is None:
            # NOTE(review): 'txdrop' is missing the underscore the other
            # columns have; left as-is in case downstream parsers expect
            # the existing header text.
            self._header = ("timestamp,rx_bytes,rx_packets,rx_errs,rx_drop,rx_fifo,rx_frame,tx_bytes,"
                            "tx_packets,tx_errs,"
                            "txdrop,tx_fifo,tx_colls,tx_carrier\n")
        return self._header

    @property
    def expression(self):
        """
        :return: regular-expression source matching the interface's line;
                 the two unnamed integers skip the extra receive columns
                 between the captured receive and transmit fields.
        """
        if self._expression is None:
            integer = oatbran.INTEGER
            enum = ProcnetdevPollsterEnum
            named = oatbran.NAMED
            interface = named(n=enum.interface, e=self.interface) + ":"
            rx_values = [named(n=name, e=integer) for name in self.rexpression_keys]
            tx_values = [named(n=name, e=integer) for name in self.texpression_keys]
            self._expression = oatbran.SPACES.join([interface] + rx_values + [integer] * 2 + tx_values)
        return self._expression
# end class ProcnetdevPollster
class CpuPollsterEnum(object):
    """
    Holds the groupdict key-name constants for the /proc/stat cpu line.
    """
    # BUG FIX: was `__slots = ()` (missing trailing underscores), so the
    # slots declaration had no effect.
    __slots__ = ()
    user = 'user'
    nice = 'nice'
    system = 'system'
    idle = 'idle'
# end class CpuPollsterEnum
class CpuPollster(BaseProcPollster):
    """
    A class to grab the percent of CPU used.

    Samples the aggregate 'cpu' line of /proc/stat and writes
    ``timestamp,cpu_percent`` rows, where cpu_percent is the fraction of
    non-idle jiffies over each interval, times 100.
    """
    def __init__(self, *args, **kwargs):
        """
        :param:

         - `output`: A writeable file-like object
         - `interface`: The name of the interface to watch
         - `interval`: seconds between samples
         - `connection`: the connection to the device to watch
         - `name`: the name of the file to watch
        """
        super(CpuPollster, self).__init__(*args, **kwargs)
        return

    @property
    def name(self):
        """
        :return: the name for logging (or the name of the file)
        """
        if self._name is None:
            self._name = "/proc/stat"
        return self._name

    @property
    def expression_keys(self):
        """
        :return: keys for the regex groupdict
        """
        if self._expression_keys is None:
            # this is explicitly stated to preserve the ordering
            self._expression_keys = (CpuPollsterEnum.user,
                                     CpuPollsterEnum.nice,
                                     CpuPollsterEnum.system,
                                     CpuPollsterEnum.idle)
        return self._expression_keys

    @property
    def header(self):
        """
        :return: the first line for the output file
        """
        if self._header is None:
            self._header = "timestamp,cpu_percent\n"
        return self._header

    @property
    def expression(self):
        """
        :return: regular-expression source to match the aggregate 'cpu'
                 line (presumably compiled by the base class's `regex`
                 property -- confirm).
        """
        if self._expression is None:
            integer = oatbran.INTEGER
            enum = CpuPollsterEnum
            named = oatbran.NAMED
            spaces = oatbran.SPACES
            user = named(n=enum.user, e=integer)
            nice = named(n=enum.nice, e=integer)
            system = named(n=enum.system, e=integer)
            idle = named(n=enum.idle, e=integer)
            self._expression = spaces.join(['cpu',
                                            user,
                                            nice,
                                            system,
                                            idle])
        return self._expression

    def run(self):
        """
        The main loop: take a baseline reading, then write the percent of
        non-idle time per interval until stop() is called.
        """
        self.output.write(self.header)
        start = time()
        # the connection is shared, so reads are serialized with its lock
        lock = self.connection.lock
        with lock:
            output, error = self.connection.cat(self.name)
        start_used = 0
        next_used = 0
        start_total = 0
        next_total = 0
        # get the first sample
        for line in output:
            match = self.regex.search(line)
            if match:
                self.logger.debug(line)
                match = match.groupdict()
                # total jiffies = user + nice + system + idle
                # (itervalues: this module targets Python 2)
                start_total = sum([int(value) for value in match.itervalues()])
                start_used = start_total - int(match[CpuPollsterEnum.idle])
        try:
            sleep(self.interval - (time() - start))
        except IOError:
            # negative sleep argument: the sample overran the interval
            pass
        # watch the file
        while not self.stopped:
            start = time()
            with lock:
                output, error = self.connection.cat(self.name)
            for line in output:
                match = self.regex.search(line)
                if match:
                    tstamp = self.timestamp.now
                    self.logger.debug(line)
                    match = match.groupdict()
                    next_total = sum([int(value) for value in match.itervalues()])
                    # float() so the division below isn't integer division
                    next_used = next_total - float(match[CpuPollsterEnum.idle])
                    # NOTE(review): divides by (next_total - start_total);
                    # if no jiffies elapsed between samples this is a
                    # ZeroDivisionError -- confirm the interval is long
                    # enough that this cannot happen.
                    used = (next_used - start_used)/(next_total - start_total)
                    self.output.write("{0},{1}\n".format(tstamp,
                                                         100 * used))
                    # roll the baseline forward
                    start_used, start_total = next_used, next_total
                    next_used = next_total = 0
                    break
            try:
                sleep(self.interval - (time() - start))
            except IOError:
                self.logger.debug("cat {0} took more than one second".format(self.name))
        return
# end class CpuPollster
# Manual smoke test: poll /proc/net/dev on a remote host over SSH.
if __name__ == "__main__":
    from apetools.connections.sshconnection import SSHConnection
    import sys
    c = SSHConnection("portege", "portegeadmin")
    # NOTE(review): ProcnetdevPollster.__init__ takes `interface` as its
    # first positional argument, so sys.stdout lands there and the rest go
    # to BasePollster via *args -- confirm this argument order is intended.
    p = ProcnetdevPollster(sys.stdout, c, "wlan0")
    # presumably BasePollster defines __call__ to start polling -- confirm
    p()
| |
"""
Form Widget classes specific to the Django admin site.
"""
import copy
from django import forms
from django.db.models.deletion import CASCADE
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from django.utils.encoding import force_text
from django.utils.html import smart_urlquote
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
from django.utils.translation import ugettext as _
class FilteredSelectMultiple(forms.SelectMultiple):
    """
    A SelectMultiple with a JavaScript filter interface.

    Note that the resulting JavaScript assumes that the jsi18n
    catalog has been loaded in the page
    """
    def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
        self.verbose_name = verbose_name
        self.is_stacked = is_stacked
        super(FilteredSelectMultiple, self).__init__(attrs, choices)

    @property
    def media(self):
        # SelectFilter2.js drives the two-box filter UI and depends on the
        # other two scripts.
        scripts = ("core.js", "SelectBox.js", "SelectFilter2.js")
        return forms.Media(js=["admin/js/%s" % script for script in scripts])

    def get_context(self, name, value, attrs=None):
        context = super(FilteredSelectMultiple, self).get_context(name, value, attrs)
        widget_attrs = context['widget']['attrs']
        # SelectFilter2.js looks for this class; 'selectfilterstacked'
        # selects the stacked variant.
        css_class = 'selectfilter'
        if self.is_stacked:
            css_class += 'stacked'
        widget_attrs['class'] = css_class
        widget_attrs['data-field-name'] = self.verbose_name
        widget_attrs['data-is-stacked'] = int(self.is_stacked)
        return context
class AdminDateWidget(forms.DateInput):
    """A DateInput styled for the admin, with the calendar shortcut scripts."""

    @property
    def media(self):
        return forms.Media(js=["admin/js/calendar.js",
                               "admin/js/admin/DateTimeShortcuts.js"])

    def __init__(self, attrs=None, format=None):
        # caller-supplied attrs override the admin defaults
        merged_attrs = {'class': 'vDateField', 'size': '10'}
        merged_attrs.update(attrs or {})
        super(AdminDateWidget, self).__init__(attrs=merged_attrs, format=format)
class AdminTimeWidget(forms.TimeInput):
    """A TimeInput styled for the admin, with the clock shortcut scripts."""

    @property
    def media(self):
        return forms.Media(js=["admin/js/calendar.js",
                               "admin/js/admin/DateTimeShortcuts.js"])

    def __init__(self, attrs=None, format=None):
        # caller-supplied attrs override the admin defaults
        merged_attrs = {'class': 'vTimeField', 'size': '8'}
        merged_attrs.update(attrs or {})
        super(AdminTimeWidget, self).__init__(attrs=merged_attrs, format=format)
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """
    template_name = 'admin/widgets/split_datetime.html'

    def __init__(self, attrs=None):
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, [AdminDateWidget, AdminTimeWidget], attrs)

    def get_context(self, name, value, attrs):
        context = super(AdminSplitDateTime, self).get_context(name, value, attrs)
        context.update(date_label=_('Date:'), time_label=_('Time:'))
        return context
class AdminRadioSelect(forms.RadioSelect):
    # Renders radio buttons with the admin's radio template.
    template_name = 'admin/widgets/radio.html'
class AdminFileWidget(forms.ClearableFileInput):
    # Renders file inputs with the admin's clearable-file-input template.
    template_name = 'admin/widgets/clearable_file_input.html'
def _lookup_param_value(value):
    """Normalize one limit_choices_to value into a query-string string."""
    if callable(value):
        value = value()
    if isinstance(value, (tuple, list)):
        return ','.join(str(item) for item in value)
    # bool must be tested before the generic str() fallback
    if isinstance(value, bool):
        return ('0', '1')[value]
    return str(value)


def url_params_from_lookup_dict(lookups):
    """
    Converts the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    # anything falsy or non-mapping yields no parameters
    if not (lookups and hasattr(lookups, 'items')):
        return {}
    return dict((key, _lookup_param_value(value))
                for key, value in lookups.items())
class ForeignKeyRawIdWidget(forms.TextInput):
    """
    A Widget for displaying ForeignKeys in the "raw_id" interface rather than
    in a <select> box.
    """
    template_name = 'admin/widgets/foreign_key_raw_id.html'

    def __init__(self, rel, admin_site, attrs=None, using=None):
        """
        :param rel: the relation (remote field) being rendered
        :param admin_site: AdminSite used to resolve changelist/change URLs
        :param using: optional database alias used for value lookups
        """
        self.rel = rel
        self.admin_site = admin_site
        self.db = using
        super(ForeignKeyRawIdWidget, self).__init__(attrs)

    def get_context(self, name, value, attrs=None):
        context = super(ForeignKeyRawIdWidget, self).get_context(name, value, attrs)
        rel_to = self.rel.model
        if rel_to in self.admin_site._registry:
            # The related object is registered with the same AdminSite
            related_url = reverse(
                'admin:%s_%s_changelist' % (
                    rel_to._meta.app_label,
                    rel_to._meta.model_name,
                ),
                current_app=self.admin_site.name,
            )
            params = self.url_parameters()
            if params:
                # BUG FIX: the generator expression previously carried a
                # trailing comma inside the call -- a SyntaxError on modern
                # Pythons ("Generator expression must be parenthesized").
                related_url += '?' + '&'.join(
                    '%s=%s' % (k, v) for k, v in params.items()
                )
            context['related_url'] = mark_safe(related_url)
            context['link_title'] = _('Lookup')
            # The JavaScript code looks for this class.
            context['widget']['attrs'].setdefault('class', 'vForeignKeyRawIdAdminField')
        if context['widget']['value']:
            context['link_label'], context['link_url'] = self.label_and_url_for_value(value)
        return context

    def base_url_parameters(self):
        # limit_choices_to may be a callable returning the lookup dict
        limit_choices_to = self.rel.limit_choices_to
        if callable(limit_choices_to):
            limit_choices_to = limit_choices_to()
        return url_params_from_lookup_dict(limit_choices_to)

    def url_parameters(self):
        from django.contrib.admin.views.main import TO_FIELD_VAR
        params = self.base_url_parameters()
        params.update({TO_FIELD_VAR: self.rel.get_related_field().name})
        return params

    def label_and_url_for_value(self, value):
        """
        :return: (truncated label, change-form URL) for the related object,
                 or ('', '') when the value doesn't resolve to one.
        """
        key = self.rel.get_related_field().name
        try:
            obj = self.rel.model._default_manager.using(self.db).get(**{key: value})
        except (ValueError, self.rel.model.DoesNotExist):
            return '', ''
        try:
            url = reverse(
                '%s:%s_%s_change' % (
                    self.admin_site.name,
                    obj._meta.app_label,
                    obj._meta.object_name.lower(),
                ),
                args=(obj.pk,)
            )
        except NoReverseMatch:
            url = ''  # Admin not registered for target model.
        return Truncator(obj).words(14, truncate='...'), url
class ManyToManyRawIdWidget(ForeignKeyRawIdWidget):
    """
    A Widget for displaying ManyToMany ids in the "raw_id" interface rather than
    in a <select multiple> box.
    """
    template_name = 'admin/widgets/many_to_many_raw_id.html'

    def get_context(self, name, value, attrs=None):
        context = super(ManyToManyRawIdWidget, self).get_context(name, value, attrs)
        if self.rel.model in self.admin_site._registry:
            # The related model is registered with the same AdminSite; the
            # JavaScript code looks for this class.
            context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'
        return context

    def url_parameters(self):
        # no to-field restriction for many-to-many lookups
        return self.base_url_parameters()

    def label_and_url_for_value(self, value):
        # multiple values: there is no single label/url to display
        return '', ''

    def value_from_datadict(self, data, files, name):
        raw = data.get(name)
        # comma-separated ids -> list; missing/empty -> None
        return raw.split(',') if raw else None

    def format_value(self, value):
        if not value:
            return ''
        return ','.join(force_text(v) for v in value)
class RelatedFieldWidgetWrapper(forms.Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    admin interface.

    Depending on permissions, it also exposes change and delete links for
    the related object next to the wrapped widget.
    """
    template_name = 'admin/widgets/related_widget_wrapper.html'

    def __init__(self, widget, rel, admin_site, can_add_related=None,
                 can_change_related=False, can_delete_related=False):
        # Mirror the wrapped widget's public attributes so the wrapper is a
        # drop-in replacement for it.
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel
        # Backwards compatible check for whether a user can add related
        # objects.
        if can_add_related is None:
            can_add_related = rel.model in admin_site._registry
        self.can_add_related = can_add_related
        # XXX: The UX does not support multiple selected values.
        multiple = getattr(widget, 'allow_multiple_selected', False)
        self.can_change_related = not multiple and can_change_related
        # XXX: The deletion UX can be confusing when dealing with cascading deletion.
        cascade = getattr(rel, 'on_delete', None) is CASCADE
        self.can_delete_related = not multiple and not cascade and can_delete_related
        # so we can check if the related object is registered with this AdminSite
        self.admin_site = admin_site

    def __deepcopy__(self, memo):
        # Copy the wrapper shallowly but the wrapped widget deeply; attrs is
        # then re-pointed at the widget's attrs.
        obj = copy.copy(self)
        obj.widget = copy.deepcopy(self.widget, memo)
        obj.attrs = self.widget.attrs
        memo[id(self)] = obj
        return obj

    @property
    def is_hidden(self):
        # delegate to the wrapped widget
        return self.widget.is_hidden

    @property
    def media(self):
        # delegate to the wrapped widget
        return self.widget.media

    def get_related_url(self, info, action, *args):
        # info is (app_label, model_name); builds e.g. 'admin:app_model_add'
        return reverse("admin:%s_%s_%s" % (info + (action,)),
                       current_app=self.admin_site.name, args=args)

    def get_context(self, name, value, attrs=None):
        # NOTE(review): `override_choices` is not part of stock
        # django.forms.Widget -- presumably provided by the wrapped widget
        # elsewhere in this project; confirm.
        with self.widget.override_choices(self.choices):
            context = self.widget.get_context(name, value, attrs)
        from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
        rel_opts = self.rel.model._meta
        info = (rel_opts.app_label, rel_opts.model_name)
        # query string appended to the add/change/delete popup links
        url_params = '&'.join("%s=%s" % param for param in [
            (TO_FIELD_VAR, self.rel.get_related_field().name),
            (IS_POPUP_VAR, 1),
        ])
        context['url_params'] = url_params
        context['model'] = rel_opts.verbose_name
        if self.can_change_related:
            # '__fk__' is a placeholder substituted client-side with the pk
            change_related_template_url = self.get_related_url(info, 'change', '__fk__')
            context.update(
                can_change_related=True,
                change_related_template_url=change_related_template_url,
            )
        if self.can_add_related:
            add_related_url = self.get_related_url(info, 'add')
            context.update(
                can_add_related=True,
                add_related_url=add_related_url,
            )
        if self.can_delete_related:
            delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
            context.update(
                can_delete_related=True,
                delete_related_template_url=delete_related_template_url,
            )
        return context

    def value_from_datadict(self, data, files, name):
        # delegate to the wrapped widget
        return self.widget.value_from_datadict(data, files, name)

    def id_for_label(self, id_):
        # delegate to the wrapped widget
        return self.widget.id_for_label(id_)
class AdminTextareaWidget(forms.Textarea):
    """A Textarea carrying the admin's vLargeTextField styling class."""

    def __init__(self, attrs=None):
        # caller-supplied attrs override the admin default
        merged_attrs = {'class': 'vLargeTextField'}
        merged_attrs.update(attrs or {})
        super(AdminTextareaWidget, self).__init__(attrs=merged_attrs)
class AdminTextInputWidget(forms.TextInput):
    """A TextInput carrying the admin's vTextField styling class."""

    def __init__(self, attrs=None):
        # caller-supplied attrs override the admin default
        merged_attrs = {'class': 'vTextField'}
        merged_attrs.update(attrs or {})
        super(AdminTextInputWidget, self).__init__(attrs=merged_attrs)
class AdminEmailInputWidget(forms.EmailInput):
    """An EmailInput carrying the admin's vTextField styling class."""

    def __init__(self, attrs=None):
        # caller-supplied attrs override the admin default
        merged_attrs = {'class': 'vTextField'}
        merged_attrs.update(attrs or {})
        super(AdminEmailInputWidget, self).__init__(attrs=merged_attrs)
class AdminURLFieldWidget(forms.URLInput):
    """A URLInput that renders a clickable link to the current URL value."""
    template_name = 'admin/widgets/url.html'

    def __init__(self, attrs=None):
        final_attrs = {'class': 'vURLField'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)

    def get_context(self, name, value, attrs):
        context = super(AdminURLFieldWidget, self).get_context(name, value, attrs)
        context['current_label'] = _('Currently:')
        context['change_label'] = _('Change:')
        # BUG FIX: only quote when there is a value; smart_urlquote() on an
        # unbound field's None/empty value would fail.
        context['widget']['href'] = (
            smart_urlquote(context['widget']['value']) if value else ''
        )
        return context

    def format_value(self, value):
        value = super(AdminURLFieldWidget, self).format_value(value)
        return force_text(value)
class AdminIntegerFieldWidget(forms.NumberInput):
    """A NumberInput carrying the admin's integer-field styling class."""
    # subclasses override this to change the CSS hook
    class_name = 'vIntegerField'

    def __init__(self, attrs=None):
        # caller-supplied attrs override the admin default
        merged_attrs = {'class': self.class_name}
        merged_attrs.update(attrs or {})
        super(AdminIntegerFieldWidget, self).__init__(attrs=merged_attrs)
class AdminBigIntegerFieldWidget(AdminIntegerFieldWidget):
    # Same widget as AdminIntegerFieldWidget, different CSS hook.
    class_name = 'vBigIntegerField'
| |
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import os
import multi_method
import random
import statistical_feature as sf
import Task
from uuid import uuid4
import time
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
# save the data user upload
upload_dir = "upload_data/"
download_dir = "result/"
# start page /
class ClassifierHandler(tornado.web.RequestHandler):
    """Start page (/): renders the classifier form and runs a classification."""

    def get(self):
        self.render("classifier.html", test="test")

    def post(self):
        # get data, return error page if it's empty
        try:
            fileinfo = self.request.files["all_data"][0]
            all_data = fileinfo["body"]
        except KeyError:
            self.render("error.html", error_info="Please Upload Your Data.")
            return
        # save data under a unique name so concurrent uploads don't collide
        filename = upload_dir + fileinfo["filename"]
        while os.path.exists(filename):
            filename = filename + str(random.randint(1, 9))
        # `with` guarantees the handle is closed (the original leaked it)
        with open(filename, "w") as data_file:
            data_file.write(all_data)
        try:
            # get method to calculate, default: svm with polynomial kernel
            method = self.get_argument("method", "SVM")
            kernel = self.get_argument("kernel", "Polynomial")
            C = float(self.get_argument("C", "1"))
            gamma = float(self.get_argument("gamma", "0"))
            selected_feature = self.get_argument("selected_feature", "")
            # overall accuracy, average accuracy, kappa, per-label accuracy,
            # true/test locations of each label, labels, result file path
            [OA, AA, kappa, label_accuracy, y_te_location, t_te_location, uniq_ele, result_filename] = multi_method.calculate(
                filename, selected_feature, method=method, kernel=kernel, C=C, gamma=gamma)
            # download path
            result_filename = "/" + result_filename
        except Exception as e:
            # `except Exception, e` is Python-2-only; `as` works on 2.6+ and 3
            print(e)
            self.render(
                "error.html", error_info="Please Check Your Data Format.")
            return
        finally:
            # the uploaded file is only needed for the calculation
            os.remove(filename)
        self.render("classifier_result.html", method=method, kernel=kernel,
                    C=C, gamma=gamma, OA=OA, AA=AA, kappa=kappa,
                    label_accuracy=label_accuracy, y_te_location=y_te_location,
                    t_te_location=t_te_location, uniq_ele=uniq_ele,
                    result_filename=result_filename)
# data format page /data_format
class DataFormatHandler(tornado.web.RequestHandler):
    """Data format page (/data_format): describes each tool's input format."""

    # maps the URL suffix to the template describing that tool's data format
    _PAGES = {
        "c": "c_format.html",  # classification data
        "s": "s_format.html",  # feature-statistics data
        "r": "r_format.html",
    }

    def get(self, input):
        template = self._PAGES.get(input)
        if template is None:
            self.render("error.html", error_info="Not Found.")
        else:
            self.render(template)
class StatFeatureHandler(tornado.web.RequestHandler):
    """Renders the feature-statistics form and computes per-label statistics."""

    def get(self):
        self.render("stat_feature.html")

    def post(self):
        # get data, return error page if it's empty
        try:
            fileinfo = self.request.files["whole_data"][0]
            whole_data = fileinfo["body"]
        except KeyError:
            self.render("error.html", error_info="Please Upload Your Data.")
            return
        # save data under a unique name for the calculation
        filename = upload_dir + fileinfo["filename"]
        while os.path.exists(filename):
            filename = filename + str(random.randint(1, 9))
        # `with` guarantees the handle is closed (the original leaked it)
        with open(filename, "w") as data_file:
            data_file.write(whole_data)
        try:
            # average/variance of each label, label counts, locations, labels
            [ave, var, y_count, label_location, uniq_ele] = sf.calculate(
                filename)
        except Exception:
            self.render(
                "error.html", error_info="Please Check Your Data Format.")
            return
        finally:
            # remove the uploaded data after calculating
            os.remove(filename)
        self.render(
            "stat_feature_result.html", ave=ave, var=var, y_count=y_count,
            label_location=label_location, uniq_ele=uniq_ele)
class RBFGammaHandler(tornado.web.RequestHandler):
    """Runs an SVM-RBF gamma sweep as a background task the client can poll."""

    def get(self):
        self.render("rbf_gamma.html")

    def post(self):
        # get data, return error page if it's empty
        try:
            fileinfo = self.request.files["rbf_gamma_data"][0]
            rbf_gamma_data = fileinfo["body"]
        except KeyError:
            self.render("error.html", error_info="Please Upload Your Data.")
            return
        # save data under a unique name for the calculation
        filename = upload_dir + fileinfo["filename"]
        while os.path.exists(filename):
            filename = filename + str(random.randint(1, 9))
        # `with` guarantees the handle is closed (the original leaked it)
        with open(filename, "w") as data_file:
            data_file.write(rbf_gamma_data)
        try:
            min_g = float(self.get_argument("gamma_min", "1"))
            max_g = float(self.get_argument("gamma_max", "10"))
            step = float(self.get_argument("gamma_step", "1"))
            C = float(self.get_argument("C", "1"))
            # task that evaluates each gamma of the sweep on its own thread
            rgTask = Task.RBFGammaTask(filename, min_g, max_g, step, C)
        except Exception:
            os.remove(filename)
            self.render(
                "error.html",
                error_info="Please Check Your Data Format or Selected Gammas")
            return
        # remove data after the task has been constructed (original ordering)
        os.remove(filename)
        # unique session so the browser can poll progress/result
        session = str(uuid4())
        # register the task for requesting
        self.application.taskTable.register(session, rgTask)
        # run task on another thread
        rgTask.start()
        self.render(
            "rbf_gamma_result.html", session=session)
class StateHandler(tornado.web.RequestHandler):
    """Reports progress and results of background tasks for client polling."""

    def _task_result(self, session):
        """Fetch a task's result, unregistering the task once it has one."""
        result = self.application.taskTable.getresult(session)
        if result[0] != -1:
            # a real first element means the task finished
            self.application.taskTable.unregister(session)
        return result

    def get(self, input):
        if input == "progress":
            # progress of the task identified by its unique session
            self.session = self.get_argument("session")
            progress = self.application.taskTable.getprogress(self.session)
            self.write('{"progress":"%.2f"}' % progress)
        elif input == "result":
            self.session = self.get_argument("session")
            result = self._task_result(self.session)
            # RBFGamma task: axis of gammas and the respective accuracies
            self.write('{"axis":"%s","acc":"%s"}' % (result[0], result[1]))
        elif input == "knn_result":
            self.session = self.get_argument("session")
            result = self._task_result(self.session)
            # knn task additionally reports locations and labels
            self.write('{"axis":"%s","acc":"%s","te_location":"%s","trak_location":"%s","uniq_ele":"%s"}' % (
                result[0], result[1], result[2], result[3], result[4]))
        else:
            return
class SoftMarginHandler(tornado.web.RequestHandler):
    """Runs an SVM soft-margin (C) sweep as a background task."""

    def get(self):
        self.render("soft_margin.html")

    def post(self):
        # get data, return error page if it's empty
        try:
            fileinfo = self.request.files["soft_margin_data"][0]
            soft_margin_data = fileinfo["body"]
        except KeyError:
            self.render("error.html", error_info="Please Upload Your Data.")
            return
        # save data under a unique name for the calculation
        filename = upload_dir + fileinfo["filename"]
        while os.path.exists(filename):
            filename = filename + str(random.randint(1, 9))
        # `with` guarantees the handle is closed (the original leaked it)
        with open(filename, "w") as data_file:
            data_file.write(soft_margin_data)
        try:
            min_sf = float(self.get_argument("soft_margin_min", "1"))
            max_sf = float(self.get_argument("soft_margin_max", "10"))
            step = float(self.get_argument("soft_margin_step", "1"))
            kernel = self.get_argument("kernel", "Polynomial")
            gamma = float(self.get_argument("gamma", "0"))
            if min_sf <= 0 or min_sf >= max_sf:
                # caught below: bounds must satisfy 0 < min < max
                raise ValueError("invalid soft-margin bounds")
            # task that evaluates each soft margin on its own thread
            sfTask = Task.SoftMarginTask(
                filename, min_sf, max_sf, step, kernel, gamma)
        except Exception:
            os.remove(filename)
            self.render(
                "error.html",
                error_info="Please Check Your Data Format or Selected Soft Margins")
            return
        # remove data after the task has been constructed (original ordering)
        os.remove(filename)
        # unique session so the browser can poll progress/result
        session = str(uuid4())
        self.application.taskTable.register(session, sfTask)
        # run task on another thread
        sfTask.start()
        self.render(
            "soft_margin_result.html", kernel=kernel, gamma=gamma, session=session)
class ForwardStepwiseHandler(tornado.web.RequestHandler):
    """Runs forward-stepwise feature selection as a background task."""

    def get(self):
        self.render("forward_stepwise.html")

    def post(self):
        # get data, return error page if it's empty
        try:
            fileinfo = self.request.files["forward_stepwise_data"][0]
            forward_stepwise_data = fileinfo["body"]
        except KeyError:
            self.render("error.html", error_info="Please Upload Your Data.")
            return
        # save data under a unique name for the calculation
        filename = upload_dir + fileinfo["filename"]
        while os.path.exists(filename):
            filename = filename + str(random.randint(1, 9))
        # `with` guarantees the handle is closed (the original leaked it)
        with open(filename, "w") as data_file:
            data_file.write(forward_stepwise_data)
        try:
            n_features = int(self.get_argument("number_of_features", "1"))
            if n_features <= 0:
                # caught below: must request at least one feature
                raise ValueError("number_of_features must be positive")
            fsTask = Task.ForwardStepwiseTask(filename, n_features)
        except Exception as e:
            print(e)
            os.remove(filename)
            # BUG FIX: the message previously said "Selected Soft Margins"
            # (copy-pasted from SoftMarginHandler)
            self.render(
                "error.html",
                error_info="Please Check Your Data Format or Number of Features")
            return
        # remove data after the task has been constructed (original ordering)
        os.remove(filename)
        # unique session so the browser can poll progress/result
        session = str(uuid4())
        self.application.taskTable.register(session, fsTask)
        # run task on another thread
        fsTask.start()
        self.render(
            "forward_stepwise_result.html", session=session)
class KnnHandler(tornado.web.RequestHandler):
def get(self):
self.render("knn.html", test="test")
def post(self):
# get data ,return error page if it's empty
try:
fileinfo = self.request.files["knn_data"][0]
all_data = fileinfo["body"]
except KeyError:
self.render("error.html", error_info="Please Upload Your Data.")
return
# save data for calculate
filename = upload_dir + fileinfo["filename"]
while os.path.exists(filename):
filename = filename + str(random.randint(1, 9))
f = open(filename, "w")
f.write(all_data)
f.close()
try:
margin = int(self.get_argument("margin", "1"))
knnTask = Task.KnnTask(filename, margin)
except Exception, e:
os.remove(filename)
self.render(
"error.html", error_info="Please Check Your Data Format.")
return
os.remove(filename)
# create uniq session of each task
session = str(uuid4())
# register the task for requesting
self.application.taskTable.register(session, knnTask)
# run task on another thread
self.render(
"knn_result.html", session=session)
knnTask.start()
class ResultDataHandler(tornado.web.RequestHandler):
def get(self, filename, suffix):
try:
filename = filename + "." + suffix
f = open(download_dir + filename)
self.set_header('Content-Type', 'application/octet-stream')
self.set_header(
'Content-Disposition', 'attachment; filename=' + 'result.mat')
self.write(f.read())
self.finish()
except Exception, e:
self.render("error.html", error_info="File\'s not found.")
class DeleteHandler(tornado.websocket.WebSocketHandler):
    """Deletes a server-side file when the client's websocket closes.

    The page opens this socket with a ``filename`` query argument; when
    the user navigates away the socket closes and the file is removed,
    cleaning up downloadable results.
    """

    def open(self):
        # Strip the leading '/' from the requested path to get a
        # filesystem-relative filename.
        self.filename = self.get_argument("filename")[1:]

    def on_close(self):
        # Remove the tracked file (if it still exists) on disconnect.
        if os.path.exists(self.filename):
            os.remove(self.filename)

    def on_message(self, message):
        # Incoming messages are ignored; the socket exists only for its
        # close event.
        pass
class Application(tornado.web.Application):
    """Tornado application wiring URL routes to their handlers."""

    def __init__(self):
        # Task table for administrating background calculation tasks;
        # handlers register tasks here keyed by session id.
        self.taskTable = Task.TaskTable()
        handlers = [
            (r"/", ClassifierHandler),
            (r"/format/(\w+)", DataFormatHandler),
            # BUG FIX: the dot was previously unescaped and matched any
            # character; escape it so only "name.suffix" URLs match.
            (r"/result/(\w+)\.(\w+)", ResultDataHandler),
            (r"/delete", DeleteHandler),
            (r"/stat_feature", StatFeatureHandler),
            (r"/rbf_gamma", RBFGammaHandler),
            (r"/soft_margin", SoftMarginHandler),
            (r"/forward_stepwise", ForwardStepwiseHandler),
            (r"/state/(\w+)", StateHandler),
            (r"/knn", KnnHandler)
        ]
        settings = {
            'template_path': 'templates',
            'static_path': 'static',
            # BUG FIX: debug was the string 'True' (truthy but wrong type);
            # use the boolean.
            'debug': True
        }
        tornado.web.Application.__init__(self, handlers, **settings)
if __name__ == "__main__":
    # Parse tornado command-line options (logging config etc.).
    tornado.options.parse_command_line()
    app = Application()
    server = tornado.httpserver.HTTPServer(app)
    # Serve on port 8989 until the process is interrupted.
    server.listen(8989)
    tornado.ioloop.IOLoop.instance().start()
| |
"""Utilities for setting up a project's settings.
The default way to use this is to import and call :func:`init_settings`
in a project's settings module:
# project/top_level_package/settings.py
from arcutils.settings import init_settings
init_settings()
This adds a few default settings for bootstrapping purposes and then
loads the project's local settings--the django-local-settings variety.
Pass ``local_settings=False`` to :func:`init_settings` if the project
doesn't use django-local-settings.
"""
import base64
import inspect
import ipaddress
import os
from datetime import datetime, timedelta
from pkg_resources import get_distribution
from django import VERSION as DJANGO_VERSION
from django.conf import settings as django_settings
from django.utils import timezone
from local_settings import NO_DEFAULT, load_and_check_settings, LocalSetting, SecretSetting
from local_settings.settings import DottedAccessDict, Settings as LocalSettings
class _InternalIPsType:
    """Convenience INTERNAL_IPS implementation for development.

    An instance answers membership tests positively for any loopback or
    private address, so e.g. ``'127.0.0.1' in INTERNAL_IPS`` and
    ``'10.1.2.3' in INTERNAL_IPS`` both hold without enumerating
    addresses.
    """

    def __contains__(self, addr):
        parsed = ipaddress.ip_address(addr)
        return parsed.is_loopback or parsed.is_private


INTERNAL_IPS = _InternalIPsType()
class UpTime:
    """Tracks elapsed time since a fixed start time."""

    __slots__ = ('start_time',)

    def __init__(self, start_time):
        self.start_time = start_time

    @property
    def current(self) -> timedelta:
        """Elapsed time since ``start_time`` as a ``timedelta``."""
        return timezone.now() - self.start_time

    @property
    def current_tuple(self) -> tuple:
        """Elapsed time as an ``(days, hours, minutes, seconds)`` int tuple."""
        remainder = self.current.total_seconds()
        days, remainder = divmod(remainder, 86400)
        hours, remainder = divmod(remainder, 3600)
        minutes, seconds = divmod(remainder, 60)
        return int(days), int(hours), int(minutes), int(seconds)

    def __str__(self) -> str:
        """Format as ``"{days}d {hours}h {minutes}m {seconds}s"``, with
        leading zero-valued units omitted (seconds always shown)."""
        days, hours, minutes, seconds = self.current_tuple
        parts = []
        if days:
            parts.append('{0:d}d'.format(days))
        if days or hours:
            parts.append('{0:d}h'.format(hours))
        if days or hours or minutes:
            parts.append('{0:d}m'.format(minutes))
        parts.append('{0:d}s'.format(seconds))
        return ' '.join(parts)

    def __repr__(self) -> str:
        return 'UpTime({0})'.format(self.current)
def init_settings(settings=None, local_settings=True, prompt=None, quiet=None, package_level=0,
                  stack_level=2, drop=(), settings_processors=()):
    """Initialize project settings.

    Basic Usage
    ===========

    By default, it's assumed that the project is structured like so,
    with the settings module in the top level package::

        project/
            package/
                __init__.py
                settings.py
            README
            setup.py

    It's also assumed that :func:`init_settings` will be called from the
    global scope of the project's settings module::

        # package/settings.py
        from arcutils.settings import init_settings
        init_settings()

    A few default settings that are commonly used in local settings
    files will be added (if not explicitly set before calling this
    function):

        - CWD (current working directory; primarily for use in
          development)
        - PACKAGE (top level project package)
        - DISTRIBUTION (the Python distribution name; often the same as
          PACKAGE but not always; defaults to PACKAGE)
        - START_TIME (current date/time; will be an "aware" UTC datetime
          object if the project has time zone support enabled)
        - UP_TIME (an object that can be used to retrieve the current
          up time)

    If the project has additional local settings, they must be defined
    *before* this function is called.

    Advanced Usage
    ==============

    Generally, you won't need to pass ``settings``, but if you do, it
    should be a dict of settings as you'd get from calling ``globals()``
    in the project's settings module.

    If the settings module is in a sub-package, ``package_level`` will
    need to be adjusted accordingly. If :func:`init_settings` is being
    called from another function, ``stack_level`` will have to be
    adjusted accordingly. See :func:`derive_top_level_package_name` for
    more info about these args.

    The ``PACKAGE`` setting will be derived based on the location of
    the settings module this function is called from. If this isn't
    working, ensure the ``package_level`` and ``stack_level`` options
    are correct; or, set the ``PACKAGE`` setting explicitly before
    calling this function::

        PACKAGE = 'quickticket'
        init_settings()

    To drop unused default settings, specify a list of such settings via
    the ``drop`` arg.

    To process settings in any custom manner needed, pass a list of
    functions via ``settings_processors``. Each processor will be passed
    the settings to be manipulated as necessary.

    """
    settings = settings if settings is not None else get_module_globals(stack_level)

    def set_default(key, fn, *args, **kwargs):
        # Lazily compute a default only when the caller hasn't already
        # set the corresponding setting.
        if key not in settings:
            settings[key] = fn(*args, **kwargs)
        return settings[key]

    def get_now():
        # NOTE: We can't simply use Django's timezone.now() here because
        # it accesses settings.USE_TZ, but at this point the settings
        # may not be considered fully configured by Django, so we have
        # to do this to avoid an ImproperlyConfigured exception.
        use_tz = settings.get('USE_TZ', False)
        if use_tz:
            return datetime.utcnow().replace(tzinfo=timezone.utc)
        return datetime.now()

    set_default('CWD', os.getcwd)
    set_default('PACKAGE', derive_top_level_package_name, package_level, stack_level + 1)
    if local_settings:
        init_local_settings(settings, prompt=prompt, quiet=quiet)
    set_default('DISTRIBUTION', lambda: settings['PACKAGE'])
    set_default('VERSION', lambda: get_distribution(settings['DISTRIBUTION']).version)
    start_time = set_default('START_TIME', get_now)
    set_default('UP_TIME', UpTime, start_time)
    # Remove the MIDDLEWARE_CLASSES setting on Django >= 1.10, but only
    # if the MIDDLEWARE setting is present *and* set.
    if DJANGO_VERSION[:2] >= (1, 10):
        if settings.get('MIDDLEWARE'):
            settings.pop('MIDDLEWARE_CLASSES', None)
    # Drop irrelevant settings. BUG FIX: use pop() with a default so
    # naming a setting that's absent (e.g. never added or already
    # removed) doesn't raise KeyError.
    for name in drop:
        settings.pop(name, None)
    for processor in settings_processors:
        processor(settings)
    return settings
def init_local_settings(settings, prompt=None, quiet=None):
    """Initialize the local settings defined in ``settings``.

    Args:
        settings (dict): A dict of settings as you'd get from calling
            ``globals()`` in a Django settings module.
        prompt (bool): Whether to prompt for missing local settings.
        quiet (bool): Squelch standard out when loading local settings.

    .. note:: ``prompt`` and ``quiet`` are passed through to
        :func:`local_settings.load_and_check_settings`.

    .. note:: If your project has additional local settings, they must
        be defined *before* this function is called.

    """
    # Offer a freshly generated random key in the SECRET_KEY prompt.
    secret_suggestion = base64.b64encode(os.urandom(64)).decode('utf-8')
    secret_doc = 'Suggested: "{0}"'.format(secret_suggestion)
    defaults = {
        'DEBUG': LocalSetting(False),
        'ADMINS': LocalSetting([]),
        'ALLOWED_HOSTS': LocalSetting([]),
        'GOOGLE': {
            'analytics': {
                'tracking_id': LocalSetting(
                    None, doc='Enter Google Analytics tracking ID (UA-NNNNNNNN-N)'
                ),
            },
        },
        'MANAGERS': LocalSetting([]),
        'SECRET_KEY': SecretSetting(doc=secret_doc),
        'DATABASES': {
            'default': {
                'ENGINE': LocalSetting('django.db.backends.postgresql'),
                'NAME': LocalSetting(settings.get('PACKAGE', NO_DEFAULT)),
                'USER': LocalSetting(''),
                'PASSWORD': SecretSetting(),
                'HOST': LocalSetting(''),
            },
        },
    }
    # Only fill in defaults the project hasn't already declared.
    for name, default in defaults.items():
        settings.setdefault(name, default)
    checked = load_and_check_settings(settings, prompt=prompt, quiet=quiet)
    settings.update(checked)
def get_setting(name, default=NO_DEFAULT, settings=None):
    """Get setting for ``name``, falling back to ``default`` if passed.

    ``name`` should be a string like 'ARC.cdn.hosts' or 'X.Y.0'. It is
    split on dots into path segments and the settings are traversed:

        - Set current value to django.conf.settings.{first segment}
        - For each other segment
            - Get current_value[segment] if current value is a dict
            - Get current_value[int(segment)] if current value is a list

    If the setting isn't found, the ``default`` value is returned when
    given; otherwise a ``KeyError`` is raised.

    ``settings`` can be used to retrieve the setting from a settings
    object other than the default ``django.conf.settings``.

    Traversal is delegated to
    :class:`local_settings.settings.DottedAccessDict`; see the
    django-local-settings project for details.
    """
    source = django_settings if settings is None else settings
    if not isinstance(source, LocalSettings):
        source = DottedAccessDict(get_settings_dict(source))
    return source.get_dotted(name, default)
class PrefixedSettings:
    """Read-only accessor for settings under a given ``prefix``.

    Args:
        prefix: An upper case setting name such as "CAS" or "LDAP"
        defaults: A dict of defaults for the prefix
        settings: Optional settings source; ``django.conf.settings``
            when omitted

    Makes it easy to fetch sub-settings within a given package. For
    example, with ``cas_settings = PrefixedSettings('CAS', DEFAULT_CAS_SETTINGS)``,
    ``cas_settings.get('base_url')`` looks up ``CAS.base_url`` in the
    project settings and falls back to the defaults dict.

    See the ``cas``, ``ldap``, and ``masquerade`` packages for concrete
    examples of how this is used.
    """

    def __init__(self, prefix, defaults=None, settings=None):
        if settings is None:
            settings = django_settings
        self.__prefix = prefix
        self.__defaults = DottedAccessDict(get_settings_dict(defaults))
        self.__settings = DottedAccessDict(get_settings_dict(settings))

    def get(self, name, default=NO_DEFAULT):
        """Get setting for configured ``prefix``.

        Args:
            name: setting name without ``prefix``
            default: value to use if the setting is present neither in
                the project's settings nor in the ``defaults``

        Returns:
            object: Value of setting

        Lookup order:

            1. Project settings for ``prefix``
            2. Default settings from ``defaults``
            3. ``default`` arg

        Raises:
            KeyError: When the setting isn't found anywhere and no
                fallback is passed via the ``default`` keyword arg
        """
        qualified_name = '{prefix}.{name}'.format(prefix=self.__prefix, name=name)
        try:
            return self.__settings.get_dotted(qualified_name)
        except KeyError:
            return self.__defaults.get_dotted(name, default=default)

    def __getitem__(self, key):
        # Deliberately call through the class so subclass overrides of
        # get() don't change subscript behavior.
        return PrefixedSettings.get(self, key, NO_DEFAULT)
# Internal helper functions
def get_settings_dict(settings):
    """Normalize a settings object into a plain dict.

    Args:
        settings (object): ``None``, a Django settings object (detected
            by the presence of a ``_wrapped`` attribute), a dict, or any
            sequence/mapping that ``dict()`` accepts

    Returns:
        empty dict: when ``settings`` is ``None``
        vars(settings._wrapped): when ``settings`` is (or appears to
            be) a Django settings object
        dict(settings): for any other type of object
    """
    if settings is None:
        return {}
    if hasattr(settings, '_wrapped'):
        # Looks like a Django LazySettings object.
        # TODO: Find a better way to check for Django settings?
        return vars(settings._wrapped)
    return dict(settings)
def derive_top_level_package_name(package_level=0, stack_level=1):
    """Return the top level package name of the calling module.

    Args:
        package_level (int): How many package levels down the caller
            is. 0 means the call is made from the top level package,
            1 from a sub-package, etc.
        stack_level (int): How many stack frames up the caller is.
            1 means module scope calling this directly, 2 means the
            call goes through one intermediate function, etc.

    The package name of the module containing the caller is looked up
    via the frame's ``__package__``, then ``package_level`` trailing
    segments are chopped off.

    When calling from a sub-package, bump ``package_level`` by 1 per
    sub-package; when calling indirectly (e.g. via
    :func:`init_settings`), bump ``stack_level`` by 1 per nested
    function.
    """
    assert package_level >= 0, 'Package level should be greater than or equal to 0'
    assert stack_level > 0, 'Stack level should be greater than 0'
    caller_frame = inspect.stack()[stack_level][0]
    package = caller_frame.f_globals['__package__']
    return package.rsplit('.', package_level)[0]
def get_module_globals(stack_level=2):
    """Return the global namespace of the frame ``stack_level`` levels
    up the call stack (default: the caller's caller)."""
    caller_frame = inspect.stack()[stack_level][0]
    return caller_frame.f_globals
| |
# Copyright 2017 Jeffrey A. Wolf
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------
"""Create small cropped images
"""
import numpy as np
import os
from landscape.raster import raster
class Patches(object):
    """Patches class is a sequence of Patch objects

    Iterating a Patches instance yields one `Patch` per labeled pixel
    (pixels whose label differs from the labels no-data value), in a
    shuffled order.
    """

    def __init__(self, image_filename, labels_filename, size):
        """Initialize a Patches object

        The image at image_filename and the labels at labels_filename
        must have the same projection, geotransform, and extent.

        Args:
            image_filename: filename representing path to image
            labels_filename: filename representing path to labels
            size: `int` size of window side (odd, > 1)
        Returns:
            None
        Raises:
            AssertionError
        """
        self._image = None
        self._labels = None
        self._image_metadata = None
        self._labels_metadata = None
        self._size = None
        self._offset = None
        self._labeled_indices = None  # initialized in __iter__()
        self._count = None  # initialized in __iter__()
        self._max_iter = None  # initialized in __iter__()
        # assert valid files
        assert os.path.exists(image_filename), ("image file not found")
        assert os.path.exists(labels_filename), ("labels file not found")
        # assert equal metadata
        image_metadata = raster.Metadata(image_filename)
        labels_metadata = raster.Metadata(labels_filename)
        assert image_metadata == labels_metadata, (
            "Metadata are not equivalent. " +
            "Try `gdalinfo` on the files. " +
            "Look at the docstring for `raster.Metadata.__eq__()`.")
        assert labels_metadata.ndv is not None, (
            "labels metadata ndv is None")
        self._image_metadata = image_metadata
        self._labels_metadata = labels_metadata
        # asserts on image and labels np.ndarrays
        image = raster.load_image(image_filename)
        labels = raster.load_image(labels_filename)
        assert isinstance(image, np.ndarray), (
            "image must be a numpy.ndarray")
        assert len(image.shape) == 3, (
            "image must be an numpy.ndarray with shape (H,W,D)")
        assert isinstance(labels, np.ndarray), (
            "labels must be a numpy.ndarray")
        assert len(labels.shape) == 3, (
            "lables must be an numpy.ndarray with shape (H,W,D)")
        # test if shape of both is equal on H,W axes
        assert image.shape[0] == labels.shape[0], (
            "Image and label height is different")
        # BUG FIX: this message previously said "height" as well,
        # although the check is on the width axis.
        assert image.shape[1] == labels.shape[1], (
            "Image and label width is different")
        self._image = image
        self._labels = labels
        # assert on size
        assert isinstance(size, int), ("size must be an integer")
        assert size % 2 == 1, ("size must be an odd integer")
        assert size > 1, ("size must be an integer >1")
        self._size = size
        self._offset = self.size // 2

    @property
    def image(self):
        """The image `np.ndarray` with shape (H,W,D)
        """
        return self._image

    @property
    def labels(self):
        """The labels `np.ndarray` with shape (H,W,D)
        """
        return self._labels

    @property
    def image_metadata(self):
        """The image `Metadata` object
        """
        return self._image_metadata

    @property
    def labels_metadata(self):
        """The labels `Metadata` object
        """
        return self._labels_metadata

    @property
    def size(self):
        """The `int` size of the side length.
        Must be an odd `int`
        """
        return self._size

    @property
    def offset(self):
        """The `int` offset derived from self._size//2
        An even integer
        """
        return self._offset

    @property
    def labeled_indices(self):
        """An indices iterator to access labeled pixels
        """
        return self._labeled_indices

    def _calculate_origin(self, origin, resolution, offset, index):
        """Calculate new origin

        The result is rounded to the decimal precision of the textual
        representation of ``resolution`` to suppress floating point
        noise.

        Args:
            origin: `float`
            resolution: `float` that can be positive or negative
            offset: `int` pixel offset
            index: `int` index
        Returns:
            new origin `float`
        Raises:
            AssertionError
        """
        assert isinstance(index, int)
        assert isinstance(offset, int)
        resolution_string = str(resolution)
        parts = resolution_string.split(".")
        if len(parts) == 2:
            precision = len(parts[1])
        else:
            precision = 0
        # calculate difference
        difference = (index - offset) * resolution
        origin += difference
        return round(origin, precision)

    def _build_geotransform(self, i, j):
        """Build geotransform for an image patch

        Args:
            i: `int` row index
            j: `int` column index
        Returns:
            GDAL geotransform for `Metadata` object
        Raises:
            AssertionError
        """
        assert isinstance(i, int), ("i is not an integer")
        assert isinstance(j, int), ("j is not an integer")
        x_origin, x_res, x_ignore, y_origin, y_ignore, y_res = (
            self.image_metadata.geotransform)
        # integer conversion to reduce floating point error
        new_x_origin = self._calculate_origin(x_origin, x_res, self.offset, j)
        new_y_origin = self._calculate_origin(y_origin, y_res, self.offset, i)
        geotransform = (new_x_origin, x_res, x_ignore, new_y_origin,
                        y_ignore, y_res)
        return geotransform

    def _patch_metadata(self, i, j):
        """Build metadata for an image patch

        Uses self.image_metadata as the metadata source. Modifies
        the geotransform, x, and y size. Keeps the same projection,
        datatype, and ndv.

        Args:
            i: `int` row index into image and labels `np.ndarray`
            j: `int` col index into image and labels `np.ndarray`
        Returns:
            `raster.Metadata` object
        Raises:
            AssertionError
        """
        assert isinstance(i, int), ("i is not an integer")
        assert i >= 0, ("i must be >= 0")
        assert isinstance(j, int), ("j is not an integer")
        assert j >= 0, ("j must be >= 0")
        # modify the geotransform
        geotransform = self._build_geotransform(i, j)
        # modify the x and y size
        x, y = self.size, self.size
        # projection
        projection = self.image_metadata.projection
        # datatype
        datatype = self.image_metadata.datatype
        # ndv
        ndv = self.image_metadata.ndv
        metadata = raster.Metadata()
        metadata.set(x, y, projection, geotransform, datatype, ndv)
        return metadata

    def _patch_image(self, i, j):
        """Build an image patch

        Extracts the size x size window centered on pixel (i, j).

        Args:
            i: `int` row index into image and labels `nd.ndarray`
            j: `int` col index into image and labels `nd.ndarray`
        Returns:
            `np.ndarray`
        Raises:
            AssertionError
        """
        assert isinstance(i, int), ("i is not an integer")
        assert i >= 0, ("i must be >= 0")
        assert isinstance(j, int), ("j is not an integer")
        assert j >= 0, ("j must be >= 0")
        imin, imax = i - self.offset, i + self.offset + 1
        jmin, jmax = j - self.offset, j + self.offset + 1
        image = self.image[imin:imax, jmin:jmax, :]
        return image

    def _patch_label(self, i, j):
        """Get patch label (label of the center pixel)

        Args:
            i: index i
            j: index j
        Returns:
            label
        """
        assert isinstance(i, int), ("i is not an integer")
        assert i >= 0, ("i must be >= 0")
        assert isinstance(j, int), ("j is not an integer")
        assert j >= 0, ("j must be >= 0")
        band = 0  # currently supports 1 band labels
        label = self.labels[i, j, band]
        return label

    def __iter__(self):
        """Initialize an iterator over shuffled labeled pixels
        """
        # height and width
        shape = self.labels.shape[:2]  # equivalently use self.image.shape[:2]
        # rows (H,W) `np.ndarray` and columns (H,W) `np.ndarray`
        rows, columns = np.indices(shape)
        ndv = self.labels_metadata.ndv
        # an (H,W,D) `np.ndarray`, labels must be 1 band
        band = 0
        valid = self.labels[:, :, band] != ndv
        # valid rows
        valid_rows = rows[valid]
        valid_columns = columns[valid]
        # randomize - should use seed
        # equivalently could use valid_columns.shape
        n_valid_rows = valid_rows.shape[0]
        indices = np.arange(n_valid_rows)
        np.random.shuffle(indices)
        self._labeled_indices = np.vstack(
            (valid_rows[indices], valid_columns[indices])).T
        # BUG FIX: astype() returns a new array rather than converting in
        # place; previously the converted result was discarded.
        self._labeled_indices = self._labeled_indices.astype(int)
        self._count = 0
        self._max_iter = n_valid_rows
        return self

    def __next__(self):
        """Next patch from the iterator

        Args:
            None
        Returns:
            `Patch` object
        Raises:
            StopIteration
        """
        if self._count == self._max_iter:
            raise StopIteration
        i_npint64, j_npint64 = self._labeled_indices[self._count, :]
        # alternative to explicit casting is to
        # broaden the integer types accepted by the assert clauses
        i, j = int(i_npint64), int(j_npint64)
        image = self._patch_image(i, j)
        label = self._patch_label(i, j)
        metadata = self._patch_metadata(i, j)
        patch = Patch(image, label, metadata, self.size)
        self._count += 1
        return patch

    def __len__(self):
        """The number of `Patch` objects

        Args:
            None
        Returns:
            `int` number of `Patch` objects in `Patches` object
        """
        # initialize self._max_iter
        if self._max_iter is None:
            # self._max_iter is initialized in __iter__()
            iter(self)
        return self._max_iter
class Patch(object):
    """A single labelled image patch plus its georeferencing metadata."""

    def __init__(self, image, label, metadata, size):
        """Validate and store the patch components.

        Args:
            image: `np.ndarray` of shape (H,W,D)
            label: scalar label value (numpy scalar types accepted)
            metadata: `raster.Metadata` describing the patch extent
            size: `int` side length in pixels; must equal both the
                height and width of ``image``
        Returns:
            None
        Raises:
            AssertionError
        """
        self._image = None
        self._label = None
        self._metadata = None
        self._size = None
        # image assertions
        assert isinstance(image, np.ndarray), ("image must be a numpy.ndarray")
        assert len(image.shape) == 3, (
            "image must be an numpy.ndarray with shape (H,W,D)")
        self._image = image
        # NOTE: the label type is deliberately unchecked so numpy scalar
        # dtypes are accepted alongside plain int/float.
        self._label = label
        # metadata assertion
        assert isinstance(metadata, raster.Metadata)
        self._metadata = metadata
        # size must match the image on both spatial axes
        img_h, img_w = self._image.shape[:2]
        assert size == img_w, ("Size and width of image are not equal")
        assert size == img_h, ("Size and height of image are not equal")
        self._size = size

    @property
    def image(self):
        """`np.ndarray` pixel data of shape (H,W,D)."""
        return self._image

    @property
    def label(self):
        """Scalar label of the patch's center pixel."""
        return self._label

    @property
    def metadata(self):
        """`raster.Metadata` for the patch extent."""
        return self._metadata

    @property
    def size(self):
        """`int` side length in pixels."""
        return self._size

    def save_image(self, filename):
        """Save the patch as a raster file.

        Args:
            filename: a valid path for a new file
        Returns:
            None
        """
        raster.save_image(filename, self.image, self.metadata)

    def __str__(self):
        """Image array and label, newline separated."""
        return "\n".join((str(self.image), str(self.label)))
| |
from . quote_fields import quote_definitions, quote_dtypes
from io import StringIO
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from requests import Session
# TODO look into read_csv use_cols option for speedups
# TODO Fix doc comment formatting on methods
class ActiveTick:
    def __init__(self, host='127.0.0.1', port=5000, cache=False):
        """Configure a client for an ActiveTick HTTP proxy.

        :param host: proxy host (string)
        :param port: proxy port (int)
        :param cache: falsy to disable caching, or an object exposing
            ``exists()``/``get()``/``set()`` (used by barData/tickData)
            -- presumably a Redis-like client; TODO confirm
        """
        # Active tick HTTP proxy config
        self.host = host
        self.port = port
        # Reused HTTP session for streaming requests.
        self.r = Session()
        self.cache = cache
        # Timestamp format ActiveTick expects in URLs (seconds precision).
        self._date_fmt = '%Y%m%d%H%M%S'
        # Contains generator for stream once requested
        self.stream_ = None
def _date_wrap(self, date):
# wrapper to allow for np.datetime64 and convert to string
ts = pd.to_datetime(str(date))
return ts.strftime(self._date_fmt)
def _format_symbols(self, symbols):
"""
symbols - string (returns unchanged) or list of symbols to concat with + symbols (string, list)
Formats list of symbols for ActiveTick URL
:param symbols:
:return:
String
"""
if not isinstance(symbols, str):
symbols = '+'.join(symbols)
return symbols
def _date_parser(self, date_format):
"""
Date parser function factory for pandas csv parsing of ActiveTick data
:param date_format:
Format used for parsing date (string)
:return:
(list of datetimes)
"""
def date_parser(dates):
return [ datetime.strptime(date, date_format) for date in dates ]
return date_parser
def quoteData(self, symbols, quoteFields):
"""
symbols - Symbol (or iterable of multiple symbols) for contracts, ie SPY, AAPL--130308C00440000 (string, iter)
quote_fields - List of all fields of interest (string, list)
# Example:
# look to quoteFields.py for the lookup table used and available fields
atClient.quoteData('SPY', ['LastPrice' , 'BidSize', 'AskSize'])
# returns pandas DataFrame with columns named
:return:
pandas.DataFrame() indexed on the symbol column with columns with requested quoteFields
with extra status meta data regarding the request and symbols, to just get a DataFrame
with the requested fields quoteData('SPY', fields)[fields]
"""
names = ['symbol', 'symbol_status']
def __name_fmt(names, field):
names += ["{f}_field_id".format(f=field),
"{f}_status".format(f=field),
"{f}_datatype".format(f=field),
"{f}".format(f=field)]
return names
if not isinstance(quoteFields, str):
# Create column names from quoteFields
for field in quoteFields:
names = __name_fmt(names, field)
# TODO: Declare specific dtypes for each column in names
# Translate from human readable quoteFields to IDs
quoteFields = map(lambda field: quote_definitions[field], quoteFields)
quoteFields = '+'.join(quoteFields)
else:
# Only one quoteField as string
names = __name_fmt(names, quoteFields)
quoteFields = quote_definitions[quoteFields]
url = "http://{host}:{port}/quoteData?symbol={symbols}&field={quoteFields}".format(
host=self.host,
port=self.port,
symbols=self._format_symbols(symbols),
quoteFields=quoteFields
)
# GET request is made and the CSV is read into a Pandas DataFrame
df = pd.read_csv(url, header=None, names=names, index_col='symbol')
return df
def quoteStream(self, symbols, timeout=None):
"""
symbols - string or iter of symbols
# Example
# res is an instance of requests iter_lines()
res = at.quoteStream('SPY')
for quote in res:
print(quote)
:param timeout:
integer, how many seconds to keep connection open
:return:
returns lazy iterator see requests iter_lines() that can be looped over to access streaming data
"""
# TODO: Start, pause, stop quote stream
def __tickParse(tick):
tick = tick.decode('utf-8')
if tick[0] is 'Q':
names = ['type', 'symbol', 'cond', 'bid_ex', 'ask_ex', 'bid', 'ask', 'bidz', 'askz', 'datetime']
dtype = {'type': object,
'symbol': object,
'cond': np.uint8,
'bid_ex': object,
'ask_ex': object,
'bid': np.float32,
'ask': np.float32,
'bidz': np.uint32,
'askz': np.uint32,
'datetime': object}
else:
names = ['type', 'symbol', 'flags', 'cond1', 'cond2', 'cond3', 'cond4', 'last_ex', 'last', 'lastz',
'datetime']
dtype = {
'type': object,
'symbol': object,
'flags': object,
'cond1': np.int8,
'cond2': np.int8,
'cond3': np.int8,
'cond4': np.int8,
'last_ex': object,
'last': np.float32,
'lastz': np.uint32
}
date_format = '%Y%m%d%H%M%S%f'
parse_date = self._date_parser(date_format)
return pd.read_csv(StringIO(tick), names=names, index_col='type', dtype=dtype,
parse_dates=['datetime'], date_parser=parse_date)
url = 'http://{host}:{port}/quoteStream?symbol={symbols}'.format(
host=self.host,
port=self.port,
symbols=self._format_symbols(symbols)
)
self.stream_ = self.r.get(url, stream=True, timeout=timeout)
pandas_stream = map(__tickParse, self.stream_.iter_lines())
first_line = next(pandas_stream)
return pandas_stream
def barData(self, symbol, historyType='I', intradayMinutes=60,
beginTime=datetime(datetime.now().year, datetime.now().month, 1), endTime=datetime.now()):
"""
:param symbol:
Takes only one symbol, string
:param historyType:
Takes 'I', 'D' or 'W' as a string (Intraday 0, Daily 1 or Weekly 0)
:param intradayMinutes:
If historyType is 'I' select a bar size: 0 to 60 minutes (int)
:param beginTime:
Beginning date for query (datetime)
:param endTime:
Ending date for query (datetime)
:return:
Pandas DataFrame OHLCV indexed on the datetime
"""
history_lookup = {
'I': 0,
'D': 1,
'W': 2
}
def __getIntradayMinutesAttr():
# Returns URL segment for intraday minutes if needed
if historyType is not 'I':
attr_str = ''
else:
attr_str = 'intradayMinutes={intradayMinutes}&'.format(intradayMinutes=str(intradayMinutes))
return attr_str
beginTime_s = self._date_wrap(beginTime)
endTime_s = self._date_wrap(endTime)
cache_key = "AT:BARDATA:{symbol}:{historyType}:{intradayMinutes}:{beginTime}:{endTime}"
cache_key = cache_key.format(
symbol=symbol,
historyType=history_lookup[historyType],
intradayMinutes=intradayMinutes,
beginTime=beginTime_s,
endTime=endTime_s)
# If the data is cached
if self.cache and self.cache.exists(cache_key):
return pd.read_msgpack(self.cache.get(cache_key))
url = 'http://{host}:{port}/barData?symbol={symbol}&historyType={historyType}' \
'&{intradayMintuesAttr}beginTime={beginTime}&endTime={endTime}'
url = url.format(
host=self.host,
port=self.port,
symbol=symbol,
historyType=history_lookup[historyType],
intradayMintuesAttr=__getIntradayMinutesAttr(),
beginTime=beginTime_s,
endTime=endTime_s)
dtypes = {'datetime': object,
'open': np.float32,
'high': np.float32,
'low': np.float32,
'close': np.float32,
'volume': np.uint32}
df = pd.read_csv(url, header=None, names=['datetime', 'open', 'high', 'low', 'close', 'volume'],
index_col='datetime', parse_dates=['datetime'], dtype=dtypes)
# Cache the data
if self.cache:
self.cache.set(cache_key, df.to_msgpack(compress='zlib'))
return df
def tickData(self, symbol, trades=False, quotes=True,
             beginTime=None, endTime=None):
    """Gets tick level data in between a time range, limited to returning
    100,000 quotes/trades at a time.

    :param symbol:
        String, ticker for symbol in ActiveTick format
    :param trades:
        Boolean, whether to return trade ticks
    :param quotes:
        Boolean whether to return quote ticks
    :param beginTime:
        datetime beginning of date range; defaults to 15 minutes ago,
        computed at call time
    :param endTime:
        datetime end of date range; defaults to now, computed at call time
    :return:
        Pandas DataFrame indexed on datetime; empty DataFrame if neither
        trades nor quotes is requested or the request fails
    """
    # BUG FIX: defaults previously called datetime.now() in the signature,
    # so the range was frozen at import time. Compute them per call.
    if beginTime is None:
        beginTime = datetime.now() - timedelta(minutes=15)
    if endTime is None:
        endTime = datetime.now()
    tick_date_fmt = '%Y%m%d%H%M%S%f'
    date_parser = self._date_parser(tick_date_fmt)
    # Column layouts for quote ('Q') and trade ('T') rows.
    q_names = ['type',
               'datetime',
               'bid',
               'ask',
               'bidz',
               'askz',
               'bidx',
               'askx',
               'cond']
    t_names = ['type',
               'datetime',
               'last',
               'lastz',
               'lastx',
               'cond1',
               'cond2',
               'cond3',
               'cond4']

    def __get_trades(df):
        # Slice trade rows out of a combined frame and coerce dtypes.
        trades_df = df[df[0] == 'T'].copy()
        trades_df.columns = t_names
        trades_df.loc[:, 'last'] = trades_df.loc[:, 'last'].astype(np.float32)
        trades_df.loc[:, 'lastz'] = trades_df.loc[:, 'lastz'].astype(np.uint32)
        trades_df.loc[:, ['cond1', 'cond2', 'cond3', 'cond4']] = trades_df.loc[:, ['cond1',
                                                                                   'cond2',
                                                                                   'cond3',
                                                                                   'cond4']].astype(np.uint8)
        return trades_df

    def __get_quotes(df):
        # Slice quote rows out of a combined frame and coerce dtypes.
        quotes_df = df[df[0] == 'Q'].copy()
        quotes_df.columns = q_names
        quotes_df.loc[:, ['bid', 'ask']] = quotes_df.loc[:, ['bid', 'ask']].astype(np.float32)
        quotes_df.loc[:, ['bidz', 'askz']] = quotes_df.loc[:, ['bidz', 'askz']].astype(np.uint32)
        quotes_df.loc[:, 'cond'] = quotes_df.loc[:, 'cond'].astype(np.uint8)
        return quotes_df

    def __at_request(url, names):
        # Issues the HTTP request. names=None means a combined trade+quote
        # response: the datetime then lives at positional column 1 and is
        # removed from q_names/t_names because it becomes the index.
        if names:
            date_col = 'datetime'
        else:
            date_col = 1
            del q_names[1]
            del t_names[1]
        try:
            df = pd.read_csv(url, header=None,
                             engine='c',
                             index_col=date_col,
                             parse_dates=[date_col],
                             names=names,
                             date_parser=date_parser)
            return df
        except Exception as e:
            print('caught exception:', e)
            print('No or malformed data: ', url)
            return pd.DataFrame()

    if not trades and not quotes:
        return pd.DataFrame()
    beginTime_s = self._date_wrap(beginTime)
    endTime_s = self._date_wrap(endTime)
    cache_key = 'AT:TICKDATA:{symbol}:{trades}:{quotes}:{beginTime}:{endTime}'
    cache_key = cache_key.format(
        symbol=symbol,
        trades=int(trades),
        quotes=int(quotes),
        beginTime=beginTime_s,
        endTime=endTime_s
    )
    # Return cached data
    if self.cache and self.cache.exists(cache_key):
        return pd.read_msgpack(self.cache.get(cache_key))
    # Retrieve data not found in cache
    else:
        # BUG FIX: the second URL fragment read '"es={quotes}', producing a
        # malformed query string; restored the '&quotes=' parameter.
        url = 'http://{host}:{port}/tickData?symbol={symbol}&trades={trades}' \
              '&quotes={quotes}&beginTime={beginTime}&endTime={endTime}'
        url = url.format(
            host=self.host,
            port=self.port,
            symbol=symbol,
            trades=int(trades),
            quotes=int(quotes),
            beginTime=beginTime_s,
            endTime=endTime_s
        )
        # Quote column names
        if quotes and not trades:
            df = __at_request(url, q_names)
        # Trade columns names
        if trades and not quotes:
            df = __at_request(url, t_names)
        if trades and quotes:
            df = __at_request(url, None)
            if not df.empty:
                df = __get_trades(df).append(__get_quotes(df)).sort_index(axis=0)
        if self.cache:
            self.cache.set(cache_key, df.to_msgpack(compress='zlib'))
        return df
def optionChain(self, symbol):
    """Fetch the raw option chain for an underlying symbol.

    :param symbol:
        String, ticker symbol for underlying
    :return:
        Raw unnamed dataframe from ActiveTick listing the currently
        listed option symbols
    """
    endpoint = 'http://{host}:{port}/optionChain?symbol={symbol}'
    return pd.read_csv(endpoint.format(host=self.host,
                                       port=self.port,
                                       symbol=symbol))
# Package metadata.
__version__ = '0.12.1'
__url__ = 'https://github.com/uberscientist/activetick_http'
# When executed directly, print a short description of the package.
if __name__ == '__main__':
    print('ActiveTick Python Module' + __version__ +
          ', attaches to ActiveTick HTTP Proxy, returns Pandas DataFrames.\n'
          'http://www.activetick.com/activetick/contents/PersonalServicesDataAPIDownload.aspx',
          'Git repo:' + __url__,
          'Uses pytest for tests.\n',
          'Has optional (recommended) Redis (http://redis.io) caching built in..', sep='\n')
| |
from __future__ import absolute_import
from __future__ import unicode_literals
import concurrent.futures
import logging
from time import sleep
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
from jinja2 import Template
class Runner(object):
    """Executes a suite of salt tasks through a salt API client.

    Async test cases are started concurrently via a thread pool and their
    results collected afterwards by polling; sync test cases run one at a
    time. Setup/teardown tasks may save response values that later tasks
    reference as Jinja2 render data.
    """

    def __init__(self, test_suite, salt_api, max_iterations=25, sleep_duration=0.1):
        """
        :param test_suite: suite exposing test_cases_async / test_cases_sync
        :param salt_api: client used to start tasks and fetch results
        :param max_iterations: maximum polling rounds per async result
        :param sleep_duration: pause in seconds between polls for a minion
        """
        self.test_suite = test_suite
        self.application_logger = logging.getLogger('nuts-application')
        self.test_report_logger = logging.getLogger('nuts-test-report')
        self.api = salt_api
        self.max_iterations = max_iterations
        self.sleep_duration = sleep_duration

    def _collect_result(self, test_case):
        """Poll salt until every minion of `test_case` has answered (or the
        iteration budget is exhausted), store the extracted result on the
        test case and run its teardown tasks, if any."""
        counter = 0
        not_contained = True
        while not_contained and (counter < self.max_iterations):
            salt_result = self.api.get_task_result(taskid=test_case.job_id)
            not_contained = False
            self.application_logger.debug('jid %s counter %s salt_result %s',
                                          test_case.job_id, counter, salt_result)
            for minion in test_case.minions:
                if minion not in salt_result['return'][0]:
                    not_contained = True
                    sleep(self.sleep_duration)
            counter += 1
        return_value = self._extract_return(salt_result)
        test_case.set_actual_result(return_value)
        if hasattr(test_case, 'teardown_tasks'):
            self.test_report_logger.debug('%s has %d teardown tasks',
                                          test_case.name, len(test_case.teardown_tasks))
            self._start_tasks(test_case.teardown_tasks, test_case.saved_data)

    def _start_test_async(self, test_case):
        """Run setup tasks and start the test case asynchronously.

        :return: True when the task was started, False on any failure
        """
        try:
            saved_data = {}
            if hasattr(test_case, 'setup_tasks'):
                self.test_report_logger.debug('%s has %d setup tasks',
                                              test_case.name, len(test_case.setup_tasks))
                saved_data = self._start_tasks(test_case.setup_tasks)
            test_case.saved_data = saved_data
            task = self.create_test_task(test_case.devices, test_case.command,
                                         test_case.parameter, saved_data)
            task_information = self.api.start_task_async(task)
            test_case.set_job(task_information['return'][0]['jid'])
            test_case.set_minions(task_information['return'][0]['minions'])
            self.application_logger.debug(task_information)
            return True
        except URLError as e:
            self.application_logger.exception('Failed to start test case. Salt API URLError: %s',
                                              e.args[0].strerror)
            self.test_report_logger.debug(e)
            return False
        except KeyError as e:
            self.application_logger.exception('Failed to start test case. Probably devices match no minions')
            self.test_report_logger.debug(e)
            return False
        except Exception as e:
            # BUG FIX: `e.message` does not exist on Python 3 exceptions and
            # would raise AttributeError inside the handler; log the
            # exception object itself instead.
            self.application_logger.exception('Failed to start test case. Exception: %s', e)
            self.test_report_logger.debug(e)
            return False

    def _start_test_sync(self, test_case):
        """Run setup tasks, execute the test case synchronously, store the
        result and run any teardown tasks."""
        # BUG FIX: initialise saved_data so test cases without setup tasks do
        # not hit a NameError below (mirrors _start_test_async).
        saved_data = {}
        if hasattr(test_case, 'setup_tasks'):
            self.test_report_logger.debug('%s has %d setup tasks',
                                          test_case.name, len(test_case.setup_tasks))
            saved_data = self._start_tasks(test_case.setup_tasks)
            test_case.saved_data = saved_data
        self.test_report_logger.debug('%s start sync test', test_case.name)
        result = self._get_task_result(test_case, saved_data)
        test_case.set_minions(result.keys())
        test_case.set_actual_result(result)
        if hasattr(test_case, 'teardown_tasks'):
            self.test_report_logger.debug('%s has %d teardown tasks',
                                          test_case.name, len(test_case.teardown_tasks))
            self._start_tasks(test_case.teardown_tasks, saved_data)

    def _start_tasks(self, tasks, result=None):
        """Execute setup/teardown tasks sequentially.

        A task's optional `save` key stores the response value in `result`
        under that key; later tasks see it as Jinja2 render data.

        :param tasks: list of task dicts (devices/command/parameter/save)
        :param result: dict of previously saved values, if any
        :return: dict of saved values
        :raises Exception: when no device responds or a minion reports an
            unavailable command or invalid arguments
        """
        if result is None:
            result = {}
        for task in tasks:
            save = task.pop('save', None)
            parameter = self.create_test_task(render_data=result, **task)
            response = self.api.start_task(parameter)
            self.test_report_logger.debug('%s %s returned %s',
                                          parameter['function'], parameter['arguments'], response)
            # Idiom fix: empty-dict truthiness instead of `not len(...)`.
            if not response['return'][0]:
                raise Exception('No device responding. devices: {}, command: {}'.format(task['devices'],
                                                                                        task['command']))
            for minion, value in response['return'][0].items():
                if value is None:
                    raise Exception('No response value from minion {}'.format(minion))
                elif 'is not available' in value:
                    raise Exception('Command {} not available on {}'.format(task['command'], minion))
                elif 'Passed invalid arguments' in value:
                    raise Exception('Passed invalid arguments for {} on {}. Arguments: {}'.format(
                        task['command'], minion, task['parameter']))
            if save:
                # Normally only one minion will answer to a saved task. In this
                # case the value is directly saved in a dictionary. On multiple
                # answers, the minion will be the dictionary key to access the
                # value.
                # Multiple answer example for `save: ip`: `result['ip']['minion_name']`
                # One answer example for `save: ip`: `result['ip']`
                # The test writer has to know when more than one minion will respond.
                try:
                    if len(response['return'][0]) == 1:
                        result[save] = response['return'][0].popitem()[1]
                    elif len(response['return'][0]) > 1:
                        result[save] = response['return'][0]
                except KeyError:
                    pass
        return result

    @staticmethod
    def create_test_task(devices, command, parameter=None, render_data=None):
        """Build a salt task dict, rendering devices/command/parameters as
        Jinja2 templates against `render_data`. Commands without a module
        prefix are namespaced under `nuts.`."""
        if parameter is None:
            parameter = []
        if render_data is None:
            render_data = {}
        devices = Template(devices).render(render_data)
        command = Template(command).render(render_data)
        parameter = list(map(lambda x: Template(x).render(render_data), parameter))
        if '.' not in command:
            command = 'nuts.{}'.format(command)
        task = {
            'targets': devices,
            'function': command,
            'arguments': parameter
        }
        return task

    def _get_task_result(self, test_case, saved_data):
        """Run `test_case` synchronously and return its extracted result,
        or an ERROR result dict when the task fails."""
        result = ''
        task = self.create_test_task(test_case.devices, test_case.command,
                                     test_case.parameter, saved_data)
        try:
            result = self.api.start_task(task)
            self.application_logger.debug('%s returned %s ', test_case.name, result)
            if 'ERROR' in result:
                raise Exception('A salt error occurred!\n' + result)
            return self._extract_return(result)
        except Exception as e:
            self.application_logger.exception('Error with %s \nSalt-Error: %s ', task, result)
            self.test_report_logger.exception(e)
            return {
                'resulttype': 'single',
                'result': 'ERROR'
            }

    @staticmethod
    def _extract_return(result):
        '''This helper extracts the returnvalue from the result
        At the moment it only expects one return value for each task'''
        result_dict = result['return'][0]
        return {k: Runner._extract_result_entry(v) for k, v in result_dict.items()}

    @staticmethod
    def _extract_result_entry(result_entry):
        """Normalise a single minion entry: a missing (None) value becomes an
        explicit single/None result dict."""
        if result_entry is None:
            return {
                'resulttype': 'single',
                'result': None
            }
        else:
            return result_entry

    def run_all(self):
        """Connect to the salt API and run the whole suite: start all async
        test cases, collect their results, then run the sync test cases."""
        try:
            self.api.connect()
        except URLError as e:
            self.application_logger.exception('Failed to connect to the server. Salt API URLError: %s',
                                              e.args[0].strerror)
            self.test_report_logger.debug(e)
            exit(1)
        # Run async tests
        started_counter = 0
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            futures = []
            for test in self.test_suite.test_cases_async:
                self.application_logger.info('Start test ' + test.name)
                futures.append(executor.submit(self._start_test_async, test))
            for x in concurrent.futures.as_completed(futures):
                if not x.result():
                    self.application_logger.error('Error starting async test')
                    executor.shutdown(wait=False)
                    exit(1)
                started_counter += 1
                self.application_logger.info('Started test %s of %s', started_counter,
                                             len(self.test_suite.test_cases_async))
        test_counter = 0
        self.application_logger.info('----------------Started all tests-----------------')
        for test in self.test_suite.test_cases_async:
            self.application_logger.info('CollectResult of Test ' + test.name)
            self._collect_result(test)
            test_counter += 1
            self.application_logger.info('Collected results from %s of %s tests', test_counter,
                                         len(self.test_suite.test_cases_async))
        self.application_logger.info('--------------Collected all results---------------')
        # Run sync tests
        for test in self.test_suite.test_cases_sync:
            self.application_logger.info('Start Test ' + test.name)
            self._start_test_sync(test)
            self.application_logger.info('\n')
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Penguin Computing, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import logging
import cookielib
import requests
try:
import simplejson as json
except ImportError:
import json
from beowebclient.common.client import HTTPClient
from beowebclient.common import exceptions as beowebexc
# Module-level logger for the beoweb client.
LOG = logging.getLogger(__name__)
class BeowebClient(HTTPClient):
    """Class that implements the Beoweb API

    Create an instance with the Beoweb connection details::

        >>> client = BeowebClient(UNAME, HOSTNAME)

    Then login to gain a session ID and call methods

        >>> client.password_login(PASSWORD)
        >>> client.get_jobs()
    """

    def __init__(self, user, beoweb_host, ssl=True, port=None, cjfile=None):
        """
        :param user: username to authenticate as
        :param beoweb_host: Beoweb server hostname
        :param ssl: use HTTPS when True
        :param port: optional TCP port override
        :param cjfile: optional path of an LWP cookie jar file used to
            persist the session cookie between runs
        """
        super(BeowebClient, self).__init__(user, beoweb_host, ssl=ssl,
                                           port=port)
        self.client.params.update({'format': "json"})
        self.cjfile = None
        if cjfile:
            self.cjfile = cjfile
            self.cj = cookielib.LWPCookieJar(self.cjfile)
            if os.path.isfile(self.cjfile):
                try:
                    self.cj.load(self.cjfile)
                except cookielib.LoadError as e:
                    # A corrupt cookie jar is recreated from scratch rather
                    # than aborting the client.
                    LOG.error(("Problem occured loading CookieJar file: %s, "
                               "error: %s"), self.cjfile, e)
                    LOG.error("Attempting to destroy and recreate file")
                    os.remove(self.cjfile)
            self.client.cookies = self.cj

    def close(self):
        """finalize any connections with Beoweb"""
        if self.cjfile:
            self.cj.save(filename=self.cjfile)
        self.client.close()

    def password_login(self, password):
        """Attempt to gain a session ID from beoweb using username/password

        Requires the "crypt" library, which is only found on *nix OS's

        Returns beoweb results if no error is found. Beoweb sets the session
        ID in a cookie, so the session ID returned may not be needed depending
        on configuration

        :param password: plaintext string containing password to send
        :returns: Beoweb JSON data
        """
        try:
            from crypt import crypt
        except ImportError:
            # BUG FIX: was the Python-2-only `except ImportError, e` syntax;
            # the bound exception was never used, so it is dropped.
            LOG.critical("Unable to use crypt library.")
            raise beowebexc.BeowebAuthError(
                "Unable to use crypt library. Password auth not supported")
        params = {"user": self.user}
        try:
            resp = self.request("auth/request_public_keys", "GET",
                                params=params)
        except requests.exceptions.HTTPError as e:
            # BUG FIX: `e` was referenced here without being bound
            # (`except ...:` lacked `as e`), raising a NameError.
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        keys = json.loads(resp.text)
        mode = keys['data']['mode']
        ts = keys['data']['tempSalt']
        ps = keys['data']['permSalt']
        # Basically, turn your plaintext pass into the shadow-crypted pass,
        # then crypt that with the temp salt.
        passcode = crypt(password, "$" + str(mode) + "$" + ps)
        passcode = passcode.split("$")[3]
        passcode = crypt(passcode, "$" + str(mode) + "$" + ts)
        passcode = passcode.split("$")[3]
        data = {"user": self.user, "password": passcode}
        try:
            resp = self.request("auth/login", "POST", data=data)
        except requests.exceptions.HTTPError as e:
            # BUG FIX: same unbound `e` as above.
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        results = json.loads(resp.text)
        if not results['success']:
            raise beowebexc.BeowebAuthError(results['error'])
        return results

    def cloudauth_login(self, auth_url, key, secret):
        """Attempt to gain a session ID from beoweb using CloudAuth Tokens

        Requires the cloudauthclient library

        Use the user's CloudAuth API key and secret to get a token
        from the given CloudAuth server, then provide the system username
        and token to beoweb

        Returns beoweb results if no error is found. Beoweb sets the session
        ID in a cookie, so the session ID returned may not be needed depending
        on configuration

        :param auth_url: https URL for CloudAuth server
        :param key: API Key
        :param secret: API Secret
        :returns: Beoweb JSON data
        """
        try:
            from cloudauthclient.v1.client import Client
            from cloudauthclient import exceptions as authexc
        except ImportError:
            LOG.critical("Unable to import CloudAuth library")
            raise beowebexc.BeowebAuthError(
                "Unable to import CloudAuth library.")
        # Get token from cloudauth
        try:
            authclient = Client(auth_url, key, secret)
            authclient.authenticate()
        except authexc.AuthenticationError:
            raise beowebexc.BeowebAuthError(
                "Unable to get CloudAuth Token")
        # log in to beoweb with token
        LOG.debug("Successfully aquired token from cloudauth: %s",
                  authclient.token)
        data = {"beoweb_user": self.user,
                "auth_token": authclient.token}
        try:
            resp = self.request("cloud_auth/login", "POST", data=data)
        except requests.exceptions.HTTPError as e:
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        results = json.loads(resp.text)
        if not results['success']:
            raise beowebexc.BeowebAuthError(results['error'])
        return results

    def sshkey_login(self, auth_host, auth_port):
        """Attempt to gain a session ID from beoweb using SSH public key

        Requires the paramiko library

        Connects to the given beoweb auth host/port and sends the system
        username and any found SSH public keys.

        Return the Session ID upon success -- this auth method will not
        result in beoweb setting the Session ID in a cookie, so the Session ID
        is automatically added to the param string of all subsequent requests
        to Beoweb

        :param auth_host: hostname of SSH server to connect to
        :param auth_port: TCP port of SSH server to connect to
        :returns: new session ID from Beoweb
        """
        try:
            from paramiko import SSHClient, AutoAddPolicy
            from paramiko import AuthenticationException
        except ImportError:
            LOG.critical("Unable to import Paramiko library")
            raise beowebexc.BeowebAuthError(
                "Unable to import Paramiko library.")
        try:
            client = SSHClient()
            client.set_missing_host_key_policy(AutoAddPolicy())
            LOG.debug("SSH Auth connecting to %s:%s, user: %s",
                      auth_host, auth_port, self.user)
            client.connect(auth_host,
                           port=auth_port,
                           username=self.user)
            msg = client.get_transport().global_request("session_id",
                                                        wait=True)
            client.close()
            if msg:
                sid = msg.get_string()
                self.client.params.update({'session_id': sid})
            else:
                LOG.critical(("Authentication was successful, "
                              "but no session ID returned"))
                raise beowebexc.BeowebAuthError(
                    "Unable to get session ID from beoweb")
            return sid
        except AuthenticationException:
            raise beowebexc.BeowebAuthError(
                "SSH Key authentication failed")
        finally:
            # Best-effort cleanup; client may already be closed or never
            # have connected. (Narrowed from a bare `except:`.)
            try:
                client.close()
            except Exception:
                pass

    def logout(self, session_id=None):
        """Logs out of beoweb, invalidating the session

        :param session_id: optional explicit session_id to, for use if
            session is not already in a cookie.
        """
        params = {}
        if session_id:
            params = {"session_id": session_id}
        try:
            self.request("auth/logout", "GET", params=params)
        except requests.exceptions.HTTPError as e:
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")

    def delete_jobs(self, jobs):
        """Deletes each of the jobs found in jobs

        :param jobs: list of job ids to delete
        :returns: JSON data structure from Beoweb

        Example:
            >>> beoclient.delete_jobs(["123.pod"])
        """
        job_list = ",".join(jobs)
        data = {"job_ids": job_list}
        try:
            resp = self.request("scheduler/delete_job", "POST", data=data)
        except requests.exceptions.HTTPError as e:
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        results = json.loads(resp.text)
        # NOTE(review): this endpoint appears to return 'success' as the
        # string "true", unlike the boolean used by the other endpoints --
        # confirm against the Beoweb API before changing.
        if results['success'] != "true":
            raise beowebexc.BeowebJobDeleteError(results['error'])

    def release_job(self, job):
        """Release scheduler hold on given job

        :param job: Job id to release
        :returns: JSON data structure from Beoweb

        Example:
            >>> beoclient.release_job("123.pod")
        """
        data = {"jobid": job, "pod_user": self.user}
        try:
            resp = self.request("pod/release", "POST", data=data)
        except requests.exceptions.HTTPError as e:
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        results = json.loads(resp.text)
        if not results['success']:
            raise beowebexc.BeowebJobReleaseError(
                "" if "msg" not in results else results['msg'])

    def get_jobs(self, job_id=None, group=None):
        """Get job details from the scheduler

        :param job_id: optional job id to request single job status
        :param group: optional Unix group name to request all jobs from group
            members
        :returns: JSON data from Beoweb

        Example:
            >>> beoclient.get_jobs()
            >>> beoclient.get_jobs(job_id="123.pod")
        """
        data = {"pod_user": self.user}
        data.update({"jobid": "ALL" if not job_id else job_id})
        if group:
            data.update({"group": group})
        try:
            resp = self.request("pod/status", "POST", data=data)
        except requests.exceptions.HTTPError as e:
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        results = json.loads(resp.text)
        if not results['success']:
            raise beowebexc.BeowebJobGetError(
                "" if "msg" not in results else results['msg'])
        return results

    def submit_job(self, jobscript,
                   scheduler=None, overwrite=True, hold=False,
                   **kwargs):
        """Submit a new job to the scheduler

        :param jobscript: dictionary of {name: fileobject}
        :param scheduler: scheduler to use. One of ["TRQ","SGE"].
            "TRQ" is default if not given
        :param overwrite: boolean flag to overwrite an existing script
        :param hold: boolean flag whether to submit job with a scheduler hold
        :returns: JSON data from Beoweb

        Example:
            >>> beoclient.submit_job({'test.sub':
                                      open('/root/test.sub', 'rb')})
        """
        data = {"pod_user": self.user,
                "scheduler": "TRQ" if not scheduler else scheduler}
        # Beoweb just looks for the presence of overwrite and hold, not value
        if overwrite:
            data.update({"overwrite": "True"})
        if hold:
            data.update({"hold": "True"})
        if "data" in kwargs:
            data.update(kwargs["data"])
        if "hash_algo" not in data:
            data.update({"hash_algo": "md5"})
        # COMPAT FIX: dict.items() is not subscriptable on Python 3;
        # materialise it first (behaviour-identical on Python 2).
        files = {'jobscript': list(jobscript.items())[0]}
        try:
            resp = self.request("pod/submit", "POST", data=data, files=files)
        except requests.exceptions.HTTPError as e:
            LOG.error("HTTP Error from beoweb: %s", e)
            raise beowebexc.BeowebAPIError(
                "HTTP Error received from Beoweb host")
        except beowebexc.BeowebSessionError:
            # rewind the file if we will be retrying after authentication
            files['jobscript'][1].seek(0)
            raise
        results = json.loads(resp.text)
        if not results['success']:
            raise beowebexc.BeowebJobSubmitError(
                "" if "msg" not in results else results['msg'])
        return results
| |
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default SPIRAL agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import dm_env as environment
import six
import sonnet as snt
import tensorflow as tf
from spiral.agents import utils
# Alias TF1's contrib copy of the nest utilities for structure mapping.
nest = tf.contrib.framework.nest
# Spatial action arguments need to be treated in a special way.
LOCATION_KEYS = ["end", "control"]
def _xy_grids(batch_size, height, width):
    """Build per-pixel x/y coordinate grids spanning [-1, 1].

    Returns two tensors of shape [batch_size, height, width, 1]: the x
    coordinate varies along the width axis, the y coordinate along the
    height axis.
    """
    xs = tf.linspace(-1., 1., width, name="linspace")
    x_grid = tf.tile(tf.reshape(xs, [1, 1, width, 1]),
                     [batch_size, height, 1, 1])
    ys = tf.linspace(-1., 1., height, name="linspace")
    y_grid = tf.tile(tf.reshape(ys, [1, height, 1, 1]),
                     [batch_size, 1, width, 1])
    return x_grid, y_grid
class AutoregressiveHeads(snt.AbstractModule):
    """A module for autoregressive action heads.

    Action components are sampled one at a time in a fixed order; each
    sampled component is embedded and folded back into the latent `z`
    through a residual MLP before sampling the next component.
    """

    # Per-environment component orderings; filtered against the actual
    # action spec in the constructor.
    ORDERS = {
        "libmypaint": ["flag", "end", "control", "size", "pressure",
                       "red", "green", "blue"],
        "fluid": ["flag", "end", "control", "size", "speed",
                  "red", "green", "blue", "alpha"],
    }

    def __init__(self,
                 z_dim,
                 embed_dim,
                 action_spec,
                 decoder_params,
                 order,
                 grid_height,
                 grid_width,
                 name="autoregressive_heads"):
        """Initialises the heads.

        Args:
          z_dim: size of the latent vector fed through the heads.
          embed_dim: size of the per-component action embedding.
          action_spec: mapping of component name to a bounded spec with
            `minimum`/`maximum` attributes.
          decoder_params: kwargs for the ConvDecoder used by spatial heads.
          order: key into ORDERS selecting the sampling order.
          grid_height: height of the spatial action grid.
          grid_width: width of the spatial action grid.
          name: sonnet module name.
        """
        super(AutoregressiveHeads, self).__init__(name=name)
        self._z_dim = z_dim
        self._action_spec = action_spec
        self._grid_height = grid_height
        self._grid_width = grid_width
        # Filter the order of actions according to the actual action specification.
        order = self.ORDERS[order]
        self._order = [k for k in order if k in action_spec]
        with self._enter_variable_scope():
            self._action_embeds = collections.OrderedDict(
                [(k, snt.Linear(output_size=embed_dim,
                                name=k + "_action_embed"))
                 for k in six.iterkeys(action_spec)])
            self._action_heads = []
            for k, v in six.iteritems(action_spec):
                if k in LOCATION_KEYS:
                    # Spatial components decode a 4x4 feature map up to the
                    # full action grid.
                    decoder = utils.ConvDecoder(  # pylint: disable=not-callable
                        name=k + "_action_decoder",
                        **decoder_params)
                    action_head = snt.Sequential([
                        snt.BatchReshape([4, 4, -1]),
                        decoder,
                        snt.BatchFlatten()], name=k + "_action_head")
                else:
                    output_size = v.maximum - v.minimum + 1
                    action_head = snt.Linear(
                        output_size=output_size, name=k + "_action_head")
                self._action_heads.append((k, action_head))
            self._action_heads = collections.OrderedDict(self._action_heads)
            self._residual_mlps = {}
            for k, v in six.iteritems(self._action_spec):
                self._residual_mlps[k] = snt.nets.MLP(
                    output_sizes=[16, 32, self._z_dim], name=k + "_residual_mlp")

    def _build(self, z):
        logits = {}
        action = {}
        for k in self._order:
            # BUG FIX: the message was a plain literal with an unexpanded
            # "{k}" placeholder; interpolate the component name explicitly.
            logits[k] = tf.check_numerics(
                self._action_heads[k](z),
                "Logits for {} are not valid".format(k))
            a = tf.squeeze(tf.multinomial(logits[k], num_samples=1), -1)
            a = tf.cast(a, tf.int32, name=k + "_action")
            action[k] = a
            depth = self._action_spec[k].maximum - self._action_spec[k].minimum + 1
            # Asserts actions are valid.
            assert_op = tf.assert_less_equal(a, tf.constant(depth, dtype=a.dtype))
            with tf.control_dependencies([assert_op]):
                if k in LOCATION_KEYS:
                    if depth != self._grid_height * self._grid_width:
                        # BUG FIX: placeholders were never interpolated.
                        raise AssertionError(
                            "Action space {} != grid_height * grid_width "
                            "{}x{}.".format(depth, self._grid_height,
                                            self._grid_width))
                    w = self._grid_width
                    h = self._grid_height
                    # Convert the flat grid index into (y, x) in [-1, 1].
                    y = -1.0 + 2.0 * tf.cast(a // w, tf.float32) / (h - 1)
                    x = -1.0 + 2.0 * tf.cast(a % w, tf.float32) / (w - 1)
                    a_vec = tf.stack([y, x], axis=1)
                else:
                    a_vec = tf.one_hot(a, depth)
                a_embed = self._action_embeds[k](a_vec)
                residual = self._residual_mlps[k](tf.concat([z, a_embed], axis=1))
                z = tf.nn.relu(z + residual)
        # Re-key outputs in action-spec order for a deterministic structure.
        action = collections.OrderedDict(
            [(k, action[k]) for k in six.iterkeys(self._action_spec)])
        logits = collections.OrderedDict(
            [(k, logits[k]) for k in six.iterkeys(self._action_spec)])
        return logits, action
class Agent(snt.AbstractModule):
    """A module for the default agent: conv torso, LSTM core and
    autoregressive action heads."""

    def __init__(
        self,
        action_spec,
        input_shape,
        grid_shape,
        action_order,
        name="default"):
        """Initialises the agent.

        Args:
          action_spec: mapping of action component name to a bounded spec.
          input_shape: (height, width) of the canvas observation.
          grid_shape: (height, width) of the spatial action grid.
          action_order: key into AutoregressiveHeads.ORDERS.
          name: sonnet module name.
        """
        super(Agent, self).__init__(name=name)
        self._action_order = action_order
        self._action_spec = collections.OrderedDict(action_spec)
        self._z_dim = 256
        input_height, input_width = input_shape
        self._grid_height, self._grid_width = grid_shape
        enc_factor_h = input_height // 8  # Height of feature after encoding is 8
        enc_factor_w = input_width // 8  # Width of feature after encoding is 8
        dec_factor_h = self._grid_height // 4  # Height of feature after core is 4
        dec_factor_w = self._grid_width // 4  # Width of feature after core is 4
        self._encoder_params = {
            "factor_h": enc_factor_h,
            "factor_w": enc_factor_w,
            "num_hiddens": 32,
            "num_residual_layers": 8,
            "num_residual_hiddens": 32,
        }
        self._decoder_params = {
            "factor_h": dec_factor_h,
            "factor_w": dec_factor_w,
            "num_hiddens": 32,
            "num_residual_layers": 8,
            "num_residual_hiddens": 32,
            "num_output_channels": 1,
        }
        with self._enter_variable_scope():
            self._core = snt.LSTM(self._z_dim)

    def initial_state(self, batch_size):
        """Returns a zeroed AgentState (LSTM state + previous action)."""
        return utils.AgentState(
            lstm_state=self._core.initial_state(batch_size),
            prev_action=nest.map_structure(
                lambda spec: tf.zeros((batch_size,) + spec.shape, dtype=spec.dtype),
                self._action_spec))

    def _maybe_reset_core_state(self, core_state, should_reset):
        """Replaces the LSTM state with a fresh initial state for the batch
        elements flagged in `should_reset`."""
        with tf.control_dependencies(None):
            if should_reset.shape.is_fully_defined():
                batch_size = should_reset.shape[0]
            else:
                batch_size = tf.shape(should_reset)[0]
            initial_core_state = self._core.initial_state(batch_size)
        # Use a reset state for the selected elements in the batch.
        state = nest.map_structure(
            lambda i, s: tf.where(should_reset, i, s),
            initial_core_state, core_state)
        return state

    def _compute_condition(self, action, mask):
        """Embeds the previous action (masked per component) into a single
        conditioning vector."""
        mask = tuple(mask[k] for k in self._action_spec.keys())
        conds = []
        # Relies on `action` being an OrderedDict keyed like _action_spec.
        action = action.values()
        for k, a, m in zip(self._action_spec.keys(), action, mask):
            depth = self._action_spec[k].maximum - self._action_spec[k].minimum + 1
            embed = snt.Linear(16)
            if k in LOCATION_KEYS:
                if depth != self._grid_height * self._grid_width:
                    # BUG FIX: the message placeholders were never
                    # interpolated; format them explicitly.
                    raise AssertionError(
                        "Action space {} != grid_height * grid_width "
                        "{}x{}.".format(depth, self._grid_height,
                                        self._grid_width))
                w = self._grid_width
                h = self._grid_height
                # Convert the flat grid index into (y, x) in [-1, 1].
                y = -1.0 + 2.0 * tf.cast(a // w, tf.float32) / (h - 1)
                x = -1.0 + 2.0 * tf.cast(a % w, tf.float32) / (w - 1)
                a_vec = tf.concat([y, x], axis=1)
            else:
                a_vec = tf.one_hot(a, depth)[:, 0, :]
            cond = embed(a_vec) * m
            conds.append(cond)
        cond = tf.concat(conds, axis=1)
        cond = snt.nets.MLP([64, 32, 32])(cond)
        return cond

    @snt.reuse_variables
    def _torso(self,
               observation,
               prev_action,
               should_reset):
        """Encodes the observation, previous action and noise sample into a
        flat feature vector for the LSTM core."""
        batch_size, x_h, x_w, _ = observation["canvas"].get_shape().as_list()
        x_grid, y_grid = _xy_grids(batch_size, x_h, x_w)
        should_reset = tf.squeeze(should_reset, -1)
        # Zero out the previous action on episode boundaries.
        prev_action = nest.map_structure(lambda pa: tf.where(  # pylint: disable=g-long-lambda
            should_reset, tf.zeros_like(pa), pa), prev_action)
        spatial_inputs = [observation["canvas"]]
        spatial_inputs += [x_grid, y_grid]
        data = tf.concat(spatial_inputs, axis=-1)
        with tf.variable_scope("torso"):
            h = snt.Conv2D(32, [5, 5])(data)
            # Compute conditioning vector based on the previously taken action.
            prev_action = nest.map_structure(
                lambda pa: tf.expand_dims(pa, -1), prev_action)
            cond = self._compute_condition(prev_action, observation["action_mask"])
            # Adjust the conditioning vector according to the noise sample
            # provided to the model. This is inspired by the original GAN framework.
            # NOTE: Unlike in normal GANs, this noise sample is not the only source
            # of stochasticity. Stochastic actions contribute as well.
            assert observation["noise_sample"].shape.ndims == 2
            cond += snt.nets.MLP([64, 32, 32])(observation["noise_sample"])
            cond = tf.reshape(cond, [batch_size, 1, 1, -1])
            h += cond
            h = tf.nn.relu(h)
            encoder = utils.ConvEncoder(**self._encoder_params)
            h = snt.BatchFlatten()(encoder(h))
            h = snt.Linear(256)(tf.nn.relu(h))
        return h

    @snt.reuse_variables
    def _head(self, core_output):
        """Samples actions and computes logits and a value baseline from the
        core output."""
        with tf.variable_scope("head"):
            head = AutoregressiveHeads(
                z_dim=self._z_dim,
                embed_dim=16,
                action_spec=self._action_spec,
                grid_height=self._grid_height,
                grid_width=self._grid_width,
                decoder_params=self._decoder_params,
                order=self._action_order)
            logits, actions = head(  # pylint: disable=not-callable
                core_output)
            baseline = tf.squeeze(snt.Linear(1)(core_output), -1)
        return utils.AgentOutput(actions, logits, baseline)

    def step(self,
             step_type,
             observation,
             prev_state):
        """Computes a single step of the agent."""
        with self._capture_variables():
            should_reset = tf.equal(step_type, environment.StepType.FIRST)
            torso_output = self._torso(
                observation,
                prev_state.prev_action,
                should_reset)
            lstm_state = self._maybe_reset_core_state(
                prev_state.lstm_state, should_reset)
            core_output, new_core_state = self._core(torso_output, lstm_state)
            agent_output = self._head(core_output)
            new_state = utils.AgentState(
                prev_action=agent_output.action,
                lstm_state=new_core_state)
        return agent_output, new_state

    def _build(self, *args):  # Unused.
        # pylint: disable=no-value-for-parameter
        return self.step(*args)
        # pylint: enable=no-value-for-parameter
| |
import json
from datetime import datetime
import mock
from pyquery import PyQuery as pq
from django_extensions.db.fields.json import JSONList
from olympia import amo
from olympia.addons.models import Addon, AppSupport
from olympia.addons.utils import generate_addon_guid
from olympia.amo.tests import ESTestCase, TestCase, version_factory
from olympia.amo.urlresolvers import reverse
from olympia.compat import FIREFOX_COMPAT
from olympia.compat.cron import compatibility_report
from olympia.compat.indexers import AppCompatIndexer
from olympia.compat.models import CompatReport, CompatTotals
from olympia.stats.models import UpdateCount
from olympia.versions.models import ApplicationsVersions
class TestCompatReportModel(TestCase):
    """Coverage for `CompatReport.get_counts` aggregation."""

    def test_none(self):
        # A guid with no reports at all yields zero counts.
        assert CompatReport.get_counts('xxx') == {'success': 0, 'failure': 0}

    def test_some(self):
        guid = '{2fa4ed95-0317-4c6a-a74c-5f3e3912c1f9}'
        # (guid, works_properly, app_multiprocess_enabled) per report; only
        # reports for the queried guid should be counted.
        fixtures = [
            (guid, True, True),
            (guid, True, False),
            (guid, False, False),
            ('ballin', True, True),
            ('ballin', False, True),
        ]
        for report_guid, works, e10s_enabled in fixtures:
            CompatReport.objects.create(
                guid=report_guid,
                works_properly=works,
                app_multiprocess_enabled=e10s_enabled,
                multiprocess_compatible=True)
        assert CompatReport.get_counts(guid) == {'success': 2, 'failure': 1}
class TestIncoming(TestCase):
    """Tests for the /compatibility/incoming report-submission endpoint."""
    def setUp(self):
        super(TestIncoming, self).setUp()
        self.url = reverse('compat.incoming')
        # This is the structure sent to /compatibility/incoming from the ACR.
        self.data = {
            'appBuild': '20110429030623',
            'appGUID': '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
            'appVersion': '6.0a1',
            'clientOS': 'Intel Mac OS X 10.6',
            'comments': 'what the what',
            'guid': 'jid0-VsMuA0YYTKCjBh5F0pxHAudnEps@jetpack',
            'otherAddons': [['yslow@yahoo-inc.com', '2.1.0']],
            'version': '2.2',
            'worksProperly': False,
            'appMultiprocessEnabled': True,
            'multiprocessCompatible': True,
        }
        self.json = json.dumps(self.data)
    def test_success(self):
        # A valid payload creates one CompatReport; the camelCase payload
        # keys end up in the snake_case model fields.
        count = CompatReport.objects.count()
        r = self.client.post(self.url, self.json,
                             content_type='application/json')
        assert r.status_code == 204
        assert CompatReport.objects.count() == count + 1
        cr = CompatReport.objects.order_by('-id')[0]
        assert cr.app_build == self.data['appBuild']
        assert cr.app_guid == self.data['appGUID']
        assert cr.works_properly == self.data['worksProperly']
        assert cr.comments == self.data['comments']
        assert cr.client_ip == '127.0.0.1'
        assert cr.app_multiprocess_enabled == (
            self.data['appMultiprocessEnabled'])
        assert cr.multiprocess_compatible == (
            self.data['multiprocessCompatible'])
        # Check that the other_addons field is stored as json.
        vals = CompatReport.objects.filter(id=cr.id).values('other_addons')
        # django-extensions wraps values in `JSONList` so we'll test this
        # explicitly. We can't see the actually stored JSON blob easily
        assert isinstance(vals[0]['other_addons'], JSONList)
        assert vals[0]['other_addons'] == self.data['otherAddons']
    def test_e10s_status_unknown(self):
        # A payload without multiprocessCompatible is stored as None
        # (unknown), not as False.
        del self.data['multiprocessCompatible']
        self.json = json.dumps(self.data)
        count = CompatReport.objects.count()
        r = self.client.post(self.url, self.json,
                             content_type='application/json')
        assert r.status_code == 204
        assert CompatReport.objects.count() == count + 1
        cr = CompatReport.objects.order_by('-id')[0]
        assert cr.multiprocess_compatible is None
    def test_bad_json(self):
        # Unparseable bodies are rejected with a 400.
        r = self.client.post(self.url, 'wuuu#$',
                             content_type='application/json')
        assert r.status_code == 400
    def test_bad_field(self):
        # Unknown payload keys are rejected rather than silently ignored.
        self.data['save'] = 1
        js = json.dumps(self.data)
        r = self.client.post(self.url, js, content_type='application/json')
        assert r.status_code == 400
class TestReporter(TestCase):
    """Tests for the compat.reporter lookup/landing page."""
    fixtures = ['base/addon_3615']
    def setUp(self):
        super(TestReporter, self).setUp()
        self.addon = Addon.objects.get(pk=3615)
        # {0} is filled in with an add-on id, slug, guid or guid prefix.
        self.url = reverse('compat.reporter') + '?guid={0}'
    def test_success(self):
        r = self.client.get(reverse('compat.reporter'))
        assert r.status_code == 200
    def test_redirect(self):
        # Any identifier (id, slug, full guid, guid prefix) redirects to
        # the detail page once a report exists for the add-on.
        CompatReport.objects.create(guid=self.addon.guid,
                                    app_guid=amo.FIREFOX.guid)
        expected = reverse('compat.reporter_detail', args=[self.addon.guid])
        self.assert3xx(
            self.client.get(self.url.format(self.addon.id)), expected)
        self.assert3xx(
            self.client.get(self.url.format(self.addon.slug)), expected)
        self.assert3xx(
            self.client.get(self.url.format(self.addon.guid)), expected)
        self.assert3xx(
            self.client.get(self.url.format(self.addon.guid[:5])), expected)
    @mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
                lambda r, a: True)
    def test_unlisted_addon_redirect_for_authorized(self):
        """Can display the reports for an unlisted addon if authorized."""
        self.make_addon_unlisted(self.addon)
        self.test_redirect()
    @mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
                lambda r, a: False)
    def test_unlisted_addon_no_redirect_for_unauthorized(self):
        """If the user isn't authorized, don't redirect to unlisted addon."""
        self.make_addon_unlisted(self.addon)
        CompatReport.objects.create(guid=self.addon.guid,
                                    app_guid=amo.FIREFOX.guid)
        assert self.client.get(
            self.url.format(self.addon.id)).status_code == 200
        assert self.client.get(
            self.url.format(self.addon.slug)).status_code == 200
        assert self.client.get(
            self.url.format(self.addon.guid)).status_code == 200
        assert self.client.get(
            self.url.format(self.addon.guid[:5])).status_code == 200
    @mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
                lambda r, a: False)
    def test_mixed_listed_unlisted_redirect_for_unauthorized(self):
        """If the user isn't authorized, and the add-on has both unlisted and
        listed versions, redirect to show the listed versions."""
        self.make_addon_unlisted(self.addon)
        version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
        self.test_redirect()
    def test_unlisted_addons_listed_in_left_sidebar(self):
        """Display unlisted addons in the 'reports for your add-ons' list."""
        self.make_addon_unlisted(self.addon)
        self.client.login(email='del@icio.us')
        response = self.client.get(reverse('compat.reporter'))
        assert self.addon in response.context['addons']
class TestReporterDetail(TestCase):
    """Tests for the compat.reporter_detail per-add-on report listing."""
    fixtures = ['base/addon_3615']
    def setUp(self):
        super(TestReporterDetail, self).setUp()
        self.addon = Addon.objects.get(id=3615)
        self.url = reverse('compat.reporter_detail', args=[self.addon.guid])
        # Pks of the reports created by _generate(), in creation order.
        # check_table()'s `report_pks` argument is a list of *indexes*
        # into this list, not raw pks.
        self.reports = []
    def _generate(self, version=None):
        """Creates 3 working and 2 failing Firefox reports for `version`."""
        # Each tuple: (app_guid, app_version, works_properly,
        # multiprocess_compatible, app_multiprocess_enabled).
        apps = [
            (amo.FIREFOX.guid, FIREFOX_COMPAT[0]['main'], True, False, False),
            (amo.FIREFOX.guid, FIREFOX_COMPAT[0]['main'], True, False, False),
            (amo.FIREFOX.guid, FIREFOX_COMPAT[1]['main'], True, True, False),
            (amo.FIREFOX.guid, FIREFOX_COMPAT[2]['main'], False, False, False),
            (amo.FIREFOX.guid, FIREFOX_COMPAT[3]['main'], False, False, False),
        ]
        if version is None:
            version = self.addon.find_latest_version(channel=None)
        for (app_guid, app_version, works_properly, multiprocess_compatible,
             app_multiprocess_enabled) in apps:
            report = CompatReport.objects.create(
                guid=self.addon.guid,
                version=version,
                app_guid=app_guid,
                app_version=app_version,
                works_properly=works_properly,
                multiprocess_compatible=multiprocess_compatible,
                app_multiprocess_enabled=app_multiprocess_enabled)
            self.reports.append(report.pk)
    def check_table(
            self, data=None, good=0, bad=0, appver=None, report_pks=None):
        """GETs the page with query `data` and checks the rendered table.

        `good`/`bad` are the expected success/failure row counts, `appver`
        the expected selected value of the application filter, and
        `report_pks` the indexes (into self.reports) of the reports that
        should be listed. Returns the response for further inspection.
        """
        if data is None:
            data = {}
        if report_pks is None:
            report_pks = []
        r = self.client.get(self.url, data)
        assert r.status_code == 200
        # Check that we got the correct reports.
        assert sorted(r.id for r in r.context['reports'].object_list) == (
            sorted(self.reports[pk] for pk in report_pks))
        doc = pq(r.content)
        assert doc('.compat-info tbody tr').length == good + bad
        reports = doc('#reports')
        if good == 0 and bad == 0:
            assert reports.find('.good, .bad').length == 0
            assert doc('.no-results').length == 1
        else:
            # Check "X success reports" and "X failure reports" buttons.
            assert reports.find('.good').text().split()[0] == str(good)
            assert reports.find('.bad').text().split()[0] == str(bad)
            # Check "Filter by Application" field.
            option = doc('#compat-form select[name="appver"] option[selected]')
            assert option.val() == appver
        return r
    def test_appver_all(self):
        self._generate()
        self.check_table(
            good=3, bad=2, appver='',
            report_pks=[idx for idx, val in enumerate(self.reports)])
    def test_single(self):
        self._generate()
        appver = FIREFOX_COMPAT[2]['main']
        self.check_table(data={'appver': appver}, good=0, bad=1, appver=appver,
                         report_pks=[3])
    def test_multiple(self):
        self._generate()
        appver = FIREFOX_COMPAT[0]['main']
        self.check_table(data={'appver': appver}, good=2, bad=0, appver=appver,
                         report_pks=[0, 1])
    def test_empty(self):
        self._generate()
        # Pick a version we haven't generated any reports for.
        appver = FIREFOX_COMPAT[4]['main']
        self.check_table(data={'appver': appver}, good=0, bad=0, appver=appver,
                         report_pks=[])
    def test_unknown(self):
        self._generate()
        # If we have a bad version, we don't apply any filters.
        appver = '0.9999'
        self.check_table(
            data={'appver': appver}, good=3, bad=2,
            report_pks=[idx for idx, val in enumerate(self.reports)])
    def test_app_unknown(self):
        # Testing for some unknown application such as 'Conkeror'.
        app_guid = '{a79fe89b-6662-4ff4-8e88-09950ad4dfde}'
        report = CompatReport.objects.create(
            guid=self.addon.guid, app_guid=app_guid, app_version='0.9.3',
            works_properly=True)
        self.reports.append(report.pk)
        self.check_table(good=1, bad=0, appver='', report_pks=[0])
    @mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
                lambda r, a: True)
    def test_unlisted_addon_details_for_authorized(self):
        """If the user is authorized, display the reports."""
        self.make_addon_unlisted(self.addon)
        self._generate()
        self.check_table(
            good=3, bad=2, appver='',
            report_pks=[idx for idx, val in enumerate(self.reports)])
    @mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
                lambda r, a: False)
    def test_unlisted_addon_no_details_for_unauthorized(self):
        """If the user isn't authorized, don't display the reports."""
        self.make_addon_unlisted(self.addon)
        self._generate()
        self.check_table(
            good=0, bad=0, appver=None,
            report_pks=[])
    @mock.patch('olympia.compat.views.owner_or_unlisted_reviewer',
                lambda r, a: False)
    def test_mixed_listed_unlisted_details_for_unauthorized(self):
        """If the user isn't authorized, and the add-on has both unlisted and
        listed versions, display the listed versions."""
        self.make_addon_unlisted(self.addon)
        version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED)
        # Generate compat reports for the listed version.
        self._generate(version=self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED))
        reports_listed_only = list(self.reports)
        # And generate some for the unlisted version we shouldn't see.
        self._generate(version=self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED))
        self.check_table(
            good=3, bad=2, appver='',
            report_pks=[idx for idx, val in enumerate(reports_listed_only)])
    def test_e10s_field_appears(self):
        # The e10s columns must be present in the rendered table.
        self._generate()
        appver = FIREFOX_COMPAT[0]['main']
        r = self.check_table(data={'appver': appver}, good=2, bad=0,
                             appver=appver, report_pks=[0, 1])
        doc = pq(r.content)
        assert doc('.app-multiprocess-enabled').length > 0
        assert doc('.multiprocess-compatible').length > 0
class TestAppCompatIndexer(TestCase):
    """Sanity checks for the AppCompat elasticsearch indexer."""
    def setUp(self):
        self.indexer = AppCompatIndexer()
    def test_mapping(self):
        doc_name = self.indexer.get_doctype_name()
        assert doc_name
        mapping_properties = self.indexer.get_mapping()[doc_name]['properties']
        # Spot check: make sure addon-specific 'summary' field is not present.
        assert 'summary' not in mapping_properties
    def test_no_extract(self):
        # Extraction is handled differently for this class because it's quite
        # specific, so it does not have an extract_document() method.
        assert not hasattr(self.indexer, 'extract_document')
class TestCompatibilityReportCronMixin(object):
    """Shared helpers for the compatibility_report cron tests."""

    def run_compatibility_report(self):
        """Runs the cron task, then refreshes the ES index."""
        compatibility_report()
        self.refresh()

    def populate(self):
        """Creates an add-on with an update count of 10 for today."""
        now = datetime.now()
        guid = generate_addon_guid()
        name = 'Addon %s' % guid
        addon = amo.tests.addon_factory(name=name, guid=guid)
        UpdateCount.objects.create(addon=addon, count=10, date=now)
        return addon

    def generate_reports(self, addon, good, bad, app, app_version):
        """Creates `good` working and `bad` failing reports for `addon`."""
        defaults = {
            'guid': addon.guid,
            'app_guid': app.guid,
            'app_version': app_version}
        # `range` instead of the Python-2-only `xrange`: behaviour in a
        # for-loop is identical and this keeps the helper Python 3 ready.
        for _ in range(good):
            CompatReport.objects.create(works_properly=True, **defaults)
        for _ in range(bad):
            CompatReport.objects.create(works_properly=False, **defaults)
class TestCompatibilityReportCron(
        TestCompatibilityReportCronMixin, ESTestCase):
    """Tests for the compatibility_report cron and CompatTotals totals."""
    def setUp(self):
        self.app_version = FIREFOX_COMPAT[0]['main']
        super(TestCompatibilityReportCron, self).setUp()
    def test_with_bad_support_data(self):
        # Test containing an addon which has an AppSupport data indicating it
        # supports Firefox but does not have Firefox in its compatible apps for
        # some reason (https://github.com/mozilla/addons-server/issues/3353).
        addon = self.populate()
        self.generate_reports(addon=addon, good=1, bad=1, app=amo.FIREFOX,
                              app_version=self.app_version)
        # Now change compatibility to support Thunderbird instead of Firefox,
        # but make sure AppSupport stays in the previous state.
        ApplicationsVersions.objects.filter(
            application=amo.FIREFOX.id).update(application=amo.THUNDERBIRD.id)
        assert AppSupport.objects.filter(
            addon=addon, app=amo.FIREFOX.id).exists()
        self.run_compatibility_report()
        assert CompatTotals.objects.count() == 1
        assert CompatTotals.objects.get().total == 10
    def test_with_no_compat_at_all(self):
        # Test containing an add-on which has `None` as its compat info for
        # Firefox (https://github.com/mozilla/addons-server/issues/6161).
        addon = self.populate()
        self.generate_reports(addon=addon, good=1, bad=1, app=amo.FIREFOX,
                              app_version=self.app_version)
        addon.update(type=amo.ADDON_DICT)
        assert AppSupport.objects.filter(
            addon=addon, app=amo.FIREFOX.id).exists()
        self.run_compatibility_report()
        assert CompatTotals.objects.count() == 1
        assert CompatTotals.objects.get().total == 10
    def test_compat_totals(self):
        assert not CompatTotals.objects.exists()
        # Add second add-on, generate reports for both.
        addon1 = self.populate()
        addon2 = self.populate()
        # count needs to be higher than 50 to test totals properly.
        UpdateCount.objects.filter(addon=addon1).update(count=60)
        self.generate_reports(addon1, good=1, bad=2, app=amo.FIREFOX,
                              app_version=self.app_version)
        self.generate_reports(addon2, good=3, bad=4, app=amo.FIREFOX,
                              app_version=self.app_version)
        self.run_compatibility_report()
        # Total is the sum of both add-ons' update counts (60 + 10).
        assert CompatTotals.objects.count() == 1
        assert CompatTotals.objects.get().total == 70
    def test_compat_totals_already_exists(self):
        # A pre-existing CompatTotals row is updated, not duplicated.
        CompatTotals.objects.create(total=42)
        # Add second add-on, generate reports for both.
        addon1 = self.populate()
        addon2 = self.populate()
        # count needs to be higher than 50 to test totals properly.
        UpdateCount.objects.filter(addon=addon1).update(count=60)
        self.generate_reports(addon1, good=1, bad=2, app=amo.FIREFOX,
                              app_version=self.app_version)
        self.generate_reports(addon2, good=3, bad=4, app=amo.FIREFOX,
                              app_version=self.app_version)
        self.run_compatibility_report()
        assert CompatTotals.objects.count() == 1
        assert CompatTotals.objects.get().total == 70
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
  """Returns the raw 16-bit pattern of `x` as a float16, as a python int.

  The value is cast to float16 and its underlying bytes are reinterpreted
  as uint16, suitable for storage in the proto's integer `half_val` field.
  """
  as_half = np.asarray(x, dtype=np.float16)
  return as_half.view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Pure-python fallback: appends each value's float16 bit pattern to the
  proto's `half_val` field."""
  tensor_proto.half_val.extend(
      ExtractBitsFromFloat16(value) for value in proto_values)
def _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  # Bridges to the cython fast path: reinterpret the float16 values as
  # uint16 bit patterns, which is what the compiled appender expects.
  # TODO: Remove the conversion if cython supports np.float16_t
  fast_tensor_util.AppendFloat16ArrayToTensorProto(
      tensor_proto,
      np.asarray(proto_values, dtype=np.float16).view(np.uint16))
def ExtractBitsFromBFloat16(x):
  # Returns the raw 16-bit pattern of `x` as a bfloat16 (viewed as uint16),
  # for storage in the proto's integer `half_val` field.
  return np.asarray(
      x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
  # Pure-python fallback: bfloat16 values are stored as uint16 bit
  # patterns in `half_val`.
  tensor_proto.half_val.extend(
      [ExtractBitsFromBFloat16(x) for x in proto_values])
def FastAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
  # Cython fast path: hand the appender the bfloat16 data viewed as uint16.
  fast_tensor_util.AppendBFloat16ArrayToTensorProto(
      tensor_proto, np.asarray(
          proto_values, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))
# Select the numpy-dtype -> proto appender table: compiled (cython) fast
# appenders when available, pure-python fallbacks otherwise.
if _FAST_TENSOR_UTIL_AVAILABLE:
  _NP_TO_APPEND_FN = {
      dtypes.bfloat16.as_numpy_dtype:
          FastAppendBFloat16ArrayToTensorProto,
      np.float16:
          _MediumAppendFloat16ArrayToTensorProto,
      np.float32:
          fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64:
          fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64:
          fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.uint16:
          fast_tensor_util.AppendUInt16ArrayToTensorProto,
      np.uint32:
          fast_tensor_util.AppendUInt32ArrayToTensorProto,
      np.uint64:
          fast_tensor_util.AppendUInt64ArrayToTensorProto,
      np.int8:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.int16:
          fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.complex64:
          fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128:
          fast_tensor_util.AppendComplex128ArrayToTensorProto,
      np.object:
          fast_tensor_util.AppendObjectArrayToTensorProto,
      np.bool:
          fast_tensor_util.AppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      # Fix: qint16/quint16 previously pointed at the 8-bit appenders,
      # apparently a copy/paste error; upstream TensorFlow maps them to the
      # 16-bit appenders, matching their 16-bit numpy backing types.
      dtypes.qint16.as_numpy_dtype:
          fast_tensor_util.AppendInt16ArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype:
          fast_tensor_util.AppendUInt16ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:
  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([x.item() for x in proto_values])
  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([x.item() for x in proto_values])
  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([x.item() for x in proto_values])
  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([x.item() for x in proto_values])
  def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    # Quantized values arrive as 1-element tuples; unwrap before storing.
    tensor_proto.int_val.extend([x.item()[0] for x in proto_values])
  def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.uint32_val.extend([x.item() for x in proto_values])
  def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.uint64_val.extend([x.item() for x in proto_values])
  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    # Complex values are stored as interleaved (real, imag) pairs.
    tensor_proto.scomplex_val.extend(
        [v.item() for x in proto_values for v in [x.real, x.imag]])
  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.dcomplex_val.extend(
        [v.item() for x in proto_values for v in [x.real, x.imag]])
  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([x.item() for x in proto_values])
  _NP_TO_APPEND_FN = {
      dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.uint16: SlowAppendIntArrayToTensorProto,
      np.uint32: SlowAppendUInt32ArrayToTensorProto,
      np.uint64: SlowAppendUInt64ArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplex64ArrayToTensorProto,
      np.complex128: SlowAppendComplex128ArrayToTensorProto,
      np.object: SlowAppendObjectArrayToTensorProto,
      np.bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Returns the value stored under `dtype`, or None if there is none.

  NOTE: dtype_dict.get(dtype) always returns None, so the lookup is a
  linear scan comparing each key with `==` instead of relying on hashing.
  """
  return next(
      (value for key, value in six.iteritems(dtype_dict) if key == dtype),
      None)
def GetNumpyAppendFn(dtype):
  """Returns the proto-append function for numpy `dtype` (None if absent)."""
  # numpy dtype for strings are variable length. We can not compare
  # dtype with a single constant (np.string does not exist) to decide
  # dtype is a "string" type. We need to compare the dtype.type to be
  # sure it's a string type.
  if dtype.type == np.string_ or dtype.type == np.unicode_:
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    else:
      return SlowAppendObjectArrayToTensorProto
  # Non-string dtypes go through the equality-based lookup table.
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
  """Convert a TensorShape to a list.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers representing the dimensions of the tensor.
  """
  sizes = []
  for dim in shape.dim:
    sizes.append(dim.size)
  return sizes
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
# Dtypes whose values may be serialized into the raw `tensor_content`
# bytes field instead of a typed repeated field.
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
    dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
  """Returns the first non-None entry of `l`, or None if all are None.

  Tensors are not returned directly; they are replaced by a `_Message`
  placeholder so the caller's error message stays readable.
  """
  for candidate in l:
    if candidate is None:
      continue
    if isinstance(candidate, ops.Tensor):
      return _Message("list containing Tensors")
    return candidate
  return None
def _NotNone(v):
  """Maps None to a printable `_Message`; passes all other values through."""
  return _Message("None") if v is None else v
def _FilterTuple(v):
  # Filter used for quantized dtypes: values must be (nested) sequences
  # whose innermost level is a flat tuple of components. Returns an
  # offending value (for the error message) or None if `v` passes.
  if not isinstance(v, (list, tuple)):
    return v  # A bare scalar is itself the offending value.
  if isinstance(v, tuple):
    if not any(isinstance(x, (list, tuple)) for x in v):
      return None  # A flat tuple of scalars is acceptable.
  if isinstance(v, list):
    if not any(isinstance(x, (list, tuple)) for x in v):
      # A flat *list* of scalars is rejected: report its first element.
      return _FirstNotNone(
          [None if isinstance(x, (list, tuple)) else x for x in v])
  # Mixed/nested sequence: recurse into each element.
  return _FirstNotNone([_FilterTuple(x) for x in v])
def _FilterInt(v):
  """Returns None if `v` is (recursively) all integers, else an offender."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterInt(x) for x in v])
  if isinstance(v, (compat.integral_types, tensor_shape.Dimension)):
    return None
  return _NotNone(v)
def _FilterFloat(v):
  """Returns None if `v` is (recursively) all reals, else an offender."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(x) for x in v])
  if isinstance(v, compat.real_types):
    return None
  return _NotNone(v)
def _FilterComplex(v):
  """Returns None if `v` is (recursively) all complex, else an offender."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(x) for x in v])
  if isinstance(v, compat.complex_types):
    return None
  return _NotNone(v)
def _FilterStr(v):
  """Returns None if `v` is (recursively) all str/bytes, else an offender."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(x) for x in v])
  return None if isinstance(v, compat.bytes_or_text_types) else _NotNone(v)
def _FilterBool(v):
  """Returns None if `v` is (recursively) all bools, else an offender."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterBool(x) for x in v])
  if isinstance(v, bool):
    return None
  return _NotNone(v)
def _FilterNotTensor(v):
  """Returns a string describing `v` if it is a Tensor, else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(x) for x in v])
  if isinstance(v, ops.Tensor):
    return str(v)
  return None
# Maps a tf dtype to the list of filter functions that must all accept a
# python value for it to be considered compatible with that dtype.
_TF_TO_IS_OK = {
    dtypes.bool: [_FilterBool],
    dtypes.complex128: [_FilterComplex],
    dtypes.complex64: [_FilterComplex],
    dtypes.float16: [_FilterFloat],
    dtypes.float32: [_FilterFloat],
    dtypes.float64: [_FilterFloat],
    dtypes.int16: [_FilterInt],
    dtypes.int32: [_FilterInt],
    dtypes.int64: [_FilterInt],
    dtypes.int8: [_FilterInt],
    dtypes.qint16: [_FilterInt, _FilterTuple],
    dtypes.qint32: [_FilterInt, _FilterTuple],
    dtypes.qint8: [_FilterInt, _FilterTuple],
    dtypes.quint16: [_FilterInt, _FilterTuple],
    dtypes.quint8: [_FilterInt, _FilterTuple],
    dtypes.string: [_FilterStr],
    dtypes.uint16: [_FilterInt],
    dtypes.uint8: [_FilterInt],
    dtypes.uint32: [_FilterInt],
    dtypes.uint64: [_FilterInt],
}
def _AssertCompatible(values, dtype):
  """Raises TypeError if `values` cannot be represented as `dtype`.

  With `dtype` None, only rejects Tensors embedded in plain python lists;
  otherwise applies the dtype's filter functions from _TF_TO_IS_OK (or
  filters inferred from the dtype's kind for unlisted dtypes).
  """
  if dtype is None:
    fn_list = [_FilterNotTensor]
  else:
    try:
      fn_list = _TF_TO_IS_OK[dtype]
    except KeyError:
      # There isn't a specific fn_list, so we try to do the best possible.
      if dtype.is_integer:
        fn_list = [_FilterInt]
      elif dtype.is_floating:
        fn_list = [_FilterFloat]
      elif dtype.is_complex:
        fn_list = [_FilterComplex]
      elif dtype.is_quantized:
        fn_list = [_FilterInt, _FilterTuple]
      else:
        fn_list = [_FilterNotTensor]
  # The first non-None filter result is the value to blame in the error.
  mismatch = _FirstNotNone([fn(values) for fn in fn_list])
  if mismatch is not None:
    if dtype is None:
      raise TypeError("List of Tensors when single Tensor expected")
    else:
      raise TypeError("Expected %s, got %s of type '%s' instead." %
                      (dtype.name, repr(mismatch), type(mismatch).__name__))
# pylint: disable=invalid-name
@tf_export("make_tensor_proto")
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,
                      allow_broadcast=False):
  """Create a TensorProto.

  In TensorFlow 2.0, representing tensors as protos should no longer be a
  common workflow. That said, this utility function is still useful for
  generating TF Serving request protos:

  request = tensorflow_serving.apis.predict_pb2.PredictRequest()
  request.model_spec.name = "my_model"
  request.model_spec.signature_name = "serving_default"
  request.inputs["images"].CopyFrom(tf.make_tensor_proto(X_new))

  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.

  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.

  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.

  make_tensor_proto then converts the numpy array to a tensor proto.

  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.

  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.

  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.
    verify_shape: Boolean that enables verification of a shape of values.
    allow_broadcast: Boolean that enables allowing scalars and 1 length vector
      broadcasting. Cannot be true when verify_shape is true.

  Returns:
    A `TensorProto`. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with `tf.make_ndarray(proto)`.
    If `values` is a `TensorProto`, it is immediately returned; `dtype` and
    `shape` are ignored.

  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values or if verify_shape is
      True and shape of values is not equals to a shape from the argument.
  """
  if allow_broadcast and verify_shape:
    raise ValueError("allow_broadcast and verify_shape are not both allowed.")
  if isinstance(values, tensor_pb2.TensorProto):
    return values
  if dtype:
    dtype = dtypes.as_dtype(dtype)
  is_quantized = (
      dtype in [
          dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
          dtypes.qint32
      ])
  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  elif callable(getattr(values, "__array__", None)) or isinstance(
      getattr(values, "__array_interface__", None), dict):
    # If a class has the __array__ method, or __array_interface__ dict, then it
    # is possible to convert to numpy array.
    nparray = np.asarray(values, dtype=dtype)
    # This is the preferred way to create an array from the object, so replace
    # the `values` with the array so that _FlattenToStrings is not run.
    values = nparray
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    if dtype and dtype.is_numpy_compatible:
      np_dt = dtype.as_numpy_dtype
    else:
      np_dt = None
    # If shape is None, numpy.prod returns None when dtype is not set, but
    # raises exception when dtype is set to np.int64
    if shape is not None and np.prod(shape, dtype=np.int64) == 0:
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" %
                         (values, list(nparray.shape),
                          _GetDenseDimensions(values)))
    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array
  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)
  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype
  if dtype is not None and (not hasattr(dtype, "base_dtype") or
                            dtype.base_dtype != numpy_dtype.base_dtype):
    raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
                    (dtype, nparray.dtype, values))
  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape, dtype=np.int64)
    is_same_size = shape_size == nparray.size
    if allow_broadcast:
      # Scalars and length-1 vectors may broadcast to any requested shape.
      if nparray.shape == (1,) or nparray.shape == tuple():
        pass
      elif nparray.size != shape_size:
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))
    else:
      if verify_shape and nparray.shape != tuple(shape):
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))
      if nparray.size > shape_size:
        raise ValueError(
            "Too many elements provided. Needed at most %d, but received %d" %
            (shape_size, nparray.size))
  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())
  # Compact path: serialize the raw bytes into tensor_content when the
  # dtype supports it and the data exactly fills the requested shape.
  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    tensor_proto.tensor_content = nparray.tostring()
    return tensor_proto
  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)
    # At this point, values may be a list of objects that we could not
    # identify a common type for (hence it was inferred as
    # np.object/dtypes.string). If we are unable to convert it to a
    # string, we raise a more helpful error message.
    #
    # Ideally, we'd be able to convert the elements of the list to a
    # common type, but this type inference requires some thinking and
    # so we defer it for now.
    try:
      str_values = [compat.as_bytes(x) for x in proto_values]
    except TypeError:
      raise TypeError("Failed to convert object of type %s to Tensor. "
                      "Contents: %s. Consider casting elements to a "
                      "supported type." % (type(values), values))
    tensor_proto.string_val.extend(str_values)
    return tensor_proto
  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()
  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError(
        "Element type not supported in TensorProto: %s" % numpy_dtype.name)
  append_fn(tensor_proto, proto_values)
  return tensor_proto
# pylint: enable=invalid-name
@tf_export("make_ndarray")
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape, dtype=np.int64)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype

  # Fast path: the dense content was serialized as one raw byte string.
  if tensor.tensor_content:
    return (
        np.frombuffer(tensor.tensor_content, dtype=dtype).copy().reshape(shape))

  if tensor_dtype == dtypes.string:
    # np.pad throws on these arrays of type np.object.
    values = list(tensor.string_val)
    padding = num_elements - len(values)
    if padding > 0:
      last = values[-1] if values else ""
      values.extend([last] * padding)
    return np.array(values, dtype=dtype).reshape(shape)

  if tensor_dtype in (dtypes.float16, dtypes.bfloat16):
    # the half_val field of the TensorProto stores the binary representation
    # of the fp16: we need to reinterpret this as a proper float16
    values = np.fromiter(tensor.half_val, dtype=np.uint16)
    values.dtype = tensor_dtype.as_numpy_dtype
  elif tensor_dtype in (dtypes.complex64, dtypes.complex128):
    # Complex values are stored as a flat sequence of interleaved
    # (real, imag) components; pair them back up.
    field = (tensor.scomplex_val if tensor_dtype == dtypes.complex64
             else tensor.dcomplex_val)
    it = iter(field)
    values = np.array([complex(r, i) for r, i in zip(it, it)], dtype=dtype)
  elif tensor_dtype == dtypes.float32:
    values = np.fromiter(tensor.float_val, dtype=dtype)
  elif tensor_dtype == dtypes.float64:
    values = np.fromiter(tensor.double_val, dtype=dtype)
  elif tensor_dtype == dtypes.int64:
    values = np.fromiter(tensor.int64_val, dtype=dtype)
  elif tensor_dtype == dtypes.bool:
    values = np.fromiter(tensor.bool_val, dtype=dtype)
  elif tensor_dtype in [
      dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,
      dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16
  ]:
    # All small integer and quantized types share the int_val field.
    values = np.fromiter(tensor.int_val, dtype=dtype)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)

  if values.size == 0:
    return np.zeros(shape, dtype)
  if values.size != num_elements:
    # The proto may carry fewer elements than the shape implies; repeat the
    # trailing value to fill the remainder ("edge" padding).
    values = np.pad(values, (0, num_elements - values.size), "edge")
  return values.reshape(shape)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  # Compare ranks explicitly: zip() alone silently truncates to the shorter
  # sequence, so shapes of different rank (e.g. [2, 3] vs [2, 3, 4]) would
  # incorrectly compare equal on their common prefix.
  return (len(tensor_shape_list) == len(shape) and
          all(x == y for x, y in zip(tensor_shape_list, shape)))
def _ConstantValue(tensor, partial):
  """Statically evaluates `tensor` by dispatching on its producing op type.

  Helper for `constant_value`. Walks one node of the graph and, where the op's
  output can be computed from attrs and statically-known inputs alone, returns
  it as a numpy ndarray; otherwise returns None.

  Args:
    tensor: An `ops.Tensor` to evaluate.
    partial: If True, a "Pack" result may contain None entries for elements
      that could not be evaluated.

  Returns:
    A numpy ndarray, or None if the value cannot be determined statically.

  Raises:
    TypeError: If `tensor` is not an `ops.Tensor`.
  """
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("%r is not a Tensor, has type %s" % (tensor, type(tensor)))
  if tensor.op.type == "Const":
    # The constant's value is embedded directly in the node's "value" attr.
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      # All dimensions are static, so the Shape op's output is known exactly.
      return np.array(
          [dim.value for dim in input_shape.dims],
          dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      # Total element count is the product of the static dimensions.
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      # Only the number of dimensions is needed, not their sizes.
      return np.ndarray(
          shape=(),
          buffer=np.array([input_shape.ndims], dtype=np.int32),
          dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Range":
    # All three of start/limit/delta must be statically known.
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # For "Concat", inputs[0] is the axis; the remaining inputs are values.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "ConcatV2":
    # Unlike "Concat", the axis is the *last* input in V2.
    dim = constant_value(tensor.op.inputs[-1])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[:-1]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "Pack":
    values = []
    # Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
    # and shouldn't be produced, but to deal sensibly with them here we check
    # and return None.
    if not tensor.op.inputs:
      return None
    # We can't handle axis != 0 Packs at the moment.
    if tensor.op.get_attr("axis") != 0:
      return None
    for x in tensor.op.inputs:
      # With partial=True, unevaluable elements are kept as None placeholders.
      value = constant_value(x, partial)
      if value is None and not partial:
        return None
      values.append(value)
    return np.array(values)
  elif tensor.op.type == "Fill":
    fill_shape = tensor.shape
    fill_value = constant_value(tensor.op.inputs[1])
    if fill_shape.is_fully_defined() and fill_value is not None:
      return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
    else:
      return None
  elif tensor.op.type == "Equal":
    value1 = constant_value(tensor.op.inputs[0])
    if value1 is None:
      return None
    value2 = constant_value(tensor.op.inputs[1])
    if value2 is None:
      return None
    return np.equal(value1, value2)
  elif tensor.op.type == "NotEqual":
    value1 = constant_value(tensor.op.inputs[0])
    if value1 is None:
      return None
    value2 = constant_value(tensor.op.inputs[1])
    if value2 is None:
      return None
    return np.not_equal(value1, value2)
  else:
    # Unrecognized op type: not statically evaluable.
    return None
@tf_export("get_static_value")
def constant_value(tensor, partial=False):  # pylint: disable=invalid-name
  """Returns the constant value of the given tensor, if efficiently calculable.

  Attempts to partially evaluate `tensor`, returning its value as a numpy
  ndarray when that succeeds.

  Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result,
  it will no longer be possible to feed a different value for `tensor`. This
  allows the result of this function to influence the graph that is
  constructed, and permits static shape optimizations.

  Args:
    tensor: The Tensor to be evaluated.
    partial: If True, the returned numpy array is allowed to have partially
      evaluated values. Values that can't be evaluated will be None.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  # Eager tensors already carry their value; read it out directly.
  if isinstance(tensor, ops.EagerTensor):
    return tensor.numpy()
  # Non-tensor inputs are passed through as already-constant values.
  if not is_tensor(tensor):
    return tensor
  if not isinstance(tensor, ops.Tensor):
    return None
  result = _ConstantValue(tensor, partial)
  if result is None:
    return None
  # The caller may now depend on the constant value of `tensor`, so we
  # conservatively prevent it from being fed.
  tensor.graph.prevent_feeding(tensor)
  return result
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-0 or rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.

  Raises:
    ValueError: If the shape is rank-0 and is not statically known to be -1.
  """
  # Eager: read the values directly; -1 entries denote unknown dimensions.
  if isinstance(tensor, ops.EagerTensor):
    return tensor_shape.as_shape(
        [dim if dim != -1 else None for dim in tensor.numpy()])
  # A scalar shape tensor is only legal when it is statically -1, which
  # stands for a fully unknown shape.
  if tensor.get_shape().ndims == 0:
    value = constant_value(tensor)
    if value is None:
      raise ValueError(
          "Received a scalar with unknown value as shape; require a statically "
          "known scalar with value '-1' to describe an unknown shape.")
    if value != -1:
      raise ValueError(
          "Received a scalar value '%s' as shape; require a statically known "
          "scalar with value '-1' to describe an unknown shape." % value)
    return tensor_shape.unknown_shape()
  shape = tensor.get_shape().with_rank(1)
  # A zero-length shape vector describes a scalar.
  if shape == [0]:
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    # Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
    # would not be rank 1.
    assert tensor.op.get_attr("axis") == 0
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "StridedSlice":
    # Only plain [begin:end:strides] slices of a shape are supported: any
    # ellipsis/new-axis/shrink-axis behavior falls through to the generic
    # handling below.
    try:
      begin = constant_value(tensor.op.inputs[1])
      end = constant_value(tensor.op.inputs[2])
      strides = constant_value(tensor.op.inputs[3])
      if begin is not None and end is not None and strides is not None:
        begin = begin[0]
        end = end[0]
        strides = strides[0]
        begin_mask = tensor.op.get_attr("begin_mask")
        if begin_mask == 1:
          begin = None
        end_mask = tensor.op.get_attr("end_mask")
        if end_mask == 1:
          end = None
        ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
        new_axis_mask = tensor.op.get_attr("new_axis_mask")
        shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
        valid_attributes = (not ellipsis_mask and not new_axis_mask and
                            not shrink_axis_mask and (not begin_mask or
                                                      (begin_mask == 1)) and
                            (not end_mask or (end_mask == 1)))
        if valid_attributes:  # additional inputs not supported
          prev = constant_value_as_shape(tensor.op.inputs[0])
          prev = prev[begin:end:strides]
          ret = tensor_shape.TensorShape(prev)
          return ret
    except ValueError:  # Could come from get_attr or slicing prev.
      pass
    except TypeError:  # Could come from slicing prev.
      pass
  # Fallback: evaluate the tensor element-wise where possible; negative
  # entries represent unknown dimensions.
  ret = tensor_shape.unknown_shape(shape.dims[0].value)
  value = constant_value(tensor)
  if value is not None:
    ret = ret.merge_with(
        tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
  return ret
@tf_export("is_tensor")
def is_tensor(x):  # pylint: disable=invalid-name
  """Checks whether `x` is a tensor or "tensor-like".

  If `is_tensor(x)` returns `True`, it is safe to assume that `x` is a tensor
  or can be converted to a tensor using `ops.convert_to_tensor(x)`.

  Args:
    x: A python object to check.

  Returns:
    `True` if `x` is a tensor or "tensor-like", `False` if not.
  """
  if isinstance(x, tensor_like._TensorLike):  # pylint: disable=protected-access
    return True
  if ops.is_dense_tensor_like(x):
    return True
  # Duck-typed tensors advertise themselves via an `is_tensor_like` attribute.
  return getattr(x, "is_tensor_like", False)
| |
import unittest
from unittest import mock
from betfairlightweight import APIClient
from betfairlightweight import resources
from betfairlightweight.endpoints.betting import Betting
from betfairlightweight.exceptions import APIError
from tests.tools import create_mock_json
class BettingInit(unittest.TestCase):
    """Checks the default configuration of a freshly constructed Betting endpoint."""

    def test_base_endpoint_init(self):
        """A new endpoint stores its client and the SportsAPING defaults."""
        api_client = APIClient("username", "password", "app_key")
        endpoint = Betting(api_client)
        assert endpoint.client == api_client
        assert endpoint.URI == "SportsAPING/v1.0/"
        assert endpoint.connect_timeout == 3.05
        assert endpoint.read_timeout == 16
        assert endpoint._error == APIError
class BettingTest(unittest.TestCase):
    """Exercises each Betting endpoint method against a mocked transport.

    Every test patches ``Betting.request`` so no network traffic occurs,
    feeds it a canned JSON fixture, and asserts both the request payload
    and the parsed resource type.

    Note: the fixture local is named ``mock_json`` (not ``mock``) so it no
    longer shadows the imported ``unittest.mock`` module; ``mock.Mock()``
    below therefore refers to the real mock library again.
    """

    def setUp(self):
        # "UK" selects the exchange locale; the credentials are dummies.
        client = APIClient("username", "password", "app_key", "UK")
        self.betting = Betting(client)

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_event_types(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_event_types.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_event_types()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listEventTypes", {"filter": {}}, None
        )
        assert isinstance(response[0], resources.EventTypeResult)
        assert len(response) == 2

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_competitions(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_competitions.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_competitions()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listCompetitions", {"filter": {}}, None
        )
        assert isinstance(response[0], resources.CompetitionResult)
        assert len(response) == 22

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_time_ranges(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_time_ranges.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_time_ranges()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listTimeRanges",
            {"granularity": "DAYS", "filter": {}},
            None,
        )
        assert isinstance(response[0], resources.TimeRangeResult)
        assert len(response) == 30

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_events(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_events.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_events()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listEvents", {"filter": {}}, None
        )
        assert isinstance(response[0], resources.EventResult)
        assert len(response) == 7

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_market_types(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_market_types.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_market_types()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listMarketTypes", {"filter": {}}, None
        )
        assert isinstance(response[0], resources.MarketTypeResult)
        assert len(response) == 25

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_countries(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_countries.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_countries()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listCountries", {"filter": {}}, None
        )
        assert isinstance(response[0], resources.CountryResult)
        assert len(response) == 4

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_venues(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_venues.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_venues()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listVenues", {"filter": {}}, None
        )
        assert isinstance(response[0], resources.VenueResult)
        assert len(response) == 30

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_market_catalogue(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_market_catalogue.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_market_catalogue()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listMarketCatalogue",
            {"maxResults": 1, "filter": {}},
            None,
        )
        assert isinstance(response[0], resources.MarketCatalogue)
        assert len(response) == 1

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_market_book(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_market_book.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        marketIds = mock.Mock()
        response = self.betting.list_market_book(marketIds)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listMarketBook", {"marketIds": marketIds}, None
        )
        assert isinstance(response[0], resources.MarketBook)
        assert len(response) == 1

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_runner_book(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_runner_book.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        marketId = mock.Mock()
        selectionId = mock.Mock()
        response = self.betting.list_runner_book(marketId, selectionId)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listRunnerBook",
            {"marketId": marketId, "selectionId": selectionId},
            None,
        )
        assert isinstance(response[0], resources.MarketBook)
        assert len(response) == 1

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_current_orders(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_current_orders.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_current_orders()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listCurrentOrders",
            {"dateRange": {"from": None, "to": None}},
            None,
        )
        assert isinstance(response, resources.CurrentOrders)

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_list_cleared_orders(self, mock_response):
        mock_json = create_mock_json("tests/resources/list_cleared_orders.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        response = self.betting.list_cleared_orders()
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/listClearedOrders",
            {"settledDateRange": {"to": None, "from": None}, "betStatus": "SETTLED"},
            None,
        )
        assert isinstance(response, resources.ClearedOrders)

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_place_orders(self, mock_response):
        mock_json = create_mock_json("tests/resources/place_orders.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        marketId = mock.Mock()
        instructions = mock.Mock()
        response = self.betting.place_orders(marketId, instructions)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/placeOrders",
            {"marketId": marketId, "instructions": instructions},
            None,
        )
        assert isinstance(response, resources.PlaceOrders)

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_cancel_orders(self, mock_response):
        mock_json = create_mock_json("tests/resources/cancel_orders.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        marketId = mock.Mock()
        instructions = mock.Mock()
        response = self.betting.cancel_orders(marketId, instructions)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/cancelOrders",
            {"marketId": marketId, "instructions": instructions},
            None,
        )
        assert isinstance(response, resources.CancelOrders)

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_update_orders(self, mock_response):
        mock_json = create_mock_json("tests/resources/update_orders.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        marketId = mock.Mock()
        instructions = mock.Mock()
        response = self.betting.update_orders(marketId, instructions)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/updateOrders",
            {"marketId": marketId, "instructions": instructions},
            None,
        )
        assert isinstance(response, resources.UpdateOrders)

    @mock.patch("betfairlightweight.endpoints.betting.Betting.request")
    def test_replace_orders(self, mock_response):
        mock_json = create_mock_json("tests/resources/replace_orders.json")
        mock_response.return_value = (mock.Mock(), mock_json.json(), 1.3)
        marketId = mock.Mock()
        instructions = mock.Mock()
        response = self.betting.replace_orders(marketId, instructions)
        assert mock_json.json.call_count == 1
        mock_response.assert_called_with(
            "SportsAPING/v1.0/replaceOrders",
            {"marketId": marketId, "instructions": instructions},
            None,
        )
        assert isinstance(response, resources.ReplaceOrders)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
import modelcluster.tags
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0002_initial_data'),
('wagtaildocs', '0002_initial_data'),
('wagtailcore', '0002_initial_data'),
('taggit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Advert',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(null=True, blank=True)),
('text', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AdvertPlacement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('advert', models.ForeignKey(related_name=b'+', to='ethagaval.Advert')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.RichTextField()),
('date', models.DateField(verbose_name=b'Post date')),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BlogPageTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(related_name=b'tagged_items', to='ethagaval.BlogPage')),
('tag', models.ForeignKey(related_name='ethagaval_blogpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContactPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('telephone', models.CharField(max_length=20, blank=True)),
('email', models.EmailField(max_length=75, blank=True)),
('address_1', models.CharField(max_length=255, blank=True)),
('address_2', models.CharField(max_length=255, blank=True)),
('city', models.CharField(max_length=255, blank=True)),
('country', models.CharField(max_length=255, blank=True)),
('post_code', models.CharField(max_length=10, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', models.Model),
),
migrations.CreateModel(
name='EventIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date_from', models.DateField(verbose_name=b'Start date')),
('date_to', models.DateField(help_text=b'Not required if event is on a single day', null=True, verbose_name=b'End date', blank=True)),
('time_from', models.TimeField(null=True, verbose_name=b'Start time', blank=True)),
('time_to', models.TimeField(null=True, verbose_name=b'End time', blank=True)),
('audience', models.CharField(max_length=255, choices=[(b'public', b'Public'), (b'private', b'Private')])),
('location', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('cost', models.CharField(max_length=255)),
('signup_link', models.URLField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageSpeaker',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('first_name', models.CharField(max_length=255, verbose_name=b'Name', blank=True)),
('last_name', models.CharField(max_length=255, verbose_name=b'Surname', blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255)),
('field_type', models.CharField(max_length=16, choices=[(b'singleline', 'Single line text'), (b'multiline', 'Multi-line text'), (b'email', 'Email'), (b'number', 'Number'), (b'url', 'URL'), (b'checkbox', 'Checkbox'), (b'checkboxes', 'Checkboxes'), (b'dropdown', 'Drop down'), (b'radio', 'Radio buttons'), (b'date', 'Date'), (b'datetime', 'Date/time')])),
('required', models.BooleanField(default=True)),
('choices', models.CharField(help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', max_length=512, blank=True)),
('default_value', models.CharField(help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, blank=True)),
('help_text', models.CharField(max_length=255, blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('to_address', models.CharField(help_text='Optional - form submissions will be emailed to this address', max_length=255, blank=True)),
('from_address', models.CharField(max_length=255, blank=True)),
('subject', models.CharField(max_length=255, blank=True)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('thank_you_text', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'verbose_name': 'Homepage',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='HomePageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HomePageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PersonPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('telephone', models.CharField(max_length=20, blank=True)),
('email', models.EmailField(max_length=75, blank=True)),
('address_1', models.CharField(max_length=255, blank=True)),
('address_2', models.CharField(max_length=255, blank=True)),
('city', models.CharField(max_length=255, blank=True)),
('country', models.CharField(max_length=255, blank=True)),
('post_code', models.CharField(max_length=10, blank=True)),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('biography', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', models.Model),
),
migrations.CreateModel(
name='PersonPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', models.CharField(max_length=255, blank=True)),
('image', models.ForeignKey(related_name=b'+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True)),
('page', modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.StandardPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name=b'+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True)),
('page', modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.StandardPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='standardindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='standardindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.StandardIndexPage'),
preserve_default=True,
),
migrations.AddField(
model_name='personpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='personpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.PersonPage'),
preserve_default=True,
),
migrations.AddField(
model_name='homepagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='homepagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.HomePage'),
preserve_default=True,
),
migrations.AddField(
model_name='homepagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='homepagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.HomePage'),
preserve_default=True,
),
migrations.AddField(
model_name='formfield',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'form_fields', to='ethagaval.FormPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'speakers', to='ethagaval.EventPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.EventPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.EventPage'),
preserve_default=True,
),
migrations.AddField(
model_name='eventindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.EventIndexPage'),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.BlogPage'),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'carousel_items', to='ethagaval.BlogPage'),
preserve_default=True,
),
migrations.AddField(
model_name='blogpage',
name='tags',
field=modelcluster.tags.ClusterTaggableManager(to='taggit.Tag', through='ethagaval.BlogPageTag', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
preserve_default=True,
),
migrations.AddField(
model_name='blogindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name=b'+', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='blogindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'related_links', to='ethagaval.BlogIndexPage'),
preserve_default=True,
),
migrations.AddField(
model_name='advertplacement',
name='page',
field=modelcluster.fields.ParentalKey(related_name=b'advert_placements', to='wagtailcore.Page'),
preserve_default=True,
),
migrations.AddField(
model_name='advert',
name='page',
field=models.ForeignKey(related_name=b'adverts', blank=True, to='wagtailcore.Page', null=True),
preserve_default=True,
),
]
| |
"""
pakbase module
This module contains the base package class from which
all of the other packages inherit from.
"""
from __future__ import print_function
import os
import webbrowser as wb
import numpy as np
from numpy.lib.recfunctions import stack_arrays
from .modflow.mfparbc import ModflowParBc as mfparbc
from .utils import Util2d, Util3d, Transient2d, MfList, check
class Package(object):
"""
Base package class from which most other packages are derived.
"""
def __init__(self, parent, extension='glo', name='GLOBAL', unit_number=1,
extra='', filenames=None, allowDuplicates=False):
"""
Package init
"""
self.parent = parent # To be able to access the parent modflow object's attributes
if (not isinstance(extension, list)):
extension = [extension]
self.extension = []
self.file_name = []
for idx, e in enumerate(extension):
self.extension.append(e)
file_name = self.parent.name + '.' + e
if filenames is not None:
try:
if filenames[idx] is not None:
file_name = filenames[idx]
except:
pass
self.file_name.append(file_name)
self.fn_path = os.path.join(self.parent.model_ws, self.file_name[0])
if (not isinstance(name, list)):
name = [name]
self.name = name
if (not isinstance(unit_number, list)):
unit_number = [unit_number]
self.unit_number = unit_number
if (not isinstance(extra, list)):
self.extra = len(self.unit_number) * [extra]
else:
self.extra = extra
self.url = 'index.html'
self.allowDuplicates = allowDuplicates
self.acceptable_dtypes = [int, np.float32, str]
return
def __repr__(self):
s = self.__doc__
exclude_attributes = ['extension', 'heading', 'name', 'parent', 'url']
for attr, value in sorted(self.__dict__.items()):
if not (attr in exclude_attributes):
if (isinstance(value, list)):
if (len(value) == 1):
s = s + ' {0:s} = {1:s}\n'.format(attr, str(value[0]))
else:
s = s + ' {0:s} (list, items = {1:d}\n'.format(attr,
len(
value))
elif (isinstance(value, np.ndarray)):
s = s + ' {0:s} (array, shape = {1:s})\n'.format(attr,
value.shape.__str__()[
1:-1])
else:
s = s + ' {0:s} = {1:s} ({2:s})\n'.format(attr, str(value),
str(type(value))[
7:-2])
return s
def __getitem__(self, item):
    """
    Index into this package's stress period data.

    ``pak[kper]`` returns the recarray for stress period ``kper``;
    ``pak[kper, field]`` returns a single field from that recarray.
    Only meaningful for packages whose ``stress_period_data`` is a
    MfList; otherwise returns None.
    """
    if hasattr(self, 'stress_period_data'):
        # stress_period_data is also used by Oc and Oc88 where it is NOT
        # a MfList, so test the attribute's type.  (Bug fix: the original
        # tested ``isinstance(item, MfList)``, which made this guard
        # always false for ordinary integer/tuple keys.)
        if isinstance(self.stress_period_data, MfList):
            if not isinstance(item, list) and not isinstance(item, tuple):
                assert item in list(
                    self.stress_period_data.data.keys()), \
                    "package.__getitem__() kper " + str(item) + \
                    " not in data.keys()"
                return self.stress_period_data[item]
            else:
                if item[1] not in self.dtype.names:
                    # str(item) fixes a TypeError: the original
                    # concatenated the tuple itself into the message
                    raise Exception(
                        "package.__getitem(): item '" + str(item) +
                        "' not in dtype names " + str(self.dtype.names))
                assert item[0] in list(
                    self.stress_period_data.data.keys()), \
                    "package.__getitem__() kper " + str(item[0]) + \
                    " not in data.keys()"
                if self.stress_period_data.vtype[item[0]] == np.recarray:
                    return self.stress_period_data[item[0]][item[1]]
def __setitem__(self, key, value):
    # Item assignment is deliberately unsupported on packages; stress
    # period data must be modified through the MfList attribute instead.
    raise NotImplementedError("package.__setitem__() not implemented")
def __setattr__(self, key, value):
    """Re-wrap values assigned to existing container attributes.

    If ``key`` already holds a flopy container (Util2d, Util3d,
    Transient2d, MfList, or a list of Util2d/Util3d), the incoming raw
    value is wrapped back into the same container type, reusing the old
    container's shape, dtype, name, and format metadata.  This lets
    callers write ``pak.attr = raw_array`` without losing the wrapper.
    """
    var_dict = vars(self)
    if key in list(var_dict.keys()):
        # an attribute with this name already exists -- preserve its
        # container type by re-wrapping the incoming raw value
        old_value = var_dict[key]
        if isinstance(old_value, Util2d):
            value = Util2d(self.parent, old_value.shape,
                           old_value.dtype, value,
                           name=old_value.name,
                           fmtin=old_value.format.fortran,
                           locat=old_value.locat,
                           array_free_format=old_value.format.array_free_format)
        elif isinstance(old_value, Util3d):
            value = Util3d(self.parent, old_value.shape,
                           old_value.dtype, value,
                           name=old_value.name_base,
                           fmtin=old_value.fmtin,
                           locat=old_value.locat,
                           array_free_format=old_value.array_free_format)
        elif isinstance(old_value, Transient2d):
            value = Transient2d(self.parent, old_value.shape,
                                old_value.dtype, value,
                                name=old_value.name_base,
                                fmtin=old_value.fmtin,
                                locat=old_value.locat)
        elif isinstance(old_value, MfList):
            value = MfList(self, dtype=old_value.dtype,
                           data=value)
        elif isinstance(old_value, list):
            # lists of Util3d/Util2d (e.g. per-layer properties) are
            # rebuilt element-wise, pairing old containers with new values
            if len(old_value) > 0:
                if isinstance(old_value[0], Util3d):
                    new_list = []
                    for vo, v in zip(old_value, value):
                        new_list.append(Util3d(self.parent, vo.shape,
                                               vo.dtype, v,
                                               name=vo.name_base,
                                               fmtin=vo.fmtin,
                                               locat=vo.locat))
                    value = new_list
                elif isinstance(old_value[0], Util2d):
                    new_list = []
                    for vo, v in zip(old_value, value):
                        new_list.append(Util2d(self.parent, vo.shape,
                                               vo.dtype, v,
                                               name=vo.name,
                                               fmtin=vo.fmtin,
                                               locat=vo.locat))
                    value = new_list
    super(Package, self).__setattr__(key, value)
def export(self, f, **kwargs):
    """Export package data to ``f`` (e.g. shapefile or NetCDF) by
    delegating to flopy's export.utils.package_helper."""
    from flopy import export
    return export.utils.package_helper(f, self, **kwargs)
@staticmethod
def add_to_dtype(dtype, field_names, field_types):
if not isinstance(field_names, list):
field_names = [field_names]
if not isinstance(field_types, list):
field_types = [field_types] * len(field_names)
newdtypes = [dtype]
for field_name, field_type in zip(field_names, field_types):
tempdtype = np.dtype([(field_name, field_type)])
newdtypes.append(tempdtype)
newdtype = sum((dtype.descr for dtype in newdtypes), [])
newdtype = np.dtype(newdtype)
return newdtype
def check(self, f=None, verbose=True, level=1):
    """
    Check package data for common errors.

    Parameters
    ----------
    f : str or file handle
        String defining file name or file handle for summary file
        of check method output. If a string is passed a file handle
        is created. If f is None, check method does not write
        results to a summary file. (default is None)
    verbose : bool
        Boolean flag used to determine if check method results are
        written to the screen
    level : int
        Check method analysis level. If level=0, summary checks are
        performed. If level=1, full checks are performed.

    Returns
    -------
    chk : check object or None
        The checker that accumulated the results, or None when checking
        is not implemented for this package type.

    Examples
    --------
    >>> import flopy
    >>> m = flopy.modflow.Modflow.load('model.nam')
    >>> m.dis.check()
    """
    chk = None
    if self.__dict__.get('stress_period_data', None) is not None and \
            self.name[0] != 'OC':
        # list-based boundary packages (WEL, GHB, DRN, RIV, ...).
        # NOTE: ``check`` below is the checker class imported from
        # .utils, not this method.
        spd_inds_valid = True
        chk = check(self, f=f, verbose=verbose, level=level)
        for per in self.stress_period_data.data.keys():
            if isinstance(self.stress_period_data.data[per], np.recarray):
                spd = self.stress_period_data.data[per]
                inds = (spd.k, spd.i, spd.j) if self.parent.structured \
                    else (spd.node)
                # General BC checks
                # check for valid cell indices
                spd_inds_valid = chk._stress_period_data_valid_indices(spd)
                # first check for and list nan values
                chk._stress_period_data_nans(spd)
                if spd_inds_valid:
                    # next check for BCs in inactive cells
                    chk._stress_period_data_inactivecells(spd)
                    # More specific BC checks
                    # check elevations in the ghb, drain, and riv packages
                    if self.name[0] in check.bc_stage_names.keys():
                        # check that bc elevations are above model cell
                        # bottoms -- also checks for nan values
                        elev_name = chk.bc_stage_names[self.name[0]]
                        botms = self.parent.dis.botm.array[inds]
                        chk.stress_period_data_values(
                            spd, spd[elev_name] < botms,
                            col=elev_name,
                            error_name='BC elevation below cell bottom',
                            error_type='Error')
        # bug fix: summarize once after all stress periods are checked;
        # it was previously called inside the elevation-check branch, so
        # packages without a stage column never wrote a summary
        chk.summarize()
    # check property values in upw and lpf packages
    elif self.name[0] in ['UPW', 'LPF']:
        chk = check(self, f=f, verbose=verbose, level=level)
        active = chk.get_active()
        # check for confined layers above convertable layers
        confined = False
        thickstrt = False
        for option in self.options:
            if option.lower() == 'thickstrt':
                thickstrt = True
        for i, l in enumerate(self.laytyp.array.tolist()):
            if l == 0 or l < 0 and thickstrt:
                confined = True
                continue
            if confined and l > 0:
                chk._add_to_summary(type='Warning',
                                    desc='\r LAYTYP: unconfined (convertible) ' +
                                         'layer below confined layer')
        # check for zero or negative values of hydraulic conductivity,
        # anisotropy, and quasi-3D confining beds
        kparams = {'hk': 'horizontal hydraulic conductivity',
                   'vka': 'vertical hydraulic conductivity'}
        for kp, name in kparams.items():
            chk.values(self.__dict__[kp].array,
                       active & (self.__dict__[kp].array <= 0),
                       'zero or negative {} values'.format(name), 'Error')
        # check for negative hani
        chk.values(self.__dict__['hani'].array,
                   active & (self.__dict__['hani'].array < 0),
                   'negative horizontal anisotropy values', 'Error')

        def check_thresholds(array, active, thresholds, name):
            """Checks array against min and max threshold values."""
            mn, mx = thresholds
            chk.values(array, active & (array < mn),
                       '{} values below checker threshold of {}'
                       .format(name, mn), 'Warning')
            chk.values(array, active & (array > mx),
                       '{} values above checker threshold of {}'
                       .format(name, mx), 'Warning')

        # check for unusually high or low values of hydraulic conductivity
        if self.layvka.sum() > 0:
            # convert vertical anisotropy to Kv for checking
            vka = self.vka.array.copy()
            for l in range(vka.shape[0]):
                vka[l] *= self.hk.array[l] if self.layvka.array[
                    l] != 0 else 1
            check_thresholds(vka, active,
                             chk.property_threshold_values['vka'],
                             kparams.pop('vka'))
        for kp, name in kparams.items():
            check_thresholds(self.__dict__[kp].array, active,
                             chk.property_threshold_values[kp],
                             name)
        # check vkcb if there are any quasi-3D layers
        if self.parent.dis.laycbd.sum() > 0:
            # pad non-quasi-3D layers in vkcb array with ones so they
            # won't fail the checker
            vkcb = self.vkcb.array.copy()
            for l in range(self.vkcb.shape[0]):
                if self.parent.dis.laycbd[l] == 0:
                    # assign 1 instead of zero as a default value that
                    # won't violate the checker
                    vkcb[l, :, :] = 1
            chk.values(vkcb, active & (vkcb <= 0),
                       'zero or negative quasi-3D confining bed Kv values',
                       'Error')
            check_thresholds(vkcb, active,
                             chk.property_threshold_values['vkcb'],
                             'quasi-3D confining bed Kv')
        # only check storage if the model is transient
        if not np.all(self.parent.dis.steady):
            sarrays = {'ss': self.ss.array, 'sy': self.sy.array}
            if 'STORAGECOEFFICIENT' in self.options:
                # values are read as storage coefficients rather than
                # specific storage; the thresholds below assume the latter
                chk._add_to_summary(type='Warning',
                                    desc='\r STORAGECOEFFICIENT option is '
                                         'activated, storage values are '
                                         'read storage coefficients')
                # NOTE(review): the original computed a target shape and
                # evaluated "sarrays['ss'].shape != tshape" as bare,
                # effect-free expressions; presumably a broadcast or
                # assert was intended.  The dead statements were removed
                # -- TODO confirm intent against upstream.
            chk.values(sarrays['ss'], active & (sarrays['ss'] < 0),
                       'zero or negative specific storage values', 'Error')
            check_thresholds(sarrays['ss'], active,
                             chk.property_threshold_values['ss'],
                             'specific storage')
            # only check specific yield for convertible layers
            # (bug fix: the option keyword was misspelled 'THICKSRT',
            # so THICKSTRT layers with laytyp < 0 were never included)
            inds = np.array(
                [True if l > 0 or l < 0 and 'THICKSTRT' in self.options
                 else False for l in self.laytyp])
            sarrays['sy'] = sarrays['sy'][inds, :, :]
            active = active[inds, :, :]
            chk.values(sarrays['sy'], active & (sarrays['sy'] < 0),
                       'zero or negative specific yield values', 'Error')
            check_thresholds(sarrays['sy'], active,
                             chk.property_threshold_values['sy'],
                             'specific yield')
        chk.summarize()
    else:
        txt = 'check method not implemented for {} Package.'.format(
            self.name[0])
        if f is not None:
            if isinstance(f, str):
                pth = os.path.join(self.parent.model_ws, f)
                f = open(pth, 'w')
                f.write(txt)
                f.close()
        if verbose:
            print(txt)
    return chk
def level1_arraylist(self, idx, v, name, txt):
ndim = v.ndim
if ndim == 3:
kon = -1
for [k, i, j] in idx:
if k > kon:
kon = k
txt += ' {:>10s}{:>10s}{:>10s}{:>15s}\n'.format('layer',
'row',
'column',
name[
k].lower().replace(
' layer ',
''))
txt += ' {:10d}{:10d}{:10d}{:15.7g}\n'.format(k + 1, i + 1,
j + 1,
v[k, i, j])
elif ndim == 2:
txt += ' {:>10s}{:>10s}{:>15s}\n'.format('row', 'column',
name[
0].lower().replace(
' layer ', ''))
for [i, j] in idx:
txt += ' {:10d}{:10d}{:15.7g}\n'.format(i + 1, j + 1,
v[i, j])
elif ndim == 1:
txt += ' {:>10s}{:>15s}\n'.format('number', name[0])
for i in idx:
txt += ' {:10d}{:15.7g}\n'.format(i + 1, v[i])
return txt
def plot(self, **kwargs):
    """
    Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
    package input data

    Parameters
    ----------
    **kwargs : dict
        filename_base : str
            Base file name that will be used to automatically generate file
            names for output image files. Plots will be exported as image
            files if file_name_base is not None. (default is None)
        file_extension : str
            Valid matplotlib.pyplot file extension for savefig(). Only used
            if filename_base is not None. (default is 'png')
        mflay : int
            MODFLOW zero-based layer number to return. If None, then all
            all layers will be included. (default is None)
        kper : int
            MODFLOW zero-based stress period number to return. (default is
            zero)
        key : str
            MfList dictionary key. (default is None)

    Returns
    ----------
    axes : list
        Empty list is returned if filename_base is not None. Otherwise
        a list of matplotlib.pyplot.axis are returned.

    See Also
    --------

    Notes
    -----

    Examples
    --------
    >>> import flopy
    >>> ml = flopy.modflow.Modflow.load('test.nam')
    >>> ml.dis.plot()
    """
    # valid keyword arguments -- popped here so remaining kwargs can be
    # forwarded to the per-attribute plot() calls
    if 'kper' in kwargs:
        kper = kwargs.pop('kper')
    else:
        kper = 0
    if 'filename_base' in kwargs:
        fileb = kwargs.pop('filename_base')
    else:
        fileb = None
    if 'mflay' in kwargs:
        mflay = kwargs.pop('mflay')
    else:
        mflay = None
    if 'file_extension' in kwargs:
        fext = kwargs.pop('file_extension')
        fext = fext.replace('.', '')
    else:
        fext = 'png'
    if 'key' in kwargs:
        key = kwargs.pop('key')
    else:
        key = None
    if 'initial_fig' in kwargs:
        ifig = int(kwargs.pop('initial_fig'))
    else:
        ifig = 0
    # figures consumed per plotted attribute: one per layer, or a single
    # figure when a specific layer was requested
    inc = self.parent.nlay
    if mflay is not None:
        inc = 1
    axes = []
    # walk every attribute and plot each flopy container found
    for item, value in self.__dict__.items():
        caxs = []
        if isinstance(value, MfList):
            if self.parent.verbose:
                print('plotting {} package MfList instance: {}'.format(
                    self.name[0], item))
            if key is None:
                names = ['{} location stress period {} layer {}'.format(
                    self.name[0], kper + 1, k + 1)
                    for k in range(self.parent.nlay)]
                colorbar = False
            else:
                names = ['{} {} data stress period {} layer {}'.format(
                    self.name[0], key, kper + 1, k + 1)
                    for k in range(self.parent.nlay)]
                colorbar = True
            fignum = list(range(ifig, ifig + inc))
            ifig = fignum[-1] + 1
            caxs.append(value.plot(key, names, kper,
                                   filename_base=fileb,
                                   file_extension=fext, mflay=mflay,
                                   fignum=fignum, colorbar=colorbar,
                                   **kwargs))
        elif isinstance(value, Util3d):
            if self.parent.verbose:
                print('plotting {} package Util3d instance: {}'.format(
                    self.name[0], item))
            # fignum = list(range(ifig, ifig + inc))
            fignum = list(range(ifig, ifig + value.shape[0]))
            ifig = fignum[-1] + 1
            caxs.append(
                value.plot(filename_base=fileb, file_extension=fext,
                           mflay=mflay,
                           fignum=fignum, colorbar=True))
        elif isinstance(value, Util2d):
            # only plot 2-D Util2d arrays (skip 1-D ones)
            if len(value.shape) == 2:
                if self.parent.verbose:
                    print('plotting {} package Util2d instance: {}'.format(
                        self.name[0], item))
                fignum = list(range(ifig, ifig + 1))
                ifig = fignum[-1] + 1
                caxs.append(
                    value.plot(filename_base=fileb,
                               file_extension=fext,
                               fignum=fignum, colorbar=True))
        elif isinstance(value, Transient2d):
            if self.parent.verbose:
                print(
                    'plotting {} package Transient2d instance: {}'.format(
                        self.name[0], item))
            fignum = list(range(ifig, ifig + inc))
            ifig = fignum[-1] + 1
            caxs.append(
                value.plot(filename_base=fileb, file_extension=fext,
                           kper=kper,
                           fignum=fignum, colorbar=True))
        elif isinstance(value, list):
            # e.g. per-layer lists of Util3d arrays
            for v in value:
                if isinstance(v, Util3d):
                    if self.parent.verbose:
                        print(
                            'plotting {} package Util3d instance: {}'.format(
                                self.name[0], item))
                    fignum = list(range(ifig, ifig + inc))
                    ifig = fignum[-1] + 1
                    caxs.append(
                        v.plot(filename_base=fileb,
                               file_extension=fext,
                               mflay=mflay,
                               fignum=fignum, colorbar=True))
        else:
            pass
        # unroll nested lists of axes into a single list of axes
        if isinstance(caxs, list):
            for c in caxs:
                if isinstance(c, list):
                    for cc in c:
                        axes.append(cc)
                else:
                    axes.append(c)
        else:
            axes.append(caxs)
    return axes
def to_shapefile(self, filename, **kwargs):
    """
    Deprecated shapefile export; use :meth:`export` instead.

    Exports 2-D, 3-D, and transient 2-D model data to a polygon
    shapefile, adding an attribute for each layer in each data array.

    Parameters
    ----------
    filename : str
        Shapefile name to write

    Returns
    ----------
    None

    Examples
    --------
    >>> import flopy
    >>> ml = flopy.modflow.Modflow.load('test.nam')
    >>> ml.lpf.to_shapefile('test_hk.shp')
    """
    import warnings
    warnings.warn("to_shapefile() is deprecated. use .export()")
    self.export(filename)
def webdoc(self):
    """Open the USGS online guide page for this package in a browser."""
    # dispatch table keyed on the parent model's version string;
    # unknown versions are a silent no-op, matching the original chain
    guides = {
        'mf2k':
            'http://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/',
        'mf2005':
            'http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/',
        'ModflowNwt':
            'http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/',
    }
    base = guides.get(self.parent.version)
    if base is not None:
        wb.open(base + self.url)
def write_file(self, check=False):
"""
Every Package needs its own write_file function
"""
print('IMPLEMENTATION ERROR: write_file must be overloaded')
return
@staticmethod
def load(model, pack_type, f, nper=None, pop_key_list=None, check=True,
         unitnumber=None, ext_unit_dict=None):
    """
    Generic loader for list-based boundary condition packages
    (WEL, GHB, DRN, RIV, CHD, ...).

    Parameters
    ----------
    model : model object
        The model to which the loaded package is attached.
    pack_type : class
        Package subclass to instantiate (e.g. ModflowWel).
    f : str or file handle
        Package input file.
    nper : int, optional
        Number of stress periods to read; taken from the model
        discretization when None.
    pop_key_list : list, optional
        Unused; retained for interface compatibility.
    check : bool
        If True, run pak.check() on the loaded package (default True).
    unitnumber : int, optional
        Ignored; the unit number is resolved from ext_unit_dict below.
    ext_unit_dict : dict, optional
        Mapping of unit numbers to file metadata from the name file.

    Returns
    -------
    pak : pack_type instance
        The constructed package.
    """
    if not hasattr(f, 'read'):
        filename = f
        f = open(filename, 'r')
    # dataset 0 -- header: skip leading comment lines
    while True:
        line = f.readline()
        if line[0] != '#':
            break
    # check for parameters
    nppak = 0
    if "parameter" in line.lower():
        t = line.strip().split()
        # np.int/np.float below were replaced with the builtins; the
        # numpy aliases are deprecated and removed in numpy >= 1.24
        nppak = int(t[1])
        mxl = 0
        if nppak > 0:
            mxl = int(t[2])
            if model.verbose:
                print(' Parameters detected. Number of parameters = ',
                      nppak)
        line = f.readline()
    # dataset 2a
    t = line.strip().split()
    ipakcb = 0
    try:
        ipakcb = int(t[1])
    except (IndexError, ValueError):
        pass
    options = []
    aux_names = []
    if len(t) > 2:
        it = 2
        while it < len(t):
            toption = t[it]
            # bug fix: 'is' compared object identity, not equality, so
            # the NOPRINT option was silently dropped
            if toption.lower() == 'noprint':
                options.append(toption)
            elif 'aux' in toption.lower():
                # AUX keyword consumes the following token as the name
                options.append(' '.join(t[it:it + 2]))
                aux_names.append(t[it + 1].lower())
                it += 1
            it += 1
    # set partype
    # and read phiramp for modflow-nwt well package
    partype = ['cond']
    if "modflowwel" in str(pack_type).lower():
        partype = ['flux']
    if "nwt" in model.version.lower() and \
            'flopy.modflow.mfwel.modflowwel'.lower() in str(pack_type).lower():
        specify = False
        ipos = f.tell()
        line = f.readline()
        # test for specify keyword if a NWT well file - This is a
        # temporary hack
        if 'specify' in line.lower():
            specify = True
            t = line.strip().split()
            phiramp = np.float32(t[1])
            try:
                phiramp_unit = np.int32(t[2])
            except (IndexError, ValueError):
                phiramp_unit = 2
            options.append('specify {} {} '.format(phiramp, phiramp_unit))
        else:
            # not a SPECIFY record -- rewind so the period loop sees it
            f.seek(ipos)
    elif 'flopy.modflow.mfchd.modflowchd'.lower() in str(
            pack_type).lower():
        partype = ['shead', 'ehead']
    # read parameter data
    if nppak > 0:
        dt = pack_type.get_empty(1, aux_names=aux_names,
                                 structured=model.structured).dtype
        pak_parms = mfparbc.load(f, nppak, dt, model.verbose)
    if nper is None:
        nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
    # read data for every stress period
    bnd_output = None
    stress_period_data = {}
    for iper in range(nper):
        if model.verbose:
            print(
                " loading " + str(
                    pack_type) + " for kper {0:5d}".format(
                    iper + 1))
        line = f.readline()
        if line == '':
            break
        t = line.strip().split()
        itmp = int(t[0])
        itmpp = 0
        try:
            itmpp = int(t[1])
        except (IndexError, ValueError):
            pass
        if itmp == 0:
            # no list entries this period
            bnd_output = None
            current = pack_type.get_empty(itmp, aux_names=aux_names,
                                          structured=model.structured)
        elif itmp > 0:
            current = pack_type.get_empty(itmp, aux_names=aux_names,
                                          structured=model.structured)
            for ibnd in range(itmp):
                line = f.readline()
                if "open/close" in line.lower():
                    binary = False
                    if '(binary)' in line.lower():
                        binary = True
                    # need to strip out existing path seps and
                    # replace current-system path seps
                    raw = line.strip().split()
                    fname = raw[1]
                    if '/' in fname:
                        raw = fname.split('/')
                    elif '\\' in fname:
                        raw = fname.split('\\')
                    else:
                        raw = [fname]
                    fname = os.path.join(*raw)
                    oc_filename = os.path.join(model.model_ws, fname)
                    assert os.path.exists(oc_filename), \
                        "Package.load() error: open/close filename " + \
                        oc_filename + " not found"
                    try:
                        if binary:
                            # binary external file: every column is a
                            # float32, then cast to the target dtype
                            dtype2 = []
                            for name in current.dtype.names:
                                dtype2.append((name, np.float32))
                            dtype2 = np.dtype(dtype2)
                            d = np.fromfile(oc_filename,
                                            dtype=dtype2,
                                            count=itmp)
                            current = np.array(d, dtype=current.dtype)
                        else:
                            current = np.genfromtxt(oc_filename,
                                                    dtype=current.dtype)
                        current = current.view(np.recarray)
                    except Exception as e:
                        raise Exception(
                            "Package.load() error loading open/close file " +
                            oc_filename + " :" + str(e))
                    assert current.shape[0] == itmp, \
                        "Package.load() error: open/close rec array from file " + \
                        oc_filename + " shape (" + str(current.shape) + \
                        ") does not match itmp: {0:d}".format(itmp)
                    # the whole period came from the external file
                    break
                try:
                    t = line.strip().split()
                    current[ibnd] = tuple(t[:len(current.dtype.names)])
                except Exception:
                    # fall back to fixed-width (10-character) fields when
                    # whitespace splitting cannot be converted
                    t = []
                    for ivar in range(len(current.dtype.names)):
                        istart = ivar * 10
                        istop = istart + 10
                        t.append(line[istart:istop])
                    current[ibnd] = tuple(t[:len(current.dtype.names)])
            # convert indices to zero-based
            if model.structured:
                current['k'] -= 1
                current['i'] -= 1
                current['j'] -= 1
            else:
                current['node'] -= 1
            bnd_output = np.recarray.copy(current)
        else:
            # itmp < 0: reuse the previous stress period's data
            bnd_output = np.recarray.copy(current)
        # apply itmpp active parameters for this stress period
        for iparm in range(itmpp):
            line = f.readline()
            t = line.strip().split()
            pname = t[0].lower()
            iname = 'static'
            try:
                tn = t[1]
                c = tn.lower()
                instance_dict = pak_parms.bc_parms[pname][1]
                if c in instance_dict:
                    iname = c
                else:
                    iname = 'static'
            except Exception:
                # no instance name given -- keep 'static'
                pass
            par_dict, current_dict = pak_parms.get(pname)
            data_dict = current_dict[iname]
            par_current = pack_type.get_empty(par_dict['nlst'],
                                              aux_names=aux_names)
            # get appropriate parval, preferring the PVAL file value
            if model.mfpar.pval is None:
                parval = float(par_dict['parval'])
            else:
                try:
                    parval = float(model.mfpar.pval.pval_dict[pname])
                except Exception:
                    parval = float(par_dict['parval'])
            # fill current parameter data (par_current)
            for ibnd, t in enumerate(data_dict):
                par_current[ibnd] = tuple(t[:len(par_current.dtype.names)])
            if model.structured:
                par_current['k'] -= 1
                par_current['i'] -= 1
                par_current['j'] -= 1
            else:
                par_current['node'] -= 1
            # scale the parameter-controlled column(s) by parval
            for ptype in partype:
                par_current[ptype] *= parval
            if bnd_output is None:
                bnd_output = np.recarray.copy(par_current)
            else:
                bnd_output = stack_arrays((bnd_output, par_current),
                                          asrecarray=True, usemask=False)
        if bnd_output is None:
            stress_period_data[iper] = itmp
        else:
            stress_period_data[iper] = bnd_output
    dtype = pack_type.get_empty(0, aux_names=aux_names,
                                structured=model.structured).dtype
    # set package unit number (resolved from the name file; the
    # unitnumber argument is intentionally ignored, as before)
    unitnumber = None
    filenames = [None, None]
    if ext_unit_dict is not None:
        unitnumber, filenames[0] = \
            model.get_ext_dict_attr(ext_unit_dict,
                                    filetype=pack_type.ftype())
        if ipakcb > 0:
            iu, filenames[1] = \
                model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
            model.add_pop_key_list(ipakcb)
    pak = pack_type(model, ipakcb=ipakcb,
                    stress_period_data=stress_period_data,
                    dtype=dtype, options=options,
                    unitnumber=unitnumber, filenames=filenames)
    if check:
        pak.check(f='{}.chk'.format(pak.name[0]),
                  verbose=pak.parent.verbose, level=0)
    return pak
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Invoke PHP after setting up the App Engine environment."""
import argparse
import os
import subprocess
import sys
import tempfile
import wsgiref.util
import google
from google.appengine.api import request_info
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import gcs_server
from google.appengine.tools.devappserver2.php import runtime
def _get_gcs_server():
    """Create, start, and return a gcs_server.GCSServer on an ephemeral port."""
    gcs = gcs_server.GCSServer('localhost', 0)
    gcs.start()
    return gcs
class APIRequestInfo(request_info.RequestInfo):
  """Allows stubs to lookup state linked to the request making the API call."""

  def __init__(self):
    # A single synthetic WSGI environ backs every API call in this CLI runtime.
    fake_environ = {}
    wsgiref.util.setup_testing_defaults(fake_environ)
    self._environ = fake_environ

  def get_request_url(self, request_id):
    """Returns the URL the request e.g. 'http://localhost:8080/foo?bar=baz'.
    Args:
      request_id: The string id of the request making the API call (unused
        here, since one fake request backs all calls).
    Returns:
      The URL of the request as a string.
    """
    return wsgiref.util.request_uri(self._environ)

  def get_request_environ(self, request_id):
    """Returns a dict containing the WSGI environ for the request."""
    return self._environ

  def get_module(self, request_id):
    """Returns the name of the module serving this request (always 'default').
    Args:
      request_id: The string id of the request making the API call.
    Returns:
      A str containing the module name.
    """
    return 'default'

  def get_version(self, request_id):
    """Returns the version of the module serving this request (always '1').
    Args:
      request_id: The string id of the request making the API call.
    Returns:
      A str containing the version.
    """
    return '1'

  def get_instance(self, request_id):
    """Returns an opaque stand-in for the instance serving this request.
    Args:
      request_id: The string id of the request making the API call.
    Returns:
      An opaque representation of the instance serving this request. It should
      only be passed to dispatcher methods expecting an instance.
    """
    return object()

  def get_dispatcher(self):
    """Returns a local fake Dispatcher.
    Returns:
      The Dispatcher instance.
    """
    return request_info._LocalFakeDispatcher()
def _get_api_server(app_id):
  """Return a configured and started api_server.APIServer.

  Args:
    app_id: The string application id used to configure the API stubs; it is
      also exported via the APPLICATION_ID environment variable.

  Returns:
    A started api_server.APIServer listening on an ephemeral localhost port.
  """
  # Blobstore needs a writable directory; everything else below is in-memory.
  # NOTE(review): tmp_dir is never cleaned up — presumably acceptable for a
  # short-lived CLI process; confirm.
  tmp_dir = tempfile.mkdtemp()
  os.environ['APPLICATION_ID'] = app_id
  api_server.setup_stubs(
      request_data=APIRequestInfo(),
      app_id=app_id,
      application_root=os.getcwd(),
      trusted=False,
      appidentity_email_address=None,
      appidentity_private_key_path=None,
      blobstore_path=tmp_dir,
      # Datastore and logs are kept in memory; nothing persists across runs.
      datastore_consistency=
      datastore_stub_util.PseudoRandomHRConsistencyPolicy(),
      datastore_path=':memory:',
      datastore_require_indexes=False,
      datastore_auto_id_policy=datastore_stub_util.SCATTERED,
      images_host_prefix='http://localhost:8080',
      logs_path=':memory:',
      mail_smtp_host='',
      mail_smtp_port=25,
      mail_smtp_user='',
      mail_smtp_password='',
      mail_enable_sendmail=False,
      mail_show_mail_body=False,
      mail_allow_tls=False,
      search_index_path=None,
      taskqueue_auto_run_tasks=False,
      taskqueue_default_http_server='http://localhost:8080',
      user_login_url='/_ah/login?continue=%s',
      user_logout_url='/_ah/login?continue=%s',
      default_gcs_bucket_name=None)
  server = api_server.APIServer('localhost', 0, app_id)
  server.start()
  return server
def _get_default_php_cli_path():
  """Returns the path to the siloed php cli binary or None if not present."""
  google_package_directory = os.path.dirname(google.__file__)
  sdk_directory = os.path.dirname(google_package_directory)
  # The bundled PHP binary lives at a platform-specific location; only
  # Windows and macOS ship one.
  candidate = None
  if sys.platform == 'win32':
    candidate = os.path.abspath(
        os.path.join(sdk_directory, 'php/php-5.5-Win32-VC11-x86/php.exe'))
  elif sys.platform == 'darwin':
    candidate = os.path.abspath(
        os.path.join(os.path.dirname(sdk_directory), 'php'))
  if candidate and os.path.exists(candidate):
    return candidate
  return None
def _parse_path(value):
"""Returns the given path with ~ and environment variables expanded."""
return os.path.expanduser(os.path.expandvars(value))
def _create_command_line_parser():
  """Returns an argparse.ArgumentParser to parse command line arguments."""
  parser = argparse.ArgumentParser(
      usage='usage: %(prog)s [execution options] <script> [script_args]',
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument(
      'script',
      help='the path to the PHP script that should be executed')
  parser.add_argument(
      'script_args',
      nargs=argparse.REMAINDER,
      help='the command arguments that will be passed to the script')
  execution_group = parser.add_argument_group('Execution Options')
  default_cli = _get_default_php_cli_path()
  # --php_executable_path is mandatory only when no bundled PHP CLI exists.
  execution_group.add_argument('--php_executable_path', metavar='PATH',
                               type=_parse_path,
                               default=default_cli,
                               required=default_cli is None,
                               help='path to the PHP executable')
  return parser
def main():
  """Parse arguments, start the fake API/GCS servers, and run the PHP script."""
  parser = _create_command_line_parser()
  options = parser.parse_args()
  # Validate the PHP binary and script before starting any servers.
  if not options.php_executable_path:
    parser.error('--php_executable_path must be set')
  elif not os.path.exists(options.php_executable_path):
    parser.error('--php_executable_path=%s, %s does not exist' % (
        options.php_executable_path, options.php_executable_path))
  php_script = os.path.abspath(_parse_path(options.script))
  if not os.path.exists(php_script):
    parser.error('%s does not exist' % php_script)
  api_srv = _get_api_server(app_id='dummy_app_id')
  gcs_srv = _get_gcs_server()
  include_paths = [runtime.SDK_PATH]
  # Windows uses ';' as the include-path separator and quotes the value.
  if sys.platform == 'win32':
    include_path = 'include_path="%s"' % ';'.join(include_paths)
  else:
    include_path = 'include_path=%s' % ':'.join(include_paths)
  php_args = [options.php_executable_path,
              '-d', include_path,
              '-f', runtime.SETUP_PHP_PATH]
  php_args.extend(options.script_args)
  env = dict(HTTP_HOST='localhost:%d' % gcs_srv.port,
             SERVER_SOFTWARE='Development/CLI',
             REAL_SCRIPT_FILENAME=php_script,
             REMOTE_API_HOST='localhost',
             REMOTE_API_PORT=str(api_srv.port),
             REMOTE_REQUEST_ID='51',
             APPLICATION_ROOT=os.path.dirname(php_script))
  # Pass SYSTEMROOT through when present (Windows).
  if 'SYSTEMROOT' in os.environ:
    env['SYSTEMROOT'] = os.environ['SYSTEMROOT']
  php_process = subprocess.Popen(php_args, env=env)
  exit_code = php_process.wait()
  api_srv.quit()
  gcs_srv.quit()
  sys.exit(exit_code)
if __name__ == '__main__':
  main()
| |
import logging
import csv
import StringIO
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, logout
from django.contrib.auth.decorators import user_passes_test
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render, get_object_or_404
from tastypie.models import ApiKey
from dojo.models import Finding, Notes
from dojo.filters import UserFilter
from dojo.forms import DojoUserForm, AddDojoUserForm, DeleteUserForm, APIKeyForm, UserContactInfoForm
from dojo.models import Product, Dojo_User, UserContactInfo, Alerts
from dojo.utils import get_page_items, add_breadcrumb, get_system_setting
logging.basicConfig(
level=logging.DEBUG,
format='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s',
datefmt='%d/%b/%Y %H:%M:%S',
filename=settings.DOJO_ROOT + '/../django_app.log',
)
logger = logging.getLogger(__name__)
# # tastypie api
def upload_cvffv1(request):
    """Render the WSO2 CVFFv1 upload form and import a submitted CSV file."""
    if request.method != 'POST':
        return render(request,
                      'dojo/up_cvff.html',
                      {'form': UploadCVFFForm()})
    form = UploadCVFFForm(request.POST, request.FILES)
    if form.is_valid():
        handle_uploaded_cvff(request, request.FILES['file'])
        messages.add_message(request,
                             messages.SUCCESS,
                             'WSO2 CVFFv1 file import was successful.',
                             extra_tags='alert-success')
        return HttpResponseRedirect("/")
    # Invalid POST: re-render with the bound form so errors are shown.
    return render(request,
                  'dojo/up_cvff.html',
                  {'form': form})
class UploadCVFFForm(forms.Form):
    # Single-field upload form; the widget's "accept" attribute restricts the
    # browser's file picker to .csv files (client-side hint only).
    file = forms.FileField(widget=forms.widgets.FileInput(
        attrs={"accept": ".csv"}),
        label="Select WSO2 CVFFv1 Input File (CSV)")
def handle_uploaded_cvff(request, f):
    """Import a WSO2 CVFFv1 CSV, updating notes and status flags on Findings.

    Each data row starts with the Finding primary key. The column named
    ``WSO2_resolution`` (located by scanning the header row) holds the triage
    status, and the column two to its right holds the reviewer comment.

    :param request: current HttpRequest; request.user becomes the note author.
    :param f: the uploaded file object (read in chunks).
    """
    # Buffer the upload, then drop the first line before parsing; the real
    # column header row is detected below by looking for "WSO2_resolution".
    output = StringIO.StringIO()
    for chunk in f.chunks():
        output.write(chunk)
    csvString = output.getvalue().splitlines(True)[1:]
    inputCSV = csv.reader(csvString, quoting=csv.QUOTE_NONNUMERIC)
    logger.error('Before moving into loop')
    isHeader = 1
    indexOfResolution = 0
    # Normalized WSO2 status text -> the Finding flag it enables.
    status_flags = {
        'falsepositive': 'false_p',
        'notathreat': 'verified',
        'needtobefixed': 'active',
        'needtofix': 'active',
        'truepositive': 'active',
        'alreadymitigated': 'out_of_scope',
        'notapplicable': 'under_review',
    }
    for row in inputCSV:
        if isHeader == 1:
            for col in row:
                if str(col) == "WSO2_resolution":
                    isHeader = 0
                    break
                indexOfResolution = indexOfResolution + 1
            # BUG FIX: previously the header row fell through into the data
            # handling below and was "skipped" only because float(row[0])
            # raised; skip it explicitly instead.
            continue
        try:
            finding = Finding.objects.filter(pk=float(row[0]))[0]
            logger.error('Finding note count for id ' + str(row[0]) + ' is : ' + str(finding.notes.count()))
            status = str(row[indexOfResolution]).strip().split("(")[0]
            entry = "[ " + status + " ] ~ " + row[indexOfResolution + 2]
            if finding.notes.count() == 0:
                note = Notes(entry=entry, author=request.user)
                note.save()
                finding.notes.add(note)
                logger.info('Adding new note')
            else:
                # Reuse the existing (first) note rather than stacking new ones.
                note = finding.notes.all()[0]
                note.entry = entry
                note.author = request.user
                note.save()
                logger.info('Updating existing note' + str(note.id))
            # Normalize the status, clear all triage flags, then set the one
            # that matches.
            status = status.replace('.', '').replace(',', '').replace(' ', '').lower()
            finding.false_p = False
            finding.verified = False
            finding.active = False
            finding.out_of_scope = False
            finding.save()
            if status in status_flags:
                setattr(finding, status_flags[status], True)
                finding.save()
            else:
                logger.error('Unknown status for : ' + str(row[0]) + ". Status is : " + status)
        except Exception as e:
            # str(e) instead of e.message: .message is deprecated in Python 2
            # and removed in Python 3.
            logger.error(str(e))
            logger.error('Error in processing row: ' + str(row[0]) + ". Skipping.")
def api_key(request):
    """Show the current user's API key; on POST, regenerate it."""
    key = ''
    form = APIKeyForm(instance=request.user)
    if request.method == 'POST':  # new key requested
        form = APIKeyForm(request.POST, instance=request.user)
        # A user may only regenerate their own key.
        if not (form.is_valid() and form.cleaned_data['id'] == request.user.id):
            raise PermissionDenied
        try:
            key = ApiKey.objects.get(user=request.user)
            # Clearing the key value makes tastypie mint a new one on save().
            key.key = None
            key.save()
        except ApiKey.DoesNotExist:
            key = ApiKey.objects.create(user=request.user)
        messages.add_message(request,
                             messages.SUCCESS,
                             'API Key generated successfully.',
                             extra_tags='alert-success')
    else:
        try:
            key = ApiKey.objects.get(user=request.user)
        except ApiKey.DoesNotExist:
            key = ApiKey.objects.create(user=request.user)
    add_breadcrumb(title="API Key", top_level=True, request=request)
    return render(request, 'dojo/api_key.html',
                  {'name': 'API Key',
                   'metric': False,
                   'user': request.user,
                   'key': key,
                   'form': form,
                   })
# # user specific
def logout_view(request):
    """Log the user out, flash a confirmation, and redirect to the login page."""
    logout(request)
    messages.add_message(
        request,
        messages.SUCCESS,
        'You have logged out successfully.',
        extra_tags='alert-success')
    return HttpResponseRedirect(reverse('login'))
# @user_passes_test(lambda u: u.is_staff)
def alerts(request):
    """List the current user's alerts; a POST deletes the selected ones."""
    user_alerts = Alerts.objects.filter(user_id=request.user)
    if request.method == 'POST':
        selected = request.POST.getlist('alert_select')
        user_alerts.filter(id__in=selected).delete()
        # Re-filter so the page rendered below no longer shows deleted rows.
        user_alerts = user_alerts.filter(~Q(id__in=selected))
    paged_alerts = get_page_items(request, user_alerts, 25)
    add_breadcrumb(title="Alerts for " + request.user.get_full_name(), top_level=True, request=request)
    return render(request,
                  'dojo/alerts.html',
                  {'alerts': paged_alerts})
def alerts_json(request, limit=None):
    """Return the current user's alerts serialized as JSON.

    The optional ``limit`` query parameter caps the number of alerts
    returned; the ``limit`` function argument is kept as a fallback default
    for URLconf compatibility.
    """
    limit = request.GET.get('limit', limit)
    qs = Alerts.objects.filter(user_id=request.user)
    if limit:
        # BUG FIX: the query-string value is a string; slicing a QuerySet
        # with a non-integer raises TypeError, so convert before slicing.
        qs = qs[:int(limit)]
    data = serializers.serialize('json', qs)
    return HttpResponse(data, content_type='application/json')
def alertcount(request):
    """Return the current user's alert count as JSON: {"count": N}."""
    total = Alerts.objects.filter(user_id=request.user).count()
    return JsonResponse({'count': total})
def view_profile(request):
    """Display and update the logged-in user's profile and contact info."""
    user = get_object_or_404(Dojo_User, pk=request.user.id)
    try:
        user_contact = UserContactInfo.objects.get(user=user)
    except UserContactInfo.DoesNotExist:
        user_contact = None
    form = DojoUserForm(instance=user)
    # Bind to the existing contact record when there is one.
    contact_form = (UserContactInfoForm() if user_contact is None
                    else UserContactInfoForm(instance=user_contact))
    if request.method == 'POST':
        form = DojoUserForm(request.POST, instance=user)
        contact_form = UserContactInfoForm(request.POST, instance=user_contact)
        if form.is_valid() and contact_form.is_valid():
            form.save()
            contact = contact_form.save(commit=False)
            # Ensure the contact row is linked to this user before saving.
            contact.user = user
            contact.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Profile updated successfully.',
                                 extra_tags='alert-success')
    add_breadcrumb(title="User Profile - " + user.get_full_name(), top_level=True, request=request)
    return render(request, 'dojo/profile.html', {
        'name': 'Engineer Profile',
        'metric': False,
        'user': user,
        'form': form,
        'contact_form': contact_form})
def change_password(request):
    """Change the user's password after re-authenticating with the old one."""
    if request.method == 'POST':
        current_pwd = request.POST['current_password']
        new_pwd = request.POST['new_password']
        # Re-authenticate with the current password before accepting a change.
        user = authenticate(username=request.user.username,
                            password=current_pwd)
        if user is not None and user.is_active:
            user.set_password(new_pwd)
            user.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'Your password has been changed.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('view_profile'))
        messages.add_message(request,
                             messages.ERROR,
                             'Your password has not been changed.',
                             extra_tags='alert-danger')
    add_breadcrumb(title="Change Password", top_level=False, request=request)
    return render(request, 'dojo/change_pwd.html',
                  {'error': ''})
@user_passes_test(lambda u: u.is_staff)
def user(request):
    """Staff-only listing of all users, filterable and paginated (25/page)."""
    all_users = Dojo_User.objects.all().order_by('username', 'last_name', 'first_name')
    filtered = UserFilter(request.GET, queryset=all_users)
    paged_users = get_page_items(request, filtered.qs, 25)
    add_breadcrumb(title="All Users", top_level=True, request=request)
    return render(request,
                  'dojo/users.html',
                  {"users": paged_users,
                   "filtered": filtered,
                   "name": "All Users",
                   })
@user_passes_test(lambda u: u.is_staff)
def add_user(request):
    """Staff view: create a new user (plus contact info and product access).

    On success, redirects to the new user's edit page so staff can finish
    configuring the account.
    """
    form = AddDojoUserForm()
    # Non-superuser staff may not grant elevated privileges.
    if not request.user.is_superuser:
        form.fields['is_staff'].widget.attrs['disabled'] = True
        form.fields['is_superuser'].widget.attrs['disabled'] = True
        form.fields['is_active'].widget.attrs['disabled'] = True
    contact_form = UserContactInfoForm()
    user = None
    if request.method == 'POST':
        form = AddDojoUserForm(request.POST)
        contact_form = UserContactInfoForm(request.POST)
        if form.is_valid() and contact_form.is_valid():
            user = form.save(commit=False)
            # New accounts start without a usable password.
            user.set_unusable_password()
            # NOTE(review): Django's auth flag is 'is_active'; 'active' looks
            # like it may set an unused attribute — confirm Dojo_User defines it.
            user.active = True
            user.save()
            contact = contact_form.save(commit=False)
            contact.user = user
            contact.save()
            # Grant the new user access to each selected product.
            if 'authorized_products' in form.cleaned_data and len(form.cleaned_data['authorized_products']) > 0:
                for p in form.cleaned_data['authorized_products']:
                    p.authorized_users.add(user)
                    p.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'User added successfully, you may edit if necessary.',
                                 extra_tags='alert-success')
            return HttpResponseRedirect(reverse('edit_user', args=(user.id,)))
        else:
            messages.add_message(request,
                                 messages.ERROR,
                                 'User was not added successfully.',
                                 extra_tags='alert-danger')
    add_breadcrumb(title="Add User", top_level=False, request=request)
    return render(request, "dojo/add_user.html", {
        'name': 'Add User',
        'form': form,
        'contact_form': contact_form,
        'to_add': True})
@user_passes_test(lambda u: u.is_staff)
def edit_user(request, uid):
    """Staff view: edit an existing user's account, contact info, and
    authorized products.

    :param uid: primary key of the Dojo_User to edit.
    """
    user = get_object_or_404(Dojo_User, id=uid)
    # Pre-select the products this user is currently authorized for.
    authed_products = Product.objects.filter(authorized_users__in=[user])
    form = AddDojoUserForm(instance=user, initial={'authorized_products': authed_products})
    # Non-superuser staff may not grant elevated privileges.
    if not request.user.is_superuser:
        form.fields['is_staff'].widget.attrs['disabled'] = True
        form.fields['is_superuser'].widget.attrs['disabled'] = True
        form.fields['is_active'].widget.attrs['disabled'] = True
    try:
        user_contact = UserContactInfo.objects.get(user=user)
    except UserContactInfo.DoesNotExist:
        user_contact = None
    if user_contact is None:
        contact_form = UserContactInfoForm()
    else:
        contact_form = UserContactInfoForm(instance=user_contact)
    if request.method == 'POST':
        form = AddDojoUserForm(request.POST, instance=user, initial={'authorized_products': authed_products})
        # Bind to the existing contact record when there is one, so saving
        # updates rather than duplicates it.
        if user_contact is None:
            contact_form = UserContactInfoForm(request.POST)
        else:
            contact_form = UserContactInfoForm(request.POST, instance=user_contact)
        if form.is_valid() and contact_form.is_valid():
            form.save()
            # NOTE(review): products deselected in the form are not removed
            # here — only additions are applied; confirm that is intended.
            if 'authorized_products' in form.cleaned_data and len(form.cleaned_data['authorized_products']) > 0:
                for p in form.cleaned_data['authorized_products']:
                    p.authorized_users.add(user)
                    p.save()
            contact = contact_form.save(commit=False)
            contact.user = user
            contact.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 'User saved successfully.',
                                 extra_tags='alert-success')
        else:
            messages.add_message(request,
                                 messages.ERROR,
                                 'User was not saved successfully.',
                                 extra_tags='alert-danger')
    add_breadcrumb(title="Edit User", top_level=False, request=request)
    return render(request, "dojo/add_user.html", {
        'name': 'Edit User',
        'form': form,
        'contact_form': contact_form,
        'to_edit': user})
@user_passes_test(lambda u: u.is_staff)
def delete_user(request, uid):
    """Staff view: delete a user after showing everything that cascades.

    Self-deletion is refused. The related-object tree is collected so the
    confirmation page can show what else will be removed.

    :param uid: primary key of the Dojo_User to delete.
    """
    user = get_object_or_404(Dojo_User, id=uid)
    form = DeleteUserForm(instance=user)
    from django.contrib.admin.utils import NestedObjects
    from django.db import DEFAULT_DB_ALIAS
    # Collect the full cascade of objects that deleting this user would remove.
    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([user])
    rels = collector.nested()
    if user.id == request.user.id:
        messages.add_message(request,
                             messages.ERROR,
                             'You may not delete yourself.',
                             extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('edit_user', args=(user.id,)))
    if request.method == 'POST':
        # The hidden id field must match the URL's user to guard against
        # form tampering.
        if 'id' in request.POST and str(user.id) == request.POST['id']:
            form = DeleteUserForm(request.POST, instance=user)
            if form.is_valid():
                user.delete()
                messages.add_message(request,
                                     messages.SUCCESS,
                                     'User and relationships removed.',
                                     extra_tags='alert-success')
                return HttpResponseRedirect(reverse('users'))
    add_breadcrumb(title="Delete User", top_level=False, request=request)
    return render(request, 'dojo/delete_user.html',
                  {'to_delete': user,
                   'form': form,
                   'rels': rels,
                   })
| |
import datetime
from django.db import connection
from django.db.models.query import QuerySet, Q
from django.utils.timezone import now as timezone_now
from sqlalchemy.sql import (
column,
literal,
func,
)
from zerver.lib.request import REQ
from zerver.models import (
Message,
Recipient,
UserMessage,
UserProfile,
)
from typing import Any, Dict, List, Optional, Tuple
# Only use these constants for events.
ORIG_TOPIC = "orig_subject"
TOPIC_NAME = "subject"
TOPIC_LINKS = "subject_links"
MATCH_TOPIC = "match_subject"
# This constant is actually embedded into
# the JSON data for message edit history,
# so we'll always need to handle legacy data
# unless we do a pretty tricky migration.
LEGACY_PREV_TOPIC = "prev_subject"
# This constant is pretty closely coupled to the
# database, but it's the JSON field.
EXPORT_TOPIC_NAME = "subject"
'''
The following functions are for user-facing APIs
where we'll want to support "subject" for a while.
'''
def get_topic_from_message_info(message_info: Dict[str, Any]) -> str:
    '''
    Use this where you are getting dicts that are based off of messages
    that may come from the outside world, especially from third party
    APIs and bots.

    We prefer 'topic' to 'subject' here. At least one of the two keys
    must be present; otherwise the KeyError propagates to the caller.
    '''
    try:
        return message_info['topic']
    except KeyError:
        return message_info['subject']
def REQ_topic() -> Optional[str]:
    # REQ handlers really return a REQ, but we
    # lie to make the rest of the type matching work.
    return REQ(
        default=None,
        whence='topic',
        aliases=['subject'],
        converter=lambda x: x.strip(),
    )  # type: ignore # see comment above
'''
TRY TO KEEP THIS DIVIDING LINE.
Below this line we want to make it so that functions are only
using "subject" in the DB sense, and nothing customer facing.
'''
# This is used in low-level message functions in
# zerver/lib/message.py, and it's not user facing.
DB_TOPIC_NAME = "subject"
MESSAGE__TOPIC = 'message__subject'
def topic_match_sa(topic_name: str) -> Any:
    # _sa is short for Sql Alchemy, which we use mostly for
    # queries that search messages
    lhs = func.upper(column("subject"))
    rhs = func.upper(literal(topic_name))
    return lhs == rhs
def topic_column_sa() -> Any:
    # SQLAlchemy column object for the DB-level topic field ("subject").
    return column("subject")
def filter_by_exact_message_topic(query: QuerySet, message: Message) -> QuerySet:
    # Narrow to rows whose subject matches the message's topic exactly
    # (case-sensitive, unlike the iexact helpers below).
    return query.filter(subject=message.topic_name())
def filter_by_topic_name_via_message(query: QuerySet, topic_name: str) -> QuerySet:
    # Case-insensitive topic match through a 'message' foreign key
    # (e.g. on UserMessage querysets).
    return query.filter(message__subject__iexact=topic_name)
def messages_for_topic(stream_id: int, topic_name: str) -> QuerySet:
    # All messages in the given stream whose topic matches, case-insensitively.
    stream_messages = Message.objects.filter(recipient__type_id=stream_id)
    return stream_messages.filter(subject__iexact=topic_name)
def save_message_for_edit_use_case(message: Message) -> None:
    # Persist only the fields a message edit can change.
    edited_fields = ["subject", "content", "rendered_content",
                     "rendered_content_version", "last_edit_time",
                     "edit_history"]
    message.save(update_fields=edited_fields)
def user_message_exists_for_topic(user_profile: UserProfile,
                                  recipient: Recipient,
                                  topic_name: str) -> bool:
    # True if the user has any message in this recipient/topic
    # (topic compared case-insensitively).
    matches = UserMessage.objects.filter(
        user_profile=user_profile,
        message__recipient=recipient,
        message__subject__iexact=topic_name,
    )
    return matches.exists()
def update_messages_for_topic_edit(message: Message,
                                   propagate_mode: str,
                                   orig_topic_name: str,
                                   topic_name: str) -> List[Message]:
    """Rename the topic on messages related to *message* per *propagate_mode*.

    Modes: 'change_all' (same recipient/topic within the last 2 days,
    excluding the edited message itself) or 'change_later' (only messages
    after the edited one). Returns the updated Message objects with the new
    topic already set on the cached copies.
    """
    propagate_query = Q(recipient = message.recipient, subject = orig_topic_name)
    # We only change messages up to 2 days in the past, to avoid hammering our
    # DB by changing an unbounded amount of messages
    if propagate_mode == 'change_all':
        before_bound = timezone_now() - datetime.timedelta(days=2)
        propagate_query = (propagate_query & ~Q(id = message.id) &
                           Q(pub_date__range=(before_bound, timezone_now())))
    if propagate_mode == 'change_later':
        propagate_query = propagate_query & Q(id__gt = message.id)
    messages = Message.objects.filter(propagate_query).select_related()
    # Evaluate the query before running the update
    messages_list = list(messages)
    messages.update(subject=topic_name)
    for m in messages_list:
        # The cached ORM object is not changed by messages.update()
        # and the remote cache update requires the new value
        m.set_topic_name(topic_name)
    return messages_list
def generate_topic_history_from_db_rows(rows: List[Tuple[str, int]]) -> List[Dict[str, Any]]:
    """Collapse (topic_name, max_message_id) rows into a deduped history list.

    Topics are deduplicated case-insensitively; when a topic appears under
    several casings, the casing from the most recent (highest id) row wins.
    The result is sorted by max_id, newest first.
    """
    # Iterating in ascending id order means newer rows overwrite older ones
    # in the dict, so the most recent casing survives.
    canonical = {}  # type: Dict[str, Tuple[int, str]]
    for topic_name, max_message_id in sorted(rows, key=lambda tup: tup[1]):
        canonical[topic_name.lower()] = (max_message_id, topic_name)
    history = [
        dict(name=name, max_id=max_id)
        for (max_id, name) in canonical.values()
    ]
    return sorted(history, key=lambda entry: -entry['max_id'])
def get_topic_history_for_stream(user_profile: UserProfile,
                                 recipient: Recipient,
                                 public_history: bool) -> List[Dict[str, Any]]:
    """Return per-topic history (name + max message id) for one stream.

    When public_history is False, only topics in which the user has
    received messages (joined via zerver_usermessage) are included.
    """
    cursor = connection.cursor()
    if public_history:
        # Raw SQL groups by the exact subject; case-insensitive dedup happens
        # afterwards in generate_topic_history_from_db_rows.
        query = '''
        SELECT
            "zerver_message"."subject" as topic,
            max("zerver_message".id) as max_message_id
        FROM "zerver_message"
        WHERE (
            "zerver_message"."recipient_id" = %s
        )
        GROUP BY (
            "zerver_message"."subject"
        )
        ORDER BY max("zerver_message".id) DESC
        '''
        cursor.execute(query, [recipient.id])
    else:
        query = '''
        SELECT
            "zerver_message"."subject" as topic,
            max("zerver_message".id) as max_message_id
        FROM "zerver_message"
        INNER JOIN "zerver_usermessage" ON (
            "zerver_usermessage"."message_id" = "zerver_message"."id"
        )
        WHERE (
            "zerver_usermessage"."user_profile_id" = %s AND
            "zerver_message"."recipient_id" = %s
        )
        GROUP BY (
            "zerver_message"."subject"
        )
        ORDER BY max("zerver_message".id) DESC
        '''
        cursor.execute(query, [user_profile.id, recipient.id])
    rows = cursor.fetchall()
    cursor.close()
    return generate_topic_history_from_db_rows(rows)
def get_topic_history_for_web_public_stream(recipient: Recipient) -> List[Dict[str, Any]]:
    """Return per-topic history for a web-public stream (no user filtering).

    Same query as the public_history branch of get_topic_history_for_stream,
    but callable without a UserProfile.
    """
    cursor = connection.cursor()
    query = '''
    SELECT
        "zerver_message"."subject" as topic,
        max("zerver_message".id) as max_message_id
    FROM "zerver_message"
    WHERE (
        "zerver_message"."recipient_id" = %s
    )
    GROUP BY (
        "zerver_message"."subject"
    )
    ORDER BY max("zerver_message".id) DESC
    '''
    cursor.execute(query, [recipient.id])
    rows = cursor.fetchall()
    cursor.close()
    return generate_topic_history_from_db_rows(rows)
| |
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import socket
import sys
import time
import uuid
from os import execlp
from random import randint
from urllib.parse import urlparse
import requests
from docker import errors
from paasta_tools.adhoc_tools import get_default_interactive_config
from paasta_tools.chronos_tools import parse_time_variables
from paasta_tools.cli.cmds.check import makefile_responds_to
from paasta_tools.cli.cmds.cook_image import paasta_cook_image
from paasta_tools.cli.utils import figure_out_service_name
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_instances
from paasta_tools.cli.utils import list_services
from paasta_tools.cli.utils import pick_random_port
from paasta_tools.long_running_service_tools import get_healthcheck_for_instance
from paasta_tools.paasta_execute_docker_command import execute_in_container
from paasta_tools.secret_tools import get_secret_provider
from paasta_tools.secret_tools import is_secret_ref
from paasta_tools.utils import _run
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import get_username
from paasta_tools.utils import list_clusters
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import NoDockerImageError
from paasta_tools.utils import paasta_print
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import PaastaNotConfiguredError
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import timed_flock
from paasta_tools.utils import Timeout
from paasta_tools.utils import TimeoutError
from paasta_tools.utils import validate_service_instance
def perform_http_healthcheck(url, timeout):
    """Returns true if healthcheck on url succeeds, false otherwise

    :param url: the healthcheck url
    :param timeout: timeout in seconds
    :returns: a tuple (passed, explanation); passed is True when the request
              succeeds within `timeout` seconds with a 2xx/3xx status
    """
    try:
        with Timeout(seconds=timeout):
            try:
                res = requests.get(url, verify=False)
            except requests.ConnectionError:
                return (False, "http request failed: connection failed")
    except TimeoutError:
        return (False, "http request timed out after %d seconds" % timeout)
    content_type = res.headers.get('content-type')
    if content_type is not None and ',' in content_type:
        # Mesos treats a multi-valued content-type header as a failure, so
        # report failure here even though the request itself succeeded.
        paasta_print(PaastaColors.yellow(
            "Multiple content-type headers detected in response."
            " The Mesos healthcheck system will treat this as a failure!",
        ))
        return (False, "http request succeeded, code %d" % res.status_code)
    # check if response code is valid per https://mesosphere.github.io/marathon/docs/health-checks.html
    if 200 <= res.status_code < 400:
        return (True, "http request succeeded, code %d" % res.status_code)
    return (False, "http request failed, code %s" % str(res.status_code))
def perform_tcp_healthcheck(url, timeout):
    """Returns true if successfully connects to host and port, false otherwise

    :param url: the healthcheck url (in the form tcp://host:port)
    :param timeout: timeout in seconds
    :returns: a tuple (passed, explanation)
    """
    parsed = urlparse(url)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(timeout)
    # connect_ex returns 0 on success, an errno value on failure.
    errno_result = sock.connect_ex((parsed.hostname, parsed.port))
    sock.close()
    if errno_result != 0:
        return (False, "%s (timeout %d seconds)" % (os.strerror(errno_result), timeout))
    return (True, "tcp connection succeeded")
def perform_cmd_healthcheck(docker_client, container_id, command, timeout):
    """Returns true if return code of command is 0 when executed inside container, false otherwise

    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param command: command to execute
    :param timeout: timeout in seconds
    :returns: a tuple (passed, command output)
    """
    output, return_code = execute_in_container(docker_client, container_id, command, timeout)
    return (return_code == 0, output)
def run_healthcheck_on_container(
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    timeout,
):
    """Performs healthcheck on a container

    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: a URL when healthcheck_mode is 'http[s]' or 'tcp', a command if healthcheck_mode is 'cmd'
    :param timeout: timeout in seconds for individual check
    :returns: a tuple of (bool, output string); exits the process for an
              unsupported mode
    """
    if healthcheck_mode == 'cmd':
        return perform_cmd_healthcheck(docker_client, container_id, healthcheck_data, timeout)
    if healthcheck_mode in ('http', 'https'):
        return perform_http_healthcheck(healthcheck_data, timeout)
    if healthcheck_mode == 'tcp':
        return perform_tcp_healthcheck(healthcheck_data, timeout)
    paasta_print(PaastaColors.yellow(
        "Healthcheck mode '%s' is not currently supported!" % healthcheck_mode,
    ))
    sys.exit(1)
def simulate_healthcheck_on_service(
    instance_config,
    docker_client,
    container_id,
    healthcheck_mode,
    healthcheck_data,
    healthcheck_enabled,
):
    """Simulates Marathon-style healthcheck on given service if healthcheck is enabled

    Checks run every `interval` seconds; failures inside the grace period are
    reported but not counted. After `max_failures` consecutive post-grace
    failures (or a container exit) the simulation stops.

    :param instance_config: service manifest
    :param docker_client: Docker client object
    :param container_id: Docker container id
    :param healthcheck_mode: one of 'http', 'https', 'tcp', or 'cmd'
    :param healthcheck_data: tuple url to healthcheck
    :param healthcheck_enabled: boolean
    :returns: healthcheck_passed: boolean
    """
    healthcheck_link = PaastaColors.cyan(healthcheck_data)
    if healthcheck_enabled:
        grace_period = instance_config.get_healthcheck_grace_period_seconds()
        timeout = instance_config.get_healthcheck_timeout_seconds()
        interval = instance_config.get_healthcheck_interval_seconds()
        max_failures = instance_config.get_healthcheck_max_consecutive_failures()
        paasta_print('\nStarting health check via %s (waiting %s seconds before '
                     'considering failures due to grace period):' % (healthcheck_link, grace_period))
        # silenty start performing health checks until grace period ends or first check succeeds
        graceperiod_end_time = time.time() + grace_period
        after_grace_period_attempts = 0
        while True:
            # First inspect the container for early exits
            container_state = docker_client.inspect_container(container_id)
            if not container_state['State']['Running']:
                # A dead container can never pass; stop immediately.
                paasta_print(
                    PaastaColors.red('Container exited with code {}'.format(
                        container_state['State']['ExitCode'],
                    )),
                )
                healthcheck_passed = False
                break
            healthcheck_passed, healthcheck_output = run_healthcheck_on_container(
                docker_client, container_id, healthcheck_mode, healthcheck_data, timeout,
            )
            # Yay, we passed the healthcheck
            if healthcheck_passed:
                paasta_print("{}'{}' (via {})".format(
                    PaastaColors.green("Healthcheck succeeded!: "),
                    healthcheck_output,
                    healthcheck_link,
                ))
                break
            # Otherwise, print why we failed
            if time.time() < graceperiod_end_time:
                # Grace-period failures are shown greyed out and not counted.
                color = PaastaColors.grey
                msg = '(disregarded due to grace period)'
                extra_msg = ' (via: {}. Output: {})'.format(healthcheck_link, healthcheck_output)
            else:
                # If we've exceeded the grace period, we start incrementing attempts
                after_grace_period_attempts += 1
                color = PaastaColors.red
                msg = '(Attempt {} of {})'.format(
                    after_grace_period_attempts, max_failures,
                )
                extra_msg = ' (via: {}. Output: {})'.format(healthcheck_link, healthcheck_output)
            paasta_print('{}{}'.format(
                color('Healthcheck failed! {}'.format(msg)),
                extra_msg,
            ))
            if after_grace_period_attempts == max_failures:
                break
            time.sleep(interval)
    else:
        # Healthchecking disabled: just show what would have been checked.
        paasta_print('\nPaaSTA would have healthchecked your service via\n%s' % healthcheck_link)
        healthcheck_passed = True
    return healthcheck_passed
def read_local_dockerfile_lines():
    """Return the lines of the Dockerfile in the current working directory.

    :returns: list of str, one per line (newlines preserved), as produced
        by ``file.readlines()``.
    :raises IOError/OSError: if no readable Dockerfile exists in the cwd.
    """
    dockerfile = os.path.join(os.getcwd(), 'Dockerfile')
    # Use a context manager so the handle is closed deterministically;
    # the original leaked it until garbage collection.
    with open(dockerfile) as f:
        return f.readlines()
def add_subparser(subparsers):
    """Register the 'local-run' subcommand and all of its CLI options.

    :param subparsers: the argparse subparsers object of the top-level paasta
        CLI; a new 'local-run' parser is attached to it and its handler is
        wired to ``paasta_local_run`` via ``set_defaults``.
    """
    list_parser = subparsers.add_parser(
        'local-run',
        help="Run service's Docker image locally",
        description=(
            "'paasta local-run' is useful for simulating how a PaaSTA service would be "
            "executed on a real cluster. It analyzes the local soa-configs and constructs "
            "a 'docker run' invocation to match. This is useful as a type of end-to-end "
            "test, ensuring that a service will work inside the docker container as expected. "
            "Additionally, 'local-run' can healthcheck a service per the configured healthcheck.\n\n"
            "Alternatively, 'local-run' can be used with --pull, which will pull the currently "
            "deployed docker image and use it, instead of building one."
        ),
        epilog=(
            "Note: 'paasta local-run' uses docker commands, which may require elevated privileges "
            "to run (sudo)."
        ),
    )
    list_parser.add_argument(
        '-s', '--service',
        help='The name of the service you wish to inspect',
    ).completer = lazy_choices_completer(list_services)
    list_parser.add_argument(
        '-c', '--cluster',
        help=(
            "The name of the cluster you wish to simulate. "
            "If omitted, uses the default cluster defined in the paasta local-run configs"
        ),
    ).completer = lazy_choices_completer(list_clusters)
    list_parser.add_argument(
        '-y', '--yelpsoa-config-root',
        dest='yelpsoa_config_root',
        help='A directory from which yelpsoa-configs should be read from',
        default=DEFAULT_SOA_DIR,
    )
    # --build / --pull / --dry-run are mutually exclusive ways of choosing the
    # image to run; all three store into args.action (default: 'build').
    build_pull_group = list_parser.add_mutually_exclusive_group()
    build_pull_group.add_argument(
        '-b', '--build',
        help=(
            "Build the docker image to run from scratch using the local Makefile's "
            "'cook-image' target. Defaults to try to use the local Makefile if present."
        ),
        action='store_const',
        const='build',
        dest='action',
    )
    build_pull_group.add_argument(
        '-p', '--pull',
        help=(
            "Pull the docker image marked for deployment from the Docker registry and "
            "use that for the local-run. This is the opposite of --build."
        ),
        action='store_const',
        const='pull',
        dest='action',
    )
    build_pull_group.add_argument(
        '-d', '--dry-run',
        help='Shows the arguments supplied to docker as json.',
        action='store_const',
        const='dry_run',
        dest='action',
    )
    build_pull_group.set_defaults(action='build')
    list_parser.add_argument(
        '--json-dict',
        help='When running dry run, output the arguments as a json dict',
        action='store_true',
        dest='dry_run_json_dict',
    )
    list_parser.add_argument(
        '-C', '--cmd',
        help=(
            'Run Docker container with particular command, '
            'for example: "bash". By default will use the command or args specified by the '
            'soa-configs or what was specified in the Dockerfile'
        ),
        required=False,
        default=None,
    )
    list_parser.add_argument(
        '-i', '--instance',
        help=("Simulate a docker run for a particular instance of the service, like 'main' or 'canary'"),
        required=False,
        default=None,
    ).completer = lazy_choices_completer(list_instances)
    # NOTE(review): default=True makes this store_true flag a no-op (verbose is
    # always True); confirm whether the default was meant to be False.
    list_parser.add_argument(
        '-v', '--verbose',
        help='Show Docker commands output',
        action='store_true',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-I', '--interactive',
        help=(
            'Run container in interactive mode. If interactive is set the default command will be "bash" '
            'unless otherwise set by the "--cmd" flag'
        ),
        action='store_true',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '-k', '--no-healthcheck',
        help='Disable simulated healthcheck',
        dest='healthcheck',
        action='store_false',
        required=False,
        default=True,
    )
    list_parser.add_argument(
        '-t', '--healthcheck-only',
        help='Terminates container after healthcheck (exits with status code 0 on success, 1 otherwise)',
        dest='healthcheck_only',
        action='store_true',
        required=False,
        default=False,
    )
    # user_port defaults to False (falsy) rather than None; run_docker_container
    # only checks truthiness, so any explicit port number enables the branch.
    list_parser.add_argument(
        '-o', '--port',
        help='Specify a port number to use. If not set, a random non-conflicting port will be found.',
        type=int,
        dest='user_port',
        required=False,
        default=False,
    )
    list_parser.add_argument(
        '--vault-auth-method',
        help='Override how we auth with vault, defaults to token if not present',
        type=str,
        dest='vault_auth_method',
        required=False,
        default='token',
        choices=['token', 'ldap'],
    )
    list_parser.add_argument(
        '--vault-token-file',
        help='Override vault token file, defaults to /root/.vault-token',
        type=str,
        dest='vault_token_file',
        required=False,
        default='/var/spool/.paasta_vault_token',
    )
    list_parser.add_argument(
        '--skip-secrets',
        help='Skip decrypting secrets, useful if running non-interactively',
        dest='skip_secrets',
        required=False,
        action='store_true',
        default=False,
    )
    list_parser.set_defaults(command=paasta_local_run)
def get_container_name():
    """Return a per-invocation container name: paasta_local_run_<user>_<rand>."""
    suffix = randint(1, 999999)
    return 'paasta_local_run_%s_%s' % (get_username(), suffix)
def get_docker_run_cmd(
    memory, chosen_port, container_port, container_name, volumes, env, interactive,
    docker_hash, command, net, docker_params, detach,
):
    """Build the argv list for a 'paasta_docker_wrapper run' invocation.

    Pure function: mirrors the docker run arguments PaaSTA would generate for
    this container. Returns a list of strings suitable for exec or _run.
    """
    cmd = ['paasta_docker_wrapper', 'run']
    # Environment variables, in the mapping's iteration order.
    for name, value in env.items():
        cmd.extend(['--env', '%s=%s' % (name, value)])
    cmd.append('--memory=%dm' % memory)
    # Extra docker parameters from soa-configs (ulimits, caps, labels, ...).
    cmd.extend('--%s=%s' % (param['key'], param['value']) for param in docker_params)
    if net == 'bridge':
        if container_port is not None:
            cmd.append('--publish=%d:%d' % (chosen_port, container_port))
    elif net == 'host':
        cmd.append('--net=host')
    cmd.append('--name=%s' % container_name)
    cmd.extend('--volume=%s' % volume for volume in volumes)
    if interactive:
        cmd.append('--interactive=true')
        if sys.stdout.isatty():
            cmd.append('--tty=true')
    elif detach:
        cmd.append('--detach=true')
    cmd.append('%s' % docker_hash)
    if command:
        if isinstance(command, str):
            # A string command runs under a shell, like docker's CMD form.
            cmd.extend(('sh', '-c', command))
        else:
            cmd.extend(command)
    return cmd
class LostContainerException(Exception):
    """Raised when the container we just launched cannot be found again."""
    pass
def docker_pull_image(docker_url):
    """Pull an image via ``docker pull``. Uses the actual pull command instead of the python
    bindings due to the docker auth/registry transition. Once we are past Docker 1.6
    we can use better credential management, but for now this function assumes the
    user running the command has already been authorized for the registry.

    Exits the process with docker's return code when the pull fails.
    """
    paasta_print("Please wait while the image (%s) is pulled (times out after 30m)..." % docker_url, file=sys.stderr)
    # The host-wide lock file serializes concurrent local-runs so they don't
    # race on the same pull; the flock itself times out after 30 minutes.
    # Both files are context-managed (the original leaked the devnull handle).
    with open(os.devnull, 'wb') as devnull:
        with open('/tmp/paasta-local-run-pull.lock', 'w') as f:
            with timed_flock(f, seconds=1800):
                ret, output = _run('docker pull %s' % docker_url, stream=True, stdin=devnull)
    if ret != 0:
        paasta_print(
            "\nPull failed. Are you authorized to run docker commands?",
            file=sys.stderr,
        )
        sys.exit(ret)
def get_container_id(docker_client, container_name):
    """Use 'docker_client' to find the container we started, identifiable by
    its 'container_name'. If we can't find the id, raise
    LostContainerException.
    """
    containers = docker_client.containers(all=False)
    # Docker prefixes names with '/' in its API listing.
    wanted = '/%s' % container_name
    for candidate in containers:
        if wanted in candidate.get('Names', []):
            return candidate.get('Id')
    raise LostContainerException(
        "Can't find the container I just launched so I can't do anything else.\n"
        "Try docker 'ps --all | grep %s' to see where it went.\n"
        "Here were all the containers:\n"
        "%s" % (container_name, containers),
    )
def _cleanup_container(docker_client, container_id):
    """Stop and remove a finished local-run container.

    Warns loudly when the container was OOM-killed. Cleanup is best effort:
    a docker APIError is reported to the user but not re-raised.
    """
    oom_killed = docker_client.inspect_container(container_id)['State'].get('OOMKilled', False)
    if oom_killed:
        paasta_print(
            PaastaColors.red(
                "Your service was killed by the OOM Killer!\n"
                "You've exceeded the memory limit, try increasing the mem parameter in your soa_configs",
            ),
            file=sys.stderr,
        )
    paasta_print("\nStopping and removing the old container %s..." % container_id)
    paasta_print("(Please wait or you may leave an orphaned container.)")
    try:
        docker_client.stop(container_id)
        docker_client.remove_container(container_id)
    except errors.APIError:
        paasta_print(PaastaColors.yellow(
            "Could not clean up container! You should stop and remove container '%s' manually." % container_id,
        ))
    else:
        paasta_print("...done")
def get_local_run_environment_vars(instance_config, port0, framework):
    """Returns a dictionary of environment variables to simulate what would be available to
    a paasta service running in a container"""
    hostname = socket.getfqdn()
    docker_image = instance_config.get_docker_image()
    if docker_image == '':
        # In a local_run environment, the docker_image may not be available
        # so we can fall back to the injected DOCKER_TAG per the paasta contract
        docker_image = os.environ['DOCKER_TAG']
    fake_taskid = uuid.uuid4()
    env = {
        'HOST': hostname,
        'MESOS_SANDBOX': '/mnt/mesos/sandbox',
        'MESOS_CONTAINER_NAME': 'localrun-%s' % fake_taskid,
        'MESOS_TASK_ID': str(fake_taskid),
        'PAASTA_DOCKER_IMAGE': docker_image,
    }
    if framework == 'marathon':
        # Simulate the env marathon would inject for a single-port app.
        port_str = str(port0)
        env.update({
            'MARATHON_PORT': port_str,
            'MARATHON_PORT0': port_str,
            'MARATHON_PORTS': port_str,
            'MARATHON_PORT_%d' % instance_config.get_container_port(): port_str,
            'MARATHON_APP_VERSION': 'simulated_marathon_app_version',
            'MARATHON_APP_RESOURCE_CPUS': str(instance_config.get_cpus()),
            'MARATHON_APP_DOCKER_IMAGE': docker_image,
            'MARATHON_APP_RESOURCE_MEM': str(instance_config.get_mem()),
            'MARATHON_APP_RESOURCE_DISK': str(instance_config.get_disk()),
            'MARATHON_APP_LABELS': "",
            'MARATHON_APP_ID': '/simulated_marathon_app_id',
            'MARATHON_HOST': hostname,
        })
    elif framework == 'chronos':
        # Simulate the env chronos would inject for a scheduled job.
        env.update({
            'CHRONOS_RESOURCE_DISK': str(instance_config.get_disk()),
            'CHRONOS_RESOURCE_CPU': str(instance_config.get_cpus()),
            'CHRONOS_RESOURCE_MEM': str(instance_config.get_mem()),
            'CHRONOS_JOB_OWNER': 'simulated-owner',
            'CHRONOS_JOB_RUN_TIME': str(int(time.time())),
            'CHRONOS_JOB_NAME': "%s %s" % (instance_config.get_service(), instance_config.get_instance()),
            'CHRONOS_JOB_RUN_ATTEMPT': str(0),
            'mesos_task_id': 'ct:simulated-task-id',
        })
    return env
def check_if_port_free(port):
    """Return True when 'port' can be bound on 127.0.0.1 (i.e. is free)."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        try:
            probe.bind(("127.0.0.1", port))
        except socket.error:
            # Something else already holds the port.
            return False
        return True
    finally:
        probe.close()
def decrypt_secret_environment_variables(
    secret_provider_name,
    environment,
    soa_dir,
    service_name,
    cluster_name,
    secret_provider_kwargs,
):
    """Decrypt any secret references found in 'environment'.

    Returns a dict mapping each secret-ref variable to its decrypted value.
    When the environment contains no secret references the result is empty
    and no secret provider is ever constructed.
    """
    secret_env_vars = {
        name: value
        for name, value in environment.items()
        if is_secret_ref(value)
    }
    if not secret_env_vars:
        return {}
    secret_provider = get_secret_provider(
        secret_provider_name=secret_provider_name,
        soa_dir=soa_dir,
        service_name=service_name,
        cluster_names=[cluster_name],
        secret_provider_kwargs=secret_provider_kwargs,
    )
    return secret_provider.decrypt_environment(secret_env_vars)
def run_docker_container(
    docker_client,
    service,
    instance,
    docker_hash,
    volumes,
    interactive,
    command,
    healthcheck,
    healthcheck_only,
    user_port,
    instance_config,
    secret_provider_name,
    soa_dir=DEFAULT_SOA_DIR,
    dry_run=False,
    json_dict=False,
    framework=None,
    secret_provider_kwargs=None,
    skip_secrets=False,
):
    """docker-py has issues running a container with a TTY attached, so for
    consistency we execute 'docker run' directly in both interactive and
    non-interactive modes.

    In non-interactive mode when the run is complete, stop the container and
    remove it (with docker-py).

    :returns: the container's exit code (0 for dry runs). May instead call
        sys.exit() (startup failure, --healthcheck-only) or replace the
        process entirely via execlp() (interactive / no simulated healthcheck).
    """
    # The default used to be a shared mutable dict ({}); use None as the
    # sentinel to avoid cross-call state.
    if secret_provider_kwargs is None:
        secret_provider_kwargs = {}
    if user_port:
        if check_if_port_free(user_port):
            chosen_port = user_port
        else:
            paasta_print(
                PaastaColors.red(
                    "The chosen port is already in use!\n"
                    "Try specifying another one, or omit (--port|-o) and paasta will find a free one for you",
                ),
                file=sys.stderr,
            )
            sys.exit(1)
    else:
        chosen_port = pick_random_port(service)
    environment = instance_config.get_env_dictionary()
    if not skip_secrets:
        # Decrypt SECRET(...) refs from soa-configs and overlay them.
        secret_environment = decrypt_secret_environment_variables(
            secret_provider_name=secret_provider_name,
            environment=environment,
            soa_dir=soa_dir,
            service_name=service,
            cluster_name=instance_config.cluster,
            secret_provider_kwargs=secret_provider_kwargs,
        )
        environment.update(secret_environment)
    local_run_environment = get_local_run_environment_vars(
        instance_config=instance_config,
        port0=chosen_port,
        framework=framework,
    )
    environment.update(local_run_environment)
    net = instance_config.get_net()
    memory = instance_config.get_mem()
    container_name = get_container_name()
    docker_params = instance_config.format_docker_parameters()
    healthcheck_mode, healthcheck_data = get_healthcheck_for_instance(
        service, instance, instance_config, chosen_port, soa_dir=soa_dir,
    )
    # Decide whether the container port needs publishing at all.
    if healthcheck_mode is None:
        container_port = None
        interactive = True
    elif not user_port and not healthcheck and not healthcheck_only:
        container_port = None
    else:
        try:
            container_port = instance_config.get_container_port()
        except AttributeError:
            container_port = None
    simulate_healthcheck = (healthcheck_only or healthcheck) and healthcheck_mode is not None
    docker_run_args = dict(
        memory=memory,
        chosen_port=chosen_port,
        container_port=container_port,
        container_name=container_name,
        volumes=volumes,
        env=environment,
        interactive=interactive,
        detach=simulate_healthcheck,
        docker_hash=docker_hash,
        command=command,
        net=net,
        docker_params=docker_params,
    )
    docker_run_cmd = get_docker_run_cmd(**docker_run_args)
    joined_docker_run_cmd = ' '.join(docker_run_cmd)

    if dry_run:
        if json_dict:
            paasta_print(json.dumps(docker_run_args))
        else:
            paasta_print(json.dumps(docker_run_cmd))
        return 0
    else:
        paasta_print('Running docker command:\n%s' % PaastaColors.grey(joined_docker_run_cmd))

    if interactive or not simulate_healthcheck:
        # NOTE: This immediately replaces us with the docker run cmd. Docker
        # run knows how to clean up the running container in this situation.
        execlp('paasta_docker_wrapper', *docker_run_cmd)
        # For testing, when execlp is patched out and doesn't replace us, we
        # still want to bail out.
        return 0

    container_started = False
    container_id = None
    try:
        (returncode, output) = _run(docker_run_cmd)
        if returncode != 0:
            # Pass the message as separate arguments so sep='\n' actually
            # produces one line each (previously the literals were
            # concatenated into a single argument, rendering on one line).
            paasta_print(
                'Failure trying to start your container!',
                'Returncode: %d' % returncode,
                'Output:',
                '%s' % output,
                '',
                'Fix that problem and try again.',
                'http://y/paasta-troubleshooting',
                sep='\n',
            )
            # Container failed to start so no need to cleanup; just bail.
            sys.exit(1)
        container_started = True
        container_id = get_container_id(docker_client, container_name)
        paasta_print('Found our container running with CID %s' % container_id)

        if simulate_healthcheck:
            healthcheck_result = simulate_healthcheck_on_service(
                instance_config=instance_config,
                docker_client=docker_client,
                container_id=container_id,
                healthcheck_mode=healthcheck_mode,
                healthcheck_data=healthcheck_data,
                healthcheck_enabled=healthcheck,
            )

        def _output_stdout_and_exit_code():
            # Report the container's exit code and dump its combined output.
            returncode = docker_client.inspect_container(container_id)['State']['ExitCode']
            # (fixed a stray ')' that used to trail the exit code)
            paasta_print('Container exited: %d' % returncode)
            paasta_print('Here is the stdout and stderr:\n\n')
            paasta_print(
                docker_client.attach(container_id, stderr=True, stream=False, logs=True),
            )

        if healthcheck_only:
            if container_started:
                _output_stdout_and_exit_code()
                _cleanup_container(docker_client, container_id)
            if healthcheck_mode is None:
                paasta_print('--healthcheck-only, but no healthcheck is defined for this instance!')
                sys.exit(1)
            elif healthcheck_result is True:
                sys.exit(0)
            else:
                sys.exit(1)

        running = docker_client.inspect_container(container_id)['State']['Running']
        if running:
            paasta_print('Your service is now running! Tailing stdout and stderr:')
            for line in docker_client.attach(container_id, stderr=True, stream=True, logs=True):
                paasta_print(line)
        else:
            _output_stdout_and_exit_code()
            returncode = 3
    except KeyboardInterrupt:
        returncode = 3

    # Cleanup if the container exits on its own or interrupted.
    if container_started:
        returncode = docker_client.inspect_container(container_id)['State']['ExitCode']
        _cleanup_container(docker_client, container_id)
    return returncode
def command_function_for_framework(framework):
    """
    Given a framework, return a function that appropriately formats
    the command to be run.
    """
    def _passthrough(cmd):
        # Marathon and adhoc commands run verbatim.
        return cmd

    def _chronos(cmd):
        # Chronos commands may embed time variables that need interpolation.
        return parse_time_variables(cmd, datetime.datetime.now())

    formatters = {
        'marathon': _passthrough,
        'chronos': _chronos,
        'adhoc': _passthrough,
    }
    try:
        return formatters[framework]
    except KeyError:
        raise ValueError("Invalid Framework")
def configure_and_run_docker_container(
    docker_client,
    docker_hash,
    service,
    instance,
    cluster,
    system_paasta_config,
    args,
    pull_image=False,
    dry_run=False,
):
    """
    Run Docker container by image hash with args set in command line.
    Function prints the output of run command in stdout.

    :param docker_client: docker-py client used to inspect/clean up the run.
    :param docker_hash: image tag/URL to run; None means resolve from the
        service's deployments via soa-configs.
    :param pull_image: when True, docker-pull the resolved image first.
    :returns: exit-code style int (0 success, 1 config error), or whatever
        run_docker_container returns.
    """
    if instance is None and args.healthcheck_only:
        paasta_print(
            "With --healthcheck-only, --instance MUST be provided!",
            file=sys.stderr,
        )
        return 1
    if instance is None and not sys.stdin.isatty():
        paasta_print(
            "--instance and --cluster must be specified when using paasta local-run without a tty!",
            file=sys.stderr,
        )
        return 1
    soa_dir = args.yelpsoa_config_root
    volumes = list()
    load_deployments = docker_hash is None or pull_image
    interactive = args.interactive
    try:
        if instance is None:
            # No instance given: fall back to an ad-hoc interactive session.
            instance_type = 'adhoc'
            instance = 'interactive'
            instance_config = get_default_interactive_config(
                service=service,
                cluster=cluster,
                soa_dir=soa_dir,
                load_deployments=load_deployments,
            )
            interactive = True
        else:
            instance_type = validate_service_instance(service, instance, cluster, soa_dir)
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
            )
    except NoConfigurationForServiceError as e:
        paasta_print(str(e), file=sys.stderr)
        return 1
    except NoDeploymentsAvailable:
        paasta_print(
            PaastaColors.red(
                "Error: No deployments.json found in %(soa_dir)s/%(service)s."
                "You can generate this by running:"
                "generate_deployments_for_service -d %(soa_dir)s -s %(service)s" % {
                    'soa_dir': soa_dir,
                    'service': service,
                },
            ),
            sep='\n',
            file=sys.stderr,
        )
        return 1
    if docker_hash is None:
        # Resolve the image marked for deployment from soa-configs.
        try:
            docker_url = instance_config.get_docker_url()
        except NoDockerImageError:
            paasta_print(
                PaastaColors.red(
                    "Error: No sha has been marked for deployment for the %s deploy group.\n"
                    "Please ensure this service has either run through a jenkins pipeline "
                    "or paasta mark-for-deployment has been run for %s\n" % (
                        instance_config.get_deploy_group(), service,
                    ),
                ),
                sep='',
                file=sys.stderr,
            )
            return 1
        docker_hash = docker_url
        if pull_image:
            docker_pull_image(docker_url)
    # Mount only host paths that actually exist; warn about the rest.
    for volume in instance_config.get_volumes(system_paasta_config.get_volumes()):
        if os.path.exists(volume['hostPath']):
            volumes.append('%s:%s:%s' % (volume['hostPath'], volume['containerPath'], volume['mode'].lower()))
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Warning: Path %s does not exist on this host. Skipping this binding." % volume['hostPath'],
                ),
            )
    # Command precedence: interactive default 'bash' > --cmd > soa-configs
    # cmd (framework-formatted) > soa-configs args.
    if interactive is True and args.cmd is None:
        command = 'bash'
    elif args.cmd:
        command = args.cmd
    else:
        command_from_config = instance_config.get_cmd()
        if command_from_config:
            command_modifier = command_function_for_framework(instance_type)
            command = command_modifier(command_from_config)
        else:
            command = instance_config.get_args()
    secret_provider_kwargs = {
        'vault_cluster_config': system_paasta_config.get_vault_cluster_config(),
        'vault_auth_method': args.vault_auth_method,
        'vault_token_file': args.vault_token_file,
    }
    return run_docker_container(
        docker_client=docker_client,
        service=service,
        instance=instance,
        docker_hash=docker_hash,
        volumes=volumes,
        interactive=interactive,
        command=command,
        healthcheck=args.healthcheck,
        healthcheck_only=args.healthcheck_only,
        user_port=args.user_port,
        instance_config=instance_config,
        soa_dir=args.yelpsoa_config_root,
        dry_run=dry_run,
        json_dict=args.dry_run_json_dict,
        framework=instance_type,
        secret_provider_name=system_paasta_config.get_secret_provider_name(),
        secret_provider_kwargs=secret_provider_kwargs,
        skip_secrets=args.skip_secrets,
    )
def docker_config_available():
    """Return True when a readable docker credentials file exists for this user.

    Checks the legacy ~/.dockercfg as well as the modern ~/.docker/config.json.
    """
    home = os.path.expanduser('~')
    candidates = (
        os.path.join(home, ".dockercfg"),              # legacy location
        os.path.join(home, ".docker", "config.json"),  # modern location
    )
    return any(
        os.path.isfile(path) and os.access(path, os.R_OK)
        for path in candidates
    )
def paasta_local_run(args):
    """Entry point for 'paasta local-run' (wired via list_parser.set_defaults).

    Builds or pulls the docker image per args.action, then delegates to
    configure_and_run_docker_container. Returns a shell-style exit code.
    May re-exec the whole process under sudo for --pull without credentials.
    """
    # Pulling usually needs docker credentials or root; re-exec under sudo.
    if args.action == 'pull' and os.geteuid() != 0 and not docker_config_available():
        paasta_print("Re-executing paasta local-run --pull with sudo..")
        os.execvp("sudo", ["sudo", "-H"] + sys.argv)
    if args.action == 'build' and not makefile_responds_to('cook-image'):
        paasta_print("A local Makefile with a 'cook-image' target is required for --build", file=sys.stderr)
        paasta_print("If you meant to pull the docker image from the registry, explicitly pass --pull", file=sys.stderr)
        return 1
    try:
        system_paasta_config = load_system_paasta_config()
    except PaastaNotConfiguredError:
        paasta_print(
            PaastaColors.yellow(
                "Warning: Couldn't load config files from '/etc/paasta'. This indicates"
                "PaaSTA is not configured locally on this host, and local-run may not behave"
                "the same way it would behave on a server configured for PaaSTA.",
            ),
            sep='\n',
        )
        # Fall back to a minimal config so local-run can still proceed.
        system_paasta_config = SystemPaastaConfig({"volumes": []}, '/etc/paasta')
    local_run_config = system_paasta_config.get_local_run_config()
    service = figure_out_service_name(args, soa_dir=args.yelpsoa_config_root)
    if args.cluster:
        cluster = args.cluster
    else:
        try:
            cluster = local_run_config['default_cluster']
        except KeyError:
            paasta_print(
                PaastaColors.red(
                    "PaaSTA on this machine has not been configured with a default cluster."
                    "Please pass one to local-run using '-c'.",
                ),
                sep='\n',
                file=sys.stderr,
            )
            return 1
    instance = args.instance
    docker_client = get_docker_client()
    if args.action == 'build':
        # Build via the service's Makefile 'cook-image' target; export the
        # tag through DOCKER_TAG so the cook step and run step agree.
        default_tag = 'paasta-local-run-%s-%s' % (service, get_username())
        tag = os.environ.get('DOCKER_TAG', default_tag)
        os.environ['DOCKER_TAG'] = tag
        pull_image = False
        cook_return = paasta_cook_image(args=None, service=service, soa_dir=args.yelpsoa_config_root)
        if cook_return != 0:
            return cook_return
    elif args.action == 'dry_run':
        pull_image = False
        tag = None
    else:
        # 'pull': resolve + pull the deployed image inside
        # configure_and_run_docker_container (tag=None triggers resolution).
        pull_image = True
        tag = None
    try:
        return configure_and_run_docker_container(
            docker_client=docker_client,
            docker_hash=tag,
            service=service,
            instance=instance,
            cluster=cluster,
            args=args,
            pull_image=pull_image,
            system_paasta_config=system_paasta_config,
            dry_run=args.action == 'dry_run',
        )
    except errors.APIError as e:
        paasta_print(
            'Can\'t run Docker container. Error: %s' % str(e),
            file=sys.stderr,
        )
        return 1
| |
# Authors: Fabian Pedregosa <fabian@fseoane.net>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
from .base import BaseEstimator, TransformerMixin, RegressorMixin
from .utils import check_array, check_consistent_length
from ._isotonic import _inplace_contiguous_isotonic_regression, _make_unique
import warnings
import math
# Public names exported by ``from <module> import *``.
__all__ = ['check_increasing', 'isotonic_regression',
           'IsotonicRegression']
def check_increasing(x, y):
    """Determine whether y is monotonically correlated with x.

    The direction is taken from the sign of the Spearman rank correlation
    between x and y.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        Training data.

    y : array-like, shape=(n_samples,)
        Training target.

    Returns
    -------
    increasing_bool : boolean
        Whether the relationship is increasing or decreasing.

    Notes
    -----
    The Spearman correlation coefficient is estimated from the data, and the
    sign of the resulting estimate is used as the result.

    In the event that the 95% confidence interval based on Fisher transform
    spans zero, a warning is raised.

    References
    ----------
    Fisher transformation. Wikipedia.
    https://en.wikipedia.org/wiki/Fisher_transformation
    """
    rho, _ = spearmanr(x, y)
    increasing_bool = rho >= 0

    # A Fisher-transform confidence interval is only defined for |rho| < 1
    # and needs more than 3 samples.
    if len(x) > 3 and rho not in (-1.0, 1.0):
        F = 0.5 * math.log((1. + rho) / (1. - rho))
        F_se = 1 / math.sqrt(len(x) - 3)

        # 95% CI corresponds to +/-1.96 standard errors around F.
        lower = math.tanh(F - 1.96 * F_se)
        upper = math.tanh(F + 1.96 * F_se)

        # Warn if the CI spans zero: the inferred direction is unreliable.
        if np.sign(lower) != np.sign(upper):
            warnings.warn("Confidence interval of the Spearman "
                          "correlation coefficient spans zero. "
                          "Determination of ``increasing`` may be "
                          "suspect.")

    return increasing_bool
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
                        increasing=True):
    """Solve the isotonic regression model::

        min sum w[i] (y[i] - y_[i]) ** 2

        subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max

    where:
        - y[i] are inputs (real numbers)
        - y_[i] are fitted
        - w[i] are optional strictly positive weights (default to 1.0)

    Read more in the :ref:`User Guide <isotonic>`.

    Parameters
    ----------
    y : iterable of floats
        The data.

    sample_weight : iterable of floats, optional, default: None
        Weights on each point of the regression.
        If None, weight is set to 1 (equal weights).

    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.

    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.

    increasing : boolean, optional, default: True
        Whether to compute ``y_`` is increasing (if set to True) or decreasing
        (if set to False)

    Returns
    -------
    y_ : list of floats
        Isotonic fit of y.

    References
    ----------
    "Active set algorithms for isotonic regression; A unifying framework"
    by Michael J. Best and Nilotpal Chakravarti, section 3.
    """
    # A decreasing fit is an increasing fit on the reversed view; the same
    # slice undoes the reversal at the end.
    order = np.s_[:] if increasing else np.s_[::-1]
    y = check_array(y, ensure_2d=False, dtype=[np.float64, np.float32])
    y = np.array(y[order], dtype=y.dtype)
    if sample_weight is None:
        sample_weight = np.ones(len(y), dtype=y.dtype)
    else:
        sample_weight = np.array(sample_weight[order], dtype=y.dtype)

    # Pool-adjacent-violators, performed in place on the contiguous copy.
    _inplace_contiguous_isotonic_regression(y, sample_weight)

    if y_min is not None or y_max is not None:
        # Older versions of np.clip don't accept None as a bound, so
        # substitute infinities for the missing side(s).
        lower = -np.inf if y_min is None else y_min
        upper = np.inf if y_max is None else y_max
        np.clip(y, lower, upper, y)
    return y[order]
class IsotonicRegression(BaseEstimator, TransformerMixin, RegressorMixin):
    """Isotonic regression model.

    The isotonic regression optimization problem is defined by::

        min sum w_i (y[i] - y_[i]) ** 2

        subject to y_[i] <= y_[j] whenever X[i] <= X[j]
        and min(y_) = y_min, max(y_) = y_max

    where:
        - ``y[i]`` are inputs (real numbers)
        - ``y_[i]`` are fitted
        - ``X`` specifies the order.
          If ``X`` is non-decreasing then ``y_`` is non-decreasing.
        - ``w[i]`` are optional strictly positive weights (default to 1.0)

    Read more in the :ref:`User Guide <isotonic>`.

    Parameters
    ----------
    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.

    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.

    increasing : boolean or string, optional, default: True
        If boolean, whether or not to fit the isotonic regression with y
        increasing or decreasing.

        The string value "auto" determines whether y should
        increase or decrease based on the Spearman correlation estimate's
        sign.

    out_of_bounds : string, optional, default: "nan"
        The ``out_of_bounds`` parameter handles how x-values outside of the
        training domain are handled.  When set to "nan", predicted y-values
        will be NaN.  When set to "clip", predicted y-values will be
        set to the value corresponding to the nearest train interval endpoint.
        When set to "raise", allow ``interp1d`` to throw ValueError.

    Attributes
    ----------
    X_min_ : float
        Minimum value of input array `X_` for left bound.

    X_max_ : float
        Maximum value of input array `X_` for right bound.

    f_ : function
        The stepwise interpolating function that covers the input domain ``X``.

    Notes
    -----
    Ties are broken using the secondary method from Leeuw, 1977.

    References
    ----------
    Isotonic Median Regression: A Linear Programming Approach
    Nilotpal Chakravarti
    Mathematics of Operations Research
    Vol. 14, No. 2 (May, 1989), pp. 303-308

    Isotone Optimization in R : Pool-Adjacent-Violators
    Algorithm (PAVA) and Active Set Methods
    Leeuw, Hornik, Mair
    Journal of Statistical Software 2009

    Correctness of Kruskal's algorithms for monotone regression with ties
    Leeuw, Psychometrica, 1977

    Examples
    --------
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.isotonic import IsotonicRegression
    >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41)
    >>> iso_reg = IsotonicRegression().fit(X.flatten(), y)
    >>> iso_reg.predict([.1, .2])
    array([1.8628..., 3.7256...])
    """
    def __init__(self, y_min=None, y_max=None, increasing=True,
                 out_of_bounds='nan'):
        """Store hyper-parameters only; all fitting state is set in fit()."""
        self.y_min = y_min
        self.y_max = y_max
        self.increasing = increasing
        self.out_of_bounds = out_of_bounds

    def _check_fit_data(self, X, y, sample_weight=None):
        """Validate that X is one-dimensional; y/sample_weight are not checked here."""
        if len(X.shape) != 1:
            raise ValueError("X should be a 1d array")

    def _build_f(self, X, y):
        """Build the f_ interp1d function."""
        # Handle the out_of_bounds argument by setting bounds_error
        if self.out_of_bounds not in ["raise", "nan", "clip"]:
            raise ValueError("The argument ``out_of_bounds`` must be in "
                             "'nan', 'clip', 'raise'; got {0}"
                             .format(self.out_of_bounds))

        bounds_error = self.out_of_bounds == "raise"
        if len(y) == 1:
            # single y, constant prediction
            self.f_ = lambda x: y.repeat(x.shape)
        else:
            # Linear interpolation over the (already isotonic) training pairs.
            self.f_ = interpolate.interp1d(X, y, kind='linear',
                                           bounds_error=bounds_error)

    def _build_y(self, X, y, sample_weight, trim_duplicates=True):
        """Build the y_ IsotonicRegression."""
        self._check_fit_data(X, y, sample_weight)

        # Determine increasing if auto-determination requested
        if self.increasing == 'auto':
            self.increasing_ = check_increasing(X, y)
        else:
            self.increasing_ = self.increasing

        # If sample_weights is passed, removed zero-weight values and clean
        # order
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False,
                                        dtype=X.dtype)
            mask = sample_weight > 0
            X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
        else:
            sample_weight = np.ones(len(y), dtype=X.dtype)

        # Sort by X then y so ties in X are handled deterministically before
        # being merged by _make_unique.
        order = np.lexsort((y, X))
        X, y, sample_weight = [array[order] for array in [X, y, sample_weight]]
        unique_X, unique_y, unique_sample_weight = _make_unique(
            X, y, sample_weight)

        # Store _X_ and _y_ to maintain backward compat during the deprecation
        # period of X_ and y_
        self._X_ = X = unique_X
        self._y_ = y = isotonic_regression(unique_y, unique_sample_weight,
                                           self.y_min, self.y_max,
                                           increasing=self.increasing_)

        # Handle the left and right bounds on X
        self.X_min_, self.X_max_ = np.min(X), np.max(X)

        if trim_duplicates:
            # Remove unnecessary points for faster prediction
            keep_data = np.ones((len(y),), dtype=bool)
            # Aside from the 1st and last point, remove points whose y values
            # are equal to both the point before and the point after it.
            keep_data[1:-1] = np.logical_or(
                np.not_equal(y[1:-1], y[:-2]),
                np.not_equal(y[1:-1], y[2:])
            )
            return X[keep_data], y[keep_data]
        else:
            # The ability to turn off trim_duplicates is only used to it make
            # easier to unit test that removing duplicates in y does not have
            # any impact the resulting interpolation function (besides
            # prediction speed).
            return X, y

    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape=(n_samples,)
            Training data.

        y : array-like, shape=(n_samples,)
            Training target.

        sample_weight : array-like, shape=(n_samples,), optional, default: None
            Weights. If set to None, all weights will be set to 1 (equal
            weights).

        Returns
        -------
        self : object
            Returns an instance of self.

        Notes
        -----
        X is stored for future use, as `transform` needs X to interpolate
        new input data.
        """
        check_params = dict(accept_sparse=False, ensure_2d=False,
                            dtype=[np.float64, np.float32])
        X = check_array(X, **check_params)
        y = check_array(y, **check_params)
        check_consistent_length(X, y, sample_weight)

        # Transform y by running the isotonic regression algorithm and
        # transform X accordingly.
        X, y = self._build_y(X, y, sample_weight)

        # It is necessary to store the non-redundant part of the training set
        # on the model to make it possible to support model persistence via
        # the pickle module as the object built by scipy.interp1d is not
        # picklable directly.
        self._necessary_X_, self._necessary_y_ = X, y

        # Build the interpolation function
        self._build_f(X, y)
        return self

    def transform(self, T):
        """Transform new data by linear interpolation

        Parameters
        ----------
        T : array-like, shape=(n_samples,)
            Data to transform.

        Returns
        -------
        T_ : array, shape=(n_samples,)
            The transformed data
        """
        # Match the training dtype when available (model may be unpickled).
        if hasattr(self, '_necessary_X_'):
            dtype = self._necessary_X_.dtype
        else:
            dtype = np.float64

        T = check_array(T, dtype=dtype, ensure_2d=False)

        if len(T.shape) != 1:
            raise ValueError("Isotonic regression input should be a 1d array")

        # Handle the out_of_bounds argument by clipping if needed
        if self.out_of_bounds not in ["raise", "nan", "clip"]:
            raise ValueError("The argument ``out_of_bounds`` must be in "
                             "'nan', 'clip', 'raise'; got {0}"
                             .format(self.out_of_bounds))

        if self.out_of_bounds == "clip":
            T = np.clip(T, self.X_min_, self.X_max_)

        res = self.f_(T)

        # on scipy 0.17, interp1d up-casts to float64, so we cast back
        res = res.astype(T.dtype)

        return res

    def predict(self, T):
        """Predict new data by linear interpolation.

        Parameters
        ----------
        T : array-like, shape=(n_samples,)
            Data to transform.

        Returns
        -------
        T_ : array, shape=(n_samples,)
            Transformed data.
        """
        return self.transform(T)

    def __getstate__(self):
        """Pickle-protocol - return state of the estimator. """
        state = super().__getstate__()
        # remove interpolation method: scipy's interp1d object is not
        # picklable, so f_ is rebuilt in __setstate__ instead.
        state.pop('f_', None)
        return state

    def __setstate__(self, state):
        """Pickle-protocol - set state of the estimator.

        We need to rebuild the interpolation function.
        """
        super().__setstate__(state)
        if hasattr(self, '_necessary_X_') and hasattr(self, '_necessary_y_'):
            self._build_f(self._necessary_X_, self._necessary_y_)

    def _more_tags(self):
        # Estimator-tags hook: this estimator consumes 1-d input arrays.
        return {'X_types': ['1darray']}
| |
# The MIT License
#
# Copyright (c) 2009-2011 the bpython authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import with_statement
import code
import codecs
import errno
import inspect
import os
import pydoc
import shlex
import subprocess
import sys
import tempfile
import textwrap
import traceback
import unicodedata
from glob import glob
from itertools import takewhile
from locale import getpreferredencoding
from socket import error as SocketError
from string import Template
from urllib import quote as urlquote
from urlparse import urlparse
from xmlrpclib import ServerProxy, Error as XMLRPCError
from pygments.token import Token
from bpython import importcompletion, inspection
from bpython._py3compat import PythonLexer, py3
from bpython.formatter import Parenthesis
from bpython.translations import _
from bpython.autocomplete import Autocomplete
# Needed for special handling of __abstractmethods__
# abc only exists since 2.6, so check both that it exists and that it's
# the one we're expecting
try:
import abc
abc.ABCMeta
has_abc = True
except (ImportError, AttributeError):
has_abc = False
class Interpreter(code.InteractiveInterpreter):
    # Wrapper around code.InteractiveInterpreter that adds an optional
    # syntax-error callback and line-number-corrected tracebacks (on
    # Python 2, runsource prepends a coding header which shifts line
    # numbers by one).
    def __init__(self, locals=None, encoding=None):
        """The syntaxerror callback can be set at any time and will be called
        on a caught syntax error. The purpose for this in bpython is so that
        the repl can be instantiated after the interpreter (which it
        necessarily must be with the current factoring) and then an exception
        callback can be added to the Interpreter instance afterwards - more
        specifically, this is so that autoindentation does not occur after a
        traceback."""
        self.encoding = encoding or sys.getdefaultencoding()
        self.syntaxerror_callback = None
        # Unfortunately code.InteractiveInterpreter is a classic class, so no super()
        code.InteractiveInterpreter.__init__(self, locals)
    if not py3:
        # Python 2 only: prepend a '# coding: ...' declaration so byte
        # strings typed at the prompt decode with the repl's encoding.
        # The extra header line is compensated for in showsyntaxerror
        # and showtraceback below.
        def runsource(self, source, filename='<input>', symbol='single',
                      encode=True):
            if encode:
                source = '# coding: %s\n%s' % (self.encoding,
                                               source.encode(self.encoding))
            return code.InteractiveInterpreter.runsource(self, source,
                                                         filename, symbol)
    def showsyntaxerror(self, filename=None):
        """Override the regular handler, the code's copied and pasted from
        code.py, as per showtraceback, but with the syntaxerror callback called
        and the text in a pretty colour."""
        if self.syntaxerror_callback is not None:
            self.syntaxerror_callback()
        type, value, sys.last_traceback = sys.exc_info()
        sys.last_type = type
        sys.last_value = value
        if filename and type is SyntaxError:
            # Work hard to stuff the correct filename in the exception
            try:
                msg, (dummy_filename, lineno, offset, line) = value.args
            except:
                # Not the format we expect; leave it alone
                # (deliberately broad: any unpacking failure means an
                # unfamiliar SyntaxError args layout)
                pass
            else:
                # Stuff in the right filename and right lineno
                if not py3:
                    # Compensate for the coding header added by runsource
                    lineno -= 1
                value = SyntaxError(msg, (filename, lineno, offset, line))
                sys.last_value = value
        list = traceback.format_exception_only(type, value)
        self.writetb(list)
    def showtraceback(self):
        """This needs to override the default traceback thing
        so it can put it into a pretty colour and maybe other
        stuff, I don't know"""
        try:
            t, v, tb = sys.exc_info()
            sys.last_type = t
            sys.last_value = v
            sys.last_traceback = tb
            tblist = traceback.extract_tb(tb)
            # Drop the frame for the interpreter's own exec/runsource call
            del tblist[:1]
            # Set the right lineno (encoding header adds an extra line)
            if not py3:
                for i, (filename, lineno, module, something) in enumerate(tblist):
                    if filename == '<input>':
                        tblist[i] = (filename, lineno - 1, module, something)
            l = traceback.format_list(tblist)
            if l:
                l.insert(0, "Traceback (most recent call last):\n")
            l[len(l):] = traceback.format_exception_only(t, v)
        finally:
            # Break the reference cycle through the traceback object
            tblist = tb = None
        # NOTE(review): if extract_tb/format_list raise, `l` is never bound
        # and this line raises NameError - presumably acceptable; confirm.
        self.writetb(l)
    def writetb(self, lines):
        """This outputs the traceback and should be overridden for anything
        fancy."""
        for line in lines:
            self.write(line)
class History(object):
    """Readline-style command history with prefix/substring search.

    ``self.index`` counts backwards from the end of ``self.entries``:
    0 means "not browsing" (editing a fresh line), 1 is the most recent
    entry, and so on.  ``saved_line`` holds the partially-typed line
    saved when the user starts browsing.
    """
    def __init__(self, entries=None, duplicates=False):
        if entries is None:
            self.entries = ['']
        else:
            self.entries = list(entries)
        self.index = 0
        self.saved_line = ''
        # If False, appending a line removes earlier copies of it.
        self.duplicates = duplicates
    def append(self, line):
        """Add `line` to the history (blank lines are ignored)."""
        line = line.rstrip('\n')
        if line:
            if not self.duplicates:
                # remove duplicates
                try:
                    while True:
                        self.entries.remove(line)
                except ValueError:
                    pass
            self.entries.append(line)
    def first(self):
        """Move back to the beginning of the history."""
        if not self.is_at_end:
            self.index = len(self.entries)
        return self.entries[-self.index]
    def back(self, start=True, search=False):
        """Move one step back in the history."""
        if not self.is_at_end:
            if search:
                self.index += self.find_partial_match_backward(self.saved_line)
            elif start:
                self.index += self.find_match_backward(self.saved_line)
            else:
                self.index += 1
        return self.entries[-self.index] if self.index else self.saved_line
    def find_match_backward(self, search_term):
        """Distance back to the previous entry starting with `search_term`
        (0 if none)."""
        filtered_list_len = len(self.entries) - self.index
        for idx, val in enumerate(reversed(self.entries[:filtered_list_len])):
            if val.startswith(search_term):
                return idx + 1
        return 0
    def find_partial_match_backward(self, search_term):
        """Distance back to the previous entry containing `search_term`
        (0 if none)."""
        filtered_list_len = len(self.entries) - self.index
        for idx, val in enumerate(reversed(self.entries[:filtered_list_len])):
            if search_term in val:
                return idx + 1
        return 0
    def forward(self, start=True, search=False):
        """Move one step forward in the history."""
        if self.index > 1:
            if search:
                self.index -= self.find_partial_match_forward(self.saved_line)
            elif start:
                self.index -= self.find_match_forward(self.saved_line)
            else:
                self.index -= 1
            return self.entries[-self.index] if self.index else self.saved_line
        else:
            self.index = 0
            return self.saved_line
    def find_match_forward(self, search_term):
        """Distance forward to the next entry starting with `search_term`
        (self.index, i.e. "jump to saved line", if none)."""
        filtered_list_len = len(self.entries) - self.index + 1
        for idx, val in enumerate(self.entries[filtered_list_len:]):
            if val.startswith(search_term):
                return idx + 1
        return self.index
    def find_partial_match_forward(self, search_term):
        """Distance forward to the next entry containing `search_term`
        (self.index if none)."""
        filtered_list_len = len(self.entries) - self.index + 1
        for idx, val in enumerate(self.entries[filtered_list_len:]):
            if search_term in val:
                return idx + 1
        return self.index
    def last(self):
        """Move forward to the end of the history."""
        if not self.is_at_start:
            self.index = 0
        return self.entries[0]
    @property
    def is_at_end(self):
        return self.index >= len(self.entries) or self.index == -1
    @property
    def is_at_start(self):
        return self.index == 0
    def enter(self, line):
        """Remember the in-progress line before browsing starts."""
        if self.index == 0:
            self.saved_line = line
    @classmethod
    def from_filename(cls, filename):
        """Alternate constructor: build a History from a history file.

        Bug fix: load() used to require an explicit encoding but was
        called here with only a filename, raising TypeError.  load()'s
        encoding now defaults to the locale's preferred encoding.
        """
        history = cls()
        history.load(filename)
        return history
    def load(self, filename, encoding=None):
        """Append every line of `filename` to the history.

        `encoding` defaults to the locale's preferred encoding (falling
        back to ASCII), matching how the repl opens the history file.
        """
        if encoding is None:
            encoding = getpreferredencoding() or "ascii"
        with codecs.open(filename, 'r', encoding, 'ignore') as hfile:
            for line in hfile:
                self.append(line)
    def reset(self):
        """Leave browsing mode and forget the saved line."""
        self.index = 0
        self.saved_line = ''
    def save(self, filename, encoding=None, lines=0):
        """Write the most recent `lines` entries (0 = all) to `filename`.

        `encoding` defaults to the locale's preferred encoding, as in
        load().
        """
        if encoding is None:
            encoding = getpreferredencoding() or "ascii"
        with codecs.open(filename, 'w', encoding, 'ignore') as hfile:
            for line in self.entries[-lines:]:
                hfile.write(line)
                hfile.write('\n')
class MatchesIterator(object):
    """Cycle through a list of completion matches.

    ``index == -1`` means no match is currently selected.
    """
    def __init__(self, current_word='', matches=()):
        # Fix: the defaults used to be shared mutable lists; immutable
        # tuples avoid the mutable-default pitfall and behave identically
        # since both are copied via list() below.
        self.current_word = current_word
        self.matches = list(matches)
        self.index = -1
    def __nonzero__(self):
        # Truthy once a match has been selected (Python 2 bool protocol).
        return self.index != -1
    # Python 3 name for the same protocol; harmless extra attribute on 2.
    __bool__ = __nonzero__
    def __iter__(self):
        return self
    def current(self):
        """Return the selected match; raise if none is selected yet."""
        if self.index == -1:
            raise ValueError('No current match.')
        return self.matches[self.index]
    def next(self):
        """Advance (wrapping around) to the next match and return it."""
        self.index = (self.index + 1) % len(self.matches)
        return self.matches[self.index]
    # Python 3 iterator-protocol name for the same method.
    __next__ = next
    def previous(self):
        """Step (wrapping around) to the previous match and return it."""
        if self.index <= 0:
            self.index = len(self.matches)
        self.index -= 1
        return self.matches[self.index]
    def update(self, current_word='', matches=()):
        """Load a new match set; a no-op when the word is unchanged so the
        current selection survives repeated completion calls."""
        if current_word != self.current_word:
            self.current_word = current_word
            self.matches = list(matches)
            self.index = -1
class Interaction(object):
    """Abstract user-interaction surface (confirmations, notifications and
    file prompts).  Concrete front ends override the three hooks."""
    def __init__(self, config, statusbar=None):
        self.config = config
        # Only bind a statusbar when one was actually supplied, so front
        # ends without one never see the attribute.
        if statusbar:
            self.statusbar = statusbar
    def confirm(self, s):
        """Ask the user the yes/no question `s`."""
        raise NotImplementedError
    def notify(self, s, n=10):
        """Show message `s` for roughly `n` time units."""
        raise NotImplementedError
    def file_prompt(self, s):
        """Prompt the user for a file name with prompt `s`."""
        raise NotImplementedError
class Repl(object):
"""Implements the necessary guff for a Python-repl-alike interface
The execution of the code entered and all that stuff was taken from the
Python code module, I had to copy it instead of inheriting it, I can't
remember why. The rest of the stuff is basically what makes it fancy.
It reads what you type, passes it to a lexer and highlighter which
returns a formatted string. This then gets passed to echo() which
parses that string and prints to the curses screen in appropriate
colours and/or bold attribute.
The Repl class also keeps two stacks of lines that the user has typed in:
One to be used for the undo feature. I am not happy with the way this
works. The only way I have been able to think of is to keep the code
that's been typed in in memory and re-evaluate it in its entirety for each
"undo" operation. Obviously this means some operations could be extremely
slow. I'm not even by any means certain that this truly represents a
genuine "undo" implementation, but it does seem to be generally pretty
effective.
If anyone has any suggestions for how this could be improved, I'd be happy
to hear them and implement it/accept a patch. I researched a bit into the
idea of keeping the entire Python state in memory, but this really seems
very difficult (I believe it may actually be impossible to work) and has
its own problems too.
The other stack is for keeping a history for pressing the up/down keys
to go back and forth between lines.
XXX Subclasses should implement echo, current_line, cw
"""
def __init__(self, interp, config):
"""Initialise the repl.
interp is a Python code.InteractiveInterpreter instance
config is a populated bpython.config.Struct.
"""
self.config = config
self.cut_buffer = ''
self.buffer = []
self.interp = interp
self.interp.syntaxerror_callback = self.clear_current_line
self.match = False
self.rl_history = History(duplicates=config.hist_duplicates)
self.s_hist = []
self.history = []
self.evaluating = False
self.completer = Autocomplete(self.interp.locals, config)
self.matches = []
self.matches_iter = MatchesIterator()
self.argspec = None
self.current_func = None
self.highlighted_paren = None
self.list_win_visible = False
self._C = {}
self.prev_block_finished = 0
self.interact = Interaction(self.config)
# previous pastebin content to prevent duplicate pastes, filled on call
# to repl.pastebin
self.prev_pastebin_content = ''
self.prev_pastebin_url = ''
# Necessary to fix mercurial.ui.ui expecting sys.stderr to have this
# attribute
self.closed = False
pythonhist = os.path.expanduser(self.config.hist_file)
if os.path.exists(pythonhist):
self.rl_history.load(pythonhist,
getpreferredencoding() or "ascii")
@property
def ps1(self):
try:
return str(sys.ps1)
except AttributeError:
return '>>> '
@property
def ps2(self):
try:
return str(sys.ps2)
except AttributeError:
return '... '
def startup(self):
"""
Execute PYTHONSTARTUP file if it exits. Call this after front
end-specific initialisation.
"""
filename = os.environ.get('PYTHONSTARTUP')
if filename and os.path.isfile(filename):
with open(filename, 'r') as f:
if py3:
self.interp.runsource(f.read(), filename, 'exec')
else:
self.interp.runsource(f.read(), filename, 'exec', encode=False)
def current_string(self, concatenate=False):
"""If the line ends in a string get it, otherwise return ''"""
tokens = self.tokenize(self.current_line())
string_tokens = list(takewhile(token_is_any_of([Token.String,
Token.Text]),
reversed(tokens)))
if not string_tokens:
return ''
opening = string_tokens.pop()[1]
string = list()
for (token, value) in reversed(string_tokens):
if token is Token.Text:
continue
elif opening is None:
opening = value
elif token is Token.String.Doc:
string.append(value[3:-3])
opening = None
elif value == opening:
opening = None
if not concatenate:
string = list()
else:
string.append(value)
if opening is None:
return ''
return ''.join(string)
def get_object(self, name):
attributes = name.split('.')
obj = eval(attributes.pop(0), self.interp.locals)
while attributes:
with inspection.AttrCleaner(obj):
obj = getattr(obj, attributes.pop(0))
return obj
def get_args(self):
"""Check if an unclosed parenthesis exists, then attempt to get the
argspec() for it. On success, update self.argspec and return True,
otherwise set self.argspec to None and return False"""
self.current_func = None
if not self.config.arg_spec:
return False
# Get the name of the current function and where we are in
# the arguments
stack = [['', 0, '']]
try:
for (token, value) in PythonLexer().get_tokens(
self.current_line()):
if token is Token.Punctuation:
if value in '([{':
stack.append(['', 0, value])
elif value in ')]}':
stack.pop()
elif value == ',':
try:
stack[-1][1] += 1
except TypeError:
stack[-1][1] = ''
stack[-1][0] = ''
elif value == ':' and stack[-1][2] == 'lambda':
stack.pop()
else:
stack[-1][0] = ''
elif (token is Token.Name or token in Token.Name.subtypes or
token is Token.Operator and value == '.'):
stack[-1][0] += value
elif token is Token.Operator and value == '=':
stack[-1][1] = stack[-1][0]
stack[-1][0] = ''
elif token is Token.Keyword and value == 'lambda':
stack.append(['', 0, value])
else:
stack[-1][0] = ''
while stack[-1][2] in '[{':
stack.pop()
_, arg_number, _ = stack.pop()
func, _, _ = stack.pop()
except IndexError:
return False
if not func:
return False
try:
f = self.get_object(func)
except (AttributeError, NameError, SyntaxError):
return False
if inspect.isclass(f):
try:
if f.__init__ is not object.__init__:
f = f.__init__
except AttributeError:
return None
self.current_func = f
self.argspec = inspection.getargspec(func, f)
if self.argspec:
self.argspec.append(arg_number)
return True
return False
def get_source_of_current_name(self):
"""Return the source code of the object which is bound to the
current name in the current input line. Return `None` if the
source cannot be found."""
try:
obj = self.current_func
if obj is None:
line = self.current_line()
if inspection.is_eval_safe_name(line):
obj = self.get_object(line)
source = inspect.getsource(obj)
except (AttributeError, IOError, NameError, TypeError):
return None
else:
return source
def complete(self, tab=False):
"""Construct a full list of possible completions and construct and
display them in a window. Also check if there's an available argspec
(via the inspect module) and bang that on top of the completions too.
The return value is whether the list_win is visible or not."""
self.docstring = None
if not self.get_args():
self.argspec = None
elif self.current_func is not None:
try:
self.docstring = pydoc.getdoc(self.current_func)
except IndexError:
self.docstring = None
else:
# pydoc.getdoc() returns an empty string if no
# docstring was found
if not self.docstring:
self.docstring = None
cw = self.cw()
cs = self.current_string()
if not cw:
self.matches = []
self.matches_iter.update()
if not (cw or cs):
return bool(self.argspec)
if cs and tab:
# Filename completion
self.matches = list()
username = cs.split(os.path.sep, 1)[0]
user_dir = os.path.expanduser(username)
for filename in glob(os.path.expanduser(cs + '*')):
if os.path.isdir(filename):
filename += os.path.sep
if cs.startswith('~'):
filename = username + filename[len(user_dir):]
self.matches.append(filename)
self.matches_iter.update(cs, self.matches)
return bool(self.matches)
elif cs:
# Do not provide suggestions inside strings, as one cannot tab
# them so they would be really confusing.
self.matches_iter.update()
return False
# Check for import completion
e = False
matches = importcompletion.complete(self.current_line(), cw)
if matches is not None and not matches:
self.matches = []
self.matches_iter.update()
return False
if matches is None:
# Nope, no import, continue with normal completion
try:
self.completer.complete(cw, 0)
except Exception:
# This sucks, but it's either that or list all the exceptions that could
# possibly be raised here, so if anyone wants to do that, feel free to send me
# a patch. XXX: Make sure you raise here if you're debugging the completion
# stuff !
e = True
else:
matches = self.completer.matches
if (self.config.complete_magic_methods and self.buffer and
self.buffer[0].startswith("class ") and
self.current_line().lstrip().startswith("def ")):
matches.extend(name for name in self.config.magic_methods
if name.startswith(cw))
if not e and self.argspec:
matches.extend(name + '=' for name in self.argspec[1][0]
if isinstance(name, basestring) and name.startswith(cw))
if py3:
matches.extend(name + '=' for name in self.argspec[1][4]
if name.startswith(cw))
# unless the first character is a _ filter out all attributes starting with a _
if not e and not cw.split('.')[-1].startswith('_'):
matches = [match for match in matches
if not match.split('.')[-1].startswith('_')]
if e or not matches:
self.matches = []
self.matches_iter.update()
if not self.argspec:
return False
else:
# remove duplicates
self.matches = sorted(set(matches))
if len(self.matches) == 1 and not self.config.auto_display_list:
self.list_win_visible = True
self.tab()
return False
self.matches_iter.update(cw, self.matches)
return True
def format_docstring(self, docstring, width, height):
"""Take a string and try to format it into a sane list of strings to be
put into the suggestion box."""
lines = docstring.split('\n')
out = []
i = 0
for line in lines:
i += 1
if not line.strip():
out.append('\n')
for block in textwrap.wrap(line, width):
out.append(' ' + block + '\n')
if i >= height:
return out
i += 1
# Drop the last newline
out[-1] = out[-1].rstrip()
return out
def next_indentation(self):
"""Return the indentation of the next line based on the current
input buffer."""
if self.buffer:
indentation = next_indentation(self.buffer[-1],
self.config.tab_length)
if indentation and self.config.dedent_after > 0:
line_is_empty = lambda line: not line.strip()
empty_lines = takewhile(line_is_empty, reversed(self.buffer))
if sum(1 for _ in empty_lines) >= self.config.dedent_after:
indentation -= 1
else:
indentation = 0
return indentation
def formatforfile(self, s):
"""Format the stdout buffer to something suitable for writing to disk,
i.e. without >>> and ... at input lines and with "# OUT: " prepended to
output lines."""
def process():
for line in s.split('\n'):
if line.startswith(self.ps1):
yield line[len(self.ps1):]
elif line.startswith(self.ps2):
yield line[len(self.ps2):]
elif line.rstrip():
yield "# OUT: %s" % (line,)
return "\n".join(process())
def write2file(self):
"""Prompt for a filename and write the current contents of the stdout
buffer to disk."""
try:
fn = self.interact.file_prompt('Save to file (Esc to cancel): ')
if not fn:
self.interact.notify("Save cancelled.")
return
except ValueError:
self.interact.notify("Save cancelled.")
return
if fn.startswith('~'):
fn = os.path.expanduser(fn)
if not fn.endswith('.py') and self.config.save_append_py:
fn = fn + '.py'
mode = 'w'
if os.path.exists(fn):
mode = self.interact.file_prompt('%s already exists. Do you want '
'to (c)ancel, (o)verwrite or '
'(a)ppend? ' % (fn, ))
if mode in ('o', 'overwrite'):
mode = 'w'
elif mode in ('a', 'append'):
mode = 'a'
else:
self.interact.notify('Save cancelled.')
return
s = self.formatforfile(self.getstdout())
try:
f = open(fn, mode)
f.write(s)
f.close()
except IOError:
self.interact.notify("Disk write error for file '%s'." % (fn, ))
else:
self.interact.notify('Saved to %s.' % (fn, ))
def pastebin(self, s=None):
"""Upload to a pastebin and display the URL in the status bar."""
if s is None:
s = self.getstdout()
if (self.config.pastebin_confirm and
not self.interact.confirm(_("Pastebin buffer? (y/N) "))):
self.interact.notify(_("Pastebin aborted"))
return
return self.do_pastebin(s)
def do_pastebin(self, s):
"""Actually perform the upload."""
if s == self.prev_pastebin_content:
self.interact.notify(_('Duplicate pastebin. Previous URL: %s') %
(self.prev_pastebin_url, ))
return self.prev_pastebin_url
if self.config.pastebin_helper:
return self.do_pastebin_helper(s)
else:
return self.do_pastebin_xmlrpc(s)
def do_pastebin_xmlrpc(self, s):
"""Upload to pastebin via XML-RPC."""
try:
pasteservice = ServerProxy(self.config.pastebin_url)
except IOError, e:
self.interact.notify(_("Pastebin error for URL '%s': %s") %
(self.config.pastebin_url, str(e)))
return
self.interact.notify(_('Posting data to pastebin...'))
try:
paste_id = pasteservice.pastes.newPaste('pycon', s, '', '', '',
self.config.pastebin_private)
except (SocketError, XMLRPCError), e:
self.interact.notify(_('Upload failed: %s') % (str(e), ) )
return
self.prev_pastebin_content = s
paste_url_template = Template(self.config.pastebin_show_url)
paste_id = urlquote(paste_id)
paste_url = paste_url_template.safe_substitute(paste_id=paste_id)
self.prev_pastebin_url = paste_url
self.interact.notify(_('Pastebin URL: %s') % (paste_url, ), 10)
return paste_url
def do_pastebin_helper(self, s):
"""Call out to helper program for pastebin upload."""
self.interact.notify(_('Posting data to pastebin...'))
try:
helper = subprocess.Popen('',
executable=self.config.pastebin_helper,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
helper.stdin.write(s.encode(getpreferredencoding()))
output = helper.communicate()[0].decode(getpreferredencoding())
paste_url = output.split()[0]
except OSError, e:
if e.errno == errno.ENOENT:
self.interact.notify(_('Upload failed: '
'Helper program not found.'))
else:
self.interact.notify(_('Upload failed: '
'Helper program could not be run.'))
return
if helper.returncode != 0:
self.interact.notify(_('Upload failed: '
'Helper program returned non-zero exit '
'status %s.' % (helper.returncode, )))
return
if not paste_url:
self.interact.notify(_('Upload failed: '
'No output from helper program.'))
return
else:
parsed_url = urlparse(paste_url)
if (not parsed_url.scheme
or any(unicodedata.category(c) == 'Cc' for c in paste_url)):
self.interact.notify(_("Upload failed: "
"Failed to recognize the helper "
"program's output as an URL."))
return
self.prev_pastebin_content = s
self.interact.notify(_('Pastebin URL: %s') % (paste_url, ), 10)
return paste_url
def push(self, s, insert_into_history=True):
"""Push a line of code onto the buffer so it can process it all
at once when a code block ends"""
s = s.rstrip('\n')
self.buffer.append(s)
if insert_into_history:
self.insert_into_history(s)
more = self.interp.runsource('\n'.join(self.buffer))
if not more:
self.buffer = []
return more
def insert_into_history(self, s):
if self.config.hist_length:
histfilename = os.path.expanduser(self.config.hist_file)
oldhistory = self.rl_history.entries
self.rl_history.entries = []
if os.path.exists(histfilename):
self.rl_history.load(histfilename, getpreferredencoding())
self.rl_history.append(s)
try:
self.rl_history.save(histfilename, getpreferredencoding(), self.config.hist_length)
except EnvironmentError, err:
self.interact.notify("Error occured while writing to file %s (%s) " % (histfilename, err.strerror))
self.rl_history.entries = oldhistory
self.rl_history.append(s)
else:
self.rl_history.append(s)
def undo(self, n=1):
"""Go back in the undo history n steps and call reeavluate()
Note that in the program this is called "Rewind" because I
want it to be clear that this is by no means a true undo
implementation, it is merely a convenience bonus."""
if not self.history:
return None
if len(self.history) < n:
n = len(self.history)
entries = list(self.rl_history.entries)
self.history = self.history[:-n]
self.reevaluate()
self.rl_history.entries = entries
def flush(self):
"""Olivier Grisel brought it to my attention that the logging
module tries to call this method, since it makes assumptions
about stdout that may not necessarily be true. The docs for
sys.stdout say:
"stdout and stderr needn't be built-in file objects: any
object is acceptable as long as it has a write() method
that takes a string argument."
So I consider this to be a bug in logging, and this is a hack
to fix it, unfortunately. I'm sure it's not the only module
to do it."""
def close(self):
"""See the flush() method docstring."""
def tokenize(self, s, newline=False):
"""Tokenizes a line of code, returning pygments tokens
with side effects/impurities:
- reads self.cpos to see what parens should be highlighted
- reads self.buffer to see what came before the passed in line
- sets self.highlighted_paren to (buffer_lineno, tokens_for_that_line) for buffer line
that should replace that line to unhighlight it
- calls reprint_line with a buffer's line's tokens and the buffer lineno that has changed
iff that line is the not the current line
"""
source = '\n'.join(self.buffer + [s])
cursor = len(source) - self.cpos
if self.cpos:
cursor += 1
stack = list()
all_tokens = list(PythonLexer().get_tokens(source))
# Unfortunately, Pygments adds a trailing newline and strings with
# no size, so strip them
while not all_tokens[-1][1]:
all_tokens.pop()
all_tokens[-1] = (all_tokens[-1][0], all_tokens[-1][1].rstrip('\n'))
line = pos = 0
parens = dict(zip('{([', '})]'))
line_tokens = list()
saved_tokens = list()
search_for_paren = True
for (token, value) in split_lines(all_tokens):
pos += len(value)
if token is Token.Text and value == '\n':
line += 1
# Remove trailing newline
line_tokens = list()
saved_tokens = list()
continue
line_tokens.append((token, value))
saved_tokens.append((token, value))
if not search_for_paren:
continue
under_cursor = (pos == cursor)
if token is Token.Punctuation:
if value in parens:
if under_cursor:
line_tokens[-1] = (Parenthesis.UnderCursor, value)
# Push marker on the stack
stack.append((Parenthesis, value))
else:
stack.append((line, len(line_tokens) - 1,
line_tokens, value))
elif value in parens.itervalues():
saved_stack = list(stack)
try:
while True:
opening = stack.pop()
if parens[opening[-1]] == value:
break
except IndexError:
# SyntaxError.. more closed parentheses than
# opened or a wrong closing paren
opening = None
if not saved_stack:
search_for_paren = False
else:
stack = saved_stack
if opening and opening[0] is Parenthesis:
# Marker found
line_tokens[-1] = (Parenthesis, value)
search_for_paren = False
elif opening and under_cursor and not newline:
if self.cpos:
line_tokens[-1] = (Parenthesis.UnderCursor, value)
else:
# The cursor is at the end of line and next to
# the paren, so it doesn't reverse the paren.
# Therefore, we insert the Parenthesis token
# here instead of the Parenthesis.UnderCursor
# token.
line_tokens[-1] = (Parenthesis, value)
(lineno, i, tokens, opening) = opening
if lineno == len(self.buffer):
self.highlighted_paren = (lineno, saved_tokens)
line_tokens[i] = (Parenthesis, opening)
else:
self.highlighted_paren = (lineno, list(tokens))
# We need to redraw a line
tokens[i] = (Parenthesis, opening)
self.reprint_line(lineno, tokens)
search_for_paren = False
elif under_cursor:
search_for_paren = False
if line != len(self.buffer):
return list()
return line_tokens
def clear_current_line(self):
"""This is used as the exception callback for the Interpreter instance.
It prevents autoindentation from occuring after a traceback."""
def send_to_external_editor(self, text, filename=None):
"""Returns modified text from an editor, or the oriignal text if editor exited with non-zero"""
editor_args = shlex.split(self.config.editor)
with tempfile.NamedTemporaryFile(suffix='.py') as temp:
temp.write(text)
temp.flush()
if subprocess.call(editor_args + [temp.name]) == 0:
with open(temp.name) as f:
return f.read()
else:
return text
def next_indentation(line, tab_length):
    """Given a code line, return the indentation of the next line."""
    expanded = line.expandtabs(tab_length)
    level = (len(expanded) - len(expanded.lstrip(' '))) // tab_length
    stripped = expanded.strip()
    if stripped.endswith(':'):
        # Opening a block: indent one level further.
        level += 1
    elif level >= 1 and stripped.startswith(('return', 'pass', 'raise',
                                             'yield')):
        # Statements that end a block: dedent one level.
        level -= 1
    return level
def next_token_inside_string(s, inside_string):
    """Given a code string s and an initial state inside_string, return
    whether the next token will be inside a string or not."""
    delimiters = ('"""', "'''", '"', "'")
    for token, value in PythonLexer().get_tokens(s):
        if token is Token.String:
            # Strip string prefixes (b/r/u) so only the quote remains.
            value = value.lstrip('bBrRuU')
            if value in delimiters:
                if not inside_string:
                    # Remember which delimiter opened the string...
                    inside_string = value
                elif value == inside_string:
                    # ...and only a matching delimiter closes it.
                    inside_string = False
    return inside_string
def split_lines(tokens):
    """Re-emit a token stream with every embedded newline yielded as its
    own separate Token.Text token."""
    for token, value in tokens:
        if not value:
            continue
        remainder = value
        while remainder:
            head, newline, remainder = remainder.partition('\n')
            yield (token, head)
            if newline:
                yield (Token.Text, newline)
def token_is(token_type):
    """Return a callable object that returns whether a token is of the
    given type `token_type`."""
    def predicate(token):
        """Walk up the token-type hierarchy looking for `token_type`."""
        candidate = token[0]
        while candidate is not token_type and candidate.parent:
            candidate = candidate.parent
        return candidate is token_type
    return predicate
def token_is_any_of(token_types):
    """Return a callable object that returns whether a token is any of the
    given types `token_types`."""
    # Materialise the predicates: under Python 3, map() returns a one-shot
    # iterator, so without this the returned callable would be exhausted
    # after its first evaluation and silently answer False from the second
    # call onwards.  Behaviour is identical on Python 2, where map()
    # already returns a list.
    is_token_types = [token_is(token_type) for token_type in token_types]
    def token_is_any_of(token):
        return any(check(token) for check in is_token_types)
    return token_is_any_of
def extract_exit_value(args):
    """Given the arguments passed to `SystemExit`, return the value that
    should be passed to `sys.exit`.
    """
    # Mirror SystemExit's constructor: no args -> None, a single arg is
    # passed through unchanged, multiple args travel as the whole tuple.
    if not args:
        return None
    return args[0] if len(args) == 1 else args
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''Unit tests for the Dataset.py module'''
import unittest
from ocw.dataset import Dataset, Bounds
import numpy as np
import datetime as dt
class TestDatasetAttributes(unittest.TestCase):
    """Verify that a Dataset hands back exactly the data it was built with."""
    def setUp(self):
        self.lat = np.arange(10, 20, 2)
        self.lon = np.arange(100, 110, 2)
        self.time = np.array([dt.datetime(2000, month, 1) for month in range(1, 13)])
        self.value = np.arange(300).reshape(12, 5, 5)
        self.variable = 'prec'
        self.name = 'foo'
        self.origin = {'path': '/a/fake/file/path'}
        self.test_dataset = Dataset(self.lat, self.lon, self.time, self.value,
                                    variable=self.variable,
                                    name=self.name,
                                    origin=self.origin)
    # NOTE(review): assertItemsEqual exists only on Python 2's unittest;
    # the Python 3 equivalent is assertCountEqual.
    def test_lats(self):
        self.assertItemsEqual(self.test_dataset.lats, self.lat)
    def test_lons(self):
        self.assertItemsEqual(self.test_dataset.lons, self.lon)
    def test_times(self):
        self.assertItemsEqual(self.test_dataset.times, self.time)
    def test_values(self):
        self.assertEqual(self.test_dataset.values.all(), self.value.all())
    def test_variable(self):
        self.assertEqual(self.test_dataset.variable, self.variable)
    def test_name(self):
        self.assertEqual(self.test_dataset.name, self.name)
    def test_origin(self):
        self.assertEqual(self.test_dataset.origin, self.origin)
class TestInvalidDatasetInit(unittest.TestCase):
    """Ensure Dataset rejects malformed lat/lon/time/value combinations."""
    def setUp(self):
        self.lat = np.arange(10, 20, 2)
        self.lon = np.arange(100, 110, 2)
        self.time = np.array([dt.datetime(2000, month, 1) for month in range(1, 13)])
        flat = np.arange(300)
        self.value = flat.reshape(12, 5, 5)
        self.values_in_wrong_order = flat.reshape(5, 5, 12)
    def _assert_rejected(self, *dataset_args):
        """Expect Dataset construction with these arguments to raise ValueError."""
        with self.assertRaises(ValueError):
            Dataset(*dataset_args)
    def test_bad_lat_shape(self):
        self.lat = np.array([[1, 2], [3, 4]])
        self._assert_rejected(self.lat, self.lon, self.time, self.value, 'prec')
    def test_bad_lon_shape(self):
        self.lon = np.array([[1, 2], [3, 4]])
        self._assert_rejected(self.lat, self.lon, self.time, self.value, 'prec')
    def test_bad_times_shape(self):
        self.time = np.array([[1, 2], [3, 4]])
        self._assert_rejected(self.lat, self.lon, self.time, self.value, 'prec')
    def test_bad_values_shape(self):
        self.value = np.array([1, 2, 3, 4, 5])
        self._assert_rejected(self.lat, self.lon, self.time, self.value, 'prec')
    def test_values_shape_mismatch(self):
        # Trimming lats makes the value grid inconsistent with the lat axis.
        self.lat = self.lat[:-2]
        self._assert_rejected(self.lat, self.lon, self.time, self.value, 'prec')
    def test_values_given_in_wrong_order(self):
        self._assert_rejected(self.lat, self.lon, self.time,
                              self.values_in_wrong_order)
    def test_lons_values_incorrectly_gridded(self):
        months = np.array([dt.datetime(2000, m, 1) for m in range(1, 13)])
        lats = np.arange(-30, 30)
        bad_lons = np.arange(360)
        values = np.arange(len(months) * len(lats) * len(bad_lons))
        values = values.reshape(len(months), len(lats), len(bad_lons))
        ds = Dataset(lats, bad_lons, months, values)
        np.testing.assert_array_equal(ds.lons, np.arange(-180, 180))
    def test_reversed_lats(self):
        ds = Dataset(self.lat[::-1], self.lon, self.time, self.value)
        np.testing.assert_array_equal(ds.lats, self.lat)
class TestDatasetFunctions(unittest.TestCase):
    """Exercise the Dataset query helpers on a small monthly 5x5 grid."""
    def setUp(self):
        self.lat = np.arange(10, 20, 2)
        self.lon = np.arange(100, 110, 2)
        self.time = np.array([dt.datetime(2000, month, 1) for month in range(1, 13)])
        self.value = np.arange(300).reshape(12, 5, 5)
        self.variable = 'prec'
        self.test_dataset = Dataset(self.lat, self.lon, self.time,
                                    self.value, self.variable)
    def test_spatial_boundaries(self):
        expected = (min(self.lat), max(self.lat), min(self.lon), max(self.lon))
        self.assertEqual(self.test_dataset.spatial_boundaries(), expected)
    def test_time_range(self):
        expected = (dt.datetime(2000, 1, 1), dt.datetime(2000, 12, 1))
        self.assertEqual(self.test_dataset.time_range(), expected)
    def test_spatial_resolution(self):
        self.assertEqual(self.test_dataset.spatial_resolution(), (2, 2))
    def test_temporal_resolution(self):
        self.assertEqual(self.test_dataset.temporal_resolution(), 'monthly')
class TestBounds(unittest.TestCase):
    """Check that Bounds setters reject inverted or out-of-range values."""
    def setUp(self):
        start = dt.datetime(2000, 1, 1)
        end = dt.datetime(2002, 1, 1)
        self.bounds = Bounds(-80, 80,    # Lats
                             -160, 160,  # Lons
                             start, end)
    def _assert_set_rejected(self, attribute, bad_value):
        """Expect assigning `bad_value` to `attribute` to raise ValueError."""
        with self.assertRaises(ValueError):
            setattr(self.bounds, attribute, bad_value)
    # --- Latitude ---
    def test_inverted_min_max_lat(self):
        self._assert_set_rejected('lat_min', 81)
        self._assert_set_rejected('lat_max', -81)
    def test_out_of_bounds_lat_min(self):
        self._assert_set_rejected('lat_min', -91)
        self._assert_set_rejected('lat_min', 91)
    def test_out_of_bounds_lat_max(self):
        self._assert_set_rejected('lat_max', -91)
        self._assert_set_rejected('lat_max', 91)
    # --- Longitude ---
    def test_inverted_max_max_lon(self):
        self._assert_set_rejected('lon_min', 161)
        self._assert_set_rejected('lon_max', -161)
    def test_out_of_bounds_lon_min(self):
        self._assert_set_rejected('lon_min', -181)
        self._assert_set_rejected('lon_min', 181)
    def test_out_of_bounds_lon_max(self):
        self._assert_set_rejected('lon_max', -181)
        self._assert_set_rejected('lon_max', 181)
    # --- Temporal ---
    def test_inverted_start_end_times(self):
        self._assert_set_rejected('start', dt.datetime(2003, 1, 1))
        self._assert_set_rejected('end', dt.datetime(1999, 1, 1))
    def test_invalid_start(self):
        self._assert_set_rejected('start', "This is not a date time object")
    def test_invalid_end(self):
        self._assert_set_rejected('end', "This is not a date time object")
# Allow running this test module directly: `python test_dataset.py`.
if __name__ == '__main__':
    unittest.main()
| |
import math
from decimal import Decimal, getcontext
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as LA
#
# Runge-Kutta IMEX methods of order 1 to 3
#
class rk_imex:
    """Additive (IMEX) Runge-Kutta integrator for u' = M_fast*u + M_slow*u.

    The fast operator `M_fast` is treated implicitly with the diagonally
    implicit tableau `A`/`b`; the slow operator `M_slow` is treated
    explicitly with the companion tableau `A_hat`/`b_hat`. Orders 1-5 are
    supported; the order-3 tableau is from Pareschi & Russo and the
    order-4/5 tableaux are the Kennedy-Carpenter ARK pairs (see the
    citations inline below).
    """
    def __init__(self, M_fast, M_slow, order):
        """Validate the operators, build the tableau pair for `order`, and
        store `M_fast`/`M_slow` as CSC sparse matrices.
        """
        assert np.shape(M_fast)[0] == np.shape(M_fast)[1], "A_fast must be square"
        assert np.shape(M_slow)[0] == np.shape(M_slow)[1], "A_slow must be square"
        assert np.shape(M_fast)[0] == np.shape(M_slow)[0], "A_fast and A_slow must be of the same size"
        assert order in [1, 2, 3, 4, 5], "Order must be between 1 and 5"
        self.order = order
        if self.order == 1:
            # Implicit/explicit Euler pair.
            self.A = np.array([[0, 0], [0, 1]])
            self.A_hat = np.array([[0, 0], [1, 0]])
            self.b = np.array([0, 1])
            self.b_hat = np.array([1, 0])
            self.nstages = 2
        elif self.order == 2:
            # Implicit/explicit midpoint pair.
            self.A = np.array([[0, 0], [0, 0.5]])
            self.A_hat = np.array([[0, 0], [0.5, 0]])
            self.b = np.array([0, 1])
            self.b_hat = np.array([0, 1])
            self.nstages = 2
        elif self.order == 3:
            # parameter from Pareschi and Russo, J. Sci. Comp. 2005
            alpha = 0.24169426078821
            beta = 0.06042356519705
            eta = 0.12915286960590
            self.A_hat = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 1.0, 0, 0], [0, 1.0 / 4.0, 1.0 / 4.0, 0]])
            self.A = np.array([[alpha, 0, 0, 0], [-alpha, alpha, 0, 0], [0, 1.0 - alpha, alpha, 0],
                               [beta, eta, 0.5 - beta - eta - alpha, alpha]])
            self.b_hat = np.array([0, 1.0 / 6.0, 1.0 / 6.0, 2.0 / 3.0])
            self.b = self.b_hat
            self.nstages = 4
        elif self.order == 4:
            self.A_hat = np.array([[0, 0, 0, 0, 0, 0],
                                   [1. / 2, 0, 0, 0, 0, 0],
                                   [13861. / 62500., 6889. / 62500., 0, 0, 0, 0],
                                   [-116923316275. / 2393684061468., -2731218467317. / 15368042101831.,
                                    9408046702089. / 11113171139209., 0, 0, 0],
                                   [-451086348788. / 2902428689909., -2682348792572. / 7519795681897.,
                                    12662868775082. / 11960479115383., 3355817975965. / 11060851509271., 0, 0],
                                   [647845179188. / 3216320057751., 73281519250. / 8382639484533.,
                                    552539513391. / 3454668386233., 3354512671639. / 8306763924573., 4040. / 17871.,
                                    0]])
            self.A = np.array([[0, 0, 0, 0, 0, 0],
                               [1. / 4, 1. / 4, 0, 0, 0, 0],
                               [8611. / 62500., -1743. / 31250., 1. / 4, 0, 0, 0],
                               [5012029. / 34652500., -654441. / 2922500., 174375. / 388108., 1. / 4, 0, 0],
                               [15267082809. / 155376265600., -71443401. / 120774400., 730878875. / 902184768.,
                                2285395. / 8070912., 1. / 4, 0],
                               [82889. / 524892., 0, 15625. / 83664., 69875. / 102672., -2260. / 8211, 1. / 4]])
            self.b = np.array([82889. / 524892., 0, 15625. / 83664., 69875. / 102672., -2260. / 8211, 1. / 4])
            self.b_hat = np.array([4586570599. / 29645900160., 0, 178811875. / 945068544., 814220225. / 1159782912.,
                                   -3700637. / 11593932., 61727. / 225920.])
            self.nstages = 6
        elif self.order == 5:
            # from Kennedy and Carpenter
            # copied from http://www.mcs.anl.gov/petsc/petsc-3.2/src/ts/impls/arkimex/arkimex.c
            self.A_hat = np.zeros((8, 8))
            # NOTE(review): the Decimal ratios below are evaluated with
            # 56-digit precision but stored into float64 arrays, so the extra
            # precision is rounded away on assignment.
            getcontext().prec = 56
            self.A_hat[1, 0] = Decimal(41.0) / Decimal(100.0)
            self.A_hat[2, 0] = Decimal(367902744464.) / Decimal(2072280473677.)
            self.A_hat[2, 1] = Decimal(677623207551.) / Decimal(8224143866563.)
            self.A_hat[3, 0] = Decimal(1268023523408.) / Decimal(10340822734521.)
            self.A_hat[3, 1] = 0.0
            self.A_hat[3, 2] = Decimal(1029933939417.) / Decimal(13636558850479.)
            self.A_hat[4, 0] = Decimal(14463281900351.) / Decimal(6315353703477.)
            self.A_hat[4, 1] = 0.0
            self.A_hat[4, 2] = Decimal(66114435211212.) / Decimal(5879490589093.)
            self.A_hat[4, 3] = Decimal(-54053170152839.) / Decimal(4284798021562.)
            self.A_hat[5, 0] = Decimal(14090043504691.) / Decimal(34967701212078.)
            self.A_hat[5, 1] = 0.0
            self.A_hat[5, 2] = Decimal(15191511035443.) / Decimal(11219624916014.)
            self.A_hat[5, 3] = Decimal(-18461159152457.) / Decimal(12425892160975.)
            self.A_hat[5, 4] = Decimal(-281667163811.) / Decimal(9011619295870.)
            self.A_hat[6, 0] = Decimal(19230459214898.) / Decimal(13134317526959.)
            self.A_hat[6, 1] = 0.0
            self.A_hat[6, 2] = Decimal(21275331358303.) / Decimal(2942455364971.)
            self.A_hat[6, 3] = Decimal(-38145345988419.) / Decimal(4862620318723.)
            self.A_hat[6, 4] = Decimal(-1.0) / Decimal(8.0)
            self.A_hat[6, 5] = Decimal(-1.0) / Decimal(8.0)
            self.A_hat[7, 0] = Decimal(-19977161125411.) / Decimal(11928030595625.)
            self.A_hat[7, 1] = 0.0
            self.A_hat[7, 2] = Decimal(-40795976796054.) / Decimal(6384907823539.)
            self.A_hat[7, 3] = Decimal(177454434618887.) / Decimal(12078138498510.)
            self.A_hat[7, 4] = Decimal(782672205425.) / Decimal(8267701900261.)
            self.A_hat[7, 5] = Decimal(-69563011059811.) / Decimal(9646580694205.)
            self.A_hat[7, 6] = Decimal(7356628210526.) / Decimal(4942186776405.)
            self.b_hat = np.zeros(8)
            self.b_hat[0] = Decimal(-872700587467.) / Decimal(9133579230613.)
            self.b_hat[1] = 0.0
            self.b_hat[2] = 0.0
            self.b_hat[3] = Decimal(22348218063261.) / Decimal(9555858737531.)
            self.b_hat[4] = Decimal(-1143369518992.) / Decimal(8141816002931.)
            self.b_hat[5] = Decimal(-39379526789629.) / Decimal(19018526304540.)
            self.b_hat[6] = Decimal(32727382324388.) / Decimal(42900044865799.)
            self.b_hat[7] = Decimal(41.0) / Decimal(200.0)
            self.A = np.zeros((8, 8))
            self.A[1, 0] = Decimal(41.) / Decimal(200.)
            self.A[1, 1] = Decimal(41.) / Decimal(200.)
            self.A[2, 0] = Decimal(41.) / Decimal(400.)
            self.A[2, 1] = Decimal(-567603406766.) / Decimal(11931857230679.)
            self.A[2, 2] = Decimal(41.) / Decimal(200.)
            self.A[3, 0] = Decimal(683785636431.) / Decimal(9252920307686.)
            self.A[3, 1] = 0.0
            self.A[3, 2] = Decimal(-110385047103.) / Decimal(1367015193373.)
            self.A[3, 3] = Decimal(41.) / Decimal(200.)
            self.A[4, 0] = Decimal(3016520224154.) / Decimal(10081342136671.)
            self.A[4, 1] = 0.0
            self.A[4, 2] = Decimal(30586259806659.) / Decimal(12414158314087.)
            self.A[4, 3] = Decimal(-22760509404356.) / Decimal(11113319521817.)
            self.A[4, 4] = Decimal(41.) / Decimal(200.)
            self.A[5, 0] = Decimal(218866479029.) / Decimal(1489978393911.)
            self.A[5, 1] = 0.0
            self.A[5, 2] = Decimal(638256894668.) / Decimal(5436446318841.)
            self.A[5, 3] = Decimal(-1179710474555.) / Decimal(5321154724896.)
            self.A[5, 4] = Decimal(-60928119172.) / Decimal(8023461067671.)
            self.A[5, 5] = Decimal(41.) / Decimal(200.)
            self.A[6, 0] = Decimal(1020004230633.) / Decimal(5715676835656.)
            self.A[6, 1] = 0.0
            self.A[6, 2] = Decimal(25762820946817.) / Decimal(25263940353407.)
            self.A[6, 3] = Decimal(-2161375909145.) / Decimal(9755907335909.)
            self.A[6, 4] = Decimal(-211217309593.) / Decimal(5846859502534.)
            self.A[6, 5] = Decimal(-4269925059573.) / Decimal(7827059040749.)
            self.A[6, 6] = Decimal(41.) / Decimal(200.)
            self.A[7, 0] = Decimal(-872700587467.) / Decimal(9133579230613.)
            self.A[7, 1] = 0.0
            self.A[7, 2] = 0.0
            self.A[7, 3] = Decimal(22348218063261.) / Decimal(9555858737531.)
            self.A[7, 4] = Decimal(-1143369518992.) / Decimal(8141816002931.)
            self.A[7, 5] = Decimal(-39379526789629.) / Decimal(19018526304540.)
            self.A[7, 6] = Decimal(32727382324388.) / Decimal(42900044865799.)
            self.A[7, 7] = Decimal(41.) / Decimal(200.)
            self.b = np.zeros(8)
            self.b[0] = Decimal(-975461918565.) / Decimal(9796059967033.)
            self.b[1] = 0.0
            self.b[2] = 0.0
            self.b[3] = Decimal(78070527104295.) / Decimal(32432590147079.)
            self.b[4] = Decimal(-548382580838.) / Decimal(3424219808633.)
            self.b[5] = Decimal(-33438840321285.) / Decimal(15594753105479.)
            self.b[6] = Decimal(3629800801594.) / Decimal(4656183773603.)
            self.b[7] = Decimal(4035322873751.) / Decimal(18575991585200.)
            self.nstages = 8
        # CSC format for efficient sparse mat-vec products and solves.
        self.M_fast = sp.csc_matrix(M_fast)
        self.M_slow = sp.csc_matrix(M_slow)
        self.ndof = np.shape(M_fast)[0]
        # Stage vectors; complex dtype so complex-valued problems also work.
        self.stages = np.zeros((self.nstages, self.ndof), dtype='complex')
    def timestep(self, u0, dt):
        """Advance `u0` by one step of size `dt` and return the new value.

        NOTE(review): `u0` is updated in place via `+=` in the final loop
        and also returned.
        """
        # Solve for stages
        for i in range(0, self.nstages):
            # Construct RHS
            rhs = np.copy(u0)
            for j in range(0, i):
                rhs += dt * self.A_hat[i, j] * (self.f_slow(self.stages[j, :])) + dt * self.A[i, j] * \
                    (self.f_fast(self.stages[j, :]))
            # Solve for stage i
            if self.A[i, i] == 0:
                # Avoid call to spsolve with identity matrix
                self.stages[i, :] = np.copy(rhs)
            else:
                self.stages[i, :] = self.f_fast_solve(rhs, dt * self.A[i, i])
        # Update
        for i in range(0, self.nstages):
            u0 += dt * self.b_hat[i] * (self.f_slow(self.stages[i, :])) + dt * self.b[i] * \
                (self.f_fast(self.stages[i, :]))
        return u0
    def f_slow(self, u):
        """Apply the explicitly-treated (slow) operator: M_slow * u."""
        return self.M_slow.dot(u)
    def f_fast(self, u):
        """Apply the implicitly-treated (fast) operator: M_fast * u."""
        return self.M_fast.dot(u)
    def f_fast_solve(self, rhs, alpha):
        """Solve (I - alpha*M_fast) * u = rhs for u."""
        L = sp.eye(self.ndof) - alpha * self.M_fast
        return LA.spsolve(L, rhs)
#
# Trapezoidal rule
#
class trapezoidal:
    """One-step theta-method integrator for u' = M*u.

    `alpha` is the implicitness weight: 0.5 gives the trapezoidal rule,
    1.0 backward Euler, 0.0 forward Euler.
    """
    def __init__(self, M, alpha=0.5):
        rows, cols = np.shape(M)[0], np.shape(M)[1]
        assert rows == cols, "Matrix M must be quadratic"
        self.Ndof = rows
        self.M = M
        self.alpha = alpha
    def timestep(self, u0, dt):
        """Advance `u0` by one step of size `dt` and return the new value."""
        identity = sp.eye(self.Ndof)
        lhs = identity - self.alpha * dt * self.M
        rhs_operator = identity + (1.0 - self.alpha) * dt * self.M
        return LA.spsolve(lhs, rhs_operator.dot(u0))
#
# A BDF-2 implicit two-step method
#
class bdf2:
    """Two-step BDF-2 implicit integrator for u' = M*u."""
    def __init__(self, M):
        shape = np.shape(M)
        assert shape[0] == shape[1], "Matrix M must be quadratic"
        self.Ndof = shape[0]
        self.M = M
    def firsttimestep(self, u0, dt):
        """Backward-Euler startup step (BDF-2 needs two previous values)."""
        system = sp.eye(self.Ndof) - dt * self.M
        return LA.spsolve(system, u0)
    def timestep(self, u0, um1, dt):
        """Advance using the current value `u0` and previous value `um1`."""
        rhs = (4.0 / 3.0) * u0 - (1.0 / 3.0) * um1
        system = sp.eye(self.Ndof) - (2.0 / 3.0) * dt * self.M
        return LA.spsolve(system, rhs)
#
# A diagonally implicit Runge-Kutta method of order 2, 3 or 4
#
class dirk:
    """Diagonally implicit Runge-Kutta (DIRK) integrator for u' = M*u.

    Supported orders: 2 (one-stage implicit midpoint), 22 (two-stage
    second-order variant), 3, 4 and 5 (Kennedy-Carpenter five-stage scheme).
    """
    def __init__(self, M, order):
        """Store the operator `M` and build the tableau for `order`."""
        assert np.shape(M)[0] == np.shape(M)[1], "Matrix M must be quadratic"
        self.Ndof = np.shape(M)[0]
        self.M = M
        self.order = order
        # Message fixed: order 5 is accepted but was missing from the text.
        assert self.order in [2, 22, 3, 4, 5], 'Order must be 2, 22, 3, 4 or 5'
        if self.order == 2:
            # Implicit midpoint rule.
            self.nstages = 1
            self.A = np.zeros((1, 1))
            self.A[0, 0] = 0.5
            self.tau = [0.5]
            self.b = [1.0]
        if self.order == 22:
            # Two-stage, second-order alternative.
            self.nstages = 2
            self.A = np.zeros((2, 2))
            self.A[0, 0] = 1.0 / 3.0
            self.A[1, 0] = 1.0 / 2.0
            self.A[1, 1] = 1.0 / 2.0
            self.tau = np.zeros(2)
            self.tau[0] = 1.0 / 3.0
            self.tau[1] = 1.0
            self.b = np.zeros(2)
            self.b[0] = 3.0 / 4.0
            self.b[1] = 1.0 / 4.0
        if self.order == 3:
            # Two-stage, third-order Gauss-type DIRK.
            self.nstages = 2
            self.A = np.zeros((2, 2))
            self.A[0, 0] = 0.5 + 1.0 / (2.0 * math.sqrt(3.0))
            self.A[1, 0] = -1.0 / math.sqrt(3.0)
            self.A[1, 1] = self.A[0, 0]
            self.tau = np.zeros(2)
            self.tau[0] = 0.5 + 1.0 / (2.0 * math.sqrt(3.0))
            self.tau[1] = 0.5 - 1.0 / (2.0 * math.sqrt(3.0))
            self.b = np.zeros(2)
            self.b[0] = 0.5
            self.b[1] = 0.5
        if self.order == 4:
            # Three-stage, fourth-order scheme (Norsett/Crouzeix type).
            self.nstages = 3
            alpha = 2.0 * math.cos(math.pi / 18.0) / math.sqrt(3.0)
            self.A = np.zeros((3, 3))
            self.A[0, 0] = (1.0 + alpha) / 2.0
            self.A[1, 0] = -alpha / 2.0
            self.A[1, 1] = self.A[0, 0]
            self.A[2, 0] = (1.0 + alpha)
            self.A[2, 1] = -(1.0 + 2.0 * alpha)
            self.A[2, 2] = self.A[0, 0]
            self.tau = np.zeros(3)
            self.tau[0] = (1.0 + alpha) / 2.0
            self.tau[1] = 1.0 / 2.0
            self.tau[2] = (1.0 - alpha) / 2.0
            self.b = np.zeros(3)
            self.b[0] = 1.0 / (6.0 * alpha * alpha)
            self.b[1] = 1.0 - 1.0 / (3.0 * alpha * alpha)
            self.b[2] = 1.0 / (6.0 * alpha * alpha)
        if self.order == 5:
            self.nstages = 5
            # From Kennedy, Carpenter "Diagonally Implicit Runge-Kutta Methods for
            # Ordinary Differential Equations. A Review"
            # NOTE: this branch does not set self.tau (unused by timestep).
            self.A = np.zeros((5, 5))
            self.A[0, 0] = 4024571134387. / 14474071345096.
            self.A[1, 0] = 9365021263232. / 12572342979331.
            self.A[1, 1] = self.A[0, 0]
            self.A[2, 0] = 2144716224527. / 9320917548702.
            self.A[2, 1] = -397905335951. / 4008788611757.
            self.A[2, 2] = self.A[0, 0]
            self.A[3, 0] = -291541413000. / 6267936762551.
            self.A[3, 1] = 226761949132. / 4473940808273.
            self.A[3, 2] = -1282248297070. / 9697416712681.
            self.A[3, 3] = self.A[0, 0]
            self.A[4, 0] = -2481679516057. / 4626464057815.
            self.A[4, 1] = -197112422687. / 6604378783090.
            self.A[4, 2] = 3952887910906. / 9713059315593.
            self.A[4, 3] = 4906835613583. / 8134926921134.
            self.A[4, 4] = self.A[0, 0]
            self.b = np.zeros(5)
            self.b[0] = -2522702558582. / 12162329469185.
            self.b[1] = 1018267903655. / 12907234417901.
            self.b[2] = 4542392826351. / 13702606430957.
            self.b[3] = 5001116467727. / 12224457745473.
            self.b[4] = 1509636094297. / 3891594770934.
        # Stage vectors; complex dtype so complex-valued problems also work.
        self.stages = np.zeros((self.nstages, self.Ndof), dtype='complex')
    def timestep(self, u0, dt):
        """Advance `u0` by one step of size `dt` and return the new value.

        `u0` itself is not modified; a new array is returned.
        """
        uend = u0
        for i in range(0, self.nstages):
            b = u0
            # Compute right hand side for this stage's implicit step
            for j in range(0, i):
                b = b + self.A[i, j] * dt * self.f(self.stages[j, :])
            # Implicit solve for current stage
            self.stages[i, :] = self.f_solve(b, dt * self.A[i, i])
            # Add contribution of current stage to final value
            uend = uend + self.b[i] * dt * self.f(self.stages[i, :])
        return uend
    def f(self, u):
        """Return f(u) = M*u."""
        return self.M.dot(u)
    def f_solve(self, b, alpha):
        """Solve (I - alpha*M)*u = b for u."""
        L = sp.eye(self.Ndof) - alpha * self.M
        return LA.spsolve(L, b)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Shorthand for the TF-Slim library used throughout this module.
slim = tf.contrib.slim
# Factory for truncated-normal weight initializers (mean 0.0, given stddev).
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs,
                      final_endpoint='Mixed_5c',
                      scope='InceptionV1'):
  """Defines the Inception V1 base architecture.
  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
      'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
      'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
    scope: Optional variable_scope.
  Returns:
    A dictionary from components of the network to the corresponding activation.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
  """
  # Collects endpoint name -> activation tensor as the network is assembled;
  # construction stops (early return) as soon as `final_endpoint` is built.
  end_points = {}
  with tf.variable_scope(scope, 'InceptionV1', [inputs]):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_initializer=trunc_normal(0.01)):
      # Default stride 1 / SAME padding; layers that downsample say so
      # explicitly below.
      with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                          stride=1, padding='SAME'):
        end_point = 'Conv2d_1a_7x7'
        net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_2a_3x3'
        net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Conv2d_2b_1x1'
        net = slim.conv2d(net, 64, [1, 1], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Conv2d_2c_3x3'
        net = slim.conv2d(net, 192, [3, 3], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_3a_3x3'
        net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        # Inception ("Mixed") blocks: four parallel branches whose outputs
        # are concatenated along the channel axis.
        end_point = 'Mixed_3b'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
          # Pre-TF-1.0 tf.concat signature: the axis argument comes first.
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_3c'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_4a_3x3'
        net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4b'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4c'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4d'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4e'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4f'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_5a_2x2'
        net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_5b'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            # NOTE(review): scope is 'Conv2d_0a_3x3' while every sibling
            # block uses '0b' for the second conv; this matches the upstream
            # slim implementation, presumably kept for checkpoint variable
            # name compatibility -- do not "fix" without remapping weights.
            branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_5c'
        with tf.variable_scope(end_point):
          with tf.variable_scope('Branch_0'):
            branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
          with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_2'):
            branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
          with tf.variable_scope('Branch_3'):
            branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1'):
  """Defines the Inception V1 architecture.
  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.
  The default image size used to train this network is 224x224.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  # Final pooling and prediction
  with tf.variable_scope(scope, 'InceptionV1', [inputs, num_classes],
                         reuse=reuse) as scope:
    # batch_norm and dropout behave differently in train vs. eval mode.
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_v1_base(inputs, scope=scope)
      with tf.variable_scope('Logits'):
        # NOTE(review): the op is average-pooling but the scope says
        # 'MaxPool_0a_7x7'; presumably kept so variable/op names match
        # existing checkpoints -- confirm before renaming.
        net = slim.avg_pool2d(net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = slim.dropout(net,
                           dropout_keep_prob, scope='Dropout_0b')
        # 1x1 conv acts as the final fully-connected classifier layer.
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          # Drop the two singleton spatial dims: [B, 1, 1, C] -> [B, C].
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Input resolution the network was designed/trained for.
inception_v1.default_image_size = 224
def inception_v1_arg_scope(weight_decay=0.00004,
                           use_batch_norm=True,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV1 arg scope.
  Note: Although the original paper didn't use batch_norm we found it useful.
  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
  Returns:
    An `arg_scope` to use for the inception v1 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  if use_batch_norm:
    normalizer_fn = slim.batch_norm
    normalizer_params = batch_norm_params
  else:
    normalizer_fn = None
    normalizer_params = {}
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=tf.nn.relu,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration placeholder for the initial Sentry models.

    Both migration steps are intentionally no-ops; only the frozen ORM
    definitions below are recorded so that later migrations can build the
    ``orm`` accessor from this snapshot.
    """

    def forwards(self, orm):
        """Apply the migration (intentionally a no-op)."""
        pass

    def backwards(self, orm):
        """Reverse the migration (intentionally a no-op)."""
        pass

    # Frozen ORM state: South's serialized snapshot of every model and field
    # this migration knows about. Values are strings that South eval()s.
    models = {
        'sentry.user': {
            'Meta': {
                'object_name': 'User',
                'db_table': "'auth_user'"
            },
            'date_joined':
            ('django.db.models.fields.DateTimeField', [], {
                'default': 'datetime.datetime.now'
            }),
            'email':
            ('django.db.models.fields.EmailField', [], {
                'max_length': '75',
                'blank': 'True'
            }),
            'first_name':
            ('django.db.models.fields.CharField', [], {
                'max_length': '30',
                'blank': 'True'
            }),
            'id': ('django.db.models.fields.AutoField', [], {
                'primary_key': 'True'
            }),
            'is_active': ('django.db.models.fields.BooleanField', [], {
                'default': 'True'
            }),
            'is_staff': ('django.db.models.fields.BooleanField', [], {
                'default': 'False'
            }),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {
                'default': 'False'
            }),
            'last_login':
            ('django.db.models.fields.DateTimeField', [], {
                'default': 'datetime.datetime.now'
            }),
            'last_name':
            ('django.db.models.fields.CharField', [], {
                'max_length': '30',
                'blank': 'True'
            }),
            'password': ('django.db.models.fields.CharField', [], {
                'max_length': '128'
            }),
            'username':
            ('django.db.models.fields.CharField', [], {
                'unique': 'True',
                'max_length': '30'
            })
        },
        'contenttypes.contenttype': {
            'Meta': {
                'ordering': "('name',)",
                'unique_together': "(('app_label', 'model'),)",
                'object_name': 'ContentType',
                'db_table': "'django_content_type'"
            },
            'app_label': ('django.db.models.fields.CharField', [], {
                'max_length': '100'
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'model': ('django.db.models.fields.CharField', [], {
                'max_length': '100'
            }),
            'name': ('django.db.models.fields.CharField', [], {
                'max_length': '100'
            })
        },
        'sentry.event': {
            'Meta': {
                'object_name': 'Event',
                'db_table': "'sentry_message'"
            },
            'checksum':
            ('django.db.models.fields.CharField', [], {
                'max_length': '32',
                'db_index': 'True'
            }),
            'class_name': (
                'django.db.models.fields.CharField', [], {
                    'db_index': 'True',
                    'max_length': '128',
                    'null': 'True',
                    'blank': 'True'
                }
            ),
            'data': ('django.db.models.fields.TextField', [], {
                'null': 'True',
                'blank': 'True'
            }),
            'datetime': (
                'django.db.models.fields.DateTimeField', [], {
                    'default': 'datetime.datetime.now',
                    'db_index': 'True'
                }
            ),
            'group': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'blank': 'True',
                    'related_name': "'message_set'",
                    'null': 'True',
                    'to': "orm['sentry.Group']"
                }
            ),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'level': (
                'django.db.models.fields.PositiveIntegerField', [], {
                    'default': '40',
                    'db_index': 'True',
                    'blank': 'True'
                }
            ),
            'logger': (
                'django.db.models.fields.CharField', [], {
                    'default': "'root'",
                    'max_length': '64',
                    'db_index': 'True',
                    'blank': 'True'
                }
            ),
            'message': ('django.db.models.fields.TextField', [], {}),
            'message_id': (
                'django.db.models.fields.CharField', [], {
                    'max_length': '32',
                    'unique': 'True',
                    'null': 'True'
                }
            ),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'to': "orm['sentry.Project']",
                    'null': 'True'
                }
            ),
            'server_name':
            ('django.db.models.fields.CharField', [], {
                'max_length': '128',
                'db_index': 'True'
            }),
            'site': (
                'django.db.models.fields.CharField', [], {
                    'max_length': '128',
                    'null': 'True',
                    'db_index': 'True'
                }
            ),
            'traceback':
            ('django.db.models.fields.TextField', [], {
                'null': 'True',
                'blank': 'True'
            }),
            'url': (
                'django.db.models.fields.URLField', [], {
                    'max_length': '200',
                    'null': 'True',
                    'blank': 'True'
                }
            ),
            'view': (
                'django.db.models.fields.CharField', [], {
                    'max_length': '200',
                    'null': 'True',
                    'blank': 'True'
                }
            )
        },
        'sentry.filtervalue': {
            'Meta': {
                'unique_together': "(('project', 'key', 'value'),)",
                'object_name': 'FilterValue'
            },
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'key': ('django.db.models.fields.CharField', [], {
                'max_length': '32'
            }),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'to': "orm['sentry.Project']",
                    'null': 'True'
                }
            ),
            'value': ('django.db.models.fields.CharField', [], {
                'max_length': '200'
            })
        },
        'sentry.group': {
            'Meta': {
                'unique_together': "(('project', 'logger', 'view', 'checksum'),)",
                'object_name': 'Group',
                'db_table': "'sentry_groupedmessage'"
            },
            'checksum':
            ('django.db.models.fields.CharField', [], {
                'max_length': '32',
                'db_index': 'True'
            }),
            'class_name': (
                'django.db.models.fields.CharField', [], {
                    'db_index': 'True',
                    'max_length': '128',
                    'null': 'True',
                    'blank': 'True'
                }
            ),
            'data': ('django.db.models.fields.TextField', [], {
                'null': 'True',
                'blank': 'True'
            }),
            'first_seen': (
                'django.db.models.fields.DateTimeField', [], {
                    'default': 'datetime.datetime.now',
                    'db_index': 'True'
                }
            ),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'last_seen': (
                'django.db.models.fields.DateTimeField', [], {
                    'default': 'datetime.datetime.now',
                    'db_index': 'True'
                }
            ),
            'level': (
                'django.db.models.fields.PositiveIntegerField', [], {
                    'default': '40',
                    'db_index': 'True',
                    'blank': 'True'
                }
            ),
            'logger': (
                'django.db.models.fields.CharField', [], {
                    'default': "'root'",
                    'max_length': '64',
                    'db_index': 'True',
                    'blank': 'True'
                }
            ),
            'message': ('django.db.models.fields.TextField', [], {}),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'to': "orm['sentry.Project']",
                    'null': 'True'
                }
            ),
            'score': ('django.db.models.fields.IntegerField', [], {
                'default': '0'
            }),
            'status': (
                'django.db.models.fields.PositiveIntegerField', [], {
                    'default': '0',
                    'db_index': 'True'
                }
            ),
            'times_seen': (
                'django.db.models.fields.PositiveIntegerField', [], {
                    'default': '1',
                    'db_index': 'True'
                }
            ),
            'traceback':
            ('django.db.models.fields.TextField', [], {
                'null': 'True',
                'blank': 'True'
            }),
            'view': (
                'django.db.models.fields.CharField', [], {
                    'max_length': '200',
                    'null': 'True',
                    'blank': 'True'
                }
            )
        },
        'sentry.messagecountbyminute': {
            'Meta': {
                'unique_together': "(('project', 'group', 'date'),)",
                'object_name': 'MessageCountByMinute'
            },
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'group':
            ('sentry.db.models.fields.FlexibleForeignKey', [], {
                'to': "orm['sentry.Group']"
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'to': "orm['sentry.Project']",
                    'null': 'True'
                }
            ),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
                'default': '0'
            })
        },
        'sentry.messagefiltervalue': {
            'Meta': {
                'unique_together': "(('project', 'key', 'value', 'group'),)",
                'object_name': 'MessageFilterValue'
            },
            'group':
            ('sentry.db.models.fields.FlexibleForeignKey', [], {
                'to': "orm['sentry.Group']"
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'key': ('django.db.models.fields.CharField', [], {
                'max_length': '32'
            }),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'to': "orm['sentry.Project']",
                    'null': 'True'
                }
            ),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
                'default': '0'
            }),
            'value': ('django.db.models.fields.CharField', [], {
                'max_length': '200'
            })
        },
        'sentry.messageindex': {
            'Meta': {
                'unique_together': "(('column', 'value', 'object_id'),)",
                'object_name': 'MessageIndex'
            },
            'column': ('django.db.models.fields.CharField', [], {
                'max_length': '32'
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {
                'max_length': '128'
            })
        },
        'sentry.project': {
            'Meta': {
                'object_name': 'Project'
            },
            'date_added':
            ('django.db.models.fields.DateTimeField', [], {
                'default': 'datetime.datetime.now'
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'name': ('django.db.models.fields.CharField', [], {
                'max_length': '200'
            }),
            'owner': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'related_name': "'owned_project_set'",
                    'null': 'True',
                    'to': "orm['sentry.User']"
                }
            ),
            'public': ('django.db.models.fields.BooleanField', [], {
                'default': 'False'
            })
        },
        'sentry.projectdomain': {
            'Meta': {
                'unique_together': "(('project', 'domain'),)",
                'object_name': 'ProjectDomain'
            },
            'domain': ('django.db.models.fields.CharField', [], {
                'max_length': '128'
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'related_name': "'domain_set'",
                    'to': "orm['sentry.Project']"
                }
            )
        },
        'sentry.projectmember': {
            'Meta': {
                'unique_together': "(('project', 'user'),)",
                'object_name': 'ProjectMember'
            },
            'date_added':
            ('django.db.models.fields.DateTimeField', [], {
                'default': 'datetime.datetime.now'
            }),
            'id':
            ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
                'primary_key': 'True'
            }),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {
                'default': 'False'
            }),
            'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
            'project': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'related_name': "'member_set'",
                    'to': "orm['sentry.Project']"
                }
            ),
            'public_key': (
                'django.db.models.fields.CharField', [], {
                    'max_length': '32',
                    'unique': 'True',
                    'null': 'True'
                }
            ),
            'secret_key': (
                'django.db.models.fields.CharField', [], {
                    'max_length': '32',
                    'unique': 'True',
                    'null': 'True'
                }
            ),
            'user': (
                'sentry.db.models.fields.FlexibleForeignKey', [], {
                    'related_name': "'project_set'",
                    'to': "orm['sentry.User']"
                }
            )
        }
    }
    # Apps whose model state is fully described ("frozen") by this migration.
    complete_apps = ['sentry']
| |
#!/usr/bin/python
import pdb
import logging
import stat
import os
import sys
import struct
import time
import uuid
import subprocess
from tempfile import NamedTemporaryFile, TemporaryFile
from ovirtsdk.api import API
from ovirtsdk.xml import params
from xml.etree import ElementTree
from time import sleep
from threading import BoundedSemaphore
from imagefactory_plugins.ovfcommon.ovfcommon import RHEVOVFPackage
# Large portions derived from dc-rhev-img from iwhd written by
# Pete Zaitcev <zaitcev@redhat.com>
# Effective UID/GID assumed when writing to the RHEV export-domain NFS share
# (see become_nfs_user below). NOTE(review): 36:36 is the customary vdsm:kvm
# id pair on RHEV hosts -- confirm for the target deployment.
NFSUID = 36
NFSGID = 36
# Borrowed from Oz by Chris Lalancette
def subprocess_check_output(*popenargs, **kwargs):
    """
    Function to call a subprocess and gather the output.

    Args:
        *popenargs: positional arguments forwarded to subprocess.Popen; the
            first one is the command argument list.
        **kwargs: keyword arguments forwarded to subprocess.Popen.
            'stdout' and 'stderr' are reserved and may not be passed.

    Returns:
        A (stdout, stderr, retcode) tuple with the raw captured output.

    Raises:
        ValueError: if 'stdout' or 'stderr' is given in kwargs.
        Exception: if the command exits with a non-zero status.
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    if 'stderr' in kwargs:
        raise ValueError('stderr argument not allowed, it will be overridden.')
    #executable_exists(popenargs[0][0])

    # NOTE: it is very, very important that we use temporary files for
    # collecting stdout and stderr here. There is a nasty bug in python
    # subprocess; if your process produces more than 64k of data on an fd that
    # is using subprocess.PIPE, the whole thing will hang. To avoid this, we
    # use temporary fds to capture the data
    stdouttmp = TemporaryFile()
    stderrtmp = TemporaryFile()
    try:
        process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp,
                                   *popenargs, **kwargs)
        process.communicate()
        retcode = process.poll()
        stdouttmp.seek(0, 0)
        stdout = stdouttmp.read()
        stderrtmp.seek(0, 0)
        stderr = stderrtmp.read()
    finally:
        # Always release the temp files, even if Popen itself raises.
        stdouttmp.close()
        stderrtmp.close()
    if retcode:
        # BUG FIX: the original "' '.join(*popenargs)" unpacked the whole
        # argument tuple into join(), which raises TypeError whenever more
        # than one positional argument is passed. Join only the command list.
        cmd = ' '.join(popenargs[0])
        raise Exception("'%s' failed(%d): %s" % (cmd, retcode, stderr), retcode)
    return (stdout, stderr, retcode)
class RHEVMHelper(object):
    """Imports and deletes VM templates on a RHEV-M server.

    Template import works by staging an OVF package onto an NFS export
    domain (acting as the NFS user) and then asking the RHEV-M API to
    import it into the master storage domain of the matching datacenter.
    """

    # Global lock serializing all ovirt SDK use (see __init__ below).
    api_connections_lock = BoundedSemaphore()

    def __init__(self, url, username, password):
        self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
        # The SDK allows only a single active connection object to be created, regardless of whether
        # or not multiple RHEVM servers are being accessed. For now we need to have a global lock,
        # create a connection object before each batch of API interactions and then disconnect it.
        self.api_details = { 'url':url, 'username':username, 'password':password }
        # TODO: When this limitation in the ovirt SDK is removed, get rid of these

    def _init_api(self):
        """Acquire the global connection lock and open an API connection."""
        self.log.debug("Doing blocking acquire() on global RHEVM API connection lock")
        self.api_connections_lock.acquire()
        self.log.debug("Got global RHEVM API connection lock")
        url = self.api_details['url']
        username = self.api_details['username']
        password = self.api_details['password']
        self.api = API(url=url, username=username, password=password, insecure=True)

    def _disconnect_api(self):
        """Disconnect the API connection (if any) and release the global lock."""
        try:
            self.log.debug("Attempting API disconnect")
            if hasattr(self, 'api') and self.api is not None:
                self.api.disconnect()
            else:
                self.log.debug("API connection was not initialized.  Will not attempt to disconnect.")
        finally:
            # Must always do this
            self.log.debug("Releasing global RHEVM API connection lock")
            self.api_connections_lock.release()

    # These are the only two genuinely public methods
    # What we create is a VM template
    def import_template(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster,
                        ovf_name = None, ovf_desc = None):
        """Import a disk image as a RHEV-M template.

        Returns the new template's UUID as a string.
        """
        if not ovf_desc:
            self.ovf_desc = "Imported by Image Factory"
        else:
            self.ovf_desc = ovf_desc
        self.log.debug("Preparing for RHEVM template import of image file (%s)" % (image_filename))
        # API lock protected action
        try:
            self._init_api()
            self.init_vm_import(image_filename, nfs_host, nfs_path, nfs_dir, cluster)
        finally:
            self._disconnect_api()
        self.ovf_name = ovf_name
        self.log.debug("Staging files")
        self.stage_files()
        self.log.debug("Moving files to final export domain location")
        self.move_files()
        self.ovf_pkg.delete()
        self.log.debug("Executing import")
        # API lock protected action
        try:
            self._init_api()
            self.execute_import()
        finally:
            self._disconnect_api()
        return str(self.ovf_pkg.tpl_uuid)

    def delete_template(self, template_uuid):
        """Delete the template with the given UUID; True if it existed."""
        # NOTE(review): unlike import_template(), this uses self.api without
        # going through _init_api()/_disconnect_api(), so it bypasses the
        # global lock and assumes a connection is already open -- confirm.
        template = self.api.templates.get(id=template_uuid)
        if template:
            template.delete()
            return True
        else:
            return False

    # Begin Nuts and Bolts
    # We don't want to run seteuid() in our main process as it will globally change the UID/GID for everything
    # OTOH, we need to be root to access our image files and temp files
    # We use stdin and Popen's preexec_fn via the helper functions below to deal with this
    def become_nfs_user(self):
        """Popen preexec_fn: drop the effective UID/GID to the NFS user."""
        os.setegid(NFSGID)
        os.seteuid(NFSUID)

    def copy_as_nfs_user(self, sourcefile, destfile):
        """Copy a file's contents to destfile with the NFS user's credentials."""
        self.log.debug("Copying (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        f = open(sourcefile,"r")
        (stdout, stderr, retcode) = subprocess_check_output([ 'dd', 'of=%s' % (destfile), 'bs=4k' ], stdin=f, preexec_fn=self.become_nfs_user)
        f.close()

    def copy_dir_as_nfs_user(self, sourcefile, destfile):
        """Recursively copy a directory as the NFS user."""
        self.log.debug("Copying directory (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        (stdout, stderr, retcode) = subprocess_check_output([ 'cp', '-r', '%s' % (sourcefile), '%s' % (destfile)], preexec_fn=self.become_nfs_user)

    def move_as_nfs_user(self, sourcefile, destfile):
        """Move a file as the NFS user."""
        self.log.debug("Moving (%s) to (%s) as nfsuser" % (sourcefile, destfile))
        (stdout, stderr, retcode) = subprocess_check_output([ 'mv', '%s' % (sourcefile), '%s' % (destfile)], preexec_fn=self.become_nfs_user)

    def mkdir_as_nfs_user(self, directory):
        """Create a directory as the NFS user."""
        self.log.debug("Making directory (%s) as nfsuser" % (directory))
        (stdout, stderr, retcode) = subprocess_check_output([ 'mkdir', '%s' % (directory)], preexec_fn=self.become_nfs_user)

    def rm_rf_as_nfs_user(self, directory):
        """Recursively delete a directory as the NFS user."""
        self.log.debug("Recursive remove of dir (%s) as nfsuser" % (directory))
        (stdout, stderr, retcode) = subprocess_check_output([ 'rm', '-rf', '%s' % (directory)], preexec_fn=self.become_nfs_user)

    def get_storage_domain(self, nfs_host, nfs_path):
        """Return the export storage domain backed by the given NFS share, or None."""
        # Find the storage domain that matches the nfs details given
        sds = self.api.storagedomains.list()
        for sd in sds:
            if sd.get_type() == "export":
                self.log.debug("Export domain: (%s)" % (sd.get_name()))
                stor = sd.get_storage()
                if (stor.get_address() == nfs_host) and (stor.get_path() == nfs_path):
                    self.log.debug("This is the right domain (%s)" % (sd.get_id()))
                    return sd
        return None

    def get_pool_id(self, sd_uuid):
        """Return the datacenter containing storage domain sd_uuid, or None."""
        # Get datacenter for a given storage domain UUID
        # This is the UUID that becomes the "StoragePoolID" in our OVF XML
        # TODO: The storagedomain object has a get_data_center() method that doesn't seem to work
        # Find out why
        dcs = self.api.datacenters.list()
        for dc in dcs:
            self.log.debug("Looking for our storage domain (%s) in data center (%s)" % (sd_uuid, dc.get_id()))
            sd = dc.storagedomains.get(id=sd_uuid)
            if sd:
                self.log.debug("This is the right datacenter (%s)" % (dc.get_id()))
                return dc
        return None

    def get_cluster_by_dc(self, poolid):
        """Return the first cluster in datacenter poolid, or None."""
        # If we have been passed "_any_" as the cluster name, we pick the first cluster that
        # matches our datacenter/pool ID
        clusters = self.api.clusters.list()
        for cluster in clusters:
            dc_id = None
            if cluster.get_data_center():
                dc_id = cluster.get_data_center().get_id()
            self.log.debug("Checking cluster (%s) with name (%s) with data center (%s)" % (cluster.get_id(), cluster.get_name(), dc_id))
            if dc_id == poolid:
                return cluster
        self.log.debug("Cannot find cluster for dc (%s)" % (poolid))
        return None

    def get_cluster_by_name(self, name):
        """Return the cluster with the given name, or None."""
        # If we have been passed a specific cluster name, we need to find that specific cluster
        clusters = self.api.clusters.list()
        for cluster in clusters:
            self.log.debug("Checking cluster (%s) with name (%s)" % (cluster.get_id(), cluster.get_name()))
            if cluster.get_name() == name:
                return cluster
        self.log.debug("Cannot find cluster named (%s)" % (name))
        return None

    def init_vm_import(self, image_filename, nfs_host, nfs_path, nfs_dir, cluster):
        """Resolve the storage domain, datacenter and cluster for an import.

        Raises:
            Exception: if no matching storage domain, datacenter or cluster
                can be found on the RHEV-M server.
        """
        # Prepare for the import of a VM
        self.image_filename = image_filename
        self.nfs_host = nfs_host
        self.nfs_path = nfs_path
        self.nfs_dir = nfs_dir
        # Sets some values used when creating XML and meta files
        self.storage_domain_object = self.get_storage_domain(nfs_host, nfs_path)
        if self.storage_domain_object:
            self.storage_domain = self.storage_domain_object.get_id()
        else:
            raise Exception("Cannot find storage domain matching NFS details given")
        self.dc_object = self.get_pool_id(self.storage_domain)
        if self.dc_object:
            # Our StoragePoolID is the UUID of the DC containing our storage domain
            self.pool_id=self.dc_object.get_id()
        else:
            raise Exception("Cannot find datacenter for our storage domain")
        if cluster == '_any_':
            self.cluster_object = self.get_cluster_by_dc(self.pool_id)
        else:
            self.cluster_object = self.get_cluster_by_name(cluster)
        if self.cluster_object:
            self.cluster = self.cluster_object.get_id()
        else:
            raise Exception("Cannot find cluster (%s)" % (cluster))

    def stage_files(self):
        """Build the OVF package for the image in a staging location."""
        # Called after init to copy files to staging location
        # This is the base dir of the export domain
        self.export_domain_dir = self.nfs_dir + "/" + self.storage_domain
        if not os.path.isdir(self.export_domain_dir):
            raise Exception("Cannot find expected export domain directory (%s) at local mount point (%s)" % (self.nfs_dir, self.storage_domain))
        self.ovf_pkg = RHEVOVFPackage(disk=self.image_filename,
                                      ovf_name=self.ovf_name,
                                      ovf_desc=self.ovf_desc)
        self.ovf_pkg.sync()

    def move_files(self):
        """Copy the staged image and OVF directories into the export domain."""
        self.final_image_dir = "%s/images/%s" % (self.export_domain_dir, str(self.ovf_pkg.img_uuid))
        self.final_ovf_dir = "%s/master/vms/%s" % (self.export_domain_dir, str(self.ovf_pkg.tpl_uuid))
        self.copy_dir_as_nfs_user(self.ovf_pkg.image_dir, self.final_image_dir)
        self.copy_dir_as_nfs_user(self.ovf_pkg.ovf_dir, self.final_ovf_dir)

    def remove_export_template(self):
        """Delete the staged template files from the export domain."""
        self.rm_rf_as_nfs_user(self.final_image_dir)
        self.rm_rf_as_nfs_user(self.final_ovf_dir)

    def execute_import(self):
        """Trigger the RHEV-M import of the staged template and wait for it.

        Raises:
            Exception: if no master storage domain is found, the import
                times out, or it ends in an unexpected state.
        """
        # We import to the master storage domain of the datacenter of which our export domain is a member
        # Got it?
        action = params.Action()
        sds = self.dc_object.storagedomains.list()
        for sd in sds:
            if sd.get_master():
                action.storage_domain=sd
        if not action.storage_domain:
            raise Exception("Could not find master storage domain for datacenter ID (%s)" % (self.dc_object.get_id()))
        action.cluster = self.cluster_object
        # At this point our freshly copied in files are discoverable via the tpl_uuid in our export domain
        template = self.storage_domain_object.templates.get(id=str(self.ovf_pkg.tpl_uuid))
        if template:
            template.import_template(action=action)
            real_template = self.api.templates.get(id=str(self.ovf_pkg.tpl_uuid))
            # Wait 5 minutes for an import to finish
            self.log.debug("Waiting for template import to complete")
            for i in range(30):
                self.log.debug("Waited %d - state (%s)" % (i*10, real_template.get_status().get_state()))
                if real_template.get_status().get_state() != 'locked':
                    break
                real_template = real_template.update()
                sleep(10)
            self.log.debug("Deleting export domain files")
            self.remove_export_template()
            final_state = real_template.get_status().get_state()
            if final_state == 'ok':
                self.log.debug("Template import completed successfully")
                return
            elif final_state == 'locked':
                raise Exception("Timed out waiting for template import to finish")
            else:
                raise Exception("Template import ended in unknown state (%s)" % (final_state))
| |
# -*- coding: utf-8 -*-
from raiden.utils import sha3
from raiden.transfer.architecture import TransitionResult
from raiden.transfer.mediated_transfer.state import TargetState
from raiden.transfer.state_change import (
Block,
ActionRouteChange,
)
from raiden.transfer.mediated_transfer.state_change import (
ActionInitTarget,
ReceiveBalanceProof,
ReceiveSecretReveal,
)
from raiden.transfer.events import (
EventTransferReceivedSuccess,
)
from raiden.transfer.mediated_transfer.events import (
ContractSendChannelClose,
ContractSendWithdraw,
EventWithdrawFailed,
EventWithdrawSuccess,
SendRevealSecret,
SendSecretRequest,
)
from raiden.transfer.mediated_transfer.mediator import (
is_safe_to_wait,
)
from raiden.transfer.state import CHANNEL_STATE_OPENED
def events_for_close(state):
    """ Emits the event for closing the netting channel if from_transfer needs
    to be settled on-chain.
    """
    transfer = state.from_transfer
    route = state.from_route

    can_wait = is_safe_to_wait(
        transfer,
        route.reveal_timeout,
        state.block_number,
    )
    have_secret = transfer.secret is not None

    if can_wait or not have_secret:
        return list()

    # Not enough blocks remain to settle off-chain; close the channel now so
    # the withdraw can happen on-chain.
    state.state = 'waiting_close'
    return [ContractSendChannelClose(route.channel_address, transfer.token)]
def events_for_withdraw(from_transfer, from_route):
    """ Withdraw from the from_channel if it is closed and the secret is known. """
    channel_closed = from_route.state != CHANNEL_STATE_OPENED
    secret_known = from_transfer.secret is not None

    if channel_closed and secret_known:
        return [ContractSendWithdraw(from_transfer, from_route.channel_address)]

    return list()
def handle_inittarget(state_change):
    """ Handle an ActionInitTarget state change: create the target state and,
    when the lock leaves enough time to settle safely, request the secret.
    """
    transfer = state_change.from_transfer
    route = state_change.from_route
    block_number = state_change.block_number

    new_state = TargetState(
        state_change.our_address,
        route,
        transfer,
        block_number,
    )

    # if there is not enough time to safely withdraw the token on-chain
    # silently let the transfer expire.
    if not is_safe_to_wait(transfer, route.reveal_timeout, block_number):
        return TransitionResult(new_state, list())

    request = SendSecretRequest(
        transfer.identifier,
        transfer.amount,
        transfer.hashlock,
        transfer.initiator,
    )
    return TransitionResult(new_state, [request])
def handle_secretreveal(state, state_change):
    """ Validate and handle a ReceiveSecretReveal state change. """
    transfer = state.from_transfer

    if sha3(state_change.secret) != transfer.hashlock:
        # TODO: event for byzantine behavior
        return TransitionResult(state, list())

    # The secret matches the hashlock: remember it and forward the reveal to
    # the payer so the lock can be unlocked along the path.
    state.state = 'reveal_secret'
    transfer.secret = state_change.secret
    reveal = SendRevealSecret(
        transfer.identifier,
        transfer.secret,
        transfer.token,
        state.from_route.node_address,
        state.our_address,
    )
    return TransitionResult(state, [reveal])
def handle_balanceproof(state, state_change):
    """ Handle a ReceiveBalanceProof state change. """
    # TODO: byzantine behavior event when the sender doesn't match
    sender_matches = state_change.node_address == state.from_route.node_address
    if sender_matches:
        state.state = 'balance_proof'
    return TransitionResult(state, list())
def handle_block(state, state_change):
    """ After Raiden learns about a new block this function must be called to
    handle expiration of the hash time lock.
    """
    if state_change.block_number > state.block_number:
        state.block_number = state_change.block_number

    # only emit the close event once
    if state.state == 'waiting_close':
        events = list()
    else:
        events = events_for_close(state)

    return TransitionResult(state, events)
def handle_routechange(state, state_change):
    """ Handle an ActionRouteChange state change. """
    new_route = state_change.route
    assert new_route.node_address == state.from_route.node_address

    # the route might be closed by another task
    state.from_route = new_route

    return TransitionResult(
        state,
        events_for_withdraw(state.from_transfer, state.from_route),
    )
def clear_if_finalized(iteration):
    """ Clear the state if the transfer was either completed or failed. """
    state = iteration.new_state
    if state is None:
        return iteration

    transfer = state.from_transfer
    lock_expired = (
        transfer.secret is None and
        state.block_number > transfer.expiration
    )

    if lock_expired:
        # The payer never revealed the secret in time: the lock is gone.
        failed = EventWithdrawFailed(
            identifier=transfer.identifier,
            hashlock=transfer.hashlock,
            reason='lock expired',
        )
        return TransitionResult(None, [failed])

    if state.state == 'balance_proof':
        # Transfer fully settled off-chain.
        transfer_success = EventTransferReceivedSuccess(
            transfer.identifier,
            transfer.amount,
            transfer.initiator,
        )
        unlock_success = EventWithdrawSuccess(
            transfer.identifier,
            transfer.hashlock,
        )
        return TransitionResult(None, [transfer_success, unlock_success])

    return iteration
def state_transition(state, state_change):
    """ State machine for the target node of a mediated transfer. """
    # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    iteration = TransitionResult(state, list())

    if state is None:
        # Only an init may create the task state.
        if isinstance(state_change, ActionInitTarget):
            iteration = handle_inittarget(state_change)
    elif state.from_transfer.secret is None:
        # Secret unknown: wait for the reveal or for the lock to expire.
        if isinstance(state_change, ReceiveSecretReveal):
            iteration = handle_secretreveal(state, state_change)
        elif isinstance(state_change, Block):
            iteration = handle_block(state, state_change)
    else:
        # Secret known: wait for the balance proof, a route change, or a
        # block forcing the on-chain path.
        if isinstance(state_change, ReceiveBalanceProof):
            iteration = handle_balanceproof(state, state_change)
        elif isinstance(state_change, ActionRouteChange):
            iteration = handle_routechange(state, state_change)
        elif isinstance(state_change, Block):
            iteration = handle_block(state, state_change)

    return clear_if_finalized(iteration)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utilities working with arbitrarily nested structures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import collections.abc
import time
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
class _CustomMapping(collections.abc.Mapping):
  """Minimal read-only mapping wrapping a dict.

  Used to exercise nest's handling of custom Mapping types. Subclasses
  ``collections.abc.Mapping``: the ``collections.Mapping`` alias used
  previously was removed in Python 3.10.
  """

  def __init__(self, *args, **kwargs):
    self._wrapped = dict(*args, **kwargs)

  def __getitem__(self, key):
    return self._wrapped[key]

  def __iter__(self):
    return iter(self._wrapped)

  def __len__(self):
    return len(self._wrapped)
class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
if attr:
class BadAttr(object):
"""Class that has a non-iterable __attrs_attrs__."""
__attrs_attrs__ = None
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
@attr.s
class UnsortedSampleAttr(object):
field3 = attr.ib()
field1 = attr.ib()
field2 = attr.ib()
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsFlattenAndPack(self):
if attr is None:
self.skipTest("attr module is unavailable.")
field_values = [1, 2]
sample_attr = NestTest.SampleAttr(*field_values)
self.assertFalse(nest._is_attrs(field_values))
self.assertTrue(nest._is_attrs(sample_attr))
flat = nest.flatten(sample_attr)
self.assertEqual(field_values, flat)
restructured_from_flat = nest.pack_sequence_as(sample_attr, flat)
self.assertIsInstance(restructured_from_flat, NestTest.SampleAttr)
self.assertEqual(restructured_from_flat, sample_attr)
# Check that flatten fails if attributes are not iterable
with self.assertRaisesRegexp(TypeError, "object is not iterable"):
flat = nest.flatten(NestTest.BadAttr())
@parameterized.parameters(
{"values": [1, 2, 3]},
{"values": [{"B": 10, "A": 20}, [1, 2], 3]},
{"values": [(1, 2), [3, 4], 5]},
{"values": [PointXY(1, 2), 3, 4]},
)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testAttrsMapStructure(self, values):
if attr is None:
self.skipTest("attr module is unavailable.")
structure = NestTest.UnsortedSampleAttr(*values)
new_structure = nest.map_structure(lambda x: x, structure)
self.assertEqual(structure, new_structure)
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenAndPack(self):
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])
self.assertEqual(
nest.pack_sequence_as(structure, flat), (("a", "b"), "c",
("d", "e", ("f", "g"), "h")))
structure = (NestTest.PointXY(x=4, y=2),
((NestTest.PointXY(x=1, y=0),),))
flat = [4, 2, 1, 0]
self.assertEqual(nest.flatten(structure), flat)
restructured_from_flat = nest.pack_sequence_as(structure, flat)
self.assertEqual(restructured_from_flat, structure)
self.assertEqual(restructured_from_flat[0].x, 4)
self.assertEqual(restructured_from_flat[0].y, 2)
self.assertEqual(restructured_from_flat[1][0][0].x, 1)
self.assertEqual(restructured_from_flat[1][0][0].y, 0)
self.assertEqual([5], nest.flatten(5))
self.assertEqual([np.array([5])], nest.flatten(np.array([5])))
self.assertEqual("a", nest.pack_sequence_as(5, ["a"]))
self.assertEqual(
np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
nest.pack_sequence_as("scalar", [4, 5])
with self.assertRaisesRegexp(TypeError, "flat_sequence"):
nest.pack_sequence_as([4, 5], "bad_sequence")
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
@parameterized.parameters({"mapping_type": collections.OrderedDict},
{"mapping_type": _CustomMapping})
def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
self.assertIsInstance(custom_reconstruction, mapping_type)
self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testFlattenAndPack_withDicts(self):
    """Flatten/pack round trip over mixed dict/list/tuple/namedtuple nests."""
    # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
    mess = [
        "z",
        NestTest.Abc(3, 4), {
            "d": _CustomMapping({
                41: 4
            }),
            "c": [
                1,
                collections.OrderedDict([
                    ("b", 3),
                    ("a", 2),
                ]),
            ],
            "b": 5
        }, 17
    ]
    # Dict values appear in sorted-key order ("b" before "c" before "d").
    flattened = nest.flatten(mess)
    self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
    # A structure with the same shape but different leaf values/types.
    structure_of_mess = [
        14,
        NestTest.Abc("a", True),
        {
            "d": _CustomMapping({
                41: 42
            }),
            "c": [
                0,
                collections.OrderedDict([
                    ("b", 9),
                    ("a", 8),
                ]),
            ],
            "b": 3
        },
        "hi everybody",
    ]
    unflattened = nest.pack_sequence_as(structure_of_mess, flattened)
    self.assertEqual(unflattened, mess)
    # Check also that the OrderedDict was created, with the correct key order.
    unflattened_ordered_dict = unflattened[2]["c"][1]
    self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
    self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
    # Custom Mapping subclasses are preserved as well.
    unflattened_custom_mapping = unflattened[2]["d"]
    self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
    self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
def testFlatten_stringIsNotFlattened(self):
structure = "lots of letters"
flattened = nest.flatten(structure)
self.assertLen(flattened, 1)
unflattened = nest.pack_sequence_as("goodbye", flattened)
self.assertEqual(structure, unflattened)
  def testPackSequenceAs_notIterableError(self):
    """pack_sequence_as rejects a non-sequence flat_sequence argument."""
    with self.assertRaisesRegexp(TypeError,
                                 "flat_sequence must be a sequence"):
      nest.pack_sequence_as("hi", "bye")
  def testPackSequenceAs_wrongLengthsError(self):
    """pack_sequence_as rejects a flat sequence of the wrong length."""
    with self.assertRaisesRegexp(
        ValueError,
        "Structure had 2 elements, but flat_sequence had 3 elements."):
      nest.pack_sequence_as(["hello", "world"],
                            ["and", "goodbye", "again"])
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testIsNested(self):
    """is_nested is True for lists/tuples/dicts, False for leaves and sets."""
    self.assertFalse(nest.is_nested("1234"))
    self.assertTrue(nest.is_nested([1, 3, [4, 5]]))
    self.assertTrue(nest.is_nested(((7, 8), (5, 6))))
    self.assertTrue(nest.is_nested([]))
    self.assertTrue(nest.is_nested({"a": 1, "b": 2}))
    # Sets are deliberately NOT treated as nested structures.
    self.assertFalse(nest.is_nested(set([1, 2])))
    # Tensors and ndarrays are leaves.
    ones = array_ops.ones([2, 3])
    self.assertFalse(nest.is_nested(ones))
    self.assertFalse(nest.is_nested(math_ops.tanh(ones)))
    self.assertFalse(nest.is_nested(np.ones((4, 5))))
  @parameterized.parameters({"mapping_type": _CustomMapping},
                            {"mapping_type": dict})
  def testFlattenDictItems(self, mapping_type):
    """flatten_dict_items pairs up corresponding key/value leaves."""
    dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
    flat = {4: "a", 5: "b", 6: "c", 8: "d"}
    self.assertEqual(nest.flatten_dict_items(dictionary), flat)
    # A non-mapping argument is a TypeError.
    with self.assertRaises(TypeError):
      nest.flatten_dict_items(4)
    # Duplicate flattened keys (4 appears twice) must be rejected.
    bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
    with self.assertRaisesRegexp(ValueError, "not unique"):
      nest.flatten_dict_items(bad_dictionary)
    # Key and value structures must flatten to the same number of leaves.
    another_bad_dictionary = mapping_type({
        (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
    })
    with self.assertRaisesRegexp(
        ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
      nest.flatten_dict_items(another_bad_dictionary)
# pylint does not correctly recognize these as class names and
# suggests to use variable style under_score naming.
# pylint: disable=invalid-name
Named0ab = collections.namedtuple("named_0", ("a", "b"))
Named1ab = collections.namedtuple("named_1", ("a", "b"))
SameNameab = collections.namedtuple("same_name", ("a", "b"))
SameNameab2 = collections.namedtuple("same_name", ("a", "b"))
SameNamexy = collections.namedtuple("same_name", ("x", "y"))
SameName1xy = collections.namedtuple("same_name_1", ("x", "y"))
SameName1xy2 = collections.namedtuple("same_name_1", ("x", "y"))
NotSameName = collections.namedtuple("not_same_name", ("a", "b"))
# pylint: enable=invalid-name
  class SameNamedType1(SameNameab):
    """Subclass of SameNameab; a distinct type for structure-type checks."""
    pass
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testAssertSameStructure(self):
    """assert_same_structure: matches, mismatches, and error-message text."""
    structure1 = (((1, 2), 3), 4, (5, 6))
    structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
    structure_different_num_elements = ("spam", "eggs")
    structure_different_nesting = (((1, 2), 3), 4, 5, (6,))
    # Same shape, different leaf values/types: OK.
    nest.assert_same_structure(structure1, structure2)
    # Leaves of any type compare as structurally identical scalars.
    nest.assert_same_structure("abc", 1.0)
    nest.assert_same_structure("abc", np.array([0, 1]))
    nest.assert_same_structure("abc", constant_op.constant([0, 1]))
    # Mismatched element counts: check the full diagnostic message.
    with self.assertRaisesRegexp(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         "More specifically: Substructure "
         r'"type=tuple str=\(\(1, 2\), 3\)" is a sequence, while '
         'substructure "type=str str=spam" is not\n'
         "Entire first structure:\n"
         r"\(\(\(\., \.\), \.\), \., \(\., \.\)\)\n"
         "Entire second structure:\n"
         r"\(\., \.\)")):
      nest.assert_same_structure(structure1, structure_different_num_elements)
    # A list is a sequence; an ndarray is a leaf.
    with self.assertRaisesRegexp(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         r'More specifically: Substructure "type=list str=\[0, 1\]" '
         r'is a sequence, while substructure "type=ndarray str=\[0 1\]" '
         "is not")):
      nest.assert_same_structure([0, 1], np.array([0, 1]))
    # A scalar vs a list.
    with self.assertRaisesRegexp(
        ValueError,
        ("The two structures don't have the same nested structure\\.\n\n"
         "First structure:.*?\n\n"
         "Second structure:.*\n\n"
         r'More specifically: Substructure "type=list str=\[0, 1\]" '
         'is a sequence, while substructure "type=int str=0" '
         "is not")):
      nest.assert_same_structure(0, [0, 1])
    # tuple vs list is a type mismatch by default.
    self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
    with self.assertRaisesRegexp(
        ValueError,
        ("don't have the same nested structure\\.\n\n"
         "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure(structure1, structure_different_nesting)
    # Namedtuples: same type name AND field names -> same structure type.
    self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
                      NestTest.Named0ab("a", "b"))
    nest.assert_same_structure(NestTest.Named0ab(3, 4),
                               NestTest.Named0ab("a", "b"))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
    with self.assertRaisesRegexp(
        ValueError,
        ("don't have the same nested structure\\.\n\n"
         "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure(NestTest.Named0ab(3, 4),
                                 NestTest.Named0ab([3], 4))
    with self.assertRaisesRegexp(
        ValueError,
        ("don't have the same nested structure\\.\n\n"
         "First structure: .*?\n\nSecond structure: ")):
      nest.assert_same_structure([[3], 4], [3, [4]])
    structure1_list = [[[1, 2], 3], 4, [5, 6]]
    with self.assertRaisesRegexp(TypeError,
                                 "don't have the same sequence type"):
      nest.assert_same_structure(structure1, structure1_list)
    # check_types=False relaxes the sequence-type requirement.
    nest.assert_same_structure(structure1, structure2, check_types=False)
    nest.assert_same_structure(structure1, structure1_list, check_types=False)
    with self.assertRaisesRegexp(ValueError,
                                 "don't have the same set of keys"):
      nest.assert_same_structure({"a": 1}, {"b": 1})
    nest.assert_same_structure(NestTest.SameNameab(0, 1),
                               NestTest.SameNameab2(2, 3))
    # This assertion is expected to pass: two namedtuples with the same
    # name and field names are considered to be identical.
    nest.assert_same_structure(
        NestTest.SameNameab(NestTest.SameName1xy(0, 1), 2),
        NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
    expected_message = "The two structures don't have the same.*"
    with self.assertRaisesRegexp(ValueError, expected_message):
      nest.assert_same_structure(
          NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
          NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
    # Different type name, different field names, or a subclass all mismatch.
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.NotSameName(2, 3))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.SameNamexy(2, 3))
    self.assertRaises(TypeError, nest.assert_same_structure,
                      NestTest.SameNameab(0, 1), NestTest.SameNamedType1(2, 3))
EmptyNT = collections.namedtuple("empty_nt", "") # pylint: disable=invalid-name
def testHeterogeneousComparison(self):
nest.assert_same_structure({"a": 4}, _CustomMapping(a=3))
nest.assert_same_structure(_CustomMapping(b=3), {"b": 4})
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testMapStructure(self):
    """map_structure over matching nests, scalars, empties, and error cases."""
    structure1 = (((1, 2), 3), 4, (5, 6))
    structure2 = (((7, 8), 9), 10, (11, 12))
    # Unary map preserves structure.
    structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)
    nest.assert_same_structure(structure1, structure1_plus1)
    self.assertAllEqual(
        [2, 3, 4, 5, 6, 7],
        nest.flatten(structure1_plus1))
    # Binary map zips corresponding leaves.
    structure1_plus_structure2 = nest.map_structure(
        lambda x, y: x + y, structure1, structure2)
    self.assertEqual(
        (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),
        structure1_plus_structure2)
    # Scalars map directly.
    self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))
    self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
    # Empty structures
    self.assertEqual((), nest.map_structure(lambda x: x + 1, ()))
    self.assertEqual([], nest.map_structure(lambda x: x + 1, []))
    self.assertEqual({}, nest.map_structure(lambda x: x + 1, {}))
    self.assertEqual(NestTest.EmptyNT(), nest.map_structure(lambda x: x + 1,
                                                            NestTest.EmptyNT()))
    # This is checking actual equality of types, empty list != empty tuple
    self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
    # Error cases: non-callable func, no structures, shape/type mismatches,
    # and unknown keyword arguments.
    with self.assertRaisesRegexp(TypeError, "callable"):
      nest.map_structure("bad", structure1_plus1)
    with self.assertRaisesRegexp(ValueError, "at least one structure"):
      nest.map_structure(lambda x: x)
    with self.assertRaisesRegexp(ValueError, "same number of elements"):
      nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
    with self.assertRaisesRegexp(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, 3, (3,))
    with self.assertRaisesRegexp(TypeError, "same sequence type"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
    with self.assertRaisesRegexp(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
    structure1_list = [[[1, 2], 3], 4, [5, 6]]
    with self.assertRaisesRegexp(TypeError, "same sequence type"):
      nest.map_structure(lambda x, y: None, structure1, structure1_list)
    # check_types=False relaxes sequence-type checking but not nesting shape.
    nest.map_structure(lambda x, y: None, structure1, structure1_list,
                       check_types=False)
    with self.assertRaisesRegexp(ValueError, "same nested structure"):
      nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
                         check_types=False)
    with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
      nest.map_structure(lambda x: None, structure1, foo="a")
    with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
      nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
ABTuple = collections.namedtuple("ab_tuple", "a, b") # pylint: disable=invalid-name
  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testMapStructureWithStrings(self):
    """map_structure treats strings as leaves, not as sequences."""
    inp_a = NestTest.ABTuple(a="foo", b=("bar", "baz"))
    inp_b = NestTest.ABTuple(a=2, b=(1, 3))
    # Each string leaf is repeated by the corresponding integer leaf.
    out = nest.map_structure(lambda string, repeats: string * repeats,
                             inp_a,
                             inp_b)
    self.assertEqual("foofoo", out.a)
    self.assertEqual("bar", out.b[0])
    self.assertEqual("bazbazbaz", out.b[1])
    nt = NestTest.ABTuple(a=("something", "something_else"),
                          b="yet another thing")
    rev_nt = nest.map_structure(lambda x: x[::-1], nt)
    # Check the output is the correct structure, and all strings are reversed.
    nest.assert_same_structure(nt, rev_nt)
    self.assertEqual(nt.a[0][::-1], rev_nt.a[0])
    self.assertEqual(nt.a[1][::-1], rev_nt.a[1])
    self.assertEqual(nt.b[::-1], rev_nt.b)
  @test_util.run_deprecated_v1
  def testMapStructureOverPlaceholders(self):
    """map_structure composes graph ops over tuples of placeholders (TF1)."""
    inp_a = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
             array_ops.placeholder(dtypes.float32, shape=[3, 7]))
    inp_b = (array_ops.placeholder(dtypes.float32, shape=[3, 4]),
             array_ops.placeholder(dtypes.float32, shape=[3, 7]))
    output = nest.map_structure(lambda x1, x2: x1 + x2, inp_a, inp_b)
    # The output tuple mirrors the input structure and static shapes.
    nest.assert_same_structure(output, inp_a)
    self.assertShapeEqual(np.zeros((3, 4)), output[0])
    self.assertShapeEqual(np.zeros((3, 7)), output[1])
    # Tuples of placeholders can be fed directly as feed_dict keys.
    feed_dict = {
        inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)),
        inp_b: (np.random.randn(3, 4), np.random.randn(3, 7))
    }
    with self.cached_session() as sess:
      output_np = sess.run(output, feed_dict=feed_dict)
    self.assertAllClose(output_np[0],
                        feed_dict[inp_a][0] + feed_dict[inp_b][0])
    self.assertAllClose(output_np[1],
                        feed_dict[inp_a][1] + feed_dict[inp_b][1])
  def testAssertShallowStructure(self):
    """assert_shallow_structure: size, type, and key mismatches."""
    inp_ab = ["a", "b"]
    inp_abc = ["a", "b", "c"]
    # The shallow tree may not have MORE elements than the input tree.
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        nest._INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
            shallow_size=len(inp_abc),
            input_size=len(inp_ab))):
      nest.assert_shallow_structure(shallow_tree=inp_abc, input_tree=inp_ab)
    # list vs tuple elements mismatch unless check_types=False.
    inp_ab1 = [(1, 1), (2, 2)]
    inp_ab2 = [[1, 1], [2, 2]]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            shallow_type=type(inp_ab2[0]),
            input_type=type(inp_ab1[0]))):
      nest.assert_shallow_structure(inp_ab2, inp_ab1)
    nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
    # Shallow keys must be a subset of the input tree's keys.
    inp_ab1 = {"a": (1, 1), "b": {"c": (2, 2)}}
    inp_ab2 = {"a": (1, 1), "b": {"d": (2, 2)}}
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["d"])):
      nest.assert_shallow_structure(inp_ab2, inp_ab1)
    # Key insertion order in OrderedDicts does not affect comparison.
    inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
    inp_ba = collections.OrderedDict([("b", (2, 3)), ("a", 1)])
    nest.assert_shallow_structure(inp_ab, inp_ba)
    # This assertion is expected to pass: two namedtuples with the same
    # name and field names are considered to be identical.
    inp_shallow = NestTest.SameNameab(1, 2)
    inp_deep = NestTest.SameNameab2(1, [1, 2, 3])
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=False)
    nest.assert_shallow_structure(inp_shallow, inp_deep, check_types=True)
  def testFlattenUpTo(self):
    """flatten_up_to: partial flattening down to the shallow tree's depth."""
    # Shallow tree ends at scalar.
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
    self.assertEqual(flattened_shallow_tree, [True, True, False, True])
    # Shallow tree ends at string.
    input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
    shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    input_tree_flattened = nest.flatten(input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
    # Make sure dicts are correctly flattened, yielding values, not keys.
    input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
    shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [1, {"c": 2}, 3, (4, 5)])
    # Namedtuples.
    ab_tuple = NestTest.ABTuple
    input_tree = ab_tuple(a=[0, 1], b=2)
    shallow_tree = ab_tuple(a=0, b=1)
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [[0, 1], 2])
    # Nested dicts, OrderedDicts and namedtuples.
    input_tree = collections.OrderedDict(
        [("a", ab_tuple(a=[0, {"b": 1}], b=2)),
         ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
    shallow_tree = input_tree
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      3,
                      collections.OrderedDict([("f", 4)])])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
    input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,
                                                              input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      {"d": 3, "e": collections.OrderedDict([("f", 4)])}])
    ## Shallow non-list edge-case.
    # Using iterable elements.
    input_tree = ["input_tree"]
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = ["input_tree_0", "input_tree_1"]
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = [0]
    shallow_tree = 9
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = [0, 1]
    shallow_tree = 9
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Both non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = "shallow_tree"
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = 0
    flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Input non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree"]
    expected_message = ("If shallow structure is a sequence, input must also "
                        "be a sequence. Input has type: <(type|class) 'str'>.")
    with self.assertRaisesRegexp(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
    with self.assertRaisesRegexp(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = [9]
    expected_message = ("If shallow structure is a sequence, input must also "
                        "be a sequence. Input has type: <(type|class) 'int'>.")
    with self.assertRaisesRegexp(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = 0
    shallow_tree = [9, 8]
    with self.assertRaisesRegexp(TypeError, expected_message):
      flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
    flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree, shallow_tree)
  def testFlattenWithTuplePathsUpTo(self):
    """flatten_with_tuple_paths_up_to: partial flattening with key paths."""

    def get_paths_and_values(shallow_tree, input_tree):
      # Split (path, value) pairs into parallel lists for easier assertions.
      path_value_pairs = nest.flatten_with_tuple_paths_up_to(shallow_tree,
                                                             input_tree)
      paths = [p for p, _ in path_value_pairs]
      values = [v for _, v in path_value_pairs]
      return paths, values

    # Shallow tree ends at scalar.
    input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
    shallow_tree = [[True, True], [False, True]]
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths,
                     [(0, 0), (0, 1), (1, 0), (1, 1)])
    self.assertEqual(flattened_input_tree, [[2, 2], [3, 3], [4, 9], [5, 5]])
    self.assertEqual(flattened_shallow_tree_paths,
                     [(0, 0), (0, 1), (1, 0), (1, 1)])
    self.assertEqual(flattened_shallow_tree, [True, True, False, True])
    # Shallow tree ends at string.
    input_tree = [[("a", 1), [("b", 2), [("c", 3), [("d", 4)]]]]]
    shallow_tree = [["level_1", ["level_2", ["level_3", ["level_4"]]]]]
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    input_tree_flattened_paths = [p for p, _ in
                                  nest.flatten_with_tuple_paths(input_tree)]
    input_tree_flattened = nest.flatten(input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [(0, 0), (0, 1, 0), (0, 1, 1, 0), (0, 1, 1, 1, 0)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [("a", 1), ("b", 2), ("c", 3), ("d", 4)])
    self.assertEqual(input_tree_flattened_paths,
                     [(0, 0, 0), (0, 0, 1),
                      (0, 1, 0, 0), (0, 1, 0, 1),
                      (0, 1, 1, 0, 0), (0, 1, 1, 0, 1),
                      (0, 1, 1, 1, 0, 0), (0, 1, 1, 1, 0, 1)])
    self.assertEqual(input_tree_flattened, ["a", 1, "b", 2, "c", 3, "d", 4])
    # Make sure dicts are correctly flattened, yielding values, not keys.
    input_tree = {"a": 1, "b": {"c": 2}, "d": [3, (4, 5)]}
    shallow_tree = {"a": 0, "b": 0, "d": [0, 0]}
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("b",), ("d", 0), ("d", 1)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [1, {"c": 2}, 3, (4, 5)])
    # Namedtuples.
    ab_tuple = collections.namedtuple("ab_tuple", "a, b")
    input_tree = ab_tuple(a=[0, 1], b=2)
    shallow_tree = ab_tuple(a=0, b=1)
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("b",)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [[0, 1], 2])
    # Nested dicts, OrderedDicts and namedtuples.
    input_tree = collections.OrderedDict(
        [("a", ab_tuple(a=[0, {"b": 1}], b=2)),
         ("c", {"d": 3, "e": collections.OrderedDict([("f", 4)])})])
    shallow_tree = input_tree
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a", "a", 0),
                      ("a", "a", 1, "b"),
                      ("a", "b"),
                      ("c", "d"),
                      ("c", "e", "f")])
    self.assertEqual(input_tree_flattened_as_shallow_tree, [0, 1, 2, 3, 4])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", {"d": 3, "e": 1})])
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",),
                      ("c", "d"),
                      ("c", "e")])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      3,
                      collections.OrderedDict([("f", 4)])])
    shallow_tree = collections.OrderedDict([("a", 0), ("c", 0)])
    (input_tree_flattened_as_shallow_tree_paths,
     input_tree_flattened_as_shallow_tree) = get_paths_and_values(shallow_tree,
                                                                  input_tree)
    self.assertEqual(input_tree_flattened_as_shallow_tree_paths,
                     [("a",), ("c",)])
    self.assertEqual(input_tree_flattened_as_shallow_tree,
                     [ab_tuple(a=[0, {"b": 1}], b=2),
                      {"d": 3, "e": collections.OrderedDict([("f", 4)])}])
    ## Shallow non-list edge-case.
    # Using iterable elements.
    input_tree = ["input_tree"]
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = ["input_tree_0", "input_tree_1"]
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Test case where len(shallow_tree) < len(input_tree)
    input_tree = {"a": "A", "b": "B", "c": "C"}
    shallow_tree = {"a": 1, "c": 2}
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [("a",), ("c",)])
    self.assertEqual(flattened_input_tree, ["A", "C"])
    self.assertEqual(flattened_shallow_tree_paths, [("a",), ("c",)])
    self.assertEqual(flattened_shallow_tree, [1, 2])
    # Using non-iterable elements.
    input_tree = [0]
    shallow_tree = 9
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    input_tree = [0, 1]
    shallow_tree = 9
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Both non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = "shallow_tree"
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = 0
    (flattened_input_tree_paths,
     flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_input_tree_paths, [()])
    self.assertEqual(flattened_input_tree, [input_tree])
    self.assertEqual(flattened_shallow_tree_paths, [()])
    self.assertEqual(flattened_shallow_tree, [shallow_tree])
    ## Input non-list edge-case.
    # Using iterable elements.
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree"]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = "input_tree"
    shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    # Using non-iterable elements.
    input_tree = 0
    shallow_tree = [9]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
    input_tree = 0
    shallow_tree = [9, 8]
    with self.assertRaisesWithLiteralMatch(
        TypeError,
        nest._IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ.format(type(input_tree))):
      (flattened_input_tree_paths,
       flattened_input_tree) = get_paths_and_values(shallow_tree, input_tree)
    (flattened_shallow_tree_paths,
     flattened_shallow_tree) = get_paths_and_values(shallow_tree, shallow_tree)
    self.assertEqual(flattened_shallow_tree_paths, [(0,), (1,)])
    self.assertEqual(flattened_shallow_tree, shallow_tree)
  def testMapStructureUpTo(self):
    """map_structure_up_to maps only down to the shallow structure's depth."""
    # Named tuples.
    ab_tuple = collections.namedtuple("ab_tuple", "a, b")
    op_tuple = collections.namedtuple("op_tuple", "add, mul")
    inp_val = ab_tuple(a=2, b=3)
    inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
    # The op_tuple leaves of inp_ops are passed whole, since inp_val is
    # the (shallower) shallow structure.
    out = nest.map_structure_up_to(
        inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)
    self.assertEqual(out.a, 6)
    self.assertEqual(out.b, 15)
    # Lists.
    data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
    name_list = ["evens", ["odds", "primes"]]
    out = nest.map_structure_up_to(
        name_list, lambda name, sec: "first_{}_{}".format(len(sec), name),
        name_list, data_list)
    self.assertEqual(out, ["first_4_evens", ["first_5_odds", "first_3_primes"]])
    # Dicts.
    inp_val = dict(a=2, b=3)
    inp_ops = dict(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
    out = nest.map_structure_up_to(
        inp_val,
        lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
    self.assertEqual(out["a"], 6)
    self.assertEqual(out["b"], 15)
    # Non-equal dicts.
    inp_val = dict(a=2, b=3)
    inp_ops = dict(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
      nest.map_structure_up_to(
          inp_val,
          lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
    # Dict+custom mapping.
    inp_val = dict(a=2, b=3)
    inp_ops = _CustomMapping(a=dict(add=1, mul=2), b=dict(add=2, mul=3))
    out = nest.map_structure_up_to(
        inp_val,
        lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
    self.assertEqual(out["a"], 6)
    self.assertEqual(out["b"], 15)
    # Non-equal dict/mapping.
    inp_val = dict(a=2, b=3)
    inp_ops = _CustomMapping(a=dict(add=1, mul=2), c=dict(add=2, mul=3))
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        nest._SHALLOW_TREE_HAS_INVALID_KEYS.format(["b"])):
      nest.map_structure_up_to(
          inp_val,
          lambda val, ops: (val + ops["add"]) * ops["mul"], inp_val, inp_ops)
  def testGetTraverseShallowStructure(self):
    """get_traverse_shallow_structure honours the traverse predicate."""
    scalar_traverse_input = [3, 4, (1, 2, [0]), [5, 6], {"a": (7,)}, []]
    # Predicate False on tuples -> tuples appear as single False leaves.
    scalar_traverse_r = nest.get_traverse_shallow_structure(
        lambda s: not isinstance(s, tuple),
        scalar_traverse_input)
    self.assertEqual(scalar_traverse_r,
                     [True, True, False, [True, True], {"a": False}, []])
    nest.assert_shallow_structure(scalar_traverse_r,
                                  scalar_traverse_input)
    structure_traverse_input = [(1, [2]), ([1], 2)]
    # The traverse fn may itself return a (shallow) structure of bools.
    structure_traverse_r = nest.get_traverse_shallow_structure(
        lambda s: (True, False) if isinstance(s, tuple) else True,
        structure_traverse_input)
    self.assertEqual(structure_traverse_r,
                     [(True, False), ([True], False)])
    nest.assert_shallow_structure(structure_traverse_r,
                                  structure_traverse_input)
    # Malformed return values from the traverse fn are rejected.
    with self.assertRaisesRegexp(TypeError, "returned structure"):
      nest.get_traverse_shallow_structure(lambda _: [True], 0)
    with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
      nest.get_traverse_shallow_structure(lambda _: 1, [1])
    with self.assertRaisesRegexp(
        TypeError, "didn't return a depth=1 structure of bools"):
      nest.get_traverse_shallow_structure(lambda _: [1], [1])
def testYieldFlatStringPaths(self):
for inputs_expected in ({"inputs": [], "expected": []},
{"inputs": 3, "expected": [()]},
{"inputs": [3], "expected": [(0,)]},
{"inputs": {"a": 3}, "expected": [("a",)]},
{"inputs": {"a": {"b": 4}},
"expected": [("a", "b")]},
{"inputs": [{"a": 2}], "expected": [(0, "a")]},
{"inputs": [{"a": [2]}], "expected": [(0, "a", 0)]},
{"inputs": [{"a": [(23, 42)]}],
"expected": [(0, "a", 0, 0), (0, "a", 0, 1)]},
{"inputs": [{"a": ([23], 42)}],
"expected": [(0, "a", 0, 0), (0, "a", 1)]},
{"inputs": {"a": {"a": 2}, "c": [[[4]]]},
"expected": [("a", "a"), ("c", 0, 0, 0)]},
{"inputs": {"0": [{"1": 23}]},
"expected": [("0", 0, "1")]}):
inputs = inputs_expected["inputs"]
expected = inputs_expected["expected"]
self.assertEqual(list(nest.yield_flat_paths(inputs)), expected)
  # We cannot define namedtuples within @parameterized argument lists.
  # pylint: disable=invalid-name
  Foo = collections.namedtuple("Foo", ["a", "b"])
  Bar = collections.namedtuple("Bar", ["c", "d"])
  # pylint: enable=invalid-name
  @parameterized.parameters([
      dict(inputs=[], expected=[]),
      dict(inputs=[23, "42"], expected=[("0", 23), ("1", "42")]),
      dict(inputs=[[[[108]]]], expected=[("0/0/0/0", 108)]),
      dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
           expected=[("a", 3), ("b/c", 23), ("b/d", 42)]),
      dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
           expected=[("a/c", 23), ("a/d", 42), ("b/c", 0), ("b/d", "thing")]),
      dict(inputs=Bar(c=42, d=43),
           expected=[("c", 42), ("d", 43)]),
      dict(inputs=Bar(c=[42], d=43),
           expected=[("c/0", 42), ("d", 43)]),
  ])
  def testFlattenWithStringPaths(self, inputs, expected):
    """Each flattened leaf is paired with its separator-joined string path."""
    self.assertEqual(
        nest.flatten_with_joined_string_paths(inputs, separator="/"),
        expected)
  @parameterized.parameters([
      dict(inputs=[], expected=[]),
      dict(inputs=[23, "42"], expected=[((0,), 23), ((1,), "42")]),
      dict(inputs=[[[[108]]]], expected=[((0, 0, 0, 0), 108)]),
      dict(inputs=Foo(a=3, b=Bar(c=23, d=42)),
           expected=[(("a",), 3), (("b", "c"), 23), (("b", "d"), 42)]),
      dict(inputs=Foo(a=Bar(c=23, d=42), b=Bar(c=0, d="thing")),
           expected=[(("a", "c"), 23), (("a", "d"), 42), (("b", "c"), 0),
                     (("b", "d"), "thing")]),
      dict(inputs=Bar(c=42, d=43),
           expected=[(("c",), 42), (("d",), 43)]),
      dict(inputs=Bar(c=[42], d=43),
           expected=[(("c", 0), 42), (("d",), 43)]),
  ])
  def testFlattenWithTuplePaths(self, inputs, expected):
    """Each flattened leaf is paired with its path as a tuple of keys/indices."""
    self.assertEqual(nest.flatten_with_tuple_paths(inputs), expected)
@parameterized.named_parameters(
("tuples", (1, 2), (3, 4), True, (("0", 4), ("1", 6))),
("dicts", {"a": 1, "b": 2}, {"b": 4, "a": 3}, True,
{"a": ("a", 4), "b": ("b", 6)}),
("mixed", (1, 2), [3, 4], False, (("0", 4), ("1", 6))),
("nested",
{"a": [2, 3], "b": [1, 2, 3]}, {"b": [5, 6, 7], "a": [8, 9]}, True,
{"a": [("a/0", 10), ("a/1", 12)],
"b": [("b/0", 6), ("b/1", 8), ("b/2", 10)]}))
def testMapWithPathsCompatibleStructures(self, s1, s2, check_types, expected):
def format_sum(path, *values):
return (path, sum(values))
result = nest.map_structure_with_paths(format_sum, s1, s2,
check_types=check_types)
self.assertEqual(expected, result)
@parameterized.named_parameters(
("tuples", (1, 2, 3), (4, 5), ValueError),
("dicts", {"a": 1}, {"b": 2}, ValueError),
("mixed", (1, 2), [3, 4], TypeError),
("nested",
{"a": [2, 3, 4], "b": [1, 3]},
{"b": [5, 6], "a": [8, 9]},
ValueError
))
def testMapWithPathsIncompatibleStructures(self, s1, s2, error_type):
with self.assertRaises(error_type):
nest.map_structure_with_paths(lambda path, *s: 0, s1, s2)
  @parameterized.named_parameters([
      dict(testcase_name="Tuples", s1=(1, 2), s2=(3, 4),
           check_types=True, expected=(((0,), 4), ((1,), 6))),
      dict(testcase_name="Dicts", s1={"a": 1, "b": 2}, s2={"b": 4, "a": 3},
           check_types=True, expected={"a": (("a",), 4), "b": (("b",), 6)}),
      dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4],
           check_types=False, expected=(((0,), 4), ((1,), 6))),
      dict(testcase_name="Nested",
           s1={"a": [2, 3], "b": [1, 2, 3]},
           s2={"b": [5, 6, 7], "a": [8, 9]},
           check_types=True,
           expected={"a": [(("a", 0), 10), (("a", 1), 12)],
                     "b": [(("b", 0), 6), (("b", 1), 8), (("b", 2), 10)]}),
  ])
  def testMapWithTuplePathsCompatibleStructures(
      self, s1, s2, check_types, expected):
    """map_structure_with_tuple_paths passes the tuple path to the fn."""
    def path_and_sum(path, *values):
      return path, sum(values)
    result = nest.map_structure_with_tuple_paths(
        path_and_sum, s1, s2, check_types=check_types)
    self.assertEqual(expected, result)
  @parameterized.named_parameters([
      dict(testcase_name="Tuples", s1=(1, 2, 3), s2=(4, 5),
           error_type=ValueError),
      dict(testcase_name="Dicts", s1={"a": 1}, s2={"b": 2},
           error_type=ValueError),
      dict(testcase_name="Mixed", s1=(1, 2), s2=[3, 4], error_type=TypeError),
      dict(testcase_name="Nested",
           s1={"a": [2, 3, 4], "b": [1, 3]},
           s2={"b": [5, 6], "a": [8, 9]},
           error_type=ValueError)
  ])
  def testMapWithTuplePathsIncompatibleStructures(self, s1, s2, error_type):
    """Mismatched structures raise the documented error type."""
    with self.assertRaises(error_type):
      nest.map_structure_with_tuple_paths(lambda path, *s: 0, s1, s2)
class NestBenchmark(test.Benchmark):
  """Micro-benchmarks for nest.assert_same_structure."""

  def run_and_report(self, s1, s2, name):
    """Times assert_same_structure(s1, s2) and reports mean wall time."""
    burn_iter, test_iter = 100, 30000
    # Warm-up pass so one-time costs don't pollute the measurement.
    for _ in xrange(burn_iter):
      nest.assert_same_structure(s1, s2)
    start = time.time()
    for _ in xrange(test_iter):
      nest.assert_same_structure(s1, s2)
    elapsed = time.time() - start
    self.report_benchmark(iters=test_iter, wall_time=elapsed / test_iter,
                          name=name)

  def benchmark_assert_structure(self):
    """Benchmarks a 6-leaf pair and the same pair repeated 10 times."""
    small_s1 = (((1, 2), 3), 4, (5, 6))
    small_s2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
    self.run_and_report(small_s1, small_s2, "assert_same_structure_6_elem")
    self.run_and_report(small_s1 * 10, small_s2 * 10,
                        "assert_same_structure_60_elem")
# Standard test entry point.
if __name__ == "__main__":
  test.main()
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Python blobstore API used by app developers.
Contains methods used to interface with Blobstore API. Includes db.Model-like
class representing a reference to a very large BLOB. Imports db.Key-like
class representing a blob-key.
"""
import cgi
import email
import os
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api.blobstore import blobstore
from google.appengine.ext import db
__all__ = ['BLOB_INFO_KIND',
'BLOB_KEY_HEADER',
'BLOB_RANGE_HEADER',
'BlobFetchSizeTooLargeError',
'BlobInfo',
'BlobInfoParseError',
'BlobKey',
'BlobNotFoundError',
'BlobReferenceProperty',
'BlobReader',
'DataIndexOutOfRangeError',
'Error',
'InternalError',
'MAX_BLOB_FETCH_SIZE',
'UPLOAD_INFO_CREATION_HEADER',
'create_upload_url',
'delete',
'fetch_data',
'get',
'parse_blob_info']
# Re-export the error hierarchy and key type from the low-level blobstore
# module so users only need to import this package.
Error = blobstore.Error
InternalError = blobstore.InternalError
BlobFetchSizeTooLargeError = blobstore.BlobFetchSizeTooLargeError
BlobNotFoundError = blobstore.BlobNotFoundError
_CreationFormatError = blobstore._CreationFormatError
DataIndexOutOfRangeError = blobstore.DataIndexOutOfRangeError
BlobKey = blobstore.BlobKey
# Re-exported module-level operations.
create_upload_url = blobstore.create_upload_url
delete = blobstore.delete
class BlobInfoParseError(Error):
  """CGI parameter does not contain valid BlobInfo record."""
# Re-exported constants (kind name, HTTP headers, fetch limit).
BLOB_INFO_KIND = blobstore.BLOB_INFO_KIND
BLOB_KEY_HEADER = blobstore.BLOB_KEY_HEADER
BLOB_RANGE_HEADER = blobstore.BLOB_RANGE_HEADER
MAX_BLOB_FETCH_SIZE = blobstore.MAX_BLOB_FETCH_SIZE
UPLOAD_INFO_CREATION_HEADER = blobstore.UPLOAD_INFO_CREATION_HEADER
class _GqlQuery(db.GqlQuery):
  """GqlQuery class that explicitly sets model-class.
  This does the same as the original db.GqlQuery class except that it does
  not try to find the model class based on the compiled GQL query. The
  caller instead provides the query with a model class to use for construction.
  This class is required for compatibility with the current db.py query
  mechanism but will be removed in the future. DO NOT USE.
  """
  def __init__(self, query_string, model_class, *args, **kwds):
    """Constructor.
    Args:
      query_string: Properly formatted GQL query string.
      model_class: Model class from which entities are constructed.
      *args: Positional arguments used to bind numeric references in the query.
      **kwds: Dictionary-based arguments for named references.
    """
    # Function-scope import — presumably to avoid a module-level import
    # cycle with google.appengine.ext.gql; confirm before moving it.
    from google.appengine.ext import gql
    app = kwds.pop('_app', None)
    self._proto_query = gql.GQL(query_string, _app=app, namespace='')
    # Deliberately skips db.GqlQuery.__init__ (which would derive the model
    # class from the compiled query) and runs db.Query.__init__ instead with
    # the caller-supplied model_class.
    super(db.GqlQuery, self).__init__(model_class, namespace='')
    self.bind(*args, **kwds)
class BlobInfo(object):
  """Information about blobs in Blobstore.

  This is a db.Model-like class that contains information about blobs stored
  by an application. Like db.Model, this class is backed by an Datastore
  entity, however, BlobInfo instances are read-only and have a much more
  limited interface.

  Each BlobInfo has a key of type BlobKey associated with it. This key is
  specific to the Blobstore API and is not compatible with db.get. The key
  can be used for quick lookup by passing it to BlobInfo.get. This
  key converts easily to a string, which is web safe and can be embedded
  in URLs.

  Properties:
    content_type: Content type of blob.
    creation: Creation date of blob, when it was uploaded.
    filename: Filename user selected from their machine.
    size: Size of uncompressed blob.

  All properties are read-only. Attempting to assign a value to a property
  will raise NotImplementedError.
  """

  _unindexed_properties = frozenset()

  @property
  def content_type(self):
    """Content type of the blob."""
    return self.__get_value('content_type')

  @property
  def creation(self):
    """Creation date of the blob (when it was uploaded)."""
    return self.__get_value('creation')

  @property
  def filename(self):
    """Filename the user selected from their machine."""
    return self.__get_value('filename')

  @property
  def size(self):
    """Size of the uncompressed blob."""
    return self.__get_value('size')

  def __init__(self, entity_or_blob_key, _values=None):
    """Constructor for wrapping blobstore entity.

    The constructor should not be used outside this package and tests.

    Args:
      entity_or_blob_key: Datastore entity that represents the blob reference,
        or a BlobKey. When a BlobKey is given, _values (if any) supplies the
        property values; otherwise the entity is loaded lazily on first
        property access.
      _values: Optional mapping of property values, only meaningful together
        with a BlobKey.

    Raises:
      TypeError: If entity_or_blob_key is neither an Entity nor a BlobKey.
    """
    if isinstance(entity_or_blob_key, datastore.Entity):
      self.__entity = entity_or_blob_key
      self.__key = BlobKey(entity_or_blob_key.key().name())
    elif isinstance(entity_or_blob_key, BlobKey):
      self.__entity = _values
      self.__key = entity_or_blob_key
    else:
      # Bug fix: the TypeError was previously constructed but never raised,
      # silently producing an instance without __entity/__key attributes.
      raise TypeError('Must provide Entity or BlobKey')

  @classmethod
  def from_entity(cls, entity):
    """Convert entity to BlobInfo.

    This method is required for compatibility with the current db.py query
    mechanism but will be removed in the future. DO NOT USE.
    """
    return BlobInfo(entity)

  @classmethod
  def properties(cls):
    """Set of properties that belong to BlobInfo.

    This method is required for compatibility with the current db.py query
    mechanism but will be removed in the future. DO NOT USE.
    """
    return set(('content_type', 'creation', 'filename', 'size'))

  def __get_value(self, name):
    """Get a BlobInfo value, loading entity if necessary.

    This method allows lazy loading of the underlying datastore entity. It
    should never be invoked directly.

    Args:
      name: Name of property to get value for.

    Returns:
      Value of BlobInfo property from entity.

    Raises:
      AttributeError: If the entity has no property of the given name.
    """
    if self.__entity is None:
      # Lazily fetch the backing entity the first time any property is read.
      self.__entity = datastore.Get(
          datastore_types.Key.from_path(
              self.kind(), str(self.__key), namespace=''))
    try:
      return self.__entity[name]
    except KeyError:
      raise AttributeError(name)

  def key(self):
    """Get key for blob.

    Returns:
      BlobKey instance that identifies this blob.
    """
    return self.__key

  def delete(self):
    """Permanently delete blob from Blobstore."""
    delete(self.key())

  def open(self, *args, **kwargs):
    """Returns a BlobReader for this blob.

    Args:
      *args, **kwargs: Passed to BlobReader constructor.

    Returns:
      A BlobReader instance.
    """
    return BlobReader(self, *args, **kwargs)

  @classmethod
  def get(cls, blob_keys):
    """Retrieve BlobInfo by key or list of keys.

    Args:
      blob_keys: A key or a list of keys. Keys may be instances of str,
        unicode and BlobKey.

    Returns:
      A BlobInfo instance associated with provided key or a list of BlobInfo
      instances if a list of keys was provided. Keys that are not found in
      Blobstore return None as their values.
    """
    blob_keys = cls.__normalize_and_convert_keys(blob_keys)
    try:
      entities = datastore.Get(blob_keys)
    except datastore_errors.EntityNotFoundError:
      return None
    if isinstance(entities, datastore.Entity):
      # Single-key lookup: datastore.Get returned one entity, not a list.
      return BlobInfo(entities)
    else:
      # Preserve list positions: missing blobs map to None.
      references = []
      for entity in entities:
        if entity is not None:
          references.append(BlobInfo(entity))
        else:
          references.append(None)
      return references

  @classmethod
  def all(cls):
    """Get query for all Blobs associated with application.

    Returns:
      A db.Query object querying over BlobInfo's datastore kind.
    """
    return db.Query(model_class=cls, namespace='')

  @classmethod
  def __factory_for_kind(cls, kind):
    """Resolve a model factory for the given kind; only BLOB_INFO_KIND."""
    if kind == BLOB_INFO_KIND:
      return BlobInfo
    raise ValueError('Cannot query for kind %s' % kind)

  @classmethod
  def gql(cls, query_string, *args, **kwds):
    """Returns a query using GQL query string.

    See appengine/ext/gql for more information about GQL.

    Args:
      query_string: Properly formatted GQL query string with the
        'SELECT * FROM <entity>' part omitted
      *args: rest of the positional arguments used to bind numeric references
        in the query.
      **kwds: dictionary-based arguments (for named parameters).

    Returns:
      A gql.GqlQuery object querying over BlobInfo's datastore kind.
    """
    return _GqlQuery('SELECT * FROM %s %s'
                     % (cls.kind(), query_string),
                     cls,
                     *args,
                     **kwds)

  @classmethod
  def kind(cls):
    """Get the entity kind for the BlobInfo.

    This method is required for compatibility with the current db.py query
    mechanism but will be removed in the future. DO NOT USE.
    """
    return BLOB_INFO_KIND

  @classmethod
  def __normalize_and_convert_keys(cls, keys):
    """Normalize and convert all keys to BlobKey type.

    This method is based on datastore.NormalizeAndTypeCheck().

    Args:
      keys: A single key or a list/tuple of keys. Keys may be a string
        or BlobKey

    Returns:
      Single key or list with all strings replaced by BlobKey instances.

    Raises:
      datastore_errors.BadArgumentError: If any key is neither a string
        nor a BlobKey.
    """
    if isinstance(keys, (list, tuple)):
      multiple = True
      # Shallow copy so the caller's list is not mutated below.
      keys = list(keys)
    else:
      multiple = False
      keys = [keys]
    for index, key in enumerate(keys):
      if not isinstance(key, (basestring, BlobKey)):
        raise datastore_errors.BadArgumentError(
            'Expected str or BlobKey; received %s (a %s)' % (
                key,
                datastore.typename(key)))
      keys[index] = datastore.Key.from_path(cls.kind(), str(key), namespace='')
    if multiple:
      return keys
    else:
      return keys[0]
def get(blob_key):
  """Get a BlobInfo record from blobstore.

  Convenience wrapper: delegates directly to BlobInfo.get, so it accepts
  a single key or a list of keys just like that classmethod.
  """
  return BlobInfo.get(blob_key)
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
"""
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dict, name):
value = dict.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key = BlobKey(get_value(field_storage.type_options, 'blob-key'))
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(blob_key,
{'content_type': content_type,
'creation': creation,
'filename': filename,
'size': size,
})
class BlobReferenceProperty(db.Property):
  """Property compatible with db.Model classes.

  Add references to blobs to domain models using BlobReferenceProperty:

    class Picture(db.Model):
      title = db.StringProperty()
      image = blobstore.BlobReferenceProperty()
      thumbnail = blobstore.BlobReferenceProperty()

  To find the size of a picture using this model:

    picture = Picture.get(picture_key)
    print picture.image.size

  BlobInfo objects are lazily loaded, so iterating over models holding
  BlobKeys is efficient — the loop below does not hit the Datastore for
  each image key:

    list_of_untitled_blobs = []
    for picture in Picture.gql("WHERE title=''"):
      list_of_untitled_blobs.append(picture.image.key())
  """
  data_type = BlobInfo

  def get_value_for_datastore(self, model_instance):
    """Translate model property to datastore value (the blob's key)."""
    blob_info = getattr(model_instance, self.name)
    return None if blob_info is None else blob_info.key()

  def make_value_from_datastore(self, value):
    """Translate datastore value to a lazily-loaded BlobInfo."""
    return None if value is None else BlobInfo(value)

  def validate(self, value):
    """Validate that assigned value is BlobInfo.

    Automatically converts from strings and BlobKey instances.
    """
    if isinstance(value, basestring):
      value = BlobInfo(BlobKey(value))
    elif isinstance(value, BlobKey):
      value = BlobInfo(value)
    return super(BlobReferenceProperty, self).validate(value)
def fetch_data(blob, start_index, end_index):
  """Fetch data for blob.

  Fetches a fragment of a blob up to MAX_BLOB_FETCH_SIZE in length. Attempting
  to fetch a fragment that extends beyond the boundaries of the blob will
  return the data from start_index until the end of the blob, which will be
  smaller than requested. A fragment entirely outside the blob's boundaries
  yields the empty string. Negative indexes raise an exception.

  Args:
    blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
      blob to fetch data from.
    start_index: Start index of blob data to fetch. May not be negative.
    end_index: End index (inclusive) of blob data to fetch. Must be
      >= start_index.

  Returns:
    str containing partial data of blob. If the indexes are legal but outside
    the boundaries of the blob, will return empty string.

  Raises:
    TypeError if start_index or end_index are not indexes. Also when blob
      is not a string, BlobKey or BlobInfo.
    DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
    BlobFetchSizeTooLargeError when request blob fragment is larger than
      MAX_BLOB_FETCH_SIZE.
    BlobNotFoundError when blob does not exist.
  """
  # Unwrap a BlobInfo down to its key; anything else passes straight through.
  blob_key = blob.key() if isinstance(blob, BlobInfo) else blob
  return blobstore.fetch_data(blob_key, start_index, end_index)
class BlobReader(object):
  """Provides a read-only file-like interface to a blobstore blob."""
  # Whence constants for seek(), mirroring os.SEEK_SET/CUR/END.
  SEEK_SET = 0
  SEEK_CUR = 1
  SEEK_END = 2
  def __init__(self, blob, buffer_size=131072, position=0):
    """Constructor.
    Args:
      blob: The blob key, blob info, or string blob key to read from.
      buffer_size: The minimum size to fetch chunks of data from blobstore.
      position: The initial position in the file.
    """
    # Anything with a key() method (e.g. BlobInfo) is kept; otherwise the
    # BlobInfo is fetched lazily via the blob_info property.
    if hasattr(blob, 'key'):
      self.__blob_key = blob.key()
      self.__blob_info = blob
    else:
      self.__blob_key = blob
      self.__blob_info = None
    self.__buffer_size = buffer_size
    self.__buffer = ""
    # __position is the absolute offset within the blob; __buffer_position
    # is the read offset within the currently buffered chunk.
    self.__position = position
    self.__buffer_position = 0
    self.__eof = False
  def __iter__(self):
    """Returns a file iterator for this BlobReader."""
    return self
  def __getstate__(self):
    """Returns the serialized state for this BlobReader.
    The buffer itself is not pickled — only enough state to reopen the
    reader at the same position.
    """
    return (self.__blob_key, self.__buffer_size, self.__position)
  def __setstate__(self, state):
    """Restores pickled state for this BlobReader."""
    self.__init__(*state)
  def close(self):
    """Close the file.
    A closed file cannot be read or written any more. Any operation which
    requires that the file be open will raise a ValueError after the file has
    been closed. Calling close() more than once is allowed.
    """
    self.__blob_key = None
  def flush(self):
    """Unsupported: BlobReaders are read-only."""
    raise IOError("BlobReaders are read-only")
  def next(self):
    """Returns the next line from the file.
    Returns:
      A string, terminated by a newline. The last line may not be terminated
      by a newline. If EOF is reached, an empty string will be returned.
    Raises:
      StopIteration: when there are no further lines.
    """
    line = self.readline()
    if not line:
      raise StopIteration
    return line
  def __read_from_buffer(self, size):
    """Reads at most size bytes from the buffer.
    Args:
      size: Number of bytes to read, or negative to read the entire buffer.
    Returns:
      Tuple (data, size):
        data: The bytes read from the buffer.
        size: The remaining unread byte count.
    Raises:
      ValueError: if the reader has been closed.
    """
    if not self.__blob_key:
      raise ValueError("File is closed")
    if size < 0:
      end_pos = len(self.__buffer)
    else:
      end_pos = self.__buffer_position + size
    data = self.__buffer[self.__buffer_position:end_pos]
    # Advance both the absolute position and the in-buffer position.
    data_length = len(data)
    size -= data_length
    self.__position += data_length
    self.__buffer_position += data_length
    # Drop the buffer once fully consumed so it can be refilled cleanly.
    if self.__buffer_position == len(self.__buffer):
      self.__buffer = ""
      self.__buffer_position = 0
    return data, size
  def __fill_buffer(self, size=0):
    """Fills the internal buffer.
    Args:
      size: Number of bytes to read. Will be clamped to
        [self.__buffer_size, MAX_BLOB_FETCH_SIZE].
    """
    read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
    self.__buffer = fetch_data(self.__blob_key, self.__position,
                               self.__position + read_size - 1)
    self.__buffer_position = 0
    # A short read means the blob ended inside the requested range.
    self.__eof = len(self.__buffer) < read_size
  def read(self, size=-1):
    """Read at most size bytes from the file.
    Fewer bytes are read if the read hits EOF before obtaining size bytes.
    If the size argument is negative or omitted, read all data until EOF is
    reached. The bytes are returned as a string object. An empty string is
    returned when EOF is encountered immediately.
    Calling read() without a size specified is likely to be dangerous, as it
    may read excessive amounts of data.
    Args:
      size: Optional. The maximum number of bytes to read. When omitted,
        read() returns all remaining data in the file.
    Returns:
      The read data, as a string.
    """
    data_list = []
    while True:
      data, size = self.__read_from_buffer(size)
      data_list.append(data)
      if size == 0 or self.__eof:
        return ''.join(data_list)
      self.__fill_buffer(size)
  def readline(self, size=-1):
    """Read one entire line from the file.
    A trailing newline character is kept in the string (but may be absent
    when a file ends with an incomplete line). If the size argument is
    present and non-negative, it is a maximum byte count (including the
    trailing newline) and an incomplete line may be returned. An empty
    string is returned only when EOF is encountered immediately.
    Args:
      size: Optional. The maximum number of bytes to read.
    Returns:
      The read data, as a string.
    """
    data_list = []
    while True:
      if size < 0:
        end_pos = len(self.__buffer)
      else:
        end_pos = self.__buffer_position + size
      newline_pos = self.__buffer.find('\n', self.__buffer_position, end_pos)
      if newline_pos != -1:
        # Newline found in the buffered chunk: consume up to and including it.
        data_list.append(
            self.__read_from_buffer(newline_pos
                                    - self.__buffer_position + 1)[0])
        break
      else:
        # No newline yet: take what's buffered and fetch more unless done.
        data, size = self.__read_from_buffer(size)
        data_list.append(data)
        if size == 0 or self.__eof:
          break
        self.__fill_buffer()
    return ''.join(data_list)
  def readlines(self, sizehint=None):
    """Read until EOF using readline() and return a list of lines thus read.
    If the optional sizehint argument is present, instead of reading up to
    EOF, whole lines totalling approximately sizehint bytes (possibly after
    rounding up to an internal buffer size) are read.
    Args:
      sizehint: A hint as to the maximum number of bytes to read.
    Returns:
      A list of strings, each being a single line from the file.
    """
    lines = []
    while sizehint is None or sizehint > 0:
      line = self.readline()
      if sizehint:
        sizehint -= len(line)
      if not line:
        break
      lines.append(line)
    return lines
  def seek(self, offset, whence=SEEK_SET):
    """Set the file's current position, like stdio's fseek().
    The whence argument is optional and defaults to os.SEEK_SET or 0
    (absolute file positioning); other values are os.SEEK_CUR or 1 (seek
    relative to the current position) and os.SEEK_END or 2 (seek relative
    to the file's end).
    Args:
      offset: The relative offset to seek to.
      whence: Defines what the offset is relative to. See description for
        details.
    """
    if whence == BlobReader.SEEK_CUR:
      offset = self.__position + offset
    elif whence == BlobReader.SEEK_END:
      # NOTE: SEEK_END may trigger a datastore lookup via the blob_info
      # property to learn the blob's size.
      offset = self.blob_info.size + offset
    # Any seek invalidates the buffer and clears the EOF flag.
    self.__buffer = ""
    self.__buffer_position = 0
    self.__position = offset
    self.__eof = False
  def tell(self):
    """Return the file's current position, like stdio's ftell()."""
    return self.__position
  def truncate(self, size):
    """Unsupported: BlobReaders are read-only."""
    raise IOError("BlobReaders are read-only")
  def write(self, str):
    """Unsupported: BlobReaders are read-only."""
    # NOTE(review): parameter name shadows the builtin str; kept unchanged
    # for API compatibility.
    raise IOError("BlobReaders are read-only")
  def writelines(self, sequence):
    """Unsupported: BlobReaders are read-only."""
    raise IOError("BlobReaders are read-only")
  @property
  def blob_info(self):
    """Returns the BlobInfo for this file, fetching it lazily if needed."""
    if not self.__blob_info:
      self.__blob_info = BlobInfo.get(self.__blob_key)
    return self.__blob_info
  @property
  def closed(self):
    """Returns True if this file is closed, False otherwise."""
    return self.__blob_key is None
| |
from collections import OrderedDict
from functools import partial
import pytest
import numpy as np
from numpy.testing import assert_allclose
import xarray as xr
from xyzpy.gen.combo_runner import (
combo_runner,
results_to_ds,
combo_runner_to_ds,
)
from . import (
foo3_scalar,
foo3_float_bool,
foo2_array,
foo2_array_bool,
foo2_array_array,
foo2_zarray1_zarray2,
foo_array_input,
)
# --------------------------------------------------------------------------- #
# COMBO_RUNNER tests #
# --------------------------------------------------------------------------- #
# Shared fixture: three named argument sequences (2 x 3 x 4 combinations).
_test_combos1 = (('a', [1, 2]),
                 ('b', [10, 20, 30]),
                 ('c', [100, 200, 300, 400]))
# Broadcast sum a + b + c over the 2 x 3 x 4 grid — the expected output of
# running foo3_scalar over _test_combos1.
_test_expect1 = (np.array([1, 2]).reshape((2, 1, 1)) +
                 np.array([10, 20, 30]).reshape((1, 3, 1)) +
                 np.array([100, 200, 300, 400]).reshape((1, 1, 4)))
class TestComboRunner:
    """Tests for the core combo_runner driver."""
    @pytest.mark.parametrize('shuffle', [False, True, 2])
    def test_simple(self, shuffle):
        """Results match the broadcast sum, regardless of shuffling."""
        x = combo_runner(foo3_scalar, _test_combos1, shuffle=shuffle)
        assert_allclose(x, _test_expect1)
    def test_progbars(self):
        """Smoke test: verbosity=2 (progress display) should not error."""
        combo_runner(foo3_scalar, _test_combos1, verbosity=2)
    def test_dict(self):
        """Combos supplied as an OrderedDict behave like the tuple form."""
        combos = OrderedDict(_test_combos1)
        x = combo_runner(foo3_scalar, combos)
        assert_allclose(x, _test_expect1)
    def test_single_combo(self):
        """A single (name, values) combo in a list works with a partial fn."""
        combos = [('a', [1, 2])]
        x = combo_runner(partial(foo3_scalar, b=20, c=300), combos)
        assert_allclose(x, [321, 322])
    def test_single_combo_single_tuple(self):
        """A bare (name, values) tuple plus constants is accepted."""
        combos = ('a', [1, 2])
        constants = {'b': 20, 'c': 300}
        x = combo_runner(foo3_scalar, combos, constants=constants)
        assert_allclose(x, [321, 322])
    def test_multires(self):
        """split=True separates a multi-output function into two arrays."""
        combos = _test_combos1
        x, y = combo_runner(foo3_float_bool, combos, split=True)
        xn = _test_expect1
        yn = (np.array([1, 2]).reshape((2, 1, 1)) %
              np.array([2] * 24).reshape((2, 3, 4))) == 0
        assert_allclose(x, xn)
        assert_allclose(y, yn)
    @pytest.mark.parametrize('shuffle', [False, True, 2])
    @pytest.mark.parametrize('parallel', [False, True])
    @pytest.mark.parametrize('fn', (foo3_scalar,))
    def test_parallel_basic(self, parallel, fn, shuffle):
        """Results are unchanged when run with multiple workers."""
        x = combo_runner(fn, _test_combos1, num_workers=2, parallel=parallel,
                         shuffle=shuffle)
        assert_allclose(x, _test_expect1)
    @pytest.mark.parametrize('executor', ['cf-process', 'cf-thread',
                                          'mp-process', 'mp-thread'])
    @pytest.mark.parametrize('fn', (foo3_scalar,))
    def test_executor_basic(self, executor, fn):
        """User-supplied pools (concurrent.futures and multiprocessing,
        both process- and thread-based) all produce the same results."""
        import concurrent.futures as cf
        import multiprocessing as mp
        executor = {
            'cf-process': cf.ProcessPoolExecutor,
            'cf-thread': cf.ThreadPoolExecutor,
            'mp-process': mp.Pool,
            'mp-thread': mp.pool.ThreadPool,
        }[executor](2)
        x = combo_runner(fn, _test_combos1, executor=executor)
        assert_allclose(x, _test_expect1)
    @pytest.mark.parametrize('parallel', [False, True])
    def test_parallel_multires(self, parallel):
        """Parallel execution combined with split multi-output results."""
        x = combo_runner(foo3_float_bool, _test_combos1, num_workers=2,
                         split=True, parallel=parallel)
        assert_allclose(x[0], _test_expect1)
        assert np.all(np.asarray(x[1])[1, ...])
    @pytest.mark.parametrize('parallel', [False, True])
    def test_parallel_dict(self, parallel):
        """Parallel execution with dict combos and progress display."""
        combos = OrderedDict(_test_combos1)
        x = [*combo_runner(foo3_scalar, combos, num_workers=2,
                           parallel=parallel, verbosity=2)]
        assert_allclose(x, _test_expect1)
class TestCombosToDS:
    """Tests for converting raw results into an xarray Dataset."""
    def test_simple(self):
        """A flat list of ints becomes an integer-typed data variable."""
        ds = results_to_ds(
            [1, 2, 3],
            [('a', [1, 2, 3])],
            ['sum'],
            var_dims={'sum': ()},
            var_coords={},
        )
        assert ds['sum'].data.dtype == int
class TestComboRunnerToDS:
    """Tests for running combos directly into an xarray Dataset."""
    @pytest.mark.parametrize('shuffle', [False, True, 2])
    def test_basic(self, shuffle):
        """Scalar output is selectable by each combo coordinate."""
        combos = _test_combos1
        ds = combo_runner_to_ds(foo3_scalar, combos, var_names=['bananas'],
                                shuffle=shuffle)
        assert ds.sel(a=2, b=30, c=400)['bananas'].data == 432
    def test_multiresult(self):
        """Two outputs map onto two variables, each keeping its own dtype."""
        ds = combo_runner_to_ds(foo3_float_bool, _test_combos1,
                                var_names=['bananas', 'cakes'])
        assert ds.bananas.data.dtype == int
        assert ds.cakes.data.dtype == bool
        assert ds.sel(a=2, b=30, c=400)['bananas'].data == 432
        assert ds.sel(a=1, b=10, c=100)['bananas'].data == 111
        assert ds.sel(a=2, b=30, c=400)['cakes'].data
        assert not ds.sel(a=1, b=10, c=100)['cakes'].data
    def test_arrayresult(self):
        """Array output gains an explicitly named dimension with coords."""
        combos = (('a', [1, 2]),
                  ('b', [10, 20, 30]))
        ds = combo_runner_to_ds(foo2_array, combos,
                                var_names='bananas',
                                var_dims={'bananas': ['sugar']},
                                var_coords={'sugar': [*range(10)]})
        assert ds.bananas.data.dtype == float
        assert_allclose(ds.sel(a=2, b=30)['bananas'].data,
                        [32.0, 32.1, 32.2, 32.3, 32.4,
                         32.5, 32.6, 32.7, 32.8, 32.9])
    def test_array_and_single_result(self):
        """Mixed array/scalar outputs: only the array gets the extra dim."""
        combos = (('a', [1, 2]),
                  ('b', [10, 20, 30]))
        ds = combo_runner_to_ds(foo2_array_bool, combos,
                                var_names=['bananas', 'ripe'],
                                var_dims=(['sugar'], []),
                                var_coords={'sugar': [*range(10, 20)]})
        assert ds.ripe.data.dtype == bool
        assert ds.sel(a=2, b=30, sugar=14)['bananas'].data == 32.4
        # The scalar variable has no 'sugar' dim, so selecting on it fails.
        with pytest.raises((KeyError, ValueError)):
            ds['ripe'].sel(sugar=12)
    def test_single_string_var_names_with_no_var_dims(self):
        """A bare string var_names works without specifying var_dims."""
        combos = ('a', [1, 2, 3])
        ds = combo_runner_to_ds(foo3_scalar, combos,
                                constants={'b': 10, 'c': 100},
                                var_names='sum')
        assert_allclose(ds['sum'].data, np.array([111, 112, 113]))
    def test_double_array_return_with_same_dimensions(self):
        """A dims spec keyed on a tuple of names is shared by both outputs."""
        combos = (('a', [1, 2]),
                  ('b', [10, 20, 30]))
        ds = combo_runner_to_ds(foo2_array_array, combos,
                                var_names=['apples', 'oranges'],
                                var_dims={('apples', 'oranges'): ['seeds']},
                                var_coords={'seeds': [*range(5)]})
        assert ds.oranges.data.dtype == int
        assert_allclose(ds.sel(a=2, b=30).apples.data, [30, 32, 34, 36, 38])
        assert_allclose(ds.sel(a=2, b=30).oranges.data, [30, 28, 26, 24, 22])
        assert 'seeds' in ds.apples.coords
        assert 'seeds' in ds.oranges.coords
    def test_double_array_return_with_no_given_dimensions(self):
        """'auto' dims receive integer range coordinates."""
        ds = combo_runner_to_ds(foo2_array_array,
                                combos=[('a', [1, 2]),
                                        ('b', [30, 40])],
                                var_names=['array1', 'array2'],
                                var_dims=[['auto'], ['auto']])
        assert (ds['auto'].data.dtype == int or
                ds['auto'].data.dtype == np.int64)
        assert_allclose(ds['auto'].data, [0, 1, 2, 3, 4])
    def test_complex_output(self):
        """Complex-valued outputs keep their dtype and values intact."""
        ds = combo_runner_to_ds(foo2_zarray1_zarray2,
                                combos=[('a', [1, 2]),
                                        ('b', [30, 40])],
                                var_names=['array1', 'array2'],
                                var_dims=[['auto'], ['auto']])
        assert ds['array1'].data.size == 2 * 2 * 5
        assert ds['array2'].data.size == 2 * 2 * 5
        assert ds['array1'].data.dtype == complex
        assert ds['array2'].data.dtype == complex
        assert_allclose(ds['array1'].sel(a=2, b=30).data,
                        32 + np.arange(5) * 0.1j)
        assert_allclose(ds['array2'].sel(a=2, b=30).data,
                        32 - np.arange(5) * 0.1j)
    def test_constants_to_attrs(self):
        """Scalar constants are recorded as dataset attributes."""
        ds = combo_runner_to_ds(foo3_scalar,
                                combos=[('a', [1, 2, 3]),
                                        ('c', [100, 200, 300])],
                                constants={'b': 20},
                                var_names='x')
        assert ds.attrs['b'] == 20
    def test_const_array_to_coord(self):
        """Array constants named as dims become coordinates, not attrs."""
        ds = combo_runner_to_ds(foo_array_input,
                                combos=[('a', [1, 2, 3])],
                                constants={'t': [10, 20, 30]},
                                var_names=['x'],
                                var_dims=[['t']])
        assert 't' in ds.dims
        assert 't' not in ds.attrs
    def test_when_results_are_xobjs(self):
        """var_names=None lets the function return whole xarray Datasets."""
        def fn_ds(a, b):
            ds = xr.Dataset(
                coords={
                    'x': [1, 2],
                    'y': ['foo', 'bar', 'baz'],
                },
                data_vars={
                    'apples': (['x', 'y'], np.tile(10 * a + b, (2, 3))),
                    'lemons': ('y', np.tile(10 * a + b, (3,))),
                }
            )
            return ds
        fds = combo_runner_to_ds(
            fn_ds,
            combos={
                'a': [2, 3, 4, 5],
                'b': [6, 7, 8, 9, 10],
            }, var_names=None)
        assert fds.sel(a=4, b=9, x=2, y='bar')['apples'].values == 49
        assert fds.sel(a=5, b=7, y='bar')['lemons'].values == 57
        assert 'output' not in fds
| |
from django.shortcuts import render
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core import serializers
from django.core.exceptions import PermissionDenied
from django.forms import model_to_dict
from django.template import RequestContext
from django.db.models import Q
from models import EventBase
from models import EventVersion
from models import TagBase
from models import TagVersion
from models import Language
from Timeline.util.Permissions import require_permission
import math
import re
class Query:
    """Stateless helper methods backing the timeline event/tag views.

    Filtering conventions shared by the list methods:
      * ``?key=foo``   case-insensitive exact match; ``?key=~foo`` substring.
      * ``?title=...`` same convention against the current version's title.
      * ``?q=...``     free-text search over key/title/text.
      * ``?t=2a14``    AND-combination of tag ids.
      * ``?l=de``      restrict to a language code.
      * ``?p=N``       page number, 10 rows per page.
    """

    # Show a filterable list of all events
    @staticmethod
    def getQuery(request):
        # Return the free-text query string, or None when absent.
        if 'q' in request.GET:
            return request.GET['q']
        return None

    @staticmethod
    def listLanguages():
        return Language.objects.order_by('indexing').all()

    @staticmethod
    def hasLanguage(code):
        # The language table is tiny, so a linear scan is fine.
        for lang in Query.listLanguages():
            if lang.code == code:
                return True
        return False

    @staticmethod
    def filterByTags(dbPath, tagQuery):
        """AND-filter events by tag ids, e.g. ``?t=2a14`` -> tags 2 AND 14."""
        sep = "a"
        # BUG FIX: the old pattern ^\d$ only matched single-digit ids,
        # silently dropping any tag with id >= 10.
        numeric = re.compile(r"^\d+$")
        for tagId in tagQuery.split(sep):
            if numeric.match(tagId):
                dbPath = dbPath.filter(eventversion__tags__id=tagId)
        return dbPath

    @staticmethod
    def listEvents(request, shorten):
        """Return ``(events, pagination)`` honouring the GET filters.

        *shorten* controls whether the language is returned as its code or
        as the full model instance.
        """
        dbPath = EventBase.objects
        # HTML views always show disabled events; data formats cull them
        # unless the client explicitly asks for them via ?incDisabled=...
        cullDisabled = True
        if not 'f' in request.GET or request.GET['f'] == 'html':
            cullDisabled = False
        if 'incDisabled' in request.GET:
            # BUG FIX: GET values are strings, so the old comparison with the
            # boolean False was always False.  Treat anything other than an
            # explicit negative as "include disabled".
            cullDisabled = request.GET['incDisabled'].lower() in ('false', '0', '')
        if cullDisabled:
            dbPath = dbPath.filter(enabled=True)
        if 'key' in request.GET:
            key = request.GET['key']
            if key.startswith("~"):
                # "~foo" means substring match; strip only the marker char.
                dbPath = dbPath.filter(key__icontains=key[1:])
            else:
                dbPath = dbPath.filter(key__iexact=key)
        if 'title' in request.GET:
            title = request.GET['title']
            if title.startswith("~"):
                dbPath = dbPath.filter(eventversion__title__icontains=title[1:])
            else:
                dbPath = dbPath.filter(eventversion__title__iexact=title)
        if 'q' in request.GET:
            query = Query.getQuery(request)
            dbPath = dbPath.filter(Q(key__icontains=query) |
                                   Q(eventversion__title__icontains=query) |
                                   Q(eventversion__text__icontains=query))
        if 't' in request.GET:
            dbPath = Query.filterByTags(dbPath, request.GET['t'])
        if 'l' in request.GET and request.GET['l'] != '':
            dbPath = dbPath.filter(language__code=request.GET['l'])
        # distinct() is required because the version joins can duplicate rows.
        dbPath = dbPath.distinct()
        pag = Query.pagination(request, dbPath)
        rawevents = dbPath.order_by("key")[pag['offset']: pag['offset'] + pag['limit']]
        events = []
        for event in rawevents:
            version = event.getCurrentVersion()
            if version is None:
                # Events without any version are not listable.
                continue
            events.append({'id': event.id,
                           'key': event.key,
                           'title': version.title,
                           'text': version.text,
                           'enabled': event.enabled,
                           'year': version.getYear(),
                           'day': version.getDay(),
                           'language': event.language.code if shorten else event.language,
                           'wiki': version.wiki
                           })
        return (events, pag)

    # Create an event
    @staticmethod
    def createEvent(request):
        """Create an EventBase plus its first version from POST data."""
        require_permission(request.user, "Timeline_data.add_eventbase")
        key = request.POST['key']
        languageCode = request.POST['language']
        if EventBase.objects.filter(key=key, language__code=languageCode).count() > 0:
            raise Exception("Event with key '%s' already exists for language '%s'" % (key, languageCode))
        event = EventBase()
        event.key = key
        event.language = Language.objects.get(code=languageCode)
        event.save()
        try:
            # Roll the bare EventBase back if creating the version fails.
            event.addVersion(request.POST, 'publish' in request.POST)
            return event
        except Exception as e:
            event.delete()
            raise e

    # Show an event (POST for saving changes)
    @staticmethod
    def updateEvent(request, eventId, revision=None):
        """Apply the POSTed action to an event; return ``(event, version)``.

        Returns ``(None, None)`` after a full deletion.
        """
        event = EventBase.objects.filter(id=eventId).get()
        version = None
        if event is not None and request.method == "POST":
            if 'enabled' in request.POST:
                require_permission(request.user, "Timeline_data.change_eventbase")
                event.toggleEnabled()
                version = event.getCurrentVersion()
            if 'current' in request.POST:
                require_permission(request.user, "Timeline_data.change_eventbase")
                event.setPublicRevision(request.POST['current'])
                version = event.getCurrentVersion()
            elif 'save' in request.POST:
                require_permission(request.user, "Timeline_data.add_eventversion")
                version = event.addVersion(request.POST, False)
            elif 'publish' in request.POST:
                require_permission(request.user, "Timeline_data.add_eventversion")
                version = event.addVersion(request.POST, True)
            elif 'deleteVersion' in request.POST:
                require_permission(request.user, "Timeline_data.delete_eventversion")
                if revision is None and 'revision' in request.POST:
                    revision = request.POST['revision']
                if revision is not None:
                    event.deleteVersion(revision)
            elif 'deleteEvent' in request.POST:
                require_permission(request.user, "Timeline_data.delete_eventbase")
                event.delete()
                return (None, None)
        return (event, version)

    @staticmethod
    def deleteEvent(request, eventId):
        require_permission(request.user, "Timeline_data.delete_eventbase")
        event = EventBase.objects.filter(id=eventId).get()
        event.delete()

    @staticmethod
    def deleteEventVersion(request, eventId, revision):
        require_permission(request.user, "Timeline_data.delete_eventversion")
        event = EventBase.objects.filter(id=eventId).get()
        if revision is not None:
            event.deleteVersion(revision)
        return event

    #------------------------------------------------------------------------------
    # Show a filterable list of all tags
    @staticmethod
    def listTags(request, shorten):
        """Return ``(tags, pagination)`` using the same filters as listEvents."""
        dbPath = TagBase.objects
        if 'q' in request.GET:
            query = Query.getQuery(request)
            dbPath = dbPath.filter(Q(key__icontains=query) | Q(tagversion__title__icontains=query))
        if 'key' in request.GET:
            key = request.GET['key']
            if key.startswith("~"):
                dbPath = dbPath.filter(key__icontains=key[1:])
            else:
                dbPath = dbPath.filter(key__iexact=key)
        if 'title' in request.GET:
            title = request.GET['title']
            if title.startswith("~"):
                dbPath = dbPath.filter(tagversion__title__icontains=title[1:])
            else:
                dbPath = dbPath.filter(tagversion__title__iexact=title)
        if 'l' in request.GET:
            dbPath = dbPath.filter(language__code=request.GET['l'])
        dbPath = dbPath.distinct()
        pag = Query.pagination(request, dbPath)
        rawtags = dbPath.order_by("key")[pag['offset']: pag['offset'] + pag['limit']]
        tags = []
        for tag in rawtags:
            version = tag.getCurrentVersion()
            if version is None:
                # Consistent with listEvents: skip tags without a version
                # (previously this crashed on version.title).
                continue
            tags.append({
                'id': tag.id,
                'key': tag.key,
                'title': version.title,
                'enabled': tag.enabled,
                'language': tag.language.code if shorten else tag.language
            })
        return (tags, pag)

    # Create a tag (POST for saving changes)
    @staticmethod
    def createTag(request):
        require_permission(request.user, "Timeline_data.add_tagbase")
        key = request.POST['key']
        languageCode = request.POST['language']
        if TagBase.objects.filter(key=key, language__code=languageCode).count() > 0:
            raise Exception("Tag with key '%s' already exists for language '%s'" % (key, languageCode))
        tag = TagBase()
        tag.key = key
        tag.language = Language.objects.get(code=languageCode)
        tag.save()
        tag.addVersion(request.POST['title'], True)
        return tag

    # Show a tag (POST for saving changes)
    @staticmethod
    def updateTag(request, tagId, revision=None):
        """Apply the POSTed action to a tag; return ``(tag, version)``.

        Returns ``(None, None)`` after a full deletion.
        """
        tag = TagBase.objects.filter(id=tagId).get()
        version = None
        if request.method == "POST":
            if 'enabled' in request.POST:
                require_permission(request.user, "Timeline_data.change_tagbase")
                tag.toggleEnabled()
            if 'current' in request.POST:
                require_permission(request.user, "Timeline_data.change_tagbase")
                revision = request.POST['current']
                tag.setPublicRevision(revision)
            elif 'save' in request.POST:
                require_permission(request.user, "Timeline_data.add_tagversion")
                version = tag.addVersion(request.POST['title'], False)
            elif 'publish' in request.POST:
                require_permission(request.user, "Timeline_data.add_tagversion")
                version = tag.addVersion(request.POST['title'], True)
            elif 'deleteVersion' in request.POST:
                require_permission(request.user, "Timeline_data.delete_tagversion")
                if revision is None and 'revision' in request.POST:
                    revision = request.POST['revision']
                if revision is not None:
                    tag.deleteVersion(revision)
            elif 'deleteTag' in request.POST:
                require_permission(request.user, "Timeline_data.delete_tagbase")
                tag.delete()
                return (None, None)
        return (tag, version)

    @staticmethod
    def deleteTag(request, tagId):
        require_permission(request.user, "Timeline_data.delete_tagbase")
        tag = TagBase.objects.filter(id=tagId).get()
        tag.delete()

    @staticmethod
    def deleteTagVersion(request, tagId, revision):
        require_permission(request.user, "Timeline_data.delete_tagversion")
        # BUG FIX: previously queried EventBase with a tag id.
        tag = TagBase.objects.filter(id=tagId).get()
        if revision is not None:
            tag.deleteVersion(revision)
        return tag

    #------------------------------------------------------------------------------
    @staticmethod
    def pagination(request, dbPath):
        """Compute offset/limit plus prev/next/first/last page numbers for
        the ``?p=`` GET parameter over the given queryset."""
        try:
            page = int(request.GET['p'])
        except (KeyError, ValueError, TypeError):
            page = 1
        limit = 10
        offset = (page - 1) * limit
        # float() keeps ceil correct under Python 2 integer division.
        lastpage = int(math.ceil(dbPath.count() / float(limit)))
        return {
            "offset": offset,
            "limit": limit,
            "current": page,
            "prev": page - 1 if page > 1 else None,
            "next": page + 1 if page < lastpage else None,
            "first": 1 if page > 1 else None,
            "last": lastpage if page < lastpage else None
        }
| |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import socket
import time
import warnings
from pathlib import Path
from typing import Dict, List, Union
from zipfile import ZipFile
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from huggingface_hub.hf_api import list_models
from transformers import MarianConfig, MarianMTModel, MarianTokenizer
def remove_suffix(text: str, suffix: str):
    """Return *text* with *suffix* stripped from its end, if present.

    Guards against an empty suffix: ``text[:-len("")]`` is ``text[:0]``,
    which made the old version return ``""`` for ``suffix == ""``.
    """
    if suffix and text.endswith(suffix):
        return text[: -len(suffix)]
    return text
def remove_prefix(text: str, prefix: str):
    """Return *text* without a leading *prefix* (no-op when absent)."""
    return text[len(prefix):] if text.startswith(prefix) else text
def convert_encoder_layer(opus_dict, layer_prefix: str, converter: dict):
    """Collect one layer's weights from *opus_dict*, renamed via *converter*.

    Everything except embeddings is stored transposed in the npz file.
    """
    converted = {}
    for key in opus_dict:
        if not key.startswith(layer_prefix):
            continue
        stripped_key = remove_prefix(key, layer_prefix)
        value = opus_dict[key].T  # besides embeddings, everything must be transposed.
        converted[converter[stripped_key]] = torch.tensor(value).squeeze()
    return converted
def load_layers_(layer_lst: nn.ModuleList, opus_state: dict, converter, is_decoder=False):
    """In place: load each layer of *layer_lst* from the marian state dict."""
    side = "decoder" if is_decoder else "encoder"
    for idx, layer in enumerate(layer_lst, start=1):
        layer_tag = f"{side}_l{idx}_"
        layer.load_state_dict(convert_encoder_layer(opus_state, layer_tag, converter), strict=True)
def find_pretrained_model(src_lang: str, tgt_lang: str) -> List[str]:
    """Find models that can accept src_lang as input and return tgt_lang as output."""
    prefix = "Helsinki-NLP/opus-mt-"
    # "+"-joined multilingual names cannot be loaded, so they are skipped.
    candidate_ids = [
        m.modelId
        for m in list_models()
        if m.modelId.startswith("Helsinki-NLP") and "+" not in m.modelId
    ]
    matching = []
    for model_id in candidate_ids:
        src, tgt = remove_prefix(model_id, prefix).lower().split("-")
        if src_lang in src and tgt_lang in tgt:
            matching.append(f"{prefix}{src}-{tgt}")
    return matching
def add_emb_entries(wemb, final_bias, n_special_tokens=1):
    """Append *n_special_tokens* zero rows to the embedding matrix and
    matching zero columns to the final bias; return the enlarged copies."""
    _, d_model = wemb.shape
    pad_rows = np.zeros((n_special_tokens, d_model))
    pad_cols = np.zeros((n_special_tokens, 1))
    enlarged_emb = np.concatenate([wemb, pad_rows])
    enlarged_bias = np.concatenate((final_bias, pad_cols), axis=1)
    return enlarged_emb, enlarged_bias
def _cast_yaml_str(v):
bool_dct = {"true": True, "false": False}
if not isinstance(v, str):
return v
elif v in bool_dct:
return bool_dct[v]
try:
return int(v)
except (TypeError, ValueError):
return v
def cast_marian_config(raw_cfg: Dict[str, str]) -> Dict:
    """Apply ``_cast_yaml_str`` to every value of *raw_cfg*."""
    return dict((key, _cast_yaml_str(value)) for key, value in raw_cfg.items())
# Key under which marian embeds its yaml config inside the npz state dict.
CONFIG_KEY = "special:model.yml"
def load_config_from_state_dict(opus_dict):
    """Decode and type-cast the yaml config embedded in a marian state dict."""
    import yaml

    raw_cfg = "".join(chr(code) for code in opus_dict[CONFIG_KEY])
    # drop the trailing terminator character before parsing
    yaml_cfg = yaml.load(raw_cfg[:-1], Loader=yaml.BaseLoader)
    return cast_marian_config(yaml_cfg)
def find_model_file(dest_dir):
    """Return the single ``*.npz`` checkpoint inside *dest_dir*.

    Raises:
        ValueError: if zero or several ``.npz`` files are present.  (The old
            message claimed "more than one" even when none were found.)
    """
    model_files = list(Path(dest_dir).glob("*.npz"))
    if len(model_files) != 1:
        raise ValueError(f"Expected exactly one model file in {dest_dir}, found: {model_files}")
    return model_files[0]
# Group Names Logic: change long opus model names to something shorter, like opus-mt-en-ROMANCE
# All the romance-language (sub)codes collapsed into the single "ROMANCE" group.
ROM_GROUP = (
    "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO+es_EC+es_ES+es_GT"
    "+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR+pt_PT+gl+lad+an+mwl+it+it_IT+co"
    "+nap+scn+vec+sc+ro+la"
)
# (long opus substring, short group name) pairs, applied in order by
# convert_opus_name_to_hf_name below.
GROUPS = [
    ("cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "ZH"),
    (ROM_GROUP, "ROMANCE"),
    ("de+nl+fy+af+da+fo+is+no+nb+nn+sv", "NORTH_EU"),
    ("da+fo+is+no+nb+nn+sv", "SCANDINAVIA"),
    ("se+sma+smj+smn+sms", "SAMI"),
    ("nb_NO+nb+nn_NO+nn+nog+no_nb+no", "NORWAY"),
    ("ga+cy+br+gd+kw+gv", "CELTIC"),  # https://en.wikipedia.org/wiki/Insular_Celtic_languages
]
# Reverse map: shortened HF model name -> original "+"-joined opus name.
# Used by convert_hf_name_to_opus_name for names that cannot be derived
# mechanically by swapping "_" back to "+".
GROUP_TO_OPUS_NAME = {
    "opus-mt-ZH-de": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-de",
    "opus-mt-ZH-fi": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
    "opus-mt-ZH-sv": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-sv",
    "opus-mt-SCANDINAVIA-SCANDINAVIA": "da+fo+is+no+nb+nn+sv-da+fo+is+no+nb+nn+sv",
    "opus-mt-NORTH_EU-NORTH_EU": "de+nl+fy+af+da+fo+is+no+nb+nn+sv-de+nl+fy+af+da+fo+is+no+nb+nn+sv",
    "opus-mt-de-ZH": "de-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
    "opus-mt-en_el_es_fi-en_el_es_fi": "en+el+es+fi-en+el+es+fi",
    "opus-mt-en-ROMANCE": "en-fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO"
    "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR"
    "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la",
    "opus-mt-en-CELTIC": "en-ga+cy+br+gd+kw+gv",
    "opus-mt-es-NORWAY": "es-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
    "opus-mt-fi_nb_no_nn_ru_sv_en-SAMI": "fi+nb+no+nn+ru+sv+en-se+sma+smj+smn+sms",
    "opus-mt-fi-ZH": "fi-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
    "opus-mt-fi-NORWAY": "fi-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
    "opus-mt-ROMANCE-en": "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO"
    "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR"
    "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la-en",
    "opus-mt-CELTIC-en": "ga+cy+br+gd+kw+gv-en",
    "opus-mt-sv-ZH": "sv-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
    "opus-mt-sv-NORWAY": "sv-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
}
# Base locations of the upstream training repo and the HF organisation.
OPUS_GITHUB_URL = "https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/"
ORG_NAME = "Helsinki-NLP/"
def convert_opus_name_to_hf_name(x):
    """For OPUS-MT-Train/ DEPRECATED"""
    for long_name, group_name in GROUPS:
        x = x.replace(long_name, group_name)
    return x.replace("+", "_")
def convert_hf_name_to_opus_name(hf_model_name):
    """
    Relies on the assumption that there are no language codes like pt_br in models that are not in GROUP_TO_OPUS_NAME.
    """
    bare_name = remove_prefix(hf_model_name, ORG_NAME)
    opus_w_prefix = GROUP_TO_OPUS_NAME.get(bare_name, bare_name.replace("_", "+"))
    return remove_prefix(opus_w_prefix, "opus-mt-")
def get_system_metadata(repo_root):
    """Record git SHAs of both repos plus host and timestamp for provenance."""
    import git

    helsinki_repo = git.Repo(path=repo_root, search_parent_directories=True)
    transformers_repo = git.Repo(path=".", search_parent_directories=True)
    return {
        "helsinki_git_sha": helsinki_repo.head.object.hexsha,
        "transformers_git_sha": transformers_repo.head.object.hexsha,
        "port_machine": socket.gethostname(),
        "port_time": time.strftime("%Y-%m-%d-%H:%M"),
    }
# docstyle-ignore
# Yaml front matter prepended to every generated model card README.
FRONT_MATTER_TEMPLATE = """---
language:
{}
tags:
- translation
license: apache-2.0
---
"""
# Default clone location of the upstream Tatoeba-Challenge repo.
DEFAULT_REPO = "Tatoeba-Challenge"
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models")
def write_model_card(
    hf_model_name: str,
    repo_root=DEFAULT_REPO,
    save_dir=Path("marian_converted"),
    dry_run=False,
    extra_metadata=None,
) -> str:
    """
    Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync model_card_dir
    s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun

    Returns ``(content, metadata)``.  Unless *dry_run*, also writes
    ``README.md`` and ``metadata.json`` under ``save_dir/opus-mt-<name>``.
    NOTE: 'src_name'/'tgt_name'/'src_alpha2' must be supplied via
    *extra_metadata* — presumably by the tatoeba pipeline; verify at call site.
    """
    import pandas as pd

    # avoid a mutable default argument
    extra_metadata = {} if extra_metadata is None else extra_metadata
    hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
    opus_name: str = convert_hf_name_to_opus_name(hf_model_name)
    if repo_root not in ("OPUS-MT-train", "Tatoeba-Challenge"):
        raise ValueError(f"Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge")
    opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
    if not (opus_readme_path.exists()):
        raise ValueError(f"Readme file {opus_readme_path} not found")

    opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]

    readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"

    s, t = ",".join(opus_src), ",".join(opus_tgt)
    metadata = {
        "hf_name": hf_model_name,
        "source_languages": s,
        "target_languages": t,
        "opus_readme_url": readme_url,
        "original_repo": repo_root,
        "tags": ["translation"],
    }
    metadata.update(extra_metadata)
    metadata.update(get_system_metadata(repo_root))

    # combine with opus markdown
    extra_markdown = (
        f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: "
        f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n"
    )
    content = opus_readme_path.open().read()
    content = content.split("\n# ")[-1]  # Get the lowest level 1 header in the README -- the most recent model.
    splat = content.split("*")[2:]
    # (removed a leftover debug `print(splat[3])` here)
    content = "*".join(splat)
    content = (
        FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"])
        + extra_markdown
        + "\n* "
        + content.replace("download", "download original weights")
    )
    items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()])
    sec3 = "\n### System Info: \n" + items
    content += sec3
    if dry_run:
        return content, metadata
    sub_dir = save_dir / f"opus-mt-{hf_model_name}"
    sub_dir.mkdir(exist_ok=True)
    dest = sub_dir / "README.md"
    dest.open("w").write(content)
    pd.Series(metadata).to_json(sub_dir / "metadata.json")
    return content, metadata
def make_registry(repo_path="Opus-MT-train/models"):
    """Collect ``(name, preprocessing, download url, test-set url)`` tuples
    for every language-pair directory under *repo_path*."""
    if not (Path(repo_path) / "fr-en" / "README.md").exists():
        raise ValueError(
            f"repo_path:{repo_path} does not exist: "
            "You must run: git clone git@github.com:Helsinki-NLP/Opus-MT-train.git before calling."
        )
    parsed = {}
    for entry in Path(repo_path).iterdir():
        if entry.name.count("-") == 0:
            continue  # not a language-pair directory
        lns = list(open(entry / "README.md").readlines())
        parsed[entry.name] = _parse_readme(lns)
    return [
        (name, meta["pre-processing"], meta["download"], meta["download"][:-4] + ".test.txt")
        for name, meta in parsed.items()
    ]
def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path("marian_converted")):
    """Requires 300GB"""
    save_dir = Path("marian_ckpt")
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    if model_list is None:
        model_list = make_registry(repo_path=repo_path)
    save_paths = []
    for name, prepro, download, _test_set_url in tqdm(model_list):
        if "SentencePiece" not in prepro:  # dont convert BPE models.
            continue
        ckpt_dir = save_dir / name
        if not os.path.exists(ckpt_dir):
            download_and_unzip(download, ckpt_dir)
        pair_name = convert_opus_name_to_hf_name(name)
        out_dir = dest_dir / f"opus-mt-{pair_name}"
        convert(ckpt_dir, out_dir)
        save_paths.append(out_dir)
    return save_paths
def lmap(f, x) -> List:
    """Eagerly map *f* over *x*, returning a list."""
    return [f(item) for item in x]
def fetch_test_set(test_set_url):
    """Download an opus test file; every 4 lines hold (src, gold, marian, _)."""
    import wget

    fname = wget.download(test_set_url, "opus_test.txt")
    lns = Path(fname).open().readlines()
    src = lmap(str.strip, lns[0::4])
    gold = lmap(str.strip, lns[1::4])
    mar_model = lmap(str.strip, lns[2::4])
    if not (len(gold) == len(mar_model) == len(src)):
        raise ValueError(f"Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched")
    os.remove(fname)
    return src, mar_model, gold
def convert_whole_dir(path=Path("marian_ckpt/")):
    """Convert every checkpoint sub-directory under *path*, skipping any
    that already contain a converted model.

    Fixes three bugs in the previous version: ``Path`` has no ``.ls()``
    (it is ``iterdir()``), ``dest_dir`` was a plain str so ``dest_dir / ...``
    raised TypeError, and the final call referenced an undefined
    ``source_dir`` instead of ``subdir``.
    """
    for subdir in tqdm(list(path.iterdir())):
        dest_dir = Path(f"marian_converted/{subdir.name}")
        if (dest_dir / "pytorch_model.bin").exists():
            continue  # already converted
        convert(subdir, dest_dir)
def _parse_readme(lns):
"""Get link and metadata from opus model card equivalent."""
subres = {}
for ln in [x.strip() for x in lns]:
if not ln.startswith("*"):
continue
ln = ln[1:].strip()
for k in ["download", "dataset", "models", "model", "pre-processing"]:
if ln.startswith(k):
break
else:
continue
if k in ["dataset", "model", "pre-processing"]:
splat = ln.split(":")
_, v = splat
subres[k] = v
elif k == "download":
v = ln.split("(")[-1][:-1]
subres[k] = v
return subres
def save_tokenizer_config(dest_dir: Path):
    """Write tokenizer_config.json, deriving src/tgt langs from the dir name."""
    *source_parts, target_lang = dest_dir.name.split("-")
    cfg = dict(target_lang=target_lang, source_lang="-".join(source_parts))
    save_json(cfg, dest_dir / "tokenizer_config.json")
def add_to_vocab_(vocab: Dict[str, int], special_tokens: List[str]):
    """In place: append missing *special_tokens* at fresh ids; return the
    number of tokens added."""
    next_id = max(vocab.values()) + 1
    added = 0
    for token in special_tokens:
        if token not in vocab:
            vocab[token] = next_id + added
            added += 1
    return added
def find_vocab_file(model_dir):
    """Return the first ``*vocab.yml`` file inside *model_dir*."""
    candidates = list(model_dir.glob("*vocab.yml"))
    return candidates[0]
def add_special_tokens_to_vocab(model_dir: Path) -> None:
    """Add <pad> to the marian vocab, then write vocab.json and the
    tokenizer config into *model_dir*."""
    raw_vocab = load_yaml(find_vocab_file(model_dir))
    vocab = {token: int(idx) for token, idx in raw_vocab.items()}
    num_added = add_to_vocab_(vocab, ["<pad>"])
    print(f"added {num_added} tokens to vocab")
    save_json(vocab, model_dir / "vocab.json")
    save_tokenizer_config(model_dir)
def check_equal(marian_cfg, k1, k2):
    """Raise ValueError unless ``marian_cfg[k1] == marian_cfg[k2]``."""
    v1, v2 = marian_cfg[k1], marian_cfg[k2]
    if v1 == v2:
        return
    raise ValueError(f"hparams {k1},{k2} differ: {v1} != {v2}")
def check_marian_cfg_assumptions(marian_cfg):
    """Fail fast when the marian config uses features this porting code
    does not handle."""
    assumed_settings = {
        "tied-embeddings-all": True,
        "layer-normalization": False,
        "right-left": False,
        "transformer-ffn-depth": 2,
        "transformer-aan-depth": 2,
        "transformer-no-projection": False,
        "transformer-postprocess-emb": "d",
        "transformer-postprocess": "dan",  # Dropout, add, normalize
        "transformer-preprocess": "",
        "type": "transformer",
        "ulr-dim-emb": 0,
        "dec-cell-base-depth": 2,
        "dec-cell-high-depth": 1,
        "transformer-aan-nogate": False,
    }
    for key, expected in assumed_settings.items():
        actual = marian_cfg[key]
        if actual != expected:
            raise ValueError(f"Unexpected config value for {key} expected {expected} got {actual}")
    # ffn and aan hyper-parameters must agree for the conversion to be valid
    check_equal(marian_cfg, "transformer-ffn-activation", "transformer-aan-activation")
    check_equal(marian_cfg, "transformer-ffn-depth", "transformer-aan-depth")
    check_equal(marian_cfg, "transformer-dim-ffn", "transformer-dim-aan")
# Name of the final logits bias vector inside the marian npz checkpoint.
BIAS_KEY = "decoder_ff_logit_out_b"
# marian parameter suffix -> HF parameter name, applied per layer.
BART_CONVERTER = {  # for each encoder and decoder layer
    "self_Wq": "self_attn.q_proj.weight",
    "self_Wk": "self_attn.k_proj.weight",
    "self_Wv": "self_attn.v_proj.weight",
    "self_Wo": "self_attn.out_proj.weight",
    "self_bq": "self_attn.q_proj.bias",
    "self_bk": "self_attn.k_proj.bias",
    "self_bv": "self_attn.v_proj.bias",
    "self_bo": "self_attn.out_proj.bias",
    "self_Wo_ln_scale": "self_attn_layer_norm.weight",
    "self_Wo_ln_bias": "self_attn_layer_norm.bias",
    "ffn_W1": "fc1.weight",
    "ffn_b1": "fc1.bias",
    "ffn_W2": "fc2.weight",
    "ffn_b2": "fc2.bias",
    "ffn_ffn_ln_scale": "final_layer_norm.weight",
    "ffn_ffn_ln_bias": "final_layer_norm.bias",
    # Decoder Cross Attention
    "context_Wk": "encoder_attn.k_proj.weight",
    "context_Wo": "encoder_attn.out_proj.weight",
    "context_Wq": "encoder_attn.q_proj.weight",
    "context_Wv": "encoder_attn.v_proj.weight",
    "context_bk": "encoder_attn.k_proj.bias",
    "context_bo": "encoder_attn.out_proj.bias",
    "context_bq": "encoder_attn.q_proj.bias",
    "context_bv": "encoder_attn.v_proj.bias",
    "context_Wo_ln_scale": "encoder_attn_layer_norm.weight",
    "context_Wo_ln_bias": "encoder_attn_layer_norm.bias",
}
class OpusState:
    """Wraps a raw marian npz checkpoint and converts it to a MarianMTModel."""

    def __init__(self, source_dir, eos_token_id=0):
        npz_path = find_model_file(source_dir)
        self.state_dict = np.load(npz_path)
        cfg = load_config_from_state_dict(self.state_dict)
        if cfg["dim-vocabs"][0] != cfg["dim-vocabs"][1]:
            raise ValueError("Unequal source/target vocab sizes are not supported")
        if "Wpos" in self.state_dict:
            raise ValueError("Wpos key in state dictionary")
        self.state_dict = dict(self.state_dict)
        # Append the <pad> embedding row / bias column that the tokenizer added.
        self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1)
        self.pad_token_id = self.wemb.shape[0] - 1
        cfg["vocab_size"] = self.pad_token_id + 1
        self.state_keys = list(self.state_dict.keys())
        if "Wtype" in self.state_dict:
            raise ValueError("Wtype key in state dictionary")
        self._check_layer_entries()
        self.source_dir = source_dir
        self.cfg = cfg
        hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape
        if hidden_size != 512 or cfg["dim-emb"] != 512:
            # BUG FIX: the config key is "dim-emb"; the old message indexed
            # cfg['dim_emb'] and raised KeyError instead of this ValueError.
            raise ValueError(f"Hidden size {hidden_size} and configured size {cfg['dim-emb']} mismatched or not 512")

        # Process decoder.yml
        decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml"))
        check_marian_cfg_assumptions(cfg)
        self.hf_config = MarianConfig(
            vocab_size=cfg["vocab_size"],
            decoder_layers=cfg["dec-depth"],
            encoder_layers=cfg["enc-depth"],
            decoder_attention_heads=cfg["transformer-heads"],
            encoder_attention_heads=cfg["transformer-heads"],
            decoder_ffn_dim=cfg["transformer-dim-ffn"],
            encoder_ffn_dim=cfg["transformer-dim-ffn"],
            d_model=cfg["dim-emb"],
            activation_function=cfg["transformer-aan-activation"],
            pad_token_id=self.pad_token_id,
            eos_token_id=eos_token_id,
            forced_eos_token_id=eos_token_id,
            bos_token_id=0,
            max_position_embeddings=cfg["dim-emb"],
            scale_embedding=True,
            normalize_embedding="n" in cfg["transformer-preprocess"],
            static_position_embeddings=not cfg["transformer-train-position-embeddings"],
            dropout=0.1,  # see opus-mt-train repo/transformer-dropout param.
            # default: add_final_layer_norm=False,
            num_beams=decoder_yml["beam-size"],
            decoder_start_token_id=self.pad_token_id,
            bad_words_ids=[[self.pad_token_id]],
            max_length=512,
        )

    def _check_layer_entries(self):
        """Warn (not fail) when a layer has an unexpected number of params."""
        self.encoder_l1 = self.sub_keys("encoder_l1")
        self.decoder_l1 = self.sub_keys("decoder_l1")
        self.decoder_l2 = self.sub_keys("decoder_l2")
        if len(self.encoder_l1) != 16:
            warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}")
        if len(self.decoder_l1) != 26:
            warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
        if len(self.decoder_l2) != 26:
            # BUG FIX: previously reported len(self.decoder_l1) here.
            warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l2)}")

    @property
    def extra_keys(self):
        """State-dict keys that the conversion does not account for."""
        extra = []
        for k in self.state_keys:
            if (
                k.startswith("encoder_l")
                or k.startswith("decoder_l")
                or k in [CONFIG_KEY, "Wemb", "Wpos", "decoder_ff_logit_out_b"]
            ):
                continue
            else:
                extra.append(k)
        return extra

    def sub_keys(self, layer_prefix):
        """All state-dict keys under *layer_prefix*, with the prefix removed."""
        return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)]

    def load_marian_model(self) -> MarianMTModel:
        """Instantiate a MarianMTModel and copy all converted weights in."""
        state_dict, cfg = self.state_dict, self.hf_config

        if not cfg.static_position_embeddings:
            raise ValueError("config.static_position_embeddings should be True")
        model = MarianMTModel(cfg)

        if "hidden_size" in cfg.to_dict():
            raise ValueError("hidden_size is in config")
        load_layers_(
            model.model.encoder.layers,
            state_dict,
            BART_CONVERTER,
        )
        load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True)

        # handle tensors not associated with layers
        wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
        bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
        model.model.shared.weight = wemb_tensor
        model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared

        model.final_logits_bias = bias_tensor

        if "Wpos" in state_dict:
            print("Unexpected: got Wpos")
            wpos_tensor = torch.tensor(state_dict["Wpos"])
            model.model.encoder.embed_positions.weight = wpos_tensor
            model.model.decoder.embed_positions.weight = wpos_tensor

        if cfg.normalize_embedding:
            if not ("encoder_emb_ln_scale_pre" in state_dict):
                raise ValueError("encoder_emb_ln_scale_pre is not in state dictionary")
            raise NotImplementedError("Need to convert layernorm_embedding")

        if self.extra_keys:
            raise ValueError(f"Failed to convert {self.extra_keys}")

        if model.model.shared.padding_idx != self.pad_token_id:
            raise ValueError(f"Padding tokens {model.model.shared.padding_idx} and {self.pad_token_id} mismatched")
        return model
def download_and_unzip(url, dest_dir):
    """Fetch *url* with wget, extract into *dest_dir*, delete the archive."""
    try:
        import wget
    except ImportError:
        raise ImportError("you must pip install wget")

    archive = wget.download(url)
    unzip(archive, dest_dir)
    os.remove(archive)
def convert(source_dir: Path, dest_dir):
    """End-to-end port of one marian checkpoint to a HF model directory."""
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)

    add_special_tokens_to_vocab(source_dir)
    tokenizer = MarianTokenizer.from_pretrained(str(source_dir))
    tokenizer.save_pretrained(dest_dir)

    # retrieve EOS token and set correctly
    eos_token_id = getattr(tokenizer, "eos_token_id", None)
    if eos_token_id is None:
        eos_token_id = 0

    opus_state = OpusState(source_dir, eos_token_id=eos_token_id)
    if opus_state.cfg["vocab_size"] != len(tokenizer.encoder):
        raise ValueError(
            f"Original vocab size {opus_state.cfg['vocab_size']} and new vocab size {len(tokenizer.encoder)} mismatched"
        )
    # save_json(opus_state.cfg, dest_dir / "marian_original_config.json")
    # ^^ Uncomment to save human readable marian config for debugging

    model = opus_state.load_marian_model()
    model = model.half()  # store fp16 weights on disk
    model.save_pretrained(dest_dir)
    model.from_pretrained(dest_dir)  # sanity check
def load_yaml(path):
    """Parse *path* with the BaseLoader so every scalar stays a string."""
    import yaml

    with open(path) as handle:
        return yaml.load(handle, Loader=yaml.BaseLoader)
def save_json(content: Union[Dict, List], path: str) -> None:
    """Serialize *content* as JSON to *path*."""
    with open(path, "w") as handle:
        json.dump(content, handle)
def unzip(zip_path: str, dest_dir: str) -> None:
    """Extract every member of the zip archive at *zip_path* into *dest_dir*."""
    with ZipFile(zip_path, "r") as archive:
        archive.extractall(dest_dir)
if __name__ == "__main__":
    # Tatoeba conversion instructions in scripts/tatoeba/README.md
    parser = argparse.ArgumentParser()
    parser.add_argument("--src", type=str, help="path to marian model sub dir", default="en-de")
    parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    marian_dir = Path(args.src)
    if not marian_dir.exists():
        raise ValueError(f"Source directory {marian_dir} not found")
    # Default the output location to "converted-<model name>".
    output_dir = args.dest if args.dest is not None else f"converted-{marian_dir.name}"
    convert(marian_dir, output_dir)
| |
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramz.core.constrainable nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from .indexable import Indexable
from ..transformations import Transformation,Logexp, NegativeLogexp, Logistic, __fixed__, FIXED, UNFIXED
class Constrainable(Indexable):
    """Mixin providing constrain/unconstrain and fix/unfix operations for parameter objects.

    Constraints (transformations) are recorded per parameter index in the
    'constraints' index operation.  Fixed entries are additionally mirrored in
    the boolean mask ``_fixes_`` of the highest parent, where ``True`` means
    free (UNFIXED) and ``False`` means FIXED; the mask collapses to ``None``
    whenever nothing is fixed.
    """
    def __init__(self, name, default_constraint=None, *a, **kw):
        super(Constrainable, self).__init__(name=name)
        self._default_constraint_ = default_constraint
        # Imported locally to avoid a circular import at module load time.
        from .index_operations import ParameterIndexOperations
        self.add_index_operation('constraints', ParameterIndexOperations())
        if self._default_constraint_ is not None:
            self.constrain(self._default_constraint_)
    # def __setstate__(self, state):
    #     super(Constrainable, self).__setstate__(state)
    #     #from .index_operations import ParameterIndexOperations
    #     #self.add_index_operation('constraints', ParameterIndexOperations())
    #     #self._index_operations['constraints'] = self.constraints

    #===========================================================================
    # Fixing Parameters:
    #===========================================================================
    def constrain_fixed(self, value=None, warning=True, trigger_parent=True):
        """
        Constrain this parameter to be fixed to the current value it carries.

        This does not override the previous constraints, so unfixing will
        restore the constraint set before fixing.

        :param value: optional value to set this parameter to before fixing.
        :param warning: print a warning for overwriting constraints.
        :param trigger_parent: whether to notify observers up to the highest parent.
        :returns: the indices that were newly fixed.
        """
        if value is not None:
            self[:] = value

        #index = self.unconstrain()
        index = self._add_to_index_operations(self.constraints, np.empty(0), __fixed__, warning)
        self._highest_parent_._set_fixed(self, index)
        # NOTE(review): -np.inf appears to tell notify_observers not to climb
        # to the parent when trigger_parent is False — confirm against the
        # observable implementation.
        self.notify_observers(self, None if trigger_parent else -np.inf)
        return index
    fix = constrain_fixed

    def unconstrain_fixed(self):
        """
        This parameter will no longer be fixed.

        If there was a constraint on this parameter when fixing it,
        it will be constrained with that previous constraint.
        """
        unconstrained = self.unconstrain(__fixed__)
        self._highest_parent_._set_unfixed(self, unconstrained)
        #if self._default_constraint_ is not None:
        #    return self.constrain(self._default_constraint_)
        return unconstrained
    unfix = unconstrain_fixed

    def _ensure_fixes(self):
        # Ensure that the fixes array is set:
        # Parameterized: ones(self.size)
        # Param: ones(self._realsize_
        # (Re)build the mask whenever it is missing, cleared, or stale in size;
        # ones == all free, then mark the currently fixed indices.
        if (not hasattr(self, "_fixes_")) or (self._fixes_ is None) or (self._fixes_.size != self.size):
            self._fixes_ = np.ones(self.size, dtype=bool)
            self._fixes_[self.constraints[__fixed__]] = FIXED

    def _set_fixed(self, param, index):
        # Mark `index` (relative to `param`'s offset) as fixed in this mask.
        self._ensure_fixes()
        offset = self._offset_for(param)
        self._fixes_[index+offset] = FIXED
        if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED

    def _set_unfixed(self, param, index):
        # Mark `index` (relative to `param`'s offset) as free again.
        self._ensure_fixes()
        offset = self._offset_for(param)
        self._fixes_[index+offset] = UNFIXED
        if np.all(self._fixes_): self._fixes_ = None # ==UNFIXED

    def _connect_fixes(self):
        # Rebuild the fixes mask from the constraints index (used when the
        # parameter hierarchy is (re)wired).
        fixed_indices = self.constraints[__fixed__]
        if fixed_indices.size > 0:
            self._ensure_fixes()
            self._fixes_[:] = UNFIXED
            self._fixes_[fixed_indices] = FIXED
        else:
            # Nothing fixed: clear the mask and drop the (empty) fixed entry.
            self._fixes_ = None
            del self.constraints[__fixed__]

    #===========================================================================
    # Convenience for fixed
    #===========================================================================
    def _has_fixes(self):
        # True when at least one index is registered as fixed.
        return self.constraints[__fixed__].size != 0

    @property
    def is_fixed(self):
        # Fixed iff every child parameter is fixed (vacuously True when there
        # are no children).
        for p in self.parameters:
            if not p.is_fixed: return False
        return True

    def _get_original(self, param):
        # if advanced indexing is activated it happens that the array is a copy
        # you can retrieve the original param through this method, by passing
        # the copy here
        return self.parameters[param._parent_index_]

    #===========================================================================
    # Constrain operations -> done
    #===========================================================================
    def constrain(self, transform, warning=True, trigger_parent=True):
        """
        Constrain the parameter to the given
        :py:class:`paramz.transformations.Transformation`.

        :param transform: the :py:class:`paramz.transformations.Transformation`
            to constrain this parameter to.
        :param warning: print a warning if re-constraining parameters.
        :param trigger_parent: whether to trigger an update up to the highest parent.
        """
        if isinstance(transform, Transformation):
            # Project current values into the transformation's valid range.
            self.param_array[...] = transform.initialize(self.param_array)
        elif transform == __fixed__:
            # Fixing has its own code path (mask bookkeeping in the parents).
            return self.fix(warning=warning, trigger_parent=trigger_parent)
        else:
            raise ValueError('Can only constrain with paramz.transformations.Transformation object')
        # Replace any previous constraints with the new one.
        reconstrained = self.unconstrain()
        added = self._add_to_index_operations(self.constraints, reconstrained, transform, warning)
        self.trigger_update(trigger_parent)
        return added

    def unconstrain(self, *transforms):
        """
        remove all :py:class:`paramz.transformations.Transformation`
        transformats of this parameter object.

        :param transforms: the transformations to unconstrain from
            (everything is removed when none are given).
        """
        return self._remove_from_index_operations(self.constraints, transforms)

    def constrain_positive(self, warning=True, trigger_parent=True):
        """
        Constrain this parameter to the default positive constraint.

        :param warning: print a warning if re-constraining parameters.
        """
        self.constrain(Logexp(), warning=warning, trigger_parent=trigger_parent)

    def constrain_negative(self, warning=True, trigger_parent=True):
        """
        Constrain this parameter to the default negative constraint.

        :param warning: print a warning if re-constraining parameters.
        """
        self.constrain(NegativeLogexp(), warning=warning, trigger_parent=trigger_parent)

    def constrain_bounded(self, lower, upper, warning=True, trigger_parent=True):
        """
        Constrain this parameter to lie within the given range.

        :param lower, upper: the limits to bound this parameter to
        :param warning: print a warning if re-constraining parameters.
        """
        self.constrain(Logistic(lower, upper), warning=warning, trigger_parent=trigger_parent)

    def unconstrain_positive(self):
        """
        Remove positive constraint of this parameter.
        """
        self.unconstrain(Logexp())

    def unconstrain_negative(self):
        """
        Remove negative constraint of this parameter.
        """
        self.unconstrain(NegativeLogexp())

    def unconstrain_bounded(self, lower, upper):
        """
        Remove (lower, upper) bounded constraint from this parameter.

        :param lower, upper: the limits to unbound this parameter from
        """
        self.unconstrain(Logistic(lower, upper))
| |
"""
Unittest for Vector class.
"""
import unittest
from numpy import pi, arange, array
from crystalpy.util.Vector import Vector
class VectorTest(unittest.TestCase):
    """Unit tests for crystalpy.util.Vector: construction, algebra and rotations."""

    def _assertComponents(self, vector, expected):
        # Helper: compare all three components with assertAlmostEqual.
        for got, want in zip(vector.components(), expected):
            self.assertAlmostEqual(got, want)

    def testConstructor(self):
        vec = Vector(1, 2, 3)
        self.assertIsInstance(vec, Vector)
        self._assertComponents(vec, (1, 2, 3))

    def testFromComponents(self):
        vec = Vector.initializeFromComponents(array([11, -2, 23]))
        self._assertComponents(vec, (11, -2, 23))

    def testSetComponents(self):
        vec = Vector(1, 2, 3)
        vec.setComponents(-3.0, -4.0, 0.0)
        self._assertComponents(vec, (-3.0, -4.0, 0.0))
        self.assertAlmostEqual(vec.getX(), -3.0)
        self.assertAlmostEqual(vec.getY(), -4.0)
        self.assertAlmostEqual(vec.getZ(), 0.0)

    def testComponents(self):
        vec = Vector(1, 2, 3)
        vec.setComponents(-3.0, -4.0, 0.0)
        self._assertComponents(vec, (-3.0, -4.0, 0.0))

    def testOperatorEqual(self):
        a, b, c = Vector(1, 2, 3), Vector(1.0, 2.0, 3.0), Vector(-1, 0, 0)
        self.assertTrue(a == b)
        self.assertFalse(a == c)

    def testOperatorNotEqual(self):
        a, b, c = Vector(1, 2, 3), Vector(1.0, 2.0, 3.0), Vector(-1, 0, 0)
        self.assertFalse(a != b)
        self.assertTrue(a != c)

    def testAddVector(self):
        self.assertTrue(Vector(1, 2, 3).addVector(Vector(-1, 2, 1)) == Vector(0, 4, 4))

    def testScalarMultiplication(self):
        vec = Vector(1, 2, 3)
        vec = vec.scalarMultiplication(-1.0)
        self.assertTrue(Vector(-1, -2, -3) == vec)
        vec = vec.scalarMultiplication(-3.0)
        self.assertTrue(vec == Vector(3, 6, 9))

    def testSubtractVector(self):
        self.assertTrue(Vector(1, 2, 3).subtractVector(Vector(-1, 2, 1)) == Vector(2, 0, 2))

    def testScalarProduct(self):
        vec = Vector(1, 2, 3)
        self.assertAlmostEqual(vec.scalarProduct(Vector(-1, 2, 1)), 6)
        self.assertAlmostEqual(vec.scalarProduct(Vector(-1, 2, -1)), 0)

    def testCrossProduct(self):
        x_axis, y_axis, z_axis = Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, 1)
        self.assertTrue(x_axis.crossProduct(y_axis) == z_axis)
        self.assertTrue(z_axis.crossProduct(x_axis) == y_axis)
        self.assertTrue(y_axis.crossProduct(x_axis) == Vector(0, 0, -1))
        self.assertTrue(x_axis.crossProduct(x_axis) == Vector(0, 0, 0))

    def testNorm(self):
        self.assertAlmostEqual(Vector(2, 4, 4).norm(), 6.0)
        self.assertAlmostEqual(Vector(2, 0, 0).norm(), 2.0)

    def testGetNormalizedVector(self):
        vec = Vector(2, 4, 4)
        unit = vec.getNormalizedVector()
        self.assertAlmostEqual(unit.norm(), 1.0)
        self.assertTrue(vec == unit.scalarMultiplication(6))

    def testRotateAroundAxis(self):
        x_axis, y_axis, z_axis = Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, 1)
        minus_x, minus_y, minus_z = Vector(-1, 0, 0), Vector(0, -1, 0), Vector(0, 0, -1)
        self.assertTrue(x_axis.rotateAroundAxis(z_axis, pi) == minus_x)
        self.assertTrue(x_axis.rotateAroundAxis(z_axis, pi / 2.0) == y_axis)
        self.assertTrue(x_axis.rotateAroundAxis(minus_z, pi / 2.0) == minus_y)
        self.assertTrue(x_axis.addVector(y_axis).rotateAroundAxis(z_axis, pi)
                        == minus_x.addVector(minus_y))
        # Rotating a vector around itself leaves it unchanged.
        self.assertTrue(z_axis.rotateAroundAxis(z_axis, pi) == z_axis)
        self.assertTrue(z_axis.rotateAroundAxis(x_axis, pi / 2.0) == minus_y)
        self.assertTrue(z_axis.addVector(x_axis).rotateAroundAxis(y_axis, pi / 2.0)
                        == x_axis.addVector(minus_z))

    def testParallelTo(self):
        self.assertTrue(Vector(1, 1, 3).parallelTo(Vector(0, 0, 1)) == Vector(0, 0, 3))

    def testPerpendicularTo(self):
        self.assertTrue(Vector(1, 1, 3).perpendicularTo(Vector(0, 0, 1)) == Vector(1, 1, 0))

    def testGetOnePerpendicularVector(self):
        for vec in [Vector(1, 1, 3), Vector(10, 1222, 23),
                    Vector(0.1, 12, -3), Vector(0, 0, 1)]:
            self.assertAlmostEqual(vec.getOnePerpendicularVector().scalarProduct(vec), 0.0)

    def testAngle(self):
        # angle() is based on the normalized scalar product.
        x_axis, y_axis, z_axis = Vector(1, 0, 0), Vector(0, 1, 0), Vector(0, 0, 1)
        self.assertAlmostEqual(x_axis.angle(y_axis), pi / 2.0)
        self.assertAlmostEqual(y_axis.angle(z_axis), pi / 2.0)
        self.assertAlmostEqual(x_axis.angle(Vector(1, 1, 0)), pi / 4.0)

    def testGetVectorWithAngle(self):
        for vec in [Vector(0, 1, 0), Vector(1, 2, 3), Vector(3, 2, -1)]:
            for angle in arange(0, pi, 0.1):
                self.assertAlmostEqual(vec.angle(vec.getVectorWithAngle(angle)), angle)

    def testDuplicate(self):
        original = Vector(1, 2, 3)
        copy = original.duplicate()
        for i in range(3):
            self.assertTrue(original.components()[i] == copy.components()[i])
        # Mutating the original must not affect the duplicate.
        original.setComponents(3, 4, 5)
        for i in range(3):
            self.assertFalse(original.components()[i] == copy.components()[i])
| |
import datetime
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
register = Library()  # tag registry this module's template tags attach to

# Placeholder used in page ranges where page-number links are elided.
DOT = '.'
def paginator_number(cl, i):
    """
    Generates an individual page index link in a paginated list.
    """
    if i == DOT:
        return u'... '
    if i == cl.page_num:
        # The current page is highlighted text, not a link.
        return mark_safe(u'<span class="this-page">%d</span> ' % (i+1))
    href = escape(cl.get_query_string({PAGE_VAR: i}))
    end_class = i == cl.paginator.num_pages-1 and ' class="end"' or ''
    return mark_safe(u'<a href="%s"%s>%d</a> ' % (href, end_class, i+1))
paginator_number = register.simple_tag(paginator_number)
def pagination(cl):
    """
    Generates the series of links to the pages in a paginated list.

    Returns the template context for admin/pagination.html: the page index
    range (with DOT markers where links are elided) plus an optional
    "show all" URL.
    """
    paginator, page_num = cl.paginator, cl.page_num

    # Pagination is shown only when the list spans several pages and the user
    # has not (or cannot) requested the unpaginated view.
    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 3
        ON_ENDS = 2

        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy
        if paginator.num_pages <= 10:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                # NOTE(review): this emits ON_EACH_SIDE - 1 leading page links
                # rather than ON_ENDS — matches the code as written; confirm
                # against the upstream behaviour if the leading run looks short.
                page_range.extend(range(0, ON_EACH_SIDE - 1))
                page_range.append(DOT)
                page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                page_range.append(DOT)
                page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))

    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'cl': cl,
        'pagination_required': pagination_required,
        # False when no link is needed; the template treats that as "hide".
        'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        'ALL_VAR': ALL_VAR,
        # Literal 1 exposed to the template (presumably referenced by
        # admin/pagination.html, which cannot spell integer literals).
        '1': 1,
    }
pagination = register.inclusion_tag('admin/pagination.html')(pagination)
def result_headers(cl):
    """
    Generates the list column headers.

    Yields one dict per list_display column containing the header text and,
    where the column is sortable, the sorting URL and CSS class attribute.
    """
    lookup_opts = cl.lookup_opts

    for i, field_name in enumerate(cl.list_display):
        header, attr = label_for_field(field_name, cl.model,
            model_admin = cl.model_admin,
            return_attr = True
        )
        if attr:
            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": header,
                    "class_attrib": mark_safe(' class="action-checkbox-column"')
                }
                continue

            # It is a non-field, but perhaps one that is sortable
            admin_order_field = getattr(attr, "admin_order_field", None)
            if not admin_order_field:
                yield {"text": header}
                continue

            # So this _is_ a sortable non-field. Go to the yield
            # after the else clause.
        else:
            admin_order_field = None

        th_classes = []
        new_order_type = 'asc'
        # When this column is the current sort key, mark it and toggle the
        # direction the header link will request.
        if field_name == cl.order_field or admin_order_field == cl.order_field:
            th_classes.append('sorted %sending' % cl.order_type.lower())
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]

        yield {
            "text": header,
            "sortable": True,
            "url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
            "class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
        }
def _boolean_icon(field_val):
    """Render True/False/None as the admin's yes/no/unknown icon image tag."""
    icon_name = {True: 'yes', False: 'no', None: 'unknown'}[field_val]
    return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" />' % (settings.ADMIN_MEDIA_PREFIX, icon_name, field_val))
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.

    Yields one rendered <th>/<td> cell per list_display column for a single
    result row; the designated link column(s) render as a link to the change
    page, and list_editable columns render their formset field instead.
    """
    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_class = ''
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except (AttributeError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                # Non-field column: a callable / admin or model attribute.
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_unicode(value)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if not allow_tags:
                    result_repr = escape(result_repr)
                else:
                    result_repr = mark_safe(result_repr)
            else:
                if value is None:
                    result_repr = EMPTY_CHANGELIST_VALUE
                if isinstance(f.rel, models.ManyToOneRel):
                    # FK column: show the related object (or empty marker).
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = EMPTY_CHANGELIST_VALUE
                    else:
                        result_repr = escape(field_val)
                else:
                    result_repr = display_for_field(value, f)
                # Dates/times should not wrap inside the table cell.
                if isinstance(f, models.DateField) or isinstance(f, models.TimeField):
                    row_class = ' class="nowrap"'
        # NOTE(review): upstream Django uses '&nbsp;' as the blank-cell filler;
        # confirm this literal survived copy/paste intact.
        if force_unicode(result_repr) == '':
            result_repr = mark_safe(' ')
        # If list_display_links not defined, add the link tag to the first field
        if (first and not cl.list_display_links) or field_name in cl.list_display_links:
            table_tag = {True:'th', False:'td'}[first]
            first = False
            url = cl.url_for_result(result)
            # Convert the pk to something that can be used in Javascript.
            # Problem cases are long ints (23L) and non-ASCII strings.
            if cl.to_field:
                attr = str(cl.to_field)
            else:
                attr = pk
            value = result.serializable_value(attr)
            # [1:] drops the leading "u" of the Python 2 unicode repr.
            result_id = repr(force_unicode(value))[1:]
            yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
                (table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if form and field_name in form.fields:
                bf = form[field_name]
                result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
            else:
                result_repr = conditional_escape(result_repr)
            yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
    if form and not form[cl.model._meta.pk.name].is_hidden:
        # Editable rows also carry their (visible) primary-key form field.
        yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
def results(cl):
    """Yield one list of rendered cells per result row; editable changelists
    pair each row with its formset form."""
    if cl.formset:
        row_pairs = zip(cl.result_list, cl.formset.forms)
    else:
        row_pairs = ((res, None) for res in cl.result_list)
    for res, form in row_pairs:
        yield list(items_for_result(cl, res, form))
def result_hidden_fields(cl):
    """Yield the rendered hidden primary-key field for each form of an editable changelist."""
    if not cl.formset:
        return
    pk_name = cl.model._meta.pk.name
    for res, form in zip(cl.result_list, cl.formset.forms):
        if form[pk_name].is_hidden:
            yield mark_safe(force_unicode(form[pk_name]))
def result_list(cl):
    """Build the full template context: headers, hidden pk fields and rendered rows."""
    context = {'cl': cl}
    context['result_hidden_fields'] = list(result_hidden_fields(cl))
    context['result_headers'] = list(result_headers(cl))
    context['results'] = list(results(cl))
    return context
result_list = register.inclusion_tag("admin/change_list_results.html")(result_list)
def date_hierarchy(cl):
    """
    Displays the date hierarchy for date drill-down functionality.

    Builds the context for admin/date_hierarchy.html, drilling from all years
    -> months of a year -> days of a month -> a single day depending on which
    year/month/day params are present in the request.  Returns None (nothing
    to show) when the changelist has no date_hierarchy configured.
    """
    if cl.date_hierarchy:
        field_name = cl.date_hierarchy
        year_field = '%s__year' % field_name
        month_field = '%s__month' % field_name
        day_field = '%s__day' % field_name
        field_generic = '%s__' % field_name
        year_lookup = cl.params.get(year_field)
        month_lookup = cl.params.get(month_field)
        day_lookup = cl.params.get(day_field)

        # Build a changelist URL from the given lookups, clearing any existing
        # date filters on this field first.
        link = lambda d: cl.get_query_string(d, [field_generic])

        if year_lookup and month_lookup and day_lookup:
            # Fully drilled down: show the single day, back-link to its month.
            day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup, month_field: month_lookup}),
                    'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
                },
                'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
            }
        elif year_lookup and month_lookup:
            # Month view: list only the days that actually have rows.
            days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup}),
                    'title': str(year_lookup)
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
                    'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
                } for day in days]
            }
        elif year_lookup:
            # Year view: list only the months that actually have rows.
            months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
            return {
                'show' : True,
                'back': {
                    'link' : link({}),
                    'title': _('All dates')
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month.month}),
                    'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
                } for month in months]
            }
        else:
            # Top level: list every year that has rows.
            years = cl.query_set.dates(field_name, 'year')
            return {
                'show': True,
                'choices': [{
                    'link': link({year_field: str(year.year)}),
                    'title': str(year.year),
                } for year in years]
            }
date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(date_hierarchy)
def search_form(cl):
    """
    Displays a search form for searching the list.
    """
    # Only show the result count when it differs from the unfiltered total.
    context = {'cl': cl}
    context['show_result_count'] = cl.result_count != cl.full_result_count
    context['search_var'] = SEARCH_VAR
    return context
search_form = register.inclusion_tag('admin/search_form.html')(search_form)
def admin_list_filter(cl, spec):
    """Template context for one sidebar filter: its title plus the rendered choices."""
    filter_choices = list(spec.choices(cl))
    return {'title': spec.title(), 'choices' : filter_choices}
admin_list_filter = register.inclusion_tag('admin/filter.html')(admin_list_filter)
def admin_actions(context):
    """Bump and record how many times the action field has been rendered on
    this page, so the template knows which value to use."""
    next_index = context.get('action_index', -1) + 1
    context['action_index'] = next_index
    return context
admin_actions = register.inclusion_tag("admin/actions.html", takes_context=True)(admin_actions)
| |
#!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
# Copyright (c) 2016 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc / the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos9_config
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Manage Dell EMC Networking OS9 configuration sections
description:
- OS9 configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with OS9 configuration sections in
a deterministic way.
extends_documentation_fragment: dellos9
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Note the configuration
command syntax as the device config parser automatically modifies some commands. This argument is mutually exclusive with I(src).
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If you omit the parents argument, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. The playbook designer can use this opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. As with I(before), this
the playbook designer can append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If you set
match to I(line), commands match line by line. If you set
match to I(strict), command lines match by position. If you set match to I(exact), command lines
must be an equal match. Finally, if you set match to I(none), the
module does not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If you set the replace argument to I(line), then
the modified lines push to the device in configuration
mode. If you set the replace argument to I(block), then the entire
command block pushes to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
update:
description:
- The I(update) argument controls how the configuration statements
are processed on the remote device. Valid choices for the I(update)
argument are I(merge) and I(check). When you set this argument to
I(merge), the configuration changes merge with the current
device running configuration. When you set this argument to I(check)
the configuration updates are determined but not actually configured
on the remote device.
required: false
default: merge
choices: ['merge', 'check']
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
config:
description:
- The playbook designer can use the C(config) argument to supply
the base configuration to be used to validate necessary configuration
changes. If you specify this argument, the module
does not download the running-config from the remote node.
required: false
default: null
backup:
description:
- This argument causes the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
- This module requires to increase the ssh connection rate limit.
Use the following command I(ip ssh connection-rate-limit 60)
to configure the same. This can also be done with the M(dellos9_config) module.
"""
EXAMPLES = """
- dellos9_config:
lines: ['hostname {{ inventory_hostname }}']
provider: "{{ cli }}"
- dellos9_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
- 50 permit ip host 5.5.5.5 any log
parents: ['ip access-list extended test']
before: ['no ip access-list extended test']
match: exact
provider: "{{ cli }}"
- dellos9_config:
lines:
- 10 permit ip host 1.1.1.1 any log
- 20 permit ip host 2.2.2.2 any log
- 30 permit ip host 3.3.3.3 any log
- 40 permit ip host 4.4.4.4 any log
parents: ['ip access-list extended test']
before: ['no ip access-list extended test']
replace: block
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device.
returned: Always.
type: list
sample: ['...', '...']
responses:
description: The set of responses from issuing the commands on the device.
returned: When not check_mode.
type: list
sample: ['...', '...']
saved:
description: Returns whether the configuration is saved to the startup
configuration or not.
returned: When not check_mode.
type: bool
sample: True
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/dellos9_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dellos9 import get_config, get_sublevel_config
from ansible.module_utils.dellos9 import dellos9_argument_spec, check_args
from ansible.module_utils.dellos9 import load_config, run_commands
from ansible.module_utils.dellos9 import WARNING_PROMPTS_RE
from ansible.module_utils.netcfg import NetworkConfig, dumps
def get_candidate(module):
    """Build the candidate NetworkConfig from either the src file or the lines/parents params."""
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        section_parents = module.params['parents'] or list()
        candidate.add(module.params['lines'], parents=section_parents)
    return candidate
def main():
    """Entry point for the dellos9_config Ansible module."""
    argument_spec = dict(
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        src=dict(type='path'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line',
                   choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        update=dict(choices=['merge', 'check'], default='merge'),
        save=dict(type='bool', default=False),
        config=dict(),
        backup=dict(type='bool', default=False)
    )
    argument_spec.update(dellos9_argument_spec)

    mutually_exclusive = [('lines', 'src')]

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    parents = module.params['parents'] or list()
    match = module.params['match']
    replace = module.params['replace']

    warnings = list()
    check_args(module, warnings)

    result = dict(changed=False, saved=False, warnings=warnings)

    candidate = get_candidate(module)
    if match != 'none':
        # Diff the candidate against the running config (optionally only the
        # sub-level selected by `parents`).
        config = get_config(module)
        if parents:
            contents = get_sublevel_config(config, module)
            config = NetworkConfig(contents=contents, indent=1)
        else:
            config = NetworkConfig(contents=config, indent=1)
        configobjs = candidate.difference(config, match=match, replace=replace)
    else:
        # match=none: push the candidate as-is without diffing.
        configobjs = candidate.items

    if module.params['backup']:
        result['__backup__'] = get_config(module)

    commands = list()
    if configobjs:
        commands = dumps(configobjs, 'commands')
        commands = commands.split('\n')

        if module.params['before']:
            commands[:0] = module.params['before']

        if module.params['after']:
            commands.extend(module.params['after'])

        if not module.check_mode and module.params['update'] == 'merge':
            load_config(module, commands)

            if module.params['save']:
                # BUG FIX: the command was misspelled as
                # 'copy runing-config startup-config', which the device
                # would reject instead of saving the configuration.
                cmd = {'command': 'copy running-config startup-config',
                       'prompt': WARNING_PROMPTS_RE, 'answer': 'yes'}
                run_commands(module, [cmd])
                result['saved'] = True

        result['changed'] = True

    result['updates'] = commands
    module.exit_json(**result)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
"""HTTP API logic that ties API call renderers with HTTP routes."""
import json
# pylint: disable=g-bad-import-order,unused-import
from grr.gui import django_lib
# pylint: enable=g-bad-import-order,unused-import
from django import http
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing
import logging
from grr.gui import api_call_renderers
from grr.gui import api_plugins
from grr.gui import http_routing
from grr.lib import access_control
from grr.lib import rdfvalue
from grr.lib import registry
def BuildToken(request, execution_time):
    """Build an ACLToken from the request.

    Args:
      request: Django HTTP request object.
      execution_time: Duration added to "now" to set the token expiry.

    Returns:
      access_control.ACLToken carrying username, reason, expiry and the
      requester's source IPs (REMOTE_ADDR / X-Forwarded-For).
    """
    if request.method == "GET":
        reason = request.GET.get("reason", "")
    elif request.method == "POST":
        reason = request.META.get("HTTP_GRR_REASON", "")
    else:
        # BUG FIX: `reason` was only assigned for GET/POST, so any other
        # method (e.g. HEAD, OPTIONS) raised UnboundLocalError below.
        reason = ""

    token = access_control.ACLToken(
        username=request.user,
        reason=reason,
        process="GRRAdminUI",
        expiry=rdfvalue.RDFDatetime().Now() + execution_time)

    for field in ["REMOTE_ADDR", "HTTP_X_FORWARDED_FOR"]:
        remote_addr = request.META.get(field, "")
        if remote_addr:
            token.source_ips.append(remote_addr)
    return token
def StripTypeInfo(rendered_data):
    """Strips type information from rendered data. Useful for debugging."""
    if isinstance(rendered_data, (list, tuple)):
        return [StripTypeInfo(item) for item in rendered_data]
    if isinstance(rendered_data, dict):
        # A dict with a "value" key is a typed wrapper: unwrap it.
        if "value" in rendered_data:
            return StripTypeInfo(rendered_data["value"])
        return {k: StripTypeInfo(v) for k, v in rendered_data.items()}
    # Primitives pass through untouched.
    return rendered_data
def RegisterHttpRouteHandler(method, route, renderer_cls):
    """Registers given ApiCallRenderer for given method and route."""
    rule = routing.Rule(route, methods=[method], endpoint=renderer_cls)
    http_routing.HTTP_ROUTING_MAP.add(rule)
def GetRendererForHttpRequest(request):
    """Returns a renderer to handle given HTTP request."""
    server_and_port = "%s:%s" % (request.environ["SERVER_NAME"],
                                 request.environ["SERVER_PORT"])
    matcher = http_routing.HTTP_ROUTING_MAP.bind(server_and_port)
    try:
        renderer_cls, route_args = matcher.match(request.path, request.method)
    except werkzeug_exceptions.NotFound:
        raise api_call_renderers.ApiCallRendererNotFoundError(
            "No API renderer was found for (%s) %s" % (request.path,
                                                       request.method))
    # Instantiate the renderer; route_args carries the URL placeholders.
    return (renderer_cls(), route_args)
def FillAdditionalArgsFromRequest(request, supported_types):
    """Creates arguments objects from a given request dictionary.

    Args:
      request: dict of "<type name>.<attribute>" -> value entries.
      supported_types: dict mapping type names to argument classes.

    Returns:
      List of api_call_renderers.ApiCallAdditionalArgs, one per type that
      appeared in the request.
    """
    results = {}
    for key, value in request.items():
        try:
            request_arg_type, request_attr = key.split(".", 1)
        except ValueError:
            # Keys without a "type.attribute" shape are ignored.
            continue

        # CLEANUP: the original scanned supported_types with an inner loop
        # that shadowed the outer `key` variable; a direct dict lookup is
        # semantically identical and O(1).
        arg_class = supported_types.get(request_arg_type)

        if arg_class:
            if request_arg_type not in results:
                results[request_arg_type] = arg_class()
            results[request_arg_type].Set(request_attr, value)

    results_list = []
    for name, arg_obj in results.items():
        additional_args = api_call_renderers.ApiCallAdditionalArgs(
            name=name, type=supported_types[name].__name__)
        additional_args.args = arg_obj
        results_list.append(additional_args)

    return results_list
class JSONEncoderWithRDFPrimitivesSupport(json.JSONEncoder):
    """Custom JSON encoder that encodes renderers output.

    Custom encoder is required to facilitate usage of primitive values -
    booleans, integers and strings - in renderers responses.

    If a renderer references an RDFString, RDFInteger or an RDFBool when
    building a response, encoding that response would fail unless this
    custom encoder is used. The alternative - explicitly calling
    api_value_renderers.RenderValue on every value returned from the
    renderer - would make the code overly verbose and dirty.
    """

    def default(self, obj):
        # RDF primitives serialize to their datastore representation; all
        # other unknown types fall back to the stock encoder (which raises).
        rdf_primitives = (rdfvalue.RDFInteger,
                          rdfvalue.RDFBool,
                          rdfvalue.RDFString)
        if isinstance(obj, rdf_primitives):
            return obj.SerializeToDataStore()
        return json.JSONEncoder.default(self, obj)
def BuildResponse(status, rendered_data):
    """Builds HTTPResponse object from rendered data and HTTP status."""
    response = http.HttpResponse(status=status,
                                 content_type="application/json; charset=utf-8")
    response["Content-Disposition"] = "attachment; filename=response.json"
    response["X-Content-Type-Options"] = "nosniff"

    # XSSI protection prefix.
    response.write(")]}'\n")

    # Escape angle brackets: IE content-sniffs and ignores the
    # Content-Disposition header, so without escaping a crafted payload
    # could be treated as HTML and execute arbitrary JS.
    json_text = json.dumps(rendered_data,
                           cls=JSONEncoderWithRDFPrimitivesSupport)
    response.write(json_text.replace("<", r"\u003c").replace(">", r"\u003e"))

    return response
def RenderHttpResponse(request):
    """Handles given HTTP request with one of the available API renderers."""
    renderer, route_args = GetRendererForHttpRequest(request)

    strip_type_info = False
    if request.method == "GET":
        # Optional debugging flag: strip type wrappers from the response.
        if request.GET.get("strip_type_info", ""):
            strip_type_info = True

        if renderer.args_type:
            # Django QueryDicts expose .dict(); fall back to the raw mapping.
            unprocessed_request = request.GET
            if hasattr(unprocessed_request, "dict"):
                unprocessed_request = unprocessed_request.dict()

            # URL route placeholders win over query-string parameters.
            args = renderer.args_type()
            for type_info in args.type_infos:
                if type_info.name in route_args:
                    args.Set(type_info.name, route_args[type_info.name])
                elif type_info.name in unprocessed_request:
                    args.Set(type_info.name, unprocessed_request[type_info.name])

            if renderer.additional_args_types:
                if not hasattr(args, "additional_args"):
                    raise RuntimeError("Renderer %s defines additional arguments types "
                                       "but its arguments object does not have "
                                       "'additional_args' field." % renderer)

                # additional_args_types may be a callable factory or a dict.
                if hasattr(renderer.additional_args_types, "__call__"):
                    additional_args_types = renderer.additional_args_types()
                else:
                    additional_args_types = renderer.additional_args_types

                args.additional_args = FillAdditionalArgsFromRequest(
                    unprocessed_request, additional_args_types)

        else:
            args = None
    elif request.method == "POST":
        try:
            args = renderer.args_type()
            for type_info in args.type_infos:
                if type_info.name in route_args:
                    args.Set(type_info.name, route_args[type_info.name])

            # Multipart uploads carry JSON params in "_params_" plus files;
            # plain POSTs carry JSON in the request body.
            if request.META["CONTENT_TYPE"].startswith("multipart/form-data;"):
                payload = json.loads(request.POST["_params_"])
                args.FromDict(payload)

                for name, fd in request.FILES.items():
                    args.Set(name, fd.read())
            else:
                payload = json.loads(request.body)
                args.FromDict(payload)
        except Exception as e:  # pylint: disable=broad-except
            # Malformed payloads are reported as 500 with the parse error.
            logging.exception(
                "Error while parsing POST request %s (%s): %s",
                request.path, request.method, e)

            return BuildResponse(500, dict(message=str(e)))
    else:
        raise RuntimeError("Unsupported method: %s." % request.method)

    token = BuildToken(request, renderer.max_execution_time)

    try:
        rendered_data = api_call_renderers.HandleApiCall(renderer, args,
                                                         token=token)

        if strip_type_info:
            rendered_data = StripTypeInfo(rendered_data)

        return BuildResponse(200, rendered_data)
    except access_control.UnauthorizedAccess as e:
        # ACL failures map to 403 without leaking details to the client.
        logging.exception(
            "Access denied to %s (%s) with %s: %s", request.path,
            request.method, renderer.__class__.__name__, e)

        return BuildResponse(403, dict(message="Access denied by ACL"))
    except Exception as e:  # pylint: disable=broad-except
        logging.exception(
            "Error while processing %s (%s) with %s: %s", request.path,
            request.method, renderer.__class__.__name__, e)

        return BuildResponse(500, dict(message=str(e)))
class HttpApiInitHook(registry.InitHook):
    """Register HTTP API renderers."""

    def RunOnce(self):
        # Route table, alphabetized by route; registered in listed order.
        routes = [
            ("GET", "/api/aff4/<path:aff4_path>",
             api_plugins.aff4.ApiAff4Renderer),
            ("GET", "/api/aff4-index/<path:aff4_path>",
             api_plugins.aff4.ApiAff4IndexRenderer),
            ("GET", "/api/artifacts",
             api_plugins.artifact.ApiArtifactsRenderer),
            ("POST", "/api/artifacts/upload",
             api_plugins.artifact.ApiArtifactsUploadRenderer),
            ("POST", "/api/artifacts/delete",
             api_plugins.artifact.ApiArtifactsDeleteRenderer),
            ("GET", "/api/clients/kb-fields",
             api_plugins.client.ApiListKbFieldsRenderer),
            ("GET", "/api/clients",
             api_plugins.client.ApiClientSearchRenderer),
            ("GET", "/api/clients/<client_id>",
             api_plugins.client.ApiClientSummaryRenderer),
            ("GET", "/api/clients/labels",
             api_plugins.client.ApiClientsLabelsListRenderer),
            ("POST", "/api/clients/labels/add",
             api_plugins.client.ApiClientsAddLabelsRenderer),
            ("POST", "/api/clients/labels/remove",
             api_plugins.client.ApiClientsRemoveLabelsRenderer),
            ("GET", "/api/config",
             api_plugins.config.ApiConfigRenderer),
            ("GET", "/api/config/<name>",
             api_plugins.config.ApiConfigOptionRenderer),
            ("GET", "/api/docs",
             api_plugins.docs.ApiDocsRenderer),
            ("GET", "/api/flows/<client_id>/<flow_id>/status",
             api_plugins.flow.ApiFlowStatusRenderer),
            ("GET", "/api/flows/descriptors",
             api_plugins.flow.ApiFlowDescriptorsListRenderer),
            ("GET", "/api/clients/<client_id>/flows/<flow_id>/results",
             api_plugins.flow.ApiFlowResultsRenderer),
            ("GET", "/api/clients/<client_id>/flows/<flow_id>/output-plugins",
             api_plugins.flow.ApiFlowOutputPluginsRenderer),
            ("POST", "/api/clients/<client_id>/flows/remotegetfile",
             api_plugins.flow.ApiRemoteGetFileRenderer),
            ("POST", "/api/clients/<client_id>/flows/start",
             api_plugins.flow.ApiStartFlowRenderer),
            ("GET", "/api/output-plugins/all",
             api_plugins.output_plugin.ApiOutputPluginsListRenderer),
            ("GET", "/api/hunts",
             api_plugins.hunt.ApiHuntsListRenderer),
            ("GET", "/api/hunts/<hunt_id>",
             api_plugins.hunt.ApiHuntSummaryRenderer),
            ("GET", "/api/hunts/<hunt_id>/errors",
             api_plugins.hunt.ApiHuntErrorsRenderer),
            ("GET", "/api/hunts/<hunt_id>/log",
             api_plugins.hunt.ApiHuntLogRenderer),
            ("GET", "/api/hunts/<hunt_id>/results",
             api_plugins.hunt.ApiHuntResultsRenderer),
            ("GET", "/api/hunts/<hunt_id>/output-plugins",
             api_plugins.hunt.ApiHuntOutputPluginsRenderer),
            ("POST", "/api/hunts/create",
             api_plugins.hunt.ApiCreateHuntRenderer),
            ("POST", "/api/hunts/<hunt_id>/results/archive-files",
             api_plugins.hunt.ApiHuntArchiveFilesRenderer),
            ("GET", "/api/reflection/aff4/attributes",
             api_plugins.reflection.ApiAff4AttributesReflectionRenderer),
            ("GET", "/api/reflection/rdfvalue/<type>",
             api_plugins.reflection.ApiRDFValueReflectionRenderer),
            ("GET", "/api/reflection/rdfvalue/all",
             api_plugins.reflection.ApiAllRDFValuesReflectionRenderer),
            ("GET", "/api/stats/store/<component>/metadata",
             api_plugins.stats.ApiStatsStoreMetricsMetadataRenderer),
            ("GET", "/api/stats/store/<component>/metrics/<metric_name>",
             api_plugins.stats.ApiStatsStoreMetricRenderer),
            ("GET", "/api/users/me/settings",
             api_plugins.user.ApiUserSettingsRenderer),
            ("POST", "/api/users/me/settings",
             api_plugins.user.ApiSetUserSettingsRenderer),
        ]
        for method, route, renderer_cls in routes:
            RegisterHttpRouteHandler(method, route, renderer_cls)
| |
#!/usr/bin/python
#############################################################
# /###### /## #
# /##__ ## |__/ #
# /##| ## \__/ /###### /## /####### /###### #
# |__/| ###### /##__ ##| ## /##_____/ /##__ ## #
# /## \____ ##| ## \ ##| ##| ## | ######## #
# | ## /## \ ##| ## | ##| ##| ## | ##_____/ #
# | ##| ######/| #######/| ##| #######| ####### #
# | ## \______/ | ##____/ |__/ \_______/ \_______/ #
# /## | ## | ## #
# | ######/ | ## #
# \______/ |__/ #
# #
# Jorge I. Zuluaga (C) 2016 #
#############################################################
#Function: an extension to SpiceyPy
#############################################################
from spiceypy import wrapper as spy
import spiceypy.support_types as spytypes
#############################################################
#EXTERNAL MODULES
#############################################################
import time,datetime
import numpy as np
from scipy.optimize import brentq as _zero
from scipy.optimize import minimize_scalar as _minim
np.set_printoptions(threshold='nan')
#############################################################
#EXTEND SPICE
#############################################################
"""
These routines are intended to extend SPICE and include new
functionality.
Convention:
def _<jroutine>(*args): Private routine
spy.j<routine>: Extended routine
Use in your code instead of:
from spiceypy import wrapper as spy
This code:
from spicext import *
SpiceyPy and Spicext can be invoked as:
spy.<routine>
spy.j<routine>
"""
#############################################################
#CONSTANTS
#############################################################
# 3x3 identity matrix, handy for frame transformations.
spy.IDENTITY=np.identity(3)
# Conversion factors: multiply by RAD for rad->deg, by DEG for deg->rad.
spy.RAD=180/np.pi
spy.DEG=1/spy.RAD
#############################################################
#ROUTINES
#############################################################
def _utcnow():
    """Return the current UTC time as 'MM/DD/YY HH:MM:SS.ffffff UTC'."""
    return datetime.datetime.utcnow().strftime("%m/%d/%y %H:%M:%S.%f UTC")
spy.jutcnow=_utcnow
def _locnow():
    """Return the current local time as 'MM/DD/YY HH:MM:SS.ffffff'."""
    return datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
spy.jlocnow=_locnow
def _etnow():
    # Ephemeris time corresponding to the current local time.
    # NOTE: spy.str2et requires a leapseconds kernel to be loaded first.
    return spy.str2et(spy.jlocnow())
spy.jetnow=_etnow
def _et2str(et):
    # Convert ephemeris time to a calendar string.  The ET-UTC delta is
    # subtracted first, so the string is presumably expressed in UTC rather
    # than TDB -- TODO confirm against the loaded kernels.
    deltet=spy.deltet(et,"ET")
    cal=spy.etcal(et-deltet,100)
    return cal
spy.jet2str=_et2str
def _dec2sex(dec,sep=None,day=False):
if day:fac=24
else:fac=60
sgn=np.sign(dec)
dec=np.abs(dec)
H=np.floor(dec)
mm=(dec-H)*fac
M=np.floor(mm)
ss=(mm-M)*60;
S=np.floor(ss);
H=sgn*H
if not sep is None:
return "%02d%s%02d%s%02.3f"%(int(H),sep[0],int(M),sep[1],ss)
return [H,M,ss]
spy.jdec2sex=_dec2sex
# Radians -> degrees conversion factor.
def _rad():return 180/np.pi
spy.jrad=_rad
# Degrees -> radians conversion factor.
def _deg():return np.pi/180
spy.jdeg=_deg
def _obsini(body,lon,lat,alt):
    """Build an observer dictionary for a topocentric site.

    Parameters:
        body: name of the central body (e.g. "EARTH")
        lon: longitude in degrees
        lat: latitude in degrees
        alt: altitude in meters

    Returns an `obs` dict with:
        lon, lat (radians), alt (kilometers), body
        pos: cartesian position with respect to the ITRF93 ellipsoid
        norm: outward normal vector of the ellipsoid at pos
        radii: (a, b, c, flattening (a-c)/a, mean radius (a+c)/2)
        LOCALtoITRF93 / ITRF93toLOCAL: transformation matrices
    """
    obs=dict(
        ITRF93toLOCAL=np.zeros((3,3)),
        LOCALtoITRF93=np.zeros((3,3)),
        radii=np.zeros(3),
        pos=np.zeros(3),
        norm=np.zeros(3),
        )
    # Store angles in radians, altitude in km.
    obs["lon"]=lon*spy.DEG
    obs["lat"]=lat*spy.DEG
    obs["alt"]=alt/1000.0
    obs["body"]=body
    # Body properties: triaxial radii, then append flattening and mean radius.
    n,obs["radii"]=spy.bodvrd(body,"RADII",3)
    obs["radii"]=np.append(obs["radii"],
                           [(obs["radii"][0]-obs["radii"][2])/obs["radii"][0]])
    obs["radii"]=np.append(obs["radii"],
                           [(obs["radii"][0]+obs["radii"][2])/2])
    # Position in the ellipsoid
    obs["pos"]=spy.georec(obs["lon"],obs["lat"],obs["alt"],
                          obs["radii"][0],obs["radii"][3])
    # Normal vector to location
    obs["norm"]=spy.surfnm(obs["radii"][0],obs["radii"][1],obs["radii"][2],obs["pos"])
    # Local basis: uz along the surface normal, uy/ux completing a
    # right-handed triad (uy from norm x z-axis).
    uz=[0,0,1]
    uy=spy.ucrss(obs["norm"],uz)
    uz=obs["norm"]
    ux=spy.ucrss(uy,uz)
    # Matrices: rows of ITRF93toLOCAL are the local basis vectors.
    obs["ITRF93toLOCAL"]=np.array([ux,uy,uz])
    obs["LOCALtoITRF93"]=spy.invert(obs["ITRF93toLOCAL"]);
    return obs
spy.jobsini=_obsini
def _rotmat(t):
    """Return the rotation matrices between the reference frames used by
    spicext, evaluated at ephemeris time t."""
    return dict(
        ITRF93toEJ2000=spy.pxform("ITRF93","ECLIPJ2000",t),
        EJ2000toJ2000=spy.pxform("ECLIPJ2000","J2000",t),
        J2000toEpoch=spy.pxform("J2000","EARTHTRUEEPOCH",t),
        J2000toITRF93=spy.pxform("J2000","ITRF93",t),
    )
spy.jrotmat=_rotmat
def _ephem(target,t,obs,mat,depth='epoch'):
    """Compute the apparent ephemeris of `target` for a topocentric observer.

    Parameters:
        target: string for target body
        t: ephemeris time
        obs: observer dictionary (see _obsini)
        mat: rotation matrices (see _rotmat)
        depth: unused in the body -- TODO confirm intended use
    Return:
        ephem: dictionary with ephemeris
            obsSSBEJ2000: observer wrt SSB in ECLIPJ2000
            targetSSBEJ2000: target wrt SSB in ECLIPJ2000
            targetOBSEJ2000: target wrt observer in ECLIPJ2000
            targetOBSJ2000: target wrt observer in J2000
            targetOBST: target wrt observer at Epoch
            targetOBSITRF93: target wrt observer in ITRF93
            targetOBSLOCAL: target wrt observer in local coordinates
            distance: distance from target to observer
            RA, DEC (radians): J2000
            RAt, DECt (radians): at epoch
            az, el, z (radians): azimuth, elevation, zenith distance
            angsize: angular diameter in arcsec
    """
    ephem=dict(
        target=target,
        targetSSBEJ2000=np.zeros([0,0,0]),
        targetOBSEJ2000=np.zeros([0,0,0]),
        targetOBSJ2000=np.zeros([0,0,0]),
        distance=0,
        RAJ2000=0,
        DECJ2000=0,
        )
    # Observer barycentric position: body center plus topocentric offset.
    bodySSBEJ2000,ltmp=spy.spkezr(obs["body"],t,
                                  "ECLIPJ2000","NONE","SOLAR SYSTEM BARYCENTER")
    obsEJ2000=spy.mxv(mat["ITRF93toEJ2000"],obs["pos"])
    ephem["obsSSBEJ2000"]=spy.vadd(bodySSBEJ2000[:3],obsEJ2000)
    # Position of target corrected by light-time
    # Body radii plus appended flattening and mean radius (as in _obsini).
    n,ephem["radii"]=spy.bodvrd(target,"RADII",3)
    ephem["radii"]=np.append(ephem["radii"],
                             [(ephem["radii"][0]-ephem["radii"][2])/ephem["radii"][0]])
    ephem["radii"]=np.append(ephem["radii"],
                             [(ephem["radii"][0]+ephem["radii"][2])/2])
    lt=1;ltold=0
    # Iterate the light-travel time until it converges (relative change < 1e-10).
    while np.abs((lt-ltold)/lt)>=1e-10:
        ltold=lt
        ephem["targetSSBEJ2000"],ltmp=spy.spkezr(target,t-lt,"ECLIPJ2000","NONE",
                                                 "SOLAR SYSTEM BARYCENTER")
        ephem["targetOBSEJ2000"]=spy.vsub(ephem["targetSSBEJ2000"][:3],
                                          ephem["obsSSBEJ2000"])
        lt=spy.vnorm(ephem["targetOBSEJ2000"])/spy.clight()
    # Ecliptic coordinates at J2000
    ephem["distance"],ephem["eclon"],ephem["eclat"]=spy.recrad(ephem["targetOBSEJ2000"])
    # Equator J2000
    ephem["targetOBSJ2000"]=spy.mxv(mat["EJ2000toJ2000"],ephem["targetOBSEJ2000"])
    # Coordinates at J2000
    ephem["distance"],ephem["RA"],ephem["DEC"]=spy.recrad(ephem["targetOBSJ2000"])
    # Angular diameter in arcsec: 2 * mean radius / distance, rad -> arcsec.
    ephem["angsize"]=2*(ephem["radii"][4]/ephem["distance"])*spy.jrad()*3600
    # Coordinates at Epoch
    ephem["targetOBST"]=spy.mxv(mat["J2000toEpoch"],ephem["targetOBSJ2000"])
    d,ephem["RAt"],ephem["DECt"]=spy.recrad(ephem["targetOBST"])
    # Topocentric coordinates
    ephem["targetOBSITRF93"]=spy.mxv(mat["J2000toITRF93"],ephem["targetOBSJ2000"])
    ephem["targetOBSLOCAL"]=spy.mxv(obs["ITRF93toLOCAL"],ephem["targetOBSITRF93"])
    udir,mag=spy.unorm(ephem["targetOBSLOCAL"])
    # Presumably flips the y component so azimuth is measured in the
    # conventional sense -- TODO confirm the local-frame handedness.
    udir[1]*=-1
    d,az,el=spy.reclat(udir)
    if(az<0):az+=2*np.pi
    ephem["el"]=el
    ephem["z"]=np.pi/2-ephem["el"]
    ephem["az"]=az
    return ephem
spy.jephem=_ephem
# Root finding (brentq) and scalar minimization wrappers from scipy.
spy.jzero=_zero
spy.jminim=_minim
# Angular distance
def _gcdist(lam1,lam2,phi1,phi2):
sf=np.sin((phi2-phi1)/2)
sl=np.sin((lam2-lam1)/2)
d=2*np.arcsin((sf*sf+np.cos(phi1)*np.cos(phi2)*sl*sl)**0.5)
return d
spy.jgcdist=_gcdist
def _angdis(body1,body2,t,obs,k=0):
    """Calculate the angular distance of the contact-function (fk) of two
    objects as observed from observatory obs

    Parameters:
        body1: Body 1 string (largest body)
        body2: Body 2 string
        t: ephemeris time
        obs: observer dictionary
        k: k-parameter of the contact-function. k=0 (angular distance),
           k=+1 (external contact), k=-1 (internal contact)

    Returns:
        if k==0: Angular distance (radians)
        if k!=0: angdist-rad1-k*rad2 (arcseconds)
    """
    mat=spy.jrotmat(t)
    ephem1=spy.jephem(body1,t,obs,mat)
    ephem2=spy.jephem(body2,t,obs,mat)
    # Angular separation of the apparent J2000 positions.
    angdist=spy.jgcdist(ephem1["RA"],ephem2["RA"],ephem1["DEC"],ephem2["DEC"])
    if k==0:
        return angdist
    else:
        # Contact function: separation in arcsec minus the angular radii
        # (angsize is an angular diameter in arcsec, so halve it).
        rad1=ephem1["angsize"]/2
        rad2=ephem2["angsize"]/2
        fk=angdist*spy.jrad()*3600.0-rad1-k*rad2
        return fk
spy.jangdis=_angdis
| |
# -*- coding: utf-8 -*-
"""Amazon AWS Connection."""
from __future__ import absolute_import, unicode_literals
from io import BytesIO
from vine import promise, transform
from kombu.async.http import Headers, Request, get_client
from kombu.five import items, python_2_unicode_compatible
from .ext import (
boto, AWSAuthConnection, AWSQueryConnection, XmlHandler, ResultSet,
)
try:
from urllib.parse import urlunsplit
except ImportError:
from urlparse import urlunsplit # noqa
from xml.sax import parseString as sax_parse # noqa
try: # pragma: no cover
from email import message_from_file
from email.mime.message import MIMEMessage
except ImportError: # pragma: no cover
from mimetools import Message as MIMEMessage # noqa
def message_from_file(m): # noqa
return m
__all__ = [
'AsyncHTTPConnection', 'AsyncHTTPSConnection',
'AsyncHTTPResponse', 'AsyncConnection',
'AsyncAWSAuthConnection', 'AsyncAWSQueryConnection',
]
@python_2_unicode_compatible
class AsyncHTTPResponse(object):
    """Async HTTP Response."""

    def __init__(self, response):
        self.response = response
        self._msg = None
        # Mimic http.client's HTTP/1.0 version constant.
        self.version = 10

    def read(self, *args, **kwargs):
        """Return the raw body (extra arguments accepted for API compat)."""
        return self.response.body

    def getheader(self, name, default=None):
        return self.response.headers.get(name, default)

    def getheaders(self):
        return list(items(self.response.headers))

    @property
    def msg(self):
        """Lazily-built Message view of the response headers."""
        if self._msg is None:
            # BUG FIX: the original called .format() on a bytes literal
            # (b'{0}: {1}'.format(...)), which raises AttributeError on
            # Python 3 -- bytes objects have no .format method.  Format as
            # text first, then encode the joined header block.
            header_block = '\r\n'.join(
                '{0}: {1}'.format(*h) for h in self.getheaders()
            ).encode('latin-1')
            # NOTE(review): on Python 3, message_from_file expects a text
            # stream; verify this code path against the py3 email API.
            self._msg = MIMEMessage(message_from_file(BytesIO(header_block)))
        return self._msg

    @property
    def status(self):
        return self.response.code

    @property
    def reason(self):
        if self.response.error:
            return self.response.error.message
        return ''

    def __repr__(self):
        return repr(self.response)
@python_2_unicode_compatible
class AsyncHTTPConnection(object):
    """Async HTTP Connection.

    Mimics the blocking http.client connection API on top of kombu's
    asynchronous HTTP client: state is accumulated by request()/put*()
    calls and dispatched by getresponse().
    """

    Request = Request
    Response = AsyncHTTPResponse

    method = 'GET'
    path = '/'
    body = None
    scheme = 'http'

    default_ports = {'http': 80, 'https': 443}

    def __init__(self, host, port=None,
                 strict=None, timeout=20.0, http_client=None, **kwargs):
        self.host = host
        self.port = port
        self.headers = []
        self.timeout = timeout
        self.strict = strict
        self.http_client = http_client or get_client()

    def request(self, method, path, body=None, headers=None):
        """Record the request line, body and headers for later dispatch."""
        self.method = method
        self.path = path
        if body is not None:
            # File-like bodies are drained immediately; plain payloads are
            # stored as-is.
            reader = getattr(body, 'read', None)
            self.body = body if reader is None else reader()
        if headers is not None:
            self.headers.extend(list(items(headers)))

    def getrequest(self, scheme=None):
        """Build a kombu async Request from the accumulated state."""
        scheme = scheme if scheme else self.scheme
        netloc = self.host
        # Only spell out the port when it differs from the scheme default.
        if self.port and self.port != self.default_ports[scheme]:
            netloc = '{0}:{1}'.format(netloc, self.port)
        url = urlunsplit((scheme, netloc, self.path, '', ''))
        return self.Request(
            url, method=self.method, headers=Headers(self.headers),
            body=self.body, connect_timeout=self.timeout,
            request_timeout=self.timeout, validate_cert=False)

    def getresponse(self, callback=None):
        request = self.getrequest()
        # Wrap the raw async response in our Response type before the
        # user-supplied callback sees it.
        request.then(transform(self.Response, callback))
        return self.http_client.add_request(request)

    def set_debuglevel(self, level):
        pass

    def connect(self):
        pass

    def close(self):
        pass

    def putrequest(self, method, path, **kwargs):
        self.method = method
        self.path = path

    def putheader(self, header, value):
        self.headers.append((header, value))

    def endheaders(self):
        pass

    def send(self, data):
        if not self.body:
            self.body = data
        else:
            self.body += data

    def __repr__(self):
        return '<AsyncHTTPConnection: {0!r}>'.format(self.getrequest())
class AsyncHTTPSConnection(AsyncHTTPConnection):
    """Async HTTPS Connection."""

    # Only the URL scheme differs; the port default (443) comes from
    # AsyncHTTPConnection.default_ports via getrequest().
    scheme = 'https'
class AsyncConnection(object):
    """Async AWS Connection."""

    def __init__(self, http_client=None, **kwargs):
        # boto is an optional dependency; fail loudly if it is missing.
        if boto is None:
            raise ImportError('boto is not installed')
        self._httpclient = http_client or get_client()

    def get_http_connection(self, host, port, is_secure):
        # Pick the HTTP or HTTPS flavour; both share the same async client.
        return (AsyncHTTPSConnection if is_secure else AsyncHTTPConnection)(
            host, port, http_client=self._httpclient,
        )

    def _mexe(self, request, sender=None, callback=None):
        """Execute a boto-style request asynchronously.

        Returns the callback promise, fulfilled with the response when the
        request completes.  NOTE(review): relies on `self.is_secure`, which
        is provided by the boto connection class mixed in by subclasses.
        """
        callback = callback or promise()
        boto.log.debug(
            'HTTP %s/%s headers=%s body=%s',
            request.host, request.path,
            request.headers, request.body,
        )
        conn = self.get_http_connection(
            request.host, request.port, self.is_secure,
        )
        # Sign the request (adds auth headers) before it is sent.
        request.authorize(connection=self)
        if callable(sender):
            # A custom sender takes over the send/receive cycle entirely.
            sender(conn, request.method, request.path, request.body,
                   request.headers, callback)
        else:
            conn.request(request.method, request.path,
                         request.body, request.headers)
            conn.getresponse(callback=callback)
        return callback
class AsyncAWSAuthConnection(AsyncConnection, AWSAuthConnection):
    """Async AWS Authn Connection."""

    def __init__(self, host,
                 http_client=None, http_client_params=None, **kwargs):
        # BUG FIX (idiom): the mutable default argument
        # ``http_client_params={}`` was shared across calls; use a None
        # sentinel and create a fresh dict per call instead.
        if http_client_params is None:
            http_client_params = {}
        AsyncConnection.__init__(self, http_client, **http_client_params)
        AWSAuthConnection.__init__(self, host, **kwargs)

    def make_request(self, method, path, headers=None, data='', host=None,
                     auth_path=None, sender=None, callback=None, **kwargs):
        """Build a base boto HTTP request and execute it asynchronously."""
        req = self.build_base_http_request(
            method, path, auth_path, {}, headers, data, host,
        )
        return self._mexe(req, sender=sender, callback=callback)
class AsyncAWSQueryConnection(AsyncConnection, AWSQueryConnection):
    """Async AWS Query Connection."""

    def __init__(self, host,
                 http_client=None, http_client_params=None, **kwargs):
        # BUG FIX (idiom): mutable default argument ``{}`` replaced by a
        # None sentinel to avoid sharing one dict across all calls.
        if http_client_params is None:
            http_client_params = {}
        AsyncConnection.__init__(self, http_client, **http_client_params)
        # NOTE(review): initializes via AWSAuthConnection (the base class of
        # AWSQueryConnection) exactly as the original did; preserved as-is.
        AWSAuthConnection.__init__(self, host, **kwargs)

    def make_request(self, action, params, path, verb, callback=None):
        """Build a query-API request (Action/Version params) and execute it."""
        request = self.build_base_http_request(
            verb, path, None, params, {}, '', self.server_name())
        if action:
            request.params['Action'] = action
        request.params['Version'] = self.APIVersion
        return self._mexe(request, callback=callback)

    def get_list(self, action, params, markers,
                 path='/', parent=None, verb='GET', callback=None):
        """Fetch a list result; parsed into a ResultSet by _on_list_ready."""
        return self.make_request(
            action, params, path, verb,
            callback=transform(
                self._on_list_ready, callback, parent or self, markers,
            ),
        )

    def get_object(self, action, params, cls,
                   path='/', parent=None, verb='GET', callback=None):
        """Fetch a single object of type ``cls``; parsed by _on_obj_ready."""
        return self.make_request(
            action, params, path, verb,
            callback=transform(
                self._on_obj_ready, callback, parent or self, cls,
            ),
        )

    def get_status(self, action, params,
                   path='/', parent=None, verb='GET', callback=None):
        """Fetch a status-only result; parsed by _on_status_ready."""
        return self.make_request(
            action, params, path, verb,
            callback=transform(
                self._on_status_ready, callback, parent or self,
            ),
        )

    def _on_list_ready(self, parent, markers, response):
        # Parse a 200 XML body into a ResultSet, otherwise raise.
        body = response.read()
        if response.status == 200 and body:
            rs = ResultSet(markers)
            h = XmlHandler(rs, parent)
            sax_parse(body, h)
            return rs
        else:
            raise self._for_status(response, body)

    def _on_obj_ready(self, parent, cls, response):
        # Parse a 200 XML body into a ``cls`` instance, otherwise raise.
        body = response.read()
        if response.status == 200 and body:
            obj = cls(parent)
            h = XmlHandler(obj, parent)
            sax_parse(body, h)
            return obj
        else:
            raise self._for_status(response, body)

    def _on_status_ready(self, parent, response):
        # Parse a 200 XML body and return only its status flag.
        body = response.read()
        if response.status == 200 and body:
            rs = ResultSet()
            h = XmlHandler(rs, parent)
            sax_parse(body, h)
            return rs.status
        else:
            raise self._for_status(response, body)

    def _for_status(self, response, body):
        """Build (not raise) the appropriate ResponseError for a failure."""
        context = 'Empty body' if not body else 'HTTP Error'
        exc = self.ResponseError(response.status, response.reason, body)
        boto.log.error('{0}: %r'.format(context), exc)
        return exc
| |
import os
import platform
import unittest
import requests
from nose.plugins.attrib import attr
from conans import DEFAULT_REVISION_V1
from conans.client.conf import ConanClientConfigParser
from conans.client.rest.conan_requester import ConanRequester
from conans.client.rest.rest_client import RestApiClient
from conans.client.rest.rest_client_v1 import complete_url
from conans.model.info import ConanInfo
from conans.model.manifest import FileTreeManifest
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import CONANFILE, CONANINFO, CONAN_MANIFEST
from conans.test.utils.server_launcher import TestServerLauncher
from conans.test.utils.test_files import hello_source_files, temp_folder
from conans.test.utils.tools import TestBufferConanOutput
from conans.util.env_reader import get_env
from conans.util.files import md5, save
class RestApiUnitTest(unittest.TestCase):

    def relative_url_completion_test(self):
        """complete_url keeps absolute urls and joins relative ones."""
        cases = [
            # absolute urls are returned unchanged
            ("http://host2", "http://host", "http://host"),
            ("http://host2", "http://host:1234", "http://host:1234"),
            ("http://host2", "https://host", "https://host"),
            ("http://host2", "https://host:1234", "https://host:1234"),
            # relative urls are appended to the base
            ("http://host", "v1/path_to_file.txt",
             "http://host/v1/path_to_file.txt"),
            ("http://host:1234", "v1/path_to_file.txt",
             "http://host:1234/v1/path_to_file.txt"),
            ("https://host", "v1/path_to_file.txt",
             "https://host/v1/path_to_file.txt"),
            ("https://host:1234", "v1/path_to_file.txt",
             "https://host:1234/v1/path_to_file.txt"),
            # relative urls keep the base's subdirectory
            ("https://host:1234/subdir/", "v1/path_to_file.txt",
             "https://host:1234/subdir/v1/path_to_file.txt"),
        ]
        for base, url, expected in cases:
            self.assertEqual(complete_url(base, url), expected)
@attr('slow')
@attr('rest_api')
class RestApiTest(unittest.TestCase):
    """Open a real server (sockets) to test rest_api function."""

    server = None  # class-wide TestServerLauncher, started once in setUpClass
    api = None     # authenticated RestApiClient pointing at the test server

    @classmethod
    def setUpClass(cls):
        # Start the in-process test server once for the whole class and build
        # an authenticated REST client against it.
        if not cls.server:
            cls.server = TestServerLauncher(server_capabilities=['ImCool', 'TooCool'])
            cls.server.start()

            # Client configured from an empty conan.conf (defaults only).
            filename = os.path.join(temp_folder(), "conan.conf")
            save(filename, "")
            config = ConanClientConfigParser(filename)
            requester = ConanRequester(config, requests)
            cls.api = RestApiClient(TestBufferConanOutput(), requester=requester,
                                    revisions_enabled=False)
            cls.api.remote_url = "http://127.0.0.1:%s" % str(cls.server.port)

            # Authenticate user
            token = cls.api.authenticate("private_user", "private_pass")
            cls.api.token = token

    @classmethod
    def tearDownClass(cls):
        cls.server.stop()

    def tearDown(self):
        # Wipe server-side storage so tests do not see each other's uploads.
        RestApiTest.server.clean()

    def server_info_test(self):
        # Capabilities must match those passed to TestServerLauncher above.
        _, _, capabilities = self.api.server_info()
        self.assertEqual(capabilities, ["ImCool", "TooCool"])

    def get_conan_test(self):
        # Upload a conans
        ref = ConanFileReference.loads("conan1/1.0.0@private_user/testing")
        self._upload_recipe(ref)

        # Get the conans
        tmp_dir = temp_folder()
        self.api.get_recipe(ref, tmp_dir)
        self.assertIn(CONANFILE, os.listdir(tmp_dir))
        self.assertIn(CONAN_MANIFEST, os.listdir(tmp_dir))

    def get_recipe_manifest_test(self):
        # Upload a conans
        ref = ConanFileReference.loads("conan2/1.0.0@private_user/testing")
        self._upload_recipe(ref)

        # Get the conans digest. The hash and time are deterministic because
        # _upload_recipe always uploads the same file set with a fixed
        # manifest timestamp (123123123).
        digest = self.api.get_recipe_manifest(ref)
        self.assertEqual(digest.summary_hash, "e925757129f5c49ecb2e8c84ce17e294")
        self.assertEqual(digest.time, 123123123)

    def get_package_test(self):
        # Upload a conans
        ref = ConanFileReference.loads("conan3/1.0.0@private_user/testing")
        self._upload_recipe(ref)

        # Upload an package
        pref = PackageReference(ref, "1F23223EFDA2")
        self._upload_package(pref)

        # Get the package
        tmp_dir = temp_folder()
        self.api.get_package(pref, tmp_dir)
        self.assertIn("hello.cpp", os.listdir(tmp_dir))

    def get_package_info_test(self):
        # Upload a conans
        ref = ConanFileReference.loads("conan3/1.0.0@private_user/testing")
        self._upload_recipe(ref)

        # Upload an package with an explicit conaninfo.txt payload
        pref = PackageReference(ref, "1F23223EFDA")
        conan_info = """[settings]
arch=x86_64
compiler=gcc
os=Linux
[options]
386=False
[requires]
Hello
Bye/2.9
Say/2.1@user/testing
Chat/2.1@user/testing:SHA_ABC
"""
        self._upload_package(pref, {CONANINFO: conan_info})

        # Get the package info and check it round-trips through the server.
        info = self.api.get_package_info(pref)
        self.assertIsInstance(info, ConanInfo)
        self.assertEqual(info, ConanInfo.loads(conan_info))

    def upload_huge_conan_test(self):
        if platform.system() != "Windows":
            # Upload a conans with 1000 files to exercise large transfers.
            ref = ConanFileReference.loads("conanhuge/1.0.0@private_user/testing")
            files = {"file%s.cpp" % name: "File conent" for name in range(1000)}
            self._upload_recipe(ref, files)

            # Get the conans
            tmp = temp_folder()
            files = self.api.get_recipe(ref, tmp)
            self.assertIsNotNone(files)
            self.assertTrue(os.path.exists(os.path.join(tmp, "file999.cpp")))

    def search_test(self):
        # Upload a conan1
        conan_name1 = "HelloOnly/0.10@private_user/testing"
        ref1 = ConanFileReference.loads(conan_name1)
        self._upload_recipe(ref1)

        # Upload a package
        conan_info = """[settings]
arch=x86_64
compiler=gcc
os=Linux
[options]
386=False
[requires]
Hello
Bye/2.9
Say/2.1@user/testing
Chat/2.1@user/testing:SHA_ABC
"""
        pref = PackageReference(ref1, "1F23223EFDA")
        self._upload_package(pref, {CONANINFO: conan_info})

        # Upload a conan2 with different case; it must NOT match the
        # case-sensitive search below (ignorecase=False).
        conan_name2 = "helloonlyToo/2.1@private_user/stable"
        ref2 = ConanFileReference.loads(conan_name2)
        self._upload_recipe(ref2)

        # Get the info about this ConanFileReference
        info = self.api.search_packages(ref1, None)
        self.assertEqual(ConanInfo.loads(conan_info).serialize_min(), info["1F23223EFDA"])

        # Search packages
        results = self.api.search("HelloOnly*", ignorecase=False)
        results = [r.copy_clear_rev() for r in results]
        self.assertEqual(results, [ref1])

    @unittest.skipIf(get_env("TESTING_REVISIONS_ENABLED", False), "Not prepared with revs")
    def remove_test(self):
        # Upload a conans
        ref = ConanFileReference.loads("MyFirstConan/1.0.0@private_user/testing")
        self._upload_recipe(ref)
        ref = ref.copy_with_rev(DEFAULT_REVISION_V1)
        path1 = self.server.server_store.base_folder(ref)
        self.assertTrue(os.path.exists(path1))

        # Remove conans and packages; the whole recipe folder must disappear.
        self.api.remove_conanfile(ref)
        self.assertFalse(os.path.exists(path1))

    @unittest.skipIf(get_env("TESTING_REVISIONS_ENABLED", False), "Not prepared with revs")
    def remove_packages_test(self):
        ref = ConanFileReference.loads("MySecondConan/2.0.0@private_user/testing#%s"
                                       % DEFAULT_REVISION_V1)
        self._upload_recipe(ref)

        # Upload five packages and remember each one's storage folder.
        folders = {}
        for sha in ["1", "2", "3", "4", "5"]:
            # Upload an package
            pref = PackageReference(ref, sha, DEFAULT_REVISION_V1)
            self._upload_package(pref)
            folder = self.server.server_store.package(pref)
            self.assertTrue(os.path.exists(folder))
            folders[sha] = folder

        # Removing one package id must leave the others untouched.
        self.api.remove_packages(ref, ["1"])
        self.assertTrue(os.path.exists(self.server.server_store.base_folder(ref)))
        self.assertFalse(os.path.exists(folders["1"]))
        self.assertTrue(os.path.exists(folders["2"]))
        self.assertTrue(os.path.exists(folders["3"]))
        self.assertTrue(os.path.exists(folders["4"]))
        self.assertTrue(os.path.exists(folders["5"]))

        self.api.remove_packages(ref, ["2", "3"])
        self.assertTrue(os.path.exists(self.server.server_store.base_folder(ref)))
        self.assertFalse(os.path.exists(folders["1"]))
        self.assertFalse(os.path.exists(folders["2"]))
        self.assertFalse(os.path.exists(folders["3"]))
        self.assertTrue(os.path.exists(folders["4"]))
        self.assertTrue(os.path.exists(folders["5"]))

        # An empty package-id list removes all remaining packages, while the
        # recipe base folder stays in place.
        self.api.remove_packages(ref, [])
        self.assertTrue(os.path.exists(self.server.server_store.base_folder(ref)))
        for sha in ["1", "2", "3", "4", "5"]:
            self.assertFalse(os.path.exists(folders[sha]))

    def _upload_package(self, package_reference, base_files=None):
        # Build a small source fileset on disk (optionally extended/overridden
        # by base_files) and upload it as the given package reference.
        files = hello_source_files(3, [1, 12])
        if base_files:
            files.update(base_files)
        tmp_dir = temp_folder()
        abs_paths = {}
        for filename, content in files.items():
            abs_path = os.path.join(tmp_dir, filename)
            save(abs_path, content)
            abs_paths[filename] = abs_path
        self.api.upload_package(package_reference, abs_paths, None, retry=1, retry_wait=0)

    def _upload_recipe(self, ref, base_files=None, retry=1, retry_wait=0):
        # Build a recipe fileset (sources + generated conanfile.py + manifest)
        # on disk and upload it under the given reference.
        files = hello_source_files(3, [1, 12])
        if base_files:
            files.update(base_files)
        content = """
from conans import ConanFile
class MyConan(ConanFile):
    name = "%s"
    version = "%s"
    settings = arch, compiler, os
""" % (ref.name, ref.version)
        files[CONANFILE] = content
        # Manifest with a fixed timestamp so digests are deterministic.
        files_md5s = {filename: md5(content) for filename, content in files.items()}
        conan_digest = FileTreeManifest(123123123, files_md5s)

        tmp_dir = temp_folder()
        abs_paths = {}
        for filename, content in files.items():
            abs_path = os.path.join(tmp_dir, filename)
            save(abs_path, content)
            abs_paths[filename] = abs_path
        abs_paths[CONAN_MANIFEST] = os.path.join(tmp_dir, CONAN_MANIFEST)
        conan_digest.save(tmp_dir)
        self.api.upload_recipe(ref, abs_paths, None, retry, retry_wait)
| |
import json
import io
import binascii
import glob
import traceback
from neo.VM.ExecutionEngine import ExecutionEngine
from neo.VM.ExecutionEngine import ExecutionContext
from neo.VM.RandomAccessStack import RandomAccessStack
from neo.Core.Cryptography.Crypto import Crypto
from neo.Core.UInt160 import UInt160
from typing import Optional
from neo.VM.VMState import VMStateStr
from neo.VM.OpCode import ToName as OpcodeToName
from neo.VM.OpCode import RET
from neo.VM import InteropService
from neo.VM.Debugger import Debugger
class MessageProvider:
    """Supplies the verification message consumed by the VM engine."""

    def __init__(self, message: str):
        """
        Args:
            message: expected in format "0xAABB"
        """
        # Drop the "0x" prefix; keep the remaining hex characters as bytes.
        self.message = bytes(message[2:], 'utf-8')

    def GetMessage(self) -> bytes:
        """Return the stored message (hex characters as bytes, not decoded)."""
        return self.message
class ScriptTable:
    """In-memory script_hash -> contract script lookup used by the engine."""

    def __init__(self):
        # Maps hex-encoded (byte-reversed) script hash to the raw script.
        self.data = dict()  # script_hash:contract

    def GetScript(self, script_hash: bytes) -> Optional[bytes]:
        """Look up a script by hash; tolerates an optional b'0x' prefix."""
        key = script_hash[2:] if script_hash.startswith(b'0x') else script_hash
        return self.data.get(key, None)

    def Add(self, script: bytearray) -> None:
        """Index `script` by its Crypto.Hash160 digest, reversed byte order, hex-encoded."""
        digest = bytearray(Crypto.Default().Hash160(script))
        digest.reverse()
        self.data[binascii.hexlify(digest)] = script
# Counters reported in the summary line printed by main().
file_count = 0          # number of JSON test-vector files discovered
test_count = 0          # number of individual tests executed
skipped_test_count = 0  # tests skipped because their script data was unreadable
def main():
    """Discover every JSON test-vector file below the CWD and execute it."""
    global file_count
    for filename in glob.glob("./**/*.json", recursive=True):
        file_count += 1
        # Vector files ship with a UTF-8 BOM, hence the 'utf-8-sig' codec.
        with io.open(filename, 'r', encoding='utf-8-sig') as f:
            data = json.load(f)
        try:
            execute_test(data)
        except Exception:
            # Should never happen; dump the traceback and stop the run.
            traceback.print_exc()
            break
    print(f"Executed {test_count} test(s) from {file_count} file(s). Skipped {skipped_test_count} test(s)")
def execute_test(data: dict):
    """Run every test in one JSON test-vector document.

    For each test: build the interop service, optional message container and
    script table, load the script into a fresh ExecutionEngine, then drive it
    with the per-step debugger actions and assert each step's expected result.
    Updates the module-level test_count / skipped_test_count counters.
    """
    global test_count, skipped_test_count
    for test in data['tests']:
        test_count += 1
        # interop service
        service = InteropService.InteropService()

        # message provider
        script_container = None
        message = test.get("message", None)
        if message:
            script_container = MessageProvider(message)

        # prepare script table
        scripts = test.get("scriptTable", None)
        script_table = None
        skip_test = False
        if scripts:
            script_table = ScriptTable()
            for entry in scripts:
                try:
                    script = binascii.unhexlify(entry['script'][2:])
                    script_table.Add(script)
                except binascii.Error:
                    # BUG FIX: the original used `continue` here, which only
                    # skipped the current scriptTable entry (the inner loop)
                    # and then ran the test anyway — while also decrementing
                    # test_count once per bad entry. Break out and skip the
                    # whole test, matching the printed message.
                    print(f"Skipping test {data['category']}-{data['name']}, cannot read script data")
                    test_count -= 1
                    skipped_test_count += 1
                    skip_test = True
                    break
        if skip_test:
            continue

        # create engine and run
        engine = ExecutionEngine(crypto=Crypto.Default(), service=service, container=script_container, table=script_table, exit_on_error=True)
        debugger = Debugger(engine)

        # TODO: should enforce 0x<data> rule in the JSON test case
        if test['script'].startswith('0x'):
            script = test['script'][2:]
        else:
            script = test['script']
        try:
            script = binascii.unhexlify(script)
        except binascii.Error:
            print(f"Skipping test {data['category']}-{data['name']}, cannot read script data")
            test_count -= 1
            skipped_test_count += 1
            continue
        engine.LoadScript(script)

        steps = test.get('steps', None)
        if steps is None:
            continue
        for i, step in enumerate(steps):
            # Apply all debugger actions for this step, then check the result.
            actions = step.get('actions', [])
            for action in actions:
                if action == "StepInto":
                    debugger.StepInto()
                elif action == "Execute":
                    debugger.Execute()
                elif action == "StepOver":
                    debugger.StepOver()
                elif action == "StepOut":
                    debugger.StepOut()
            test_name = test.get("name", "")
            msg = f"{data['category']}-{data['name']}-{test_name}-{i}"
            assert_result(engine, step['result'], msg)
def assert_result(engine: ExecutionEngine, result: dict, msg: str):
    """Assert the engine's VM state and, when present in the expected data,
    its invocation stack and result stack."""
    actual_state = VMStateStr(engine.State)
    expected_state = result['state']
    assert actual_state.lower() == expected_state.lower(), f"[{msg}] State differs! Expected: {expected_state} Actual: {actual_state}"

    expected_istack = result.get("invocationStack", None)
    if expected_istack:
        assert_invocation_stack(engine.InvocationStack, expected_istack, msg)

    expected_rstack = result.get("resultStack", None)
    if expected_rstack:
        assert_stack_result(engine.ResultStack, expected_rstack, msg)
def assert_invocation_stack(istack: RandomAccessStack, result: dict, msg: str):
    """Validate the engine's invocation stack against the expected JSON data.

    `result` is a list of expected execution-context dicts ordered top-first;
    `istack.Items` appears to be stored bottom-first (hence the reversed()) —
    TODO confirm against RandomAccessStack.
    """
    assert istack.Count == len(result), f"[{msg}] Invocation stack size differs! Expected: {len(result)} Actual: {istack.Count}"
    for expected_context, actual_context in zip(result, reversed(istack.Items)):  # type: ExecutionContext
        # Expected script hash arrives as "0x<hex>"; strip the prefix and
        # compare as lowercase hex text.
        expected_script_hash = expected_context['scriptHash'][2:].lower()
        actual_script_hash = binascii.hexlify(actual_context.ScriptHash()).decode()
        assert actual_script_hash == expected_script_hash, f"[{msg}] Script hash differs! Expected: {expected_script_hash} Actual: {actual_script_hash}"

        # When the pointer has run past the end of the script, treat the next
        # instruction as RET.
        opcode = RET if actual_context.InstructionPointer >= actual_context.Script.Length else actual_context.Script[actual_context.InstructionPointer]
        expected_next_instruction = expected_context['nextInstruction']
        # hack to work around C#'s lack of having defined enum members for PUSHBYTES2-PUSHBYTES74
        # TODO: remove this once neo-vm is updated to have human readable names for the above enum members
        if expected_next_instruction.isdecimal():
            expected_next_instruction = OpcodeToName(int(expected_next_instruction))
        actual_next_instruction = OpcodeToName(opcode)
        assert actual_next_instruction == expected_next_instruction, f"[{msg}] Next instruction differs! Expected: {expected_next_instruction} Actual: {actual_next_instruction}"

        expected_ip = expected_context['instructionPointer']
        actual_ip = actual_context.InstructionPointer
        assert actual_ip == expected_ip, f"[{msg}] Instruction pointer differs! Expected: {expected_ip} Actual: {actual_ip}"

        # Evaluation/alt stacks are only compared when the vector includes them.
        eval_stack = expected_context.get("evaluationStack", None)
        if eval_stack:
            assert_stack_result(actual_context.EvaluationStack, eval_stack, msg)
        alt_stack = expected_context.get("altStack", None)
        if alt_stack:
            assert_stack_result(actual_context.AltStack, alt_stack, msg)
def assert_stack_result(stack: RandomAccessStack, result: dict, msg: str):
assert stack.Count == len(result), f"[{msg}] Stack size differs! Expected: {len(result)} Actual: {stack.Count}"
for i, (expected_item, actual_item) in enumerate(zip(result, reversed(stack.Items))):
prepared_testvector = prepare_testvector(expected_item)
prepared_item = prepare_stackitem(actual_item)
assert prepared_item == prepared_testvector, f"[{msg}] Stack item differs! Expected: {prepared_testvector} Actual: {prepared_item}"
def prepare_testvector(item):
    """Normalize one expected-value JSON entry into a (type, value) tuple
    comparable against prepare_stackitem() output.

    Raises Exception for unknown item types.
    """
    itype = item['type']
    if itype in ["Array", "Struct"]:
        # Recurse so nested containers compare element-wise.
        return (itype, [prepare_testvector(child) for child in item['value']])
    if itype == "Boolean":
        return (itype, item['value'])
    if itype == "ByteArray":
        # Value arrives as "0x<hex>"; decode to a bytearray.
        return (itype, bytearray.fromhex(item['value'][2:]))
    if itype == "Integer":
        return (itype, int(item['value']))
    if itype == "Interop":
        return (itype, item['value'])
    if itype == "Map":
        return (itype, item['value'])
    raise Exception(f"No handling for type: {itype}")
def prepare_stackitem(item):
    """Normalize a VM stack item into a (type, value) tuple comparable
    against prepare_testvector() output.

    Raises Exception for stack item types without a handler.
    """
    if isinstance(item, InteropService.Struct):
        # has to come before Array as it subclasses Array, otherwise we'll tag it wrong
        new_value = []
        for i in item.GetArray():
            new_value.append(prepare_stackitem(i))
        return ("Struct", new_value)
    elif isinstance(item, InteropService.Array):
        new_value = []
        for i in item.GetArray():
            new_value.append(prepare_stackitem(i))
        return ("Array", new_value)
    elif isinstance(item, InteropService.Boolean):
        return ("Boolean", item.GetBoolean())
    elif isinstance(item, InteropService.ByteArray):
        return ("ByteArray", item.GetByteArray())
    elif isinstance(item, InteropService.Integer):
        return ("Integer", item.GetBigInteger())
    elif isinstance(item, InteropService.InteropInterface):
        # Interop objects are compared by the wrapped object's class name only.
        obj = item.GetInterface()
        return ("Interop", obj.__class__.__name__)
    elif isinstance(item, InteropService.Map):
        # TODO: implement once there is a reference test case that does not return an empty dictionary
        return ("Map", {})
    else:
        raise Exception(f"No handling for type: {item}")
if __name__ == "__main__":
# Note: running from main requires manually downloading the tests from the neo-vm project
# and storing them in a folder in the root directory
main()
| |
#!/usr/bin/env python
# Copyright (c) 2002-2011 ActiveState Software Inc. All rights reserved.
"""ActivePython identification module
This can be run as a script to dump version info:
python .../activestate.py
or to relocate this Python installation appropriately (see relocate_python()
for details):
python .../activestate.py --relocate
"""
import sys
#---- ActivePython build/configuration info
version = "2.7.2.5"
version_info = {'bsddb_ver': None,
'build_host': 'apy-win32',
'build_num': 5,
'build_plat_fullname': 'win32-xp-x86',
'build_plat_name': 'win32-x86',
'build_time': 'Fri Jun 24 12:38:05 2011',
'bzip2_ver': (1, 0, 5),
'compiler': 'vc9-x86',
'configuration': ['-f',
'apyconfig-apy27-rrun.py',
'-p',
'apy27',
'--build-tag',
'rrun'],
'openssl_ver': (0, 9, 8, 'r'),
'platinfo': {'arch': 'x86',
'name': 'win32-x86',
'os': 'win32',
'os_csd': 'SP3',
'os_name': 'XP',
'os_ver': '5.1.2600'},
'platname': 'win32-x86',
'product_type': 'ActivePython',
'python_src': ('2.7.2', 'path', 'Python-2.7.2.tgz'),
'pywin32_build': '214',
'pywin32_src': ('20111216', 'path', 'pywin32-20111216-CRLF.zip'),
'pywin32_ver': '20111216',
'scm_revision': 'r64662-trunk',
'sqlite3_ver': (3, 6, 21),
'tcltk_ver': (8, 5, 9),
'tix_ver': (8, 4, 3),
'with_bsddb': False,
'with_bzip2': True,
'with_ctypes': True,
'with_docs': True,
'with_pywin32': True,
'with_sqlite3': True,
'with_ssl': True,
'with_tcltk': True,
'with_tests': True,
'zlib_ver': (1, 2, 3)}
compiler_info = """Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 15.00.30729.01 for 80x86"""
# Used for Python install relocation.
prefixes = set([
# Prefix to which extensions were built
'F:\\as\\apy-trunk\\build\\py2_7_2-win32-x86-apy27-rrun\\ExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIxExTAcTiVePyThOnPrEfIx',
# Prefix to which Python sources were built.
'F:\\as\\apy-trunk\\build\\py2_7_2-win32-x86-apy27-rrun\\CoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIxCoReAcTiVePyThOnPrEfIx',
# Prefix to the Python image (sys.prefix)
# (relied by pypm -- for relocation)
'F:\\as\\apy-trunk\\build\\py2_7_2-win32-x86-apy27-rrun\\image\\feature-core\\INSTALLDIR',
])
shortest_original_prefix_length = 261
#---- relocation code
def _is_path_binary(path):
"""Return true iff the given file is binary.
Raises an EnvironmentError if the file does not exist or cannot be
accessed.
"""
fin = open(path, 'rb')
try:
CHUNKSIZE = 1024
while 1:
chunk = fin.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
finally:
fin.close()
return False
def _relocate_path(path, from_prefix, to_prefix, log):
    """Rewrite occurrences of `from_prefix` to `to_prefix` in one file.

    Text files get a plain substring replacement of the prefix itself;
    binary files get an in-place patch of the embedded "<prefix>/lib"
    runtime-lib path (null-terminated C string, NUL-padded to keep the
    original length). `log` is a callable taking one message string.
    """
    import sys
    import os
    from os.path import join
    import stat
    import re

    # Determine if this file needs to be relocated.
    fin = open(path, 'rb')
    try:
        content = fin.read()
    finally:
        fin.close()
    is_binary = _is_path_binary(path)
    if is_binary:
        # Binaries embed the runtime lib dir rather than the bare prefix.
        from_str = join(from_prefix, "lib")
        to_str = join(to_prefix, "lib")
    else:
        from_str = from_prefix
        to_str = to_prefix
    if sys.version_info[0] >= 3:
        # content was read in binary mode; match types for the `in` test below.
        from_str = bytes(from_str, 'utf-8')
        to_str = bytes(to_str, 'utf-8')
    if from_str not in content:
        return

    # Relocate this file.
    log("relocate '%s'" % path)
    perm = stat.S_IMODE(os.stat(path).st_mode)  # saved; restored after rewrite
    if is_binary:
        if sys.platform.startswith("aix"):
            # On AIX the lib path _list_ is stored as one string, rather
            # than just the one path. This means that the integrity of
            # the path list must be maintained by separating with ':'.
            # We also change the remainder to all x's to ensure it is
            # a bogus path.
            to_str = join(to_prefix, "lib") \
                     + ':' + "x"*(len(from_prefix)-len(to_prefix)-1)
            if sys.version_info[0] >= 3:
                to_str = bytes(to_str, 'utf-8')
            #log("replace (length %d)\n\t%s\nwith (length %d)\n\t%s",
            #    % (len(from_str), from_str, len(to_str), to_str))
            content = content.replace(from_str, to_str)
        else:
            # Replace 'from_str' with 'to_str' in a null-terminated string.
            # Make sure to properly correct for trailing content in the
            # same string because:
            # - on HP-UX sometimes a full path to the shared lib is stored:
            #     <from_str>/libtcl8.4.sl\0
            # - on AIX a path _list_ is stored:
            #     <from_str>:other/lib/paths\0
            #   NOTE: This *should* work on AIX, AFAICT, but it does
            #   *not*. See above for special handling for AIX.
            #TODO: should this regex use re.DOTALL flag?
            # NOTE(review): on Python 3 `from_str` is bytes at this point but
            # the pattern and replacement literals are str, so this branch
            # would raise TypeError — presumably only exercised under
            # Python 2. Confirm before relying on it under Python 3.
            pattern = re.compile(re.escape(from_str) + "([^\0]*)\0")
            def c_string_replace(match, before=from_str, after=to_str):
                # NUL-pad so the patched C string keeps its original length.
                lendiff = len(before) - len(after)
                s = after + match.group(1) + ("\0" * lendiff) + "\0"
                # Encode nulls as '0' instead of '\x00' so one can see
                # the before and after strings line up.
                #log("replace (length %d)\n\t%s\nwith (length %d)\n\t%s",
                #    % (len(match.group(0)),
                #       repr(match.group(0)).replace("\\x00", '0'),
                #       len(s),
                #       repr(s).replace("\\x00", '0')))
                return s
            content = pattern.sub(c_string_replace, content)
    else:
        #log("replace (length %d)\n\t%s\nwith (length %d)\n\t%s",
        #    % (len(from_str), from_str, len(to_str), to_str))
        content = content.replace(from_str, to_str)

    # Sometimes get the following error. Avoid it by removing file first.
    #   IOError: [Errno 26] Text file busy: '$path'
    os.remove(path)
    fout = open(path, 'wb')
    try:
        fout.write(content)
    finally:
        fout.close()
    os.chmod(path, perm)  # restore permissions
def relocate_python(install_prefix, verbose=False):
    """Relocate this Python installation.

    "Relocation" involves updating hardcoded shebang lines in Python scripts
    and (on some platforms) binary patching of built-in runtime-lib-paths
    to point to the given install prefix.
    """
    import sys
    import os
    from os.path import isabs, join, splitext

    def log(s):
        # Logging is a no-op unless verbose was requested.
        if verbose:
            sys.stderr.write(s + "\n")

    assert isabs(install_prefix)
    if len(install_prefix) > shortest_original_prefix_length:
        raise RuntimeError("cannot properly relocate this Python "
                           "installation (prefix='%s') because install "
                           "path (%d chars) is longer than the original "
                           "build prefix (%d chars)"
                           % (install_prefix, len(install_prefix),
                              shortest_original_prefix_length))

    log("relocate this Python to '%s'" % install_prefix)
    # Patch every file in the install tree once per original build prefix,
    # skipping compiled bytecode.
    skipped_extensions = (".pyo", ".pyc")
    for prefix in prefixes:
        if prefix == install_prefix:
            continue  # nothing to rewrite for an identical prefix
        for dirpath, dirnames, filenames in os.walk(install_prefix):
            for filename in filenames:
                if splitext(filename)[1] not in skipped_extensions:
                    _relocate_path(join(dirpath, filename),
                                   prefix, install_prefix, log)
#---- mainline

if __name__ == "__main__":
    if "--relocate" in sys.argv:
        # Determine the install_prefix holding this module and relocate
        # that Python installation.
        if sys.platform == "win32":
            raise RuntimeError("relocating a Python install isn't "
                               "necessary on Windows")
        # <prefix>\lib\pythonX.Y\site-packages\activestate.py
        from os.path import dirname, exists, join, basename, abspath
        install_prefix = dirname(dirname(dirname(dirname(abspath(__file__)))))
        # Sanity check: the computed prefix must actually contain bin/python.
        python_exe = join(install_prefix, "bin", "python")
        if not exists(python_exe):
            raise RuntimeError("'%s' does not exist: it doesn't look like "
                               "'%s' is in a Python site-packages dir"
                               % (python_exe, basename(__file__)))
        # Clean up the temporary names before running the relocation.
        del python_exe, dirname, exists, join, basename, abspath
        relocate_python(install_prefix, True)
    else:
        # Default action: dump a summary of the build configuration,
        # skipping empty, source-archive and nested/bulky entries.
        for key, value in sorted(version_info.items()):
            if value is None: continue
            if key.endswith("_src"): continue
            if key in ("platinfo", "configuration"): continue
            print("%s: %s" % (key, value))
| |
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, double, integer
from .validators.acmpca import (
validate_certificateauthority_type,
validate_key_algorithm,
validate_signing_algorithm,
validate_validity_type,
)
class ExtendedKeyUsage(AWSProperty):
    """
    `ExtendedKeyUsage <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-extendedkeyusage.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "ExtendedKeyUsageObjectIdentifier": (str, False),
        "ExtendedKeyUsageType": (str, False),
    }
class EdiPartyName(AWSProperty):
    """
    `EdiPartyName <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-edipartyname.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "NameAssigner": (str, True),
        "PartyName": (str, True),
    }
class OtherName(AWSProperty):
    """
    `OtherName <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-othername.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "TypeId": (str, True),
        "Value": (str, True),
    }
class Subject(AWSProperty):
    """
    `Subject <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-subject.html>`__
    """

    # X.500 distinguished-name attributes; all optional.
    props: PropsDictType = {
        "CommonName": (str, False),
        "Country": (str, False),
        "DistinguishedNameQualifier": (str, False),
        "GenerationQualifier": (str, False),
        "GivenName": (str, False),
        "Initials": (str, False),
        "Locality": (str, False),
        "Organization": (str, False),
        "OrganizationalUnit": (str, False),
        "Pseudonym": (str, False),
        "SerialNumber": (str, False),
        "State": (str, False),
        "Surname": (str, False),
        "Title": (str, False),
    }
class GeneralName(AWSProperty):
    """
    `GeneralName <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-generalname.html>`__
    """

    # Alternative-name forms; all optional.
    props: PropsDictType = {
        "DirectoryName": (Subject, False),
        "DnsName": (str, False),
        "EdiPartyName": (EdiPartyName, False),
        "IpAddress": (str, False),
        "OtherName": (OtherName, False),
        "RegisteredId": (str, False),
        "Rfc822Name": (str, False),
        "UniformResourceIdentifier": (str, False),
    }
class KeyUsage(AWSProperty):
    """
    `KeyUsage <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-keyusage.html>`__
    """

    # Key-usage bit flags; all optional booleans.
    props: PropsDictType = {
        "CRLSign": (boolean, False),
        "DataEncipherment": (boolean, False),
        "DecipherOnly": (boolean, False),
        "DigitalSignature": (boolean, False),
        "EncipherOnly": (boolean, False),
        "KeyAgreement": (boolean, False),
        "KeyCertSign": (boolean, False),
        "KeyEncipherment": (boolean, False),
        "NonRepudiation": (boolean, False),
    }
class Qualifier(AWSProperty):
    """
    `Qualifier <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-qualifier.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "CpsUri": (str, True),
    }
class PolicyQualifierInfo(AWSProperty):
    """
    `PolicyQualifierInfo <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-policyqualifierinfo.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "PolicyQualifierId": (str, True),
        "Qualifier": (Qualifier, True),
    }
class PolicyInformation(AWSProperty):
    """
    `PolicyInformation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-policyinformation.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "CertPolicyId": (str, True),
        "PolicyQualifiers": ([PolicyQualifierInfo], False),
    }
class Extensions(AWSProperty):
    """
    `Extensions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-extensions.html>`__
    """

    # X.509 certificate extensions; all optional.
    props: PropsDictType = {
        "CertificatePolicies": ([PolicyInformation], False),
        "ExtendedKeyUsage": ([ExtendedKeyUsage], False),
        "KeyUsage": (KeyUsage, False),
        "SubjectAlternativeNames": ([GeneralName], False),
    }
class ApiPassthrough(AWSProperty):
    """
    `ApiPassthrough <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-apipassthrough.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "Extensions": (Extensions, False),
        "Subject": (Subject, False),
    }
class Validity(AWSProperty):
    """
    `Validity <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificate-validity.html>`__
    """

    # Both required; Type is validated against the allowed validity types.
    props: PropsDictType = {
        "Type": (validate_validity_type, True),
        "Value": (double, True),
    }
class Certificate(AWSObject):
    """
    `Certificate <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-acmpca-certificate.html>`__
    """

    # CloudFormation resource type emitted for this object.
    resource_type = "AWS::ACMPCA::Certificate"

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "ApiPassthrough": (ApiPassthrough, False),
        "CertificateAuthorityArn": (str, True),
        "CertificateSigningRequest": (str, True),
        "SigningAlgorithm": (validate_signing_algorithm, True),
        "TemplateArn": (str, False),
        "Validity": (Validity, True),
        "ValidityNotBefore": (Validity, False),
    }
class AccessMethod(AWSProperty):
    """
    `AccessMethod <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-accessmethod.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "AccessMethodType": (str, False),
        "CustomObjectIdentifier": (str, False),
    }
class AccessDescription(AWSProperty):
    """
    `AccessDescription <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-accessdescription.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "AccessLocation": (GeneralName, True),
        "AccessMethod": (AccessMethod, True),
    }
class CsrExtensions(AWSProperty):
    """
    `CsrExtensions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-csrextensions.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "KeyUsage": (KeyUsage, False),
        "SubjectInformationAccess": ([AccessDescription], False),
    }
class CrlConfiguration(AWSProperty):
    """
    `CrlConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-crlconfiguration.html>`__
    """

    # Certificate-revocation-list settings; all optional.
    props: PropsDictType = {
        "CustomCname": (str, False),
        "Enabled": (boolean, False),
        "ExpirationInDays": (integer, False),
        "S3BucketName": (str, False),
        "S3ObjectAcl": (str, False),
    }
class OcspConfiguration(AWSProperty):
    """
    `OcspConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-ocspconfiguration.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "Enabled": (boolean, False),
        "OcspCustomCname": (str, False),
    }
class RevocationConfiguration(AWSProperty):
    """
    `RevocationConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-acmpca-certificateauthority-revocationconfiguration.html>`__
    """

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "CrlConfiguration": (CrlConfiguration, False),
        "OcspConfiguration": (OcspConfiguration, False),
    }
class CertificateAuthority(AWSObject):
    """
    `CertificateAuthority <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-acmpca-certificateauthority.html>`__
    """

    # CloudFormation resource type emitted for this object.
    resource_type = "AWS::ACMPCA::CertificateAuthority"

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "CsrExtensions": (CsrExtensions, False),
        "KeyAlgorithm": (validate_key_algorithm, True),
        "KeyStorageSecurityStandard": (str, False),
        "RevocationConfiguration": (RevocationConfiguration, False),
        "SigningAlgorithm": (validate_signing_algorithm, True),
        "Subject": (Subject, True),
        "Tags": (Tags, False),
        "Type": (validate_certificateauthority_type, True),
    }
class CertificateAuthorityActivation(AWSObject):
    """
    `CertificateAuthorityActivation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-acmpca-certificateauthorityactivation.html>`__
    """

    # CloudFormation resource type emitted for this object.
    resource_type = "AWS::ACMPCA::CertificateAuthorityActivation"

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "Certificate": (str, True),
        "CertificateAuthorityArn": (str, True),
        "CertificateChain": (str, False),
        "Status": (str, False),
    }
class Permission(AWSObject):
    """
    `Permission <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-acmpca-permission.html>`__
    """

    # CloudFormation resource type emitted for this object.
    resource_type = "AWS::ACMPCA::Permission"

    # CloudFormation property name -> (type/validator, required?).
    props: PropsDictType = {
        "Actions": ([str], True),
        "CertificateAuthorityArn": (str, True),
        "Principal": (str, True),
        "SourceAccount": (str, False),
    }
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import six.moves.cPickle as pickle
import os
import sys
import unittest
import stat
from contextlib import closing
from gzip import GzipFile
from tempfile import mkdtemp
from shutil import rmtree
from time import sleep, time
from six.moves import range
from swift.common import ring, utils
class TestRingBase(unittest.TestCase):
    """Pin swift's hash path prefix/suffix to known values for each test and
    restore the originals afterwards, so tests never leak global state."""

    def setUp(self):
        # Remember the process-wide values, then install the test fixtures.
        self._orig_hash_suffix, self._orig_hash_prefix = (
            utils.HASH_PATH_SUFFIX, utils.HASH_PATH_PREFIX)
        utils.HASH_PATH_SUFFIX = 'endcap'
        utils.HASH_PATH_PREFIX = ''

    def tearDown(self):
        # Put back whatever was in place before setUp ran.
        (utils.HASH_PATH_SUFFIX,
         utils.HASH_PATH_PREFIX) = self._orig_hash_suffix, self._orig_hash_prefix
class TestRingData(unittest.TestCase):
    """Tests for ring.RingData construction and (de)serialization."""

    def setUp(self):
        # Fresh scratch directory next to this test module.
        self.testdir = os.path.join(os.path.dirname(__file__), 'ring_data')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def assert_ring_data_equal(self, rd_expected, rd_got):
        # Compare the three fields that define a ring's layout.
        self.assertEqual(rd_expected._replica2part2dev_id,
                         rd_got._replica2part2dev_id)
        self.assertEqual(rd_expected.devs, rd_got.devs)
        self.assertEqual(rd_expected._part_shift, rd_got._part_shift)

    def test_attrs(self):
        # RingData must store its constructor arguments unmodified.
        r2p2d = [[0, 1, 0, 1], [0, 1, 0, 1]]
        d = [{'id': 0, 'zone': 0, 'region': 0, 'ip': '10.1.1.0', 'port': 7000},
             {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1', 'port': 7000}]
        s = 30
        rd = ring.RingData(r2p2d, d, s)
        self.assertEqual(rd._replica2part2dev_id, r2p2d)
        self.assertEqual(rd.devs, d)
        self.assertEqual(rd._part_shift, s)

    def test_can_load_pickled_ring_data(self):
        # Rings written as plain gzipped pickles (any protocol) must still
        # load; devs lacking a 'region' key get a default region backfilled.
        rd = ring.RingData(
            [[0, 1, 0, 1], [0, 1, 0, 1]],
            [{'id': 0, 'zone': 0, 'ip': '10.1.1.0', 'port': 7000},
             {'id': 1, 'zone': 1, 'ip': '10.1.1.1', 'port': 7000}],
            30)
        ring_fname = os.path.join(self.testdir, 'foo.ring.gz')
        for p in range(pickle.HIGHEST_PROTOCOL):
            with closing(GzipFile(ring_fname, 'wb')) as f:
                pickle.dump(rd, f, protocol=p)
            meta_only = ring.RingData.load(ring_fname, metadata_only=True)
            self.assertEqual([
                {'id': 0, 'zone': 0, 'region': 1, 'ip': '10.1.1.0',
                 'port': 7000},
                {'id': 1, 'zone': 1, 'region': 1, 'ip': '10.1.1.1',
                 'port': 7000},
            ], meta_only.devs)
            # Pickled rings can't load only metadata, so you get it all
            self.assert_ring_data_equal(rd, meta_only)
            ring_data = ring.RingData.load(ring_fname)
            self.assert_ring_data_equal(rd, ring_data)

    def test_roundtrip_serialization(self):
        # save() then load() must reproduce the ring exactly; metadata_only
        # yields the devs (with default region) and an empty assignment table.
        ring_fname = os.path.join(self.testdir, 'foo.ring.gz')
        rd = ring.RingData(
            [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])],
            [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30)
        rd.save(ring_fname)
        meta_only = ring.RingData.load(ring_fname, metadata_only=True)
        self.assertEqual([
            {'id': 0, 'zone': 0, 'region': 1},
            {'id': 1, 'zone': 1, 'region': 1},
        ], meta_only.devs)
        self.assertEqual([], meta_only._replica2part2dev_id)
        rd2 = ring.RingData.load(ring_fname)
        self.assert_ring_data_equal(rd, rd2)

    def test_deterministic_serialization(self):
        """
        Two identical rings should produce identical .gz files on disk.

        Only true on Python 2.7 or greater.
        """
        if sys.version_info[0] == 2 and sys.version_info[1] < 7:
            return
        os.mkdir(os.path.join(self.testdir, '1'))
        os.mkdir(os.path.join(self.testdir, '2'))
        # These have to have the same filename (not full path,
        # obviously) since the filename gets encoded in the gzip data.
        ring_fname1 = os.path.join(self.testdir, '1', 'the.ring.gz')
        ring_fname2 = os.path.join(self.testdir, '2', 'the.ring.gz')
        rd = ring.RingData(
            [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])],
            [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30)
        rd.save(ring_fname1)
        rd.save(ring_fname2)
        with open(ring_fname1) as ring1:
            with open(ring_fname2) as ring2:
                self.assertEqual(ring1.read(), ring2.read())

    def test_permissions(self):
        # Saved ring files must be world-readable (mode 0644).
        # NOTE(review): this oct() comparison is Python 2 specific — on
        # Python 3 oct() returns '0o644'.
        ring_fname = os.path.join(self.testdir, 'stat.ring.gz')
        rd = ring.RingData(
            [array.array('H', [0, 1, 0, 1]), array.array('H', [0, 1, 0, 1])],
            [{'id': 0, 'zone': 0}, {'id': 1, 'zone': 1}], 30)
        rd.save(ring_fname)
        self.assertEqual(oct(stat.S_IMODE(os.stat(ring_fname).st_mode)),
                         '0644')
class TestRing(TestRingBase):
    """Tests for ring.Ring lookup, reload, and handoff behavior."""
    def setUp(self):
        """Serialize a known three-replica ring and load it as self.ring."""
        super(TestRing, self).setUp()
        self.testdir = mkdtemp()
        self.testgz = os.path.join(self.testdir, 'whatever.ring.gz')
        self.intended_replica2part2dev_id = [
            array.array('H', [0, 1, 0, 1]),
            array.array('H', [0, 1, 0, 1]),
            array.array('H', [3, 4, 3, 4])]
        # Device id 2 is deliberately a hole (None) to exercise gaps in
        # the device list.
        self.intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0,
                               'ip': '10.1.1.1', 'port': 6000,
                               'replication_ip': '10.1.0.1',
                               'replication_port': 6066},
                              {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0,
                               'ip': '10.1.1.1', 'port': 6000,
                               'replication_ip': '10.1.0.2',
                               'replication_port': 6066},
                              None,
                              {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0,
                               'ip': '10.1.2.1', 'port': 6000,
                               'replication_ip': '10.2.0.1',
                               'replication_port': 6066},
                              {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0,
                               'ip': '10.1.2.2', 'port': 6000,
                               'replication_ip': '10.2.0.1',
                               'replication_port': 6066}]
        self.intended_part_shift = 30
        self.intended_reload_time = 15
        ring.RingData(
            self.intended_replica2part2dev_id,
            self.intended_devs, self.intended_part_shift).save(self.testgz)
        self.ring = ring.Ring(
            self.testdir,
            reload_time=self.intended_reload_time, ring_name='whatever')
def tearDown(self):
super(TestRing, self).tearDown()
rmtree(self.testdir, ignore_errors=1)
    def test_creation(self):
        """Ring loads the serialized table, devs, shift and remembers the
        reload interval and on-disk path it was constructed with."""
        self.assertEqual(self.ring._replica2part2dev_id,
                         self.intended_replica2part2dev_id)
        self.assertEqual(self.ring._part_shift, self.intended_part_shift)
        self.assertEqual(self.ring.devs, self.intended_devs)
        self.assertEqual(self.ring.reload_time, self.intended_reload_time)
        self.assertEqual(self.ring.serialized_path, self.testgz)
        # test invalid endcap
        # Blank the module-level hashing config so Ring construction raises
        # SystemExit; restore the globals afterwards no matter what.
        _orig_hash_path_suffix = utils.HASH_PATH_SUFFIX
        _orig_hash_path_prefix = utils.HASH_PATH_PREFIX
        _orig_swift_conf_file = utils.SWIFT_CONF_FILE
        try:
            utils.HASH_PATH_SUFFIX = ''
            utils.HASH_PATH_PREFIX = ''
            utils.SWIFT_CONF_FILE = ''
            self.assertRaises(SystemExit, ring.Ring, self.testdir, 'whatever')
        finally:
            utils.HASH_PATH_SUFFIX = _orig_hash_path_suffix
            utils.HASH_PATH_PREFIX = _orig_hash_path_prefix
            utils.SWIFT_CONF_FILE = _orig_swift_conf_file
def test_has_changed(self):
self.assertEqual(self.ring.has_changed(), False)
os.utime(self.testgz, (time() + 60, time() + 60))
self.assertEqual(self.ring.has_changed(), True)
    def test_reload(self):
        """Each public lookup entry point should trigger a ring reload once
        reload_time has elapsed and the on-disk ring file has changed."""
        # Backdate the file and use a tiny reload_time so any lookup after
        # the rewrite forces a reload.
        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testdir, reload_time=0.001,
                              ring_name='whatever')
        orig_mtime = self.ring._mtime
        self.assertEqual(len(self.ring.devs), 5)
        self.intended_devs.append(
            {'id': 3, 'region': 0, 'zone': 3, 'weight': 1.0,
             'ip': '10.1.1.1', 'port': 9876})
        ring.RingData(
            self.intended_replica2part2dev_id,
            self.intended_devs, self.intended_part_shift).save(self.testgz)
        sleep(0.1)
        # get_nodes() should notice the rewritten ring.
        self.ring.get_nodes('a')
        self.assertEqual(len(self.ring.devs), 6)
        self.assertNotEqual(self.ring._mtime, orig_mtime)
        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testdir, reload_time=0.001,
                              ring_name='whatever')
        orig_mtime = self.ring._mtime
        self.assertEqual(len(self.ring.devs), 6)
        self.intended_devs.append(
            {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0,
             'ip': '10.5.5.5', 'port': 9876})
        ring.RingData(
            self.intended_replica2part2dev_id,
            self.intended_devs, self.intended_part_shift).save(self.testgz)
        sleep(0.1)
        # get_part_nodes() should reload as well.
        self.ring.get_part_nodes(0)
        self.assertEqual(len(self.ring.devs), 7)
        self.assertNotEqual(self.ring._mtime, orig_mtime)
        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testdir, reload_time=0.001,
                              ring_name='whatever')
        orig_mtime = self.ring._mtime
        part, nodes = self.ring.get_nodes('a')
        self.assertEqual(len(self.ring.devs), 7)
        self.intended_devs.append(
            {'id': 6, 'region': 0, 'zone': 5, 'weight': 1.0,
             'ip': '10.6.6.6', 'port': 6000})
        ring.RingData(
            self.intended_replica2part2dev_id,
            self.intended_devs, self.intended_part_shift).save(self.testgz)
        sleep(0.1)
        # get_more_nodes() should reload too.
        next(self.ring.get_more_nodes(part))
        self.assertEqual(len(self.ring.devs), 8)
        self.assertNotEqual(self.ring._mtime, orig_mtime)
        os.utime(self.testgz, (time() - 300, time() - 300))
        self.ring = ring.Ring(self.testdir, reload_time=0.001,
                              ring_name='whatever')
        orig_mtime = self.ring._mtime
        self.assertEqual(len(self.ring.devs), 8)
        self.intended_devs.append(
            {'id': 5, 'region': 0, 'zone': 4, 'weight': 1.0,
             'ip': '10.5.5.5', 'port': 6000})
        ring.RingData(
            self.intended_replica2part2dev_id,
            self.intended_devs, self.intended_part_shift).save(self.testgz)
        sleep(0.1)
        # No explicit lookup call here: accessing .devs itself appears to
        # trigger the reload -- presumably a reloading property in ring.py;
        # confirm there.
        self.assertEqual(len(self.ring.devs), 9)
        self.assertNotEqual(self.ring._mtime, orig_mtime)
    def test_reload_without_replication(self):
        """Devices serialized without replication_ip/replication_port must be
        backfilled on load with the device's own ip/port."""
        replication_less_devs = [{'id': 0, 'region': 0, 'zone': 0,
                                  'weight': 1.0, 'ip': '10.1.1.1',
                                  'port': 6000},
                                 {'id': 1, 'region': 0, 'zone': 0,
                                  'weight': 1.0, 'ip': '10.1.1.1',
                                  'port': 6000},
                                 None,
                                 {'id': 3, 'region': 0, 'zone': 2,
                                  'weight': 1.0, 'ip': '10.1.2.1',
                                  'port': 6000},
                                 {'id': 4, 'region': 0, 'zone': 2,
                                  'weight': 1.0, 'ip': '10.1.2.2',
                                  'port': 6000}]
        # Expected result: replication_ip/port mirror each device's ip/port.
        intended_devs = [{'id': 0, 'region': 0, 'zone': 0, 'weight': 1.0,
                          'ip': '10.1.1.1', 'port': 6000,
                          'replication_ip': '10.1.1.1',
                          'replication_port': 6000},
                         {'id': 1, 'region': 0, 'zone': 0, 'weight': 1.0,
                          'ip': '10.1.1.1', 'port': 6000,
                          'replication_ip': '10.1.1.1',
                          'replication_port': 6000},
                         None,
                         {'id': 3, 'region': 0, 'zone': 2, 'weight': 1.0,
                          'ip': '10.1.2.1', 'port': 6000,
                          'replication_ip': '10.1.2.1',
                          'replication_port': 6000},
                         {'id': 4, 'region': 0, 'zone': 2, 'weight': 1.0,
                          'ip': '10.1.2.2', 'port': 6000,
                          'replication_ip': '10.1.2.2',
                          'replication_port': 6000}]
        testgz = os.path.join(self.testdir, 'without_replication.ring.gz')
        ring.RingData(
            self.intended_replica2part2dev_id,
            replication_less_devs, self.intended_part_shift).save(testgz)
        self.ring = ring.Ring(
            self.testdir,
            reload_time=self.intended_reload_time,
            ring_name='without_replication')
        self.assertEqual(self.ring.devs, intended_devs)
def test_reload_old_style_pickled_ring(self):
devs = [{'id': 0, 'zone': 0,
'weight': 1.0, 'ip': '10.1.1.1',
'port': 6000},
{'id': 1, 'zone': 0,
'weight': 1.0, 'ip': '10.1.1.1',
'port': 6000},
None,
{'id': 3, 'zone': 2,
'weight': 1.0, 'ip': '10.1.2.1',
'port': 6000},
{'id': 4, 'zone': 2,
'weight': 1.0, 'ip': '10.1.2.2',
'port': 6000}]
intended_devs = [{'id': 0, 'region': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'replication_ip': '10.1.1.1',
'replication_port': 6000},
{'id': 1, 'region': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'replication_ip': '10.1.1.1',
'replication_port': 6000},
None,
{'id': 3, 'region': 1, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.1', 'port': 6000,
'replication_ip': '10.1.2.1',
'replication_port': 6000},
{'id': 4, 'region': 1, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.2', 'port': 6000,
'replication_ip': '10.1.2.2',
'replication_port': 6000}]
# simulate an old-style pickled ring
testgz = os.path.join(self.testdir,
'without_replication_or_region.ring.gz')
ring_data = ring.RingData(self.intended_replica2part2dev_id,
devs,
self.intended_part_shift)
# an old-style pickled ring won't have region data
for dev in ring_data.devs:
if dev:
del dev["region"]
gz_file = GzipFile(testgz, 'wb')
pickle.dump(ring_data, gz_file, protocol=2)
gz_file.close()
self.ring = ring.Ring(
self.testdir,
reload_time=self.intended_reload_time,
ring_name='without_replication_or_region')
self.assertEqual(self.ring.devs, intended_devs)
def test_get_part(self):
part1 = self.ring.get_part('a')
nodes1 = self.ring.get_part_nodes(part1)
part2, nodes2 = self.ring.get_nodes('a')
self.assertEqual(part1, part2)
self.assertEqual(nodes1, nodes2)
def test_get_part_nodes(self):
part, nodes = self.ring.get_nodes('a')
self.assertEqual(nodes, self.ring.get_part_nodes(part))
def test_get_nodes(self):
# Yes, these tests are deliberately very fragile. We want to make sure
# that if someones changes the results the ring produces, they know it.
self.assertRaises(TypeError, self.ring.get_nodes)
part, nodes = self.ring.get_nodes('a')
self.assertEqual(part, 0)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a1')
self.assertEqual(part, 0)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a4')
self.assertEqual(part, 1)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[1],
self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('aa')
self.assertEqual(part, 1)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[1],
self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('a', 'c1')
self.assertEqual(part, 0)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c0')
self.assertEqual(part, 3)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[1],
self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('a', 'c3')
self.assertEqual(part, 2)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c2')
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o1')
self.assertEqual(part, 1)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[1],
self.intended_devs[4]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o5')
self.assertEqual(part, 0)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o0')
self.assertEqual(part, 0)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
part, nodes = self.ring.get_nodes('a', 'c', 'o2')
self.assertEqual(part, 2)
self.assertEqual(nodes, [dict(node, index=i) for i, node in
enumerate([self.intended_devs[0],
self.intended_devs[3]])])
    def add_dev_to_ring(self, new_dev):
        # Test helper: inject a device directly into the loaded ring and
        # rebuild the tier structures so subsequent lookups can see it.
        self.ring.devs.append(new_dev)
        self.ring._rebuild_tier_data()
    def test_get_more_nodes(self):
        """Exhaustively pin handoff (get_more_nodes) ordering through a series
        of ring changes: add a device, remove one, use fractional replicas,
        and spread across regions.  The literal id lists below are brittle
        canaries by design."""
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results the ring produces, they know it.
        exp_part = 6
        exp_devs = [71, 77, 30]
        exp_zones = set([6, 3, 7])
        exp_handoffs = [99, 43, 94, 13, 1, 49, 60, 72, 27, 68, 78, 26, 21, 9,
                        51, 105, 47, 89, 65, 82, 34, 98, 38, 85, 16, 4, 59,
                        102, 40, 90, 20, 8, 54, 66, 80, 25, 14, 2, 50, 12, 0,
                        48, 70, 76, 32, 107, 45, 87, 101, 44, 93, 100, 42, 95,
                        106, 46, 88, 97, 37, 86, 96, 36, 84, 17, 5, 57, 63,
                        81, 33, 67, 79, 24, 15, 3, 58, 69, 75, 31, 61, 74, 29,
                        23, 10, 52, 22, 11, 53, 64, 83, 35, 62, 73, 28, 18, 6,
                        56, 104, 39, 91, 103, 41, 92, 19, 7, 55]
        exp_first_handoffs = [23, 64, 105, 102, 67, 17, 99, 65, 69, 97, 15,
                              17, 24, 98, 66, 65, 69, 18, 104, 105, 16, 107,
                              100, 15, 14, 19, 102, 105, 63, 104, 99, 12, 107,
                              99, 16, 105, 71, 15, 15, 63, 63, 99, 21, 68, 20,
                              64, 96, 21, 98, 19, 68, 99, 15, 69, 62, 100, 96,
                              102, 17, 62, 13, 61, 102, 105, 22, 16, 21, 18,
                              21, 100, 20, 16, 21, 106, 66, 106, 16, 99, 16,
                              22, 62, 60, 99, 69, 18, 23, 104, 98, 106, 61,
                              21, 23, 23, 16, 67, 71, 101, 16, 64, 66, 70, 15,
                              102, 63, 19, 98, 18, 106, 101, 100, 62, 63, 98,
                              18, 13, 97, 23, 22, 100, 13, 14, 67, 96, 14,
                              105, 97, 71, 64, 96, 22, 65, 66, 98, 19, 105,
                              98, 97, 21, 15, 69, 100, 98, 106, 65, 66, 97,
                              62, 22, 68, 63, 61, 67, 67, 20, 105, 106, 105,
                              18, 71, 100, 17, 62, 60, 13, 103, 99, 101, 96,
                              97, 16, 60, 21, 14, 20, 12, 60, 69, 104, 65, 65,
                              17, 16, 67, 13, 64, 15, 16, 68, 96, 21, 104, 66,
                              96, 105, 58, 105, 103, 21, 96, 60, 16, 96, 21,
                              71, 16, 99, 101, 63, 62, 103, 18, 102, 60, 17,
                              19, 106, 97, 14, 99, 68, 102, 13, 70, 103, 21,
                              22, 19, 61, 103, 23, 104, 65, 62, 68, 16, 65,
                              15, 102, 102, 71, 99, 63, 67, 19, 23, 15, 69,
                              107, 14, 13, 64, 13, 105, 15, 98, 69]
        # Build a 9-zone, 4-server-per-zone, 3-device-per-server ring.
        rb = ring.RingBuilder(8, 3, 1)
        next_dev_id = 0
        for zone in range(1, 10):
            for server in range(1, 5):
                for device in range(1, 4):
                    rb.add_dev({'id': next_dev_id,
                                'ip': '1.2.%d.%d' % (zone, server),
                                'port': 1234 + device,
                                'zone': zone, 'region': 0,
                                'weight': 1.0})
                    next_dev_id += 1
        rb.rebalance(seed=2)
        rb.get_ring().save(self.testgz)
        r = ring.Ring(self.testdir, ring_name='whatever')
        # every part has the same number of handoffs
        part_handoff_counts = set()
        for part in range(r.partition_count):
            part_handoff_counts.add(len(list(r.get_more_nodes(part))))
        self.assertEqual(part_handoff_counts, {105})
        # which less the primaries - is every device in the ring
        self.assertEqual(len(list(rb._iter_devs())) - rb.replicas, 105)
        part, devs = r.get_nodes('a', 'c', 'o')
        primary_zones = set([d['zone'] for d in devs])
        self.assertEqual(part, exp_part)
        self.assertEqual([d['id'] for d in devs], exp_devs)
        self.assertEqual(primary_zones, exp_zones)
        devs = list(r.get_more_nodes(part))
        self.assertEqual(len(devs), len(exp_handoffs))
        dev_ids = [d['id'] for d in devs]
        self.assertEqual(dev_ids, exp_handoffs)
        # The first 6 replicas plus the 3 primary nodes should cover all 9
        # zones in this test
        seen_zones = set(primary_zones)
        seen_zones.update([d['zone'] for d in devs[:6]])
        self.assertEqual(seen_zones, set(range(1, 10)))
        # The first handoff nodes for each partition in the ring
        devs = []
        for part in range(r.partition_count):
            devs.append(next(r.get_more_nodes(part))['id'])
        self.assertEqual(devs, exp_first_handoffs)
        # Add a new device we can handoff to.
        zone = 5
        server = 0
        rb.add_dev({'id': next_dev_id,
                    'ip': '1.2.%d.%d' % (zone, server),
                    'port': 1234, 'zone': zone, 'region': 0, 'weight': 1.0})
        next_dev_id += 1
        rb.pretend_min_part_hours_passed()
        num_parts_changed, _balance, _removed_dev = rb.rebalance(seed=2)
        rb.get_ring().save(self.testgz)
        r = ring.Ring(self.testdir, ring_name='whatever')
        # so now we expect the device list to be longer by one device
        part_handoff_counts = set()
        for part in range(r.partition_count):
            part_handoff_counts.add(len(list(r.get_more_nodes(part))))
        self.assertEqual(part_handoff_counts, {106})
        self.assertEqual(len(list(rb._iter_devs())) - rb.replicas, 106)
        # I don't think there's any special reason this dev goes at this index
        exp_handoffs.insert(27, rb.devs[-1]['id'])
        # We would change expectations here, but in this part only the added
        # device changed at all.
        part, devs = r.get_nodes('a', 'c', 'o')
        primary_zones = set([d['zone'] for d in devs])
        self.assertEqual(part, exp_part)
        self.assertEqual([d['id'] for d in devs], exp_devs)
        self.assertEqual(primary_zones, exp_zones)
        devs = list(r.get_more_nodes(part))
        dev_ids = [d['id'] for d in devs]
        self.assertEqual(len(dev_ids), len(exp_handoffs))
        for index, dev in enumerate(dev_ids):
            self.assertEqual(
                dev, exp_handoffs[index],
                'handoff differs at position %d\n%s\n%s' % (
                    index, dev_ids[index:], exp_handoffs[index:]))
        # The handoffs still cover all the non-primary zones first
        seen_zones = set(primary_zones)
        seen_zones.update([d['zone'] for d in devs[:6]])
        self.assertEqual(seen_zones, set(range(1, 10)))
        # Change expectations for the rest of the parts
        devs = []
        for part in range(r.partition_count):
            devs.append(next(r.get_more_nodes(part))['id'])
        changed_first_handoff = 0
        for part in range(r.partition_count):
            if devs[part] != exp_first_handoffs[part]:
                changed_first_handoff += 1
                exp_first_handoffs[part] = devs[part]
        self.assertEqual(devs, exp_first_handoffs)
        self.assertEqual(changed_first_handoff, num_parts_changed)
        # Remove a device - no need to fluff min_part_hours.
        rb.remove_dev(0)
        num_parts_changed, _balance, _removed_dev = rb.rebalance(seed=1)
        rb.get_ring().save(self.testgz)
        r = ring.Ring(self.testdir, ring_name='whatever')
        # so now we expect the device list to be shorter by one device
        part_handoff_counts = set()
        for part in range(r.partition_count):
            part_handoff_counts.add(len(list(r.get_more_nodes(part))))
        self.assertEqual(part_handoff_counts, {105})
        self.assertEqual(len(list(rb._iter_devs())) - rb.replicas, 105)
        # Change expectations for our part
        exp_handoffs.remove(0)
        first_matches = 0
        total_changed = 0
        devs = list(d['id'] for d in r.get_more_nodes(exp_part))
        for i, part in enumerate(devs):
            if exp_handoffs[i] != devs[i]:
                total_changed += 1
                exp_handoffs[i] = devs[i]
            if not total_changed:
                first_matches += 1
        self.assertEqual(devs, exp_handoffs)
        # the first 21 handoffs were the same across the rebalance
        self.assertEqual(first_matches, 21)
        # but as you dig deeper some of the differences show up
        self.assertEqual(total_changed, 41)
        # Change expectations for the rest of the parts
        devs = []
        for part in range(r.partition_count):
            devs.append(next(r.get_more_nodes(part))['id'])
        changed_first_handoff = 0
        for part in range(r.partition_count):
            if devs[part] != exp_first_handoffs[part]:
                changed_first_handoff += 1
                exp_first_handoffs[part] = devs[part]
        self.assertEqual(devs, exp_first_handoffs)
        self.assertEqual(changed_first_handoff, num_parts_changed)
        # Test
        part, devs = r.get_nodes('a', 'c', 'o')
        primary_zones = set([d['zone'] for d in devs])
        self.assertEqual(part, exp_part)
        self.assertEqual([d['id'] for d in devs], exp_devs)
        self.assertEqual(primary_zones, exp_zones)
        devs = list(r.get_more_nodes(part))
        dev_ids = [d['id'] for d in devs]
        self.assertEqual(len(dev_ids), len(exp_handoffs))
        for index, dev in enumerate(dev_ids):
            self.assertEqual(
                dev, exp_handoffs[index],
                'handoff differs at position %d\n%s\n%s' % (
                    index, dev_ids[index:], exp_handoffs[index:]))
        seen_zones = set(primary_zones)
        seen_zones.update([d['zone'] for d in devs[:6]])
        self.assertEqual(seen_zones, set(range(1, 10)))
        devs = []
        for part in range(r.partition_count):
            devs.append(next(r.get_more_nodes(part))['id'])
        for part in range(r.partition_count):
            self.assertEqual(
                devs[part], exp_first_handoffs[part],
                'handoff for partitition %d is now device id %d' % (
                    part, devs[part]))
        # Add a partial replica
        rb.set_replicas(3.5)
        num_parts_changed, _balance, _removed_dev = rb.rebalance(seed=164)
        rb.get_ring().save(self.testgz)
        r = ring.Ring(self.testdir, ring_name='whatever')
        # Change expectations
        # We have another replica now
        exp_devs.append(90)
        exp_zones.add(8)
        # and therefore one less handoff
        exp_handoffs = exp_handoffs[:-1]
        # Caused some major changes in the sequence of handoffs for our test
        # partition, but at least the first stayed the same.
        devs = list(d['id'] for d in r.get_more_nodes(exp_part))
        first_matches = 0
        total_changed = 0
        for i, part in enumerate(devs):
            if exp_handoffs[i] != devs[i]:
                total_changed += 1
                exp_handoffs[i] = devs[i]
            if not total_changed:
                first_matches += 1
        # most seeds seem to throw out first handoff stabilization with
        # replica_count change
        self.assertEqual(first_matches, 2)
        # and lots of other handoff changes...
        self.assertEqual(total_changed, 95)
        self.assertEqual(devs, exp_handoffs)
        # Change expectations for the rest of the parts
        devs = []
        for part in range(r.partition_count):
            devs.append(next(r.get_more_nodes(part))['id'])
        changed_first_handoff = 0
        for part in range(r.partition_count):
            if devs[part] != exp_first_handoffs[part]:
                changed_first_handoff += 1
                exp_first_handoffs[part] = devs[part]
        self.assertEqual(devs, exp_first_handoffs)
        self.assertLessEqual(changed_first_handoff, num_parts_changed)
        # Test
        part, devs = r.get_nodes('a', 'c', 'o')
        primary_zones = set([d['zone'] for d in devs])
        self.assertEqual(part, exp_part)
        self.assertEqual([d['id'] for d in devs], exp_devs)
        self.assertEqual(primary_zones, exp_zones)
        devs = list(r.get_more_nodes(part))
        dev_ids = [d['id'] for d in devs]
        self.assertEqual(len(dev_ids), len(exp_handoffs))
        for index, dev in enumerate(dev_ids):
            self.assertEqual(
                dev, exp_handoffs[index],
                'handoff differs at position %d\n%s\n%s' % (
                    index, dev_ids[index:], exp_handoffs[index:]))
        seen_zones = set(primary_zones)
        seen_zones.update([d['zone'] for d in devs[:6]])
        self.assertEqual(seen_zones, set(range(1, 10)))
        devs = []
        for part in range(r.partition_count):
            devs.append(next(r.get_more_nodes(part))['id'])
        for part in range(r.partition_count):
            self.assertEqual(
                devs[part], exp_first_handoffs[part],
                'handoff for partitition %d is now device id %d' % (
                    part, devs[part]))
        # One last test of a partial replica partition
        exp_part2 = 136
        exp_devs2 = [70, 76, 32]
        exp_zones2 = set([3, 6, 7])
        exp_handoffs2 = [89, 97, 37, 53, 20, 1, 86, 64, 102, 40, 90, 60, 72,
                         27, 99, 68, 78, 26, 105, 45, 42, 95, 22, 13, 49, 55,
                         11, 8, 83, 16, 4, 59, 33, 108, 61, 74, 29, 88, 66,
                         80, 25, 100, 39, 67, 79, 24, 65, 96, 36, 84, 54, 21,
                         63, 81, 56, 71, 77, 30, 48, 23, 10, 52, 82, 34, 17,
                         107, 87, 104, 5, 35, 2, 50, 43, 62, 73, 28, 18, 14,
                         98, 38, 85, 15, 57, 9, 51, 12, 6, 91, 3, 103, 41, 92,
                         47, 75, 44, 69, 101, 93, 106, 46, 94, 31, 19, 7, 58]
        part2, devs2 = r.get_nodes('a', 'c', 'o2')
        primary_zones2 = set([d['zone'] for d in devs2])
        self.assertEqual(part2, exp_part2)
        self.assertEqual([d['id'] for d in devs2], exp_devs2)
        self.assertEqual(primary_zones2, exp_zones2)
        devs2 = list(r.get_more_nodes(part2))
        dev_ids2 = [d['id'] for d in devs2]
        self.assertEqual(len(dev_ids2), len(exp_handoffs2))
        for index, dev in enumerate(dev_ids2):
            self.assertEqual(
                dev, exp_handoffs2[index],
                'handoff differs at position %d\n%s\n%s' % (
                    index, dev_ids2[index:], exp_handoffs2[index:]))
        seen_zones = set(primary_zones2)
        seen_zones.update([d['zone'] for d in devs2[:6]])
        self.assertEqual(seen_zones, set(range(1, 10)))
        # Test distribution across regions
        rb.set_replicas(3)
        for region in range(1, 5):
            rb.add_dev({'id': next_dev_id,
                        'ip': '1.%d.1.%d' % (region, server), 'port': 1234,
                        # 108.0 is the weight of all devices created prior to
                        # this test in region 0; this way all regions have
                        # equal combined weight
                        'zone': 1, 'region': region, 'weight': 108.0})
            next_dev_id += 1
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=1)
        rb.pretend_min_part_hours_passed()
        rb.rebalance(seed=1)
        rb.get_ring().save(self.testgz)
        r = ring.Ring(self.testdir, ring_name='whatever')
        # There's 5 regions now, so the primary nodes + first 2 handoffs
        # should span all 5 regions
        part, devs = r.get_nodes('a1', 'c1', 'o1')
        primary_regions = set([d['region'] for d in devs])
        primary_zones = set([(d['region'], d['zone']) for d in devs])
        more_devs = list(r.get_more_nodes(part))
        seen_regions = set(primary_regions)
        seen_regions.update([d['region'] for d in more_devs[:2]])
        self.assertEqual(seen_regions, set(range(0, 5)))
        # There are 13 zones now, so the first 13 nodes should all have
        # distinct zones (that's r0z0, r0z1, ..., r0z8, r1z1, r2z1, r3z1, and
        # r4z1).
        seen_zones = set(primary_zones)
        seen_zones.update([(d['region'], d['zone']) for d in more_devs[:10]])
        self.assertEqual(13, len(seen_zones))
        # Here's a brittle canary-in-the-coalmine test to make sure the region
        # handoff computation didn't change accidentally
        exp_handoffs = [111, 112, 35, 58, 62, 74, 20, 105, 41, 90, 53, 6, 3,
                        67, 55, 76, 108, 32, 12, 80, 38, 85, 94, 42, 27, 99,
                        50, 47, 70, 87, 26, 9, 15, 97, 102, 81, 23, 65, 33,
                        77, 34, 4, 75, 8, 5, 30, 13, 73, 36, 92, 54, 51, 72,
                        78, 66, 1, 48, 14, 93, 95, 88, 86, 84, 106, 60, 101,
                        57, 43, 89, 59, 79, 46, 61, 52, 44, 45, 37, 68, 25,
                        100, 49, 24, 16, 71, 96, 21, 107, 98, 64, 39, 18, 29,
                        103, 91, 22, 63, 69, 28, 56, 11, 82, 10, 17, 19, 7,
                        40, 83, 104, 31]
        dev_ids = [d['id'] for d in more_devs]
        self.assertEqual(len(dev_ids), len(exp_handoffs))
        for index, dev_id in enumerate(dev_ids):
            self.assertEqual(
                dev_id, exp_handoffs[index],
                'handoff differs at position %d\n%s\n%s' % (
                    index, dev_ids[index:], exp_handoffs[index:]))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python3
import tts
import argparse
import os.path
import sys
import codecs
import locale
import _io
import json
import zipfile
import logging
class TTS_CLI:
  def __init__(self):
    """Parse command-line arguments, configure logging and the filesystem,
    dispatch to the chosen sub-command handler, print its message, and exit
    with its return code."""
    self.preferences=tts.preferences.Preferences()
    parser = argparse.ArgumentParser(description="Manipulate Tabletop Simulator files")
    parser.add_argument("-d","--directory",help="Override TTS cache directory")
    parser.add_argument("-l","--loglevel",help="Set logging level",choices=['debug','info','warn','error'])
    subparsers = parser.add_subparsers(dest='parser',title='command',description='Valid commands.')
    subparsers.required=True
    # add list command
    parser_list = subparsers.add_parser('list',help="List installed mods.",description='''
    List installed mods.
    If no id is provided, then this will return a list of all installed modules.
    If an id is provided, then this will list the contents of that modules.
    ''')
    group_list=parser_list.add_mutually_exclusive_group()
    group_list.add_argument("-w","--workshop",action="store_const",metavar='save_type',dest='save_type',const=tts.SaveType.workshop,help="List workshop files (the default).")
    group_list.add_argument("-s","--save",action="store_const",metavar='save_type',dest='save_type',const=tts.SaveType.save,help="List saves.")
    group_list.add_argument("-c","--chest",action="store_const",metavar='save_type',dest='save_type',const=tts.SaveType.chest,help="List chest files.")
    parser_list.add_argument("id",nargs='?',help="ID of specific mod to list details of.")
    parser_list.set_defaults(func=self.do_list)
    # export command
    parser_export = subparsers.add_parser('export',help="Export a mod.",description='Export a mod in a format suitible for later import.')
    group_export=parser_export.add_mutually_exclusive_group()
    group_export.add_argument("-w","--workshop",action="store_const",dest='save_type',metavar='save_type',const=tts.SaveType.workshop,help="ID is of workshop file (the default).")
    group_export.add_argument("-s","--save",action="store_const",dest='save_type',metavar='save_type',const=tts.SaveType.save,help="ID is of savegame file.")
    group_export.add_argument("-c","--chest",action="store_const",dest='save_type',metavar='save_type',const=tts.SaveType.chest,help="ID is of chest file.")
    parser_export.add_argument("id",help="ID of mod/name of savegame to export.")
    parser_export.add_argument("-o","--output",help="Location/file to export to.")
    parser_export.add_argument("-f","--force",action="store_true",help="Force creation of export file.")
    parser_export.add_argument("-d","--download",action="store_true",help="Attempt to download missing cache files. (EXPERIMENTAL)")
    parser_export.set_defaults(func=self.do_export)
    # import command
    parser_import = subparsers.add_parser('import',help="Import a mod.",description="Import an previously exported mod.")
    parser_import.add_argument("file",help="Mod pak file to import.")
    parser_import.set_defaults(func=self.do_import)
    # download command
    parser_download = subparsers.add_parser('download',help='Download mod files.',description='Attempt to download any missing files for an installed mod.')
    group_download=parser_download.add_mutually_exclusive_group()
    group_download.add_argument("-w","--workshop",action="store_const",dest='save_type',metavar='save_type',const=tts.SaveType.workshop,help="ID is of workshop file.")
    group_download.add_argument("-s","--save",action="store_const",dest='save_type',metavar='save_type',const=tts.SaveType.save,help="ID is of savegame file.")
    group_download.add_argument("-c","--chest",action="store_const",dest='save_type',metavar='save_type',const=tts.SaveType.chest,help="ID is of chest file.")
    group_download_target=parser_download.add_mutually_exclusive_group(required=True)
    group_download_target.add_argument("-a","--all",action="store_true",help="Download all.")
    group_download_target.add_argument("id",nargs='?',help="ID of mod/name of savegame to download.")
    parser_download.set_defaults(func=self.do_download)
    # cache command
    parser_cache = subparsers.add_parser('cache',help='Work with the cache.')
    subparsers_cache = parser_cache.add_subparsers(dest='parser_cache',title='cache_command',description='Valid sub-commands.')
    subparsers_cache.required = True
    parser_cache_create = subparsers_cache.add_parser('create',help='(re)create cache directory')
    parser_cache_create.set_defaults(func=self.do_cache_create)
    # config command
    parser_config = subparsers.add_parser('config',help='Configure tts manager.')
    subparsers_config = parser_config.add_subparsers(dest='parser_config',title='config_command',description='Valid sub-commands.')
    subparsers_config.required = True
    parser_config_list = subparsers_config.add_parser('list',help='List configuration.')
    parser_config_list.set_defaults(func=self.do_config_list)
    parser_config_validate = subparsers_config.add_parser('validate',help='Validate configuration.')
    parser_config_validate.set_defaults(func=self.do_config_validate)
    parser_config_reset = subparsers_config.add_parser('reset',help='Reset configuration.')
    parser_config_reset.set_defaults(func=self.do_config_reset)
    parser_config_set = subparsers_config.add_parser('set',help='Set configuration parameters.')
    parser_config_set.set_defaults(func=self.do_config_set)
    parser_config_set.add_argument("-m","--mod_location",choices=['documents','gamedata'],help="Where mods are stored.")
    parser_config_set.add_argument("-t","--tts_location",help="TTS Install directory")
    args = parser.parse_args()
    # set logging
    if args.loglevel:
      logmap={
        'debug':logging.DEBUG,
        'info':logging.INFO,
        'warn':logging.WARN,
        'error':logging.ERROR
      }
      tts.logger().setLevel(logmap[args.loglevel])
    else:
      tts.logger().setLevel(logging.WARN)
    # load filesystem values
    if args.directory:
      self.filesystem = tts.filesystem.FileSystem(os.path.abspath(args.directory))
    else:
      self.filesystem = self.preferences.get_filesystem()
    # 'list'/'export' default to workshop files when no type flag was given.
    if (args.parser=='list' or args.parser=='export') and not args.save_type:
      # set default
      args.save_type = tts.SaveType.workshop
    # 'config set' with nothing to set is a usage error.
    if (args.parser=='config' and args.parser_config=='set' and not args.mod_location and not args.tts_location):
      parser_config_set.error("At least one of -m or -t is required.")
    # Dispatch to the handler bound by set_defaults(func=...).
    rc,message = args.func(args)
    if message:
      print(message)
    sys.exit(rc)
def do_config_set(self,args):
if args.mod_location:
self.preferences.locationIsUser = args.mod_location=='documents'
if args.tts_location:
self.preferences.TTSLocation=args.mod_location
self.preferences.save()
return 0,"Preferences set"
def do_config_reset(self,args):
self.preferences.reset()
return 0,"Preferences Reset."
  def do_config_list(self,args):
    # Return the Preferences object itself; the caller prints it, relying on
    # its string representation for display.
    return 0,self.preferences
def do_config_validate(self,args):
if self.preferences.validate():
return 0,"Configuration validated OK."
else:
return 1,"Configuration failed to validate."
def do_cache_create(self,args):
try:
self.filesystem.create_dirs()
except OSError as exception:
return 1,"OS error: {0}".format(exception)
return 0,"All directories created OK."
def list_by_type(self,save_type):
result=""
for (name,id) in tts.describe_files_by_type(self.filesystem,save_type):
result+="\n%s (%s)" % (name,id)
return 0,result
def list_item(self,data,filename,ident):
if not data:
self.list_installed()
return
save=tts.Save(savedata=data,ident=ident,filename=filename,filesystem=self.filesystem)
return 0,save
def do_download(self,args):
successful=True
if not args.all:
if not args.save_type:
args.save_type=self.filesystem.get_json_filename_type(args.id)
if not args.save_type:
return 1,"Unable to determine type of id %s" % args.id
successful = tts.download_file(self.filesystem,args.id,args.save_type)
else:
if args.save_type:
for ident in self.filesystem.get_filenames_by_type(args.save_type):
if not tts.download_file(self.filesystem,ident,args.save_type):
successful=False
break
else:
for save_type in tts.SaveType:
for ident in self.filesystem.get_filenames_by_type(save_type):
if not tts.download_file(self.filesystem,ident,save_type):
successful=False
break
if successful:
return 0, "All files downloaded."
else:
return 1, "Some files failed to download."
def do_list(self,args):
rc=0
result=None
if not args.id:
rc,result=self.list_by_type(args.save_type)
else:
if not args.save_type:
args.save_type=self.filesystem.get_json_filename_type(args.id)
if not args.save_type:
return 1,"Unable to determine type of id %s" % args.id
filename=self.filesystem.get_json_filename_for_type(args.id,args.save_type)
data=tts.load_json_file(filename)
rc,result=self.list_item(data,filename,args.id)
return rc,result
def do_export(self,args):
filename=None
if args.output:
if os.path.isdir(args.output):
filename=os.path.join(args.output,args.id+".pak")
else:
filename=args.output
else:
filename=args.id+".pak"
data=None
json_filename=None
if not args.save_type:
args.save_type=self.filesystem.get_json_filename_type(args.id)
if not args.save_type:
return 1,"Unable to determine type of id %s" % args.id
json_filename=self.filesystem.get_json_filename_for_type(args.id,args.save_type)
if not json_filename:
return 1, "Unable to find filename for id %s (wrong -s/-w/-c specified?)" % args.id
data=tts.load_json_file(json_filename)
if not data:
return 1, "Unable to load data for file %s" % json_filename
save=tts.Save(savedata=data,
filename=json_filename,
ident=args.id,
save_type=args.save_type,
filesystem=self.filesystem)
if not save.isInstalled:
if not args.download:
return 1, "Unable to find all urls required by %s. Rerun with -d to try and download them or open it within TTS.\n%s" % (args.id,save)
else:
tts.logger().info("Downloading missing files...")
successful = save.download()
if successful:
tts.logger().info("Files downloaded successfully.")
else:
return 1, "Some files failed to download"
if os.path.isfile(filename) and not args.force:
return 1,"%s already exists. Please specify another file or use '-f'" % filename
tts.logger().info("Exporting json file %s to %s" % (args.id,filename))
save.export(filename)
# TODO: exception handling
return 0,"Exported %s to %s" % (args.id,filename)
def do_import(self,args):
if tts.save.importPak(self.filesystem,args.file):
return 0, f"Successfully imported {args.file} into {{TODO}}"
else:
return 1, f"Error importing {args.file}"
if __name__ == "__main__":
    # fix windows' poor unicode support
    # Re-wrap stdout so characters outside the console codepage are replaced
    # ('replace') instead of raising UnicodeEncodeError.
    sys.stdout=_io.TextIOWrapper(sys.stdout.buffer,sys.stdout.encoding,'replace',sys.stdout.newlines,sys.stdout.line_buffering)
    # Constructing TTS_CLI parses argv and runs the selected subcommand.
    tts_cli=TTS_CLI()
| |
from numpy import array, mean, linalg, arccos, vdot, isnan, pi, cross, random, ones
import time
from math import atan2
import numpy as np
from matplotlib.collections import PolyCollection
import matplotlib as mpl
#mpl.use('TkAgg')
import matplotlib.pyplot as plt
import pdb
import dxfgrabber
import pickle
import DXFtoSegments
'''
Jeff's notes:
Interactive plotting isn't working on Mac OSX. Also the mpl.use function was
placed after the pyplot import which apparently did nothing.
Default plotting behavior has been changed from True to False
'''
class Vertex(object):
    """A mesh node: 2-D coordinates plus connectivity bookkeeping."""

    def __init__(self, id, coords):
        self.id = id
        self.coords = coords          # 2-element array-like (x, y)
        self.region_assoc = set()     # region associations touching this vertex
        self.max_connections = []     # later replaced by an int edge count
        self.connections = set()      # ids of vertices joined to this one
        self.boundary = False         # True when on the mesh boundary

    def __str__(self):
        return "Vertex " + str(self.id) + " has coordinates " + str(self.coords)

    def __repr__(self):
        return "{}_{}".format(self.__class__.__name__, self.id)

    def associateRegion(self, regionID, corner):
        """Record that region `regionID` touches this vertex at `corner`.

        BUG FIX: region_assoc is a set, so the old ``.append([...])`` raised
        AttributeError (and a list element would not be hashable anyway).
        Store a hashable tuple via set.add instead.
        """
        self.region_assoc.add((regionID, corner))
class Edge(object):
    """A mesh edge joining two Vertex objects."""

    def __init__(self, id, Vertex0, Vertex1):
        self.id = id
        self.Vertex = [Vertex0, Vertex1]
        self.region_assoc = []      # [regionID, side, direction] records
        self.boundary = False       # True when on the mesh boundary

    def __str__(self):
        return ("Edge " + str(self.id) + " has Vertices "
                + str(self.Vertex[0].id) + " and " + str(self.Vertex[1].id))

    def __repr__(self):
        return "{}_{}".format(self.__class__.__name__, self.id)

    def associateRegion(self, regionID, side, direction):
        """Record that region `regionID` borders this edge on `side`."""
        self.region_assoc.append([regionID, side, direction])

    def unit_vec(self, starting_vertex):
        """Unit vector from `starting_vertex` toward the other endpoint."""
        if starting_vertex == self.Vertex[0]:
            tail, head = self.Vertex[0], self.Vertex[1]
        elif starting_vertex == self.Vertex[1]:
            tail, head = self.Vertex[1], self.Vertex[0]
        else:
            return None  # matches the original's implicit None fall-through
        delta = head.coords - tail.coords
        return delta / linalg.norm(delta)

    def other_vertex(self, one_vertex):
        """The endpoint that is not `one_vertex` (None if not an endpoint)."""
        if one_vertex == self.Vertex[0]:
            return self.Vertex[1]
        if one_vertex == self.Vertex[1]:
            return self.Vertex[0]

    def is_a_vertex(self, one_vertex):
        """0 or 1 when `one_vertex` is an endpoint, otherwise -1."""
        for idx in (0, 1):
            if one_vertex == self.Vertex[idx]:
                return idx
        return -1

    @property
    def mdpt(self):
        """Midpoint of the edge as an (x, y) tuple."""
        v0, v1 = self.Vertex
        return ((v0.coords[0] + v1.coords[0]) / 2.0,
                (v0.coords[1] + v1.coords[1]) / 2.0)

    @property
    def max_regions(self):
        """Boundary edges border one region; interior edges border two."""
        return 1 if self.boundary else 2
class Region(object):
    """A quadrilateral mesh region bounded by four edges keyed 'n'/'e'/'s'/'w'."""
    # Cardinal unit vectors in N, E, S, W order; __init__ matches each one
    # against the region's edge-midpoint directions.
    unit_vecs = [array([0,1]), array([1,0]), array([0,-1]), array([-1,0])]
    def __init__(self, id, list_of_Edges):
        """Classify the four edges of the region by cardinal direction.

        Each edge is also told which region it borders and with what
        traversal direction (+1/-1, winged-edge style).
        """
        self.id = id
        self.Edges={}
        #self.Vertices={}
        # The meshregion mdpt is the average position of all its vertices
        self.mdpt = (mean([edge.mdpt[0] for edge in list_of_Edges]), \
            mean([edge.mdpt[1] for edge in list_of_Edges]))
        # Find vectors from meshregion mdpt to each of the edge mdpts
        self.edge_mdpts = [edge.mdpt for edge in list_of_Edges]
        self.edge_mdpt_vecs = [(array(edge.mdpt) - array(self.mdpt))/linalg.norm((array(edge.mdpt) - array(self.mdpt))) for edge in list_of_Edges]
        # For each cardinal direction (N,E,S,W)
        for i in range(len(self.unit_vecs)):
            # Find which (meshregion mdpt)-to-(edge-mdpt) vector has the smallest error from
            # the cardinal direction in question by taking the norm of the difference of the
            # two vectors
            self.cardinal_norms = array([linalg.norm(self.unit_vecs[i] - edge_mdpt_vec) for edge_mdpt_vec in self.edge_mdpt_vecs])
            if i == 0: # North
                self.Edges['n'] = list_of_Edges[self.cardinal_norms.argmin()]
                # If Vertex[0]'s x-coord is greater than Vertex[1]'s x-coord
                if self.Edges['n'].Vertex[0].coords[0] > self.Edges['n'].Vertex[1].coords[0]:
                    self.Edges['n'].associateRegion(self.id, 'n', 1)
                else:
                    self.Edges['n'].associateRegion(self.id, 'n', -1)
            elif i == 1: # East
                self.Edges['e'] = list_of_Edges[self.cardinal_norms.argmin()]
                # If Vertex[1]'s y-coord is greater than Vertex[0]'s y-coord
                if self.Edges['e'].Vertex[1].coords[1] > self.Edges['e'].Vertex[0].coords[1]:
                    self.Edges['e'].associateRegion(self.id, 'e', 1)
                else:
                    self.Edges['e'].associateRegion(self.id, 'e', -1)
            elif i == 2: # South
                self.Edges['s'] = list_of_Edges[self.cardinal_norms.argmin()]
                # If Vertex[1]'s x-coord is greater than Vertex[0]'s x-coord
                if self.Edges['s'].Vertex[1].coords[0] > self.Edges['s'].Vertex[0].coords[0]:
                    self.Edges['s'].associateRegion(self.id, 's', 1)
                else:
                    self.Edges['s'].associateRegion(self.id, 's', -1)
            elif i == 3: # West#
                self.Edges['w'] = list_of_Edges[self.cardinal_norms.argmin()]
                # If Vertex[0]'s y-coord is greater than Vertex[1]'s y-coord
                if self.Edges['w'].Vertex[0].coords[1] > self.Edges['w'].Vertex[1].coords[1]:
                    self.Edges['w'].associateRegion(self.id, 'w', 1)
                else:
                    self.Edges['w'].associateRegion(self.id, 'w', -1)
        # Find matching vertices in adjacent edges to determine vertex directions (ne, nw, se, sw)
        #self.Vertices['ne'] = set(self.Edges['n'].Vertex).intersection(self.Edges['e'].Vertex).pop()
        #self.Vertices['nw'] = set(self.Edges['n'].Vertex).intersection(self.Edges['w'].Vertex).pop()
        #self.Vertices['se'] = set(self.Edges['s'].Vertex).intersection(self.Edges['e'].Vertex).pop()
        #self.Vertices['sw'] = set(self.Edges['s'].Vertex).intersection(self.Edges['w'].Vertex).pop()
    def Vertices(self, ord_dir):
        """Corner vertex for an ordinal direction such as 'ne': the shared
        endpoint of the two named cardinal edges.  Returns 0 on bad input."""
        try:
            return set(self.Edges[ord_dir[0]].Vertex).intersection(self.Edges[ord_dir[1]].Vertex).pop()
        except Exception:
            print "Not a valid direction"
            return 0
    def Neighbors(self, card_dir):
        """Neighboring region's association record across edge `card_dir`.

        Returns 0 when there is no neighbor (boundary edge), None when
        `card_dir` is not a valid key.
        """
        # Find neighbors
        try:
            return [region_info for region_info in self.Edges[card_dir].region_assoc if region_info[0] != self.id][0]
        except IndexError:
            #print "Region " + str(self.id) + " has no neighbor in the " + card_dir + " direction."
            return 0
        except KeyError:
            #print card_dir + " is not a valid direction"
            return None
def angle_between(v1, v2):
    """ Returns the clockwise angle in radians (0 to 2pi) between vectors 'v1' and 'v2':: """
    cos_component = vdot(v1, v2)
    sin_component = cross(v1, v2)
    theta = atan2(sin_component, cos_component)
    # atan2 yields (-pi, pi]; shift negative results into [0, 2pi).
    return theta if theta >= 0 else theta + 2 * pi
def find_most_CCW(starting_vertex, leading_in_Edge, Edge_list):
    """Edge with the smallest clockwise angle from the lead-in edge
    (i.e. the most counter-clockwise continuation)."""
    angles = [angle_between(edge.unit_vec(starting_vertex),
                            leading_in_Edge.unit_vec(starting_vertex))
              for edge in Edge_list]
    # First minimum wins, matching numpy argmin's tie-breaking.
    return Edge_list[angles.index(min(angles))]
def find_most_CW(starting_vertex, leading_in_Edge, Edge_list):
    """Edge with the largest clockwise angle from the lead-in edge
    (i.e. the most clockwise continuation)."""
    angles = [angle_between(edge.unit_vec(starting_vertex),
                            leading_in_Edge.unit_vec(starting_vertex))
              for edge in Edge_list]
    # First maximum wins, matching numpy argmax's tie-breaking.
    return Edge_list[angles.index(max(angles))]
def find_most_SW(Vertex_list):
    """Return the vertex closest to the bounding box's SW corner.

    The SW corner is (min x, min y) over all vertices; the vertex with the
    smallest Euclidean distance to that corner is returned.
    """
    vertex_array = array([list(vertex.coords) for vertex in Vertex_list])
    # The SW bounding box corner is the point with minimum x and minimum y value of any vertex
    SW_bounding_box_corner = [min(vertex_array[:, 0]), min(vertex_array[:, 1])]
    # (Dead intermediates `a`, `b` and `vector_from_SW_BB` removed -- they
    # were computed but never used.)
    norms = array([linalg.norm(vertex - SW_bounding_box_corner) for vertex in vertex_array])
    return Vertex_list[norms.argmin()]
class C2DMesh(object):
    def __init__(self, vertex_list, edge_list, plotting=False, plotting_pause=0.5):
        """Build the mesh and discover its quadrilateral regions.

        vertex_list    -- sequence of (x, y) coordinate pairs
        edge_list      -- sequence of (i, j) index pairs into vertex_list
        plotting       -- when True, draw each region as it is found
        plotting_pause -- seconds to pause between plot updates
        """
        # Preallocate space for a list of Vertex objects
        self.Vertex_list = [None]*len(vertex_list)
        self.Edge_list = [None]*len(edge_list)
        self.plotting_pause = plotting_pause
        self.plotting = plotting
        # We want some code here to check that the edges make sense, like they only use vertices specified
        #
        #
        for i in range(len(vertex_list)):
            self.Vertex_list[i] = Vertex(i, array(vertex_list[i]))
        for i in range(len(edge_list)):
            self.Edge_list[i] = Edge(i, self.Vertex_list[edge_list[i][0]], self.Vertex_list[edge_list[i][1]])
        # Fill in each vertex's .max_connections
        for i in range(len(vertex_list)):
            self.Vertex_list[i].max_connections = len([edge for edge in self.Edge_list if edge.is_a_vertex(self.Vertex_list[i]) >=0 ])
            print 'Vertex ' + str(i) + ' has ' + str(self.Vertex_list[i].max_connections) + ' connections.'
        # State for the region search below.
        current_mesh_id = 1
        edges_in_current_region = []
        self.Region_list = []
        active_edge = None
        active_vertex = None
        outer_loop_i = 0
        inner_loop_i = 0
        restart_search = 0
        # Plotting
        if self.plotting:
            plt.close('all')
            fig, ax = plt.subplots()
            plt.ion()
            plt.show()
        self.mark_boundary_edges()
        #pdb.set_trace()
        # Outer loop to find mesh regions. The outer loop determines which vertex and edge to first look at
        # when trying to find a new mesh region. For the first mesh region, it just finds __. For subsequent
        # mesh regions, it builds off edges that are already associated with a previously found mesh region.
        while True:
            print "Starting search for mesh region {}".format(current_mesh_id)
            if len(self.Region_list) == 0:
                # Let's try to start at the Southwestern-most vertex.
                active_vertex = find_most_SW(self.Vertex_list)
                print "First vertex: ", active_vertex
                next_edge_candidates = [edge for edge in self.Edge_list if edge.is_a_vertex(active_vertex) >=0 and edge != active_edge]
                # Fake horizontal stub edge gives find_most_CCW a reference direction.
                temp_edge = Edge(-1, active_vertex, Vertex(-1, active_vertex.coords + array([.01,0])))
                active_edge = find_most_CCW(active_vertex, temp_edge, next_edge_candidates)
                #edges_in_current_region.append(active_edge)
            else:
                # Query vertices that have not maxed out their max_connections
                available_vertices = [vertex for vertex in self.Vertex_list if len(vertex.connections) < vertex.max_connections]
                if len(available_vertices) == 0:
                    print "All possible vertices have been used!"
                    break
                # Edge search 1: find edges that are only associated with one region, so far.
                edge_search1 = [edge for edge in self.Edge_list if len(edge.region_assoc) == 1]
                edge_search2 = [edge for edge in edge_search1 if len(edge.region_assoc) < edge.max_regions]
                active_edge = edge_search2[restart_search]
                # If the first region this edge is associated with is running in one direction, then the adjacent region who shares that edge
                # should run in the opposite direction. See: Winged-edge
                if active_edge.region_assoc[0][2] > 0:
                    active_vertex = active_edge.Vertex[1]
                else:
                    active_vertex = active_edge.Vertex[0]
                print "First vertex: ", active_vertex
            inner_loop_i = 0
            # Inner loop: walk most-CCW turns until four edges close a quad.
            while True:
                # The new active vertex is the other end of the current edge
                active_vertex = active_edge.other_vertex(active_vertex)
                print active_vertex
                # The next edge candidates are ones that are associated with the new active vertex, with the
                # exception of the current active edge.
                next_edge_candidates = [edge for edge in self.Edge_list if edge.is_a_vertex(active_vertex)>= 0\
                    and edge != active_edge]
                #print next_edge_candidates
                # The next active edge is the one with the smallest clockwise angle, or most counter-clockwise
                active_edge = find_most_CCW(active_vertex, active_edge, next_edge_candidates)
                # Append the new active edge to the list of edges in the current mesh region
                edges_in_current_region.append(active_edge)
                #print edges_in_current_region
                inner_loop_i +=1
                #print inner_loop_i
                # If we've found four edges, we found a region!
                if len(edges_in_current_region) >= 4:
                    # Need some code here to make sure we have indeed found a closed quadrilateral
                    if len(set( [edge.Vertex[i] for i in range(2) for edge in edges_in_current_region])) != 4:
                        print "THIS IS NOT A QUADRILATERAL"
                        time.sleep(1)
                        restart_search += 1
                        # Clear edges_in_current region
                        edges_in_current_region = []
                        #raise RuntimeError('This is not a quadrilateral')
                    # NOTE(review): when the quadrilateral check above fails,
                    # edges_in_current_region has just been emptied, yet a
                    # Region is still constructed from the empty list below --
                    # confirm this path is intended (the commented-out raise
                    # suggests it may not be).
                    # Create a meshregion object and add it to Region_list
                    m = Region(current_mesh_id, edges_in_current_region)
                    self.Region_list.append(m)
                    # Associate each vertex with edges that are connected to it
                    # There is a better way to do this than to iterate through each vertex
                    for i in range(len(self.Vertex_list)):
                        for k in range(len(edges_in_current_region)):
                            if edges_in_current_region[k].is_a_vertex(self.Vertex_list[i])>=0:
                                self.Vertex_list[i].connections.add(edges_in_current_region[k].other_vertex(self.Vertex_list[i]).id)
                                self.Vertex_list[i].region_assoc.add(m.id)
                    # Clear edges_in_current region
                    edges_in_current_region = []
                    # Plotting
                    if self.plotting:
                        plot_vertices = []
                        plot_vertices.append([m.Vertices('ne').coords,
                            m.Vertices('nw').coords,
                            m.Vertices('sw').coords,
                            m.Vertices('se').coords])
                        plt.annotate(str(m.id), xy=(m.mdpt[0], m.mdpt[1]))
                        plot_vertices = array(plot_vertices)
                        #z = array(range(len(self.Region_list)))
                        z = array(current_mesh_id)
                        #Make the collection and add it to the plot.
                        coll = PolyCollection(plot_vertices, array=z, edgecolors='000000')
                        #coll = PolyCollection(plot_vertices, array=z, cmap=mpl.cm.Pastel1, edgecolors='000000')
                        ax.add_collection(coll)
                        ax.autoscale_view()
                        ax.set_aspect('equal', 'datalim')
                        plt.draw()
                        plt.pause(self.plotting_pause)
                        plt.show()
                    print 'Mesh region {} found!'.format(current_mesh_id)
                    current_mesh_id += 1 # increment the current region id number
                    restart_search = 0
                    break
        print "\n\n\n Done finding regions! \n"
        #print self.Region_list[0].Edges.values()
        #print self.Region_list[1].Edges.values()
        #print self.Region_list[2].Edges.values()
def mark_boundary_edges(self):
active_edge = None
active_vertex = None
# Let's try to start at the Southwestern-most vertex.
boundary_finder_i = 0
while True:
if boundary_finder_i == 0:
active_vertex = find_most_SW(self.Vertex_list)
active_vertex.boundary = True
#print "active vertex: ", active_vertex
next_edge_candidates = [edge for edge in self.Edge_list if edge.is_a_vertex(active_vertex) >=0 and edge != active_edge]
temp_edge = Edge(-1, active_vertex, Vertex(-1, active_vertex.coords + array([.01,0])))
active_edge = find_most_CCW(active_vertex, temp_edge, next_edge_candidates)
active_edge.boundary = True
#print "active edge: ", active_edge
active_vertex = active_edge.other_vertex(active_vertex)
active_vertex.bounadry = True
#print "active vertex: ", active_vertex
boundary_finder_i += 1
#pdb.set_trace()
continue
next_edge_candidates = [edge for edge in self.Edge_list if edge.is_a_vertex(active_vertex) >=0 and edge != active_edge]
active_edge = find_most_CW(active_vertex, active_edge, next_edge_candidates)
active_edge.boundary = True
#print "active edge: ", active_edge
active_vertex = active_edge.other_vertex(active_vertex)
active_vertex.bounadry = True
#print "active vertex: ", active_vertex
boundary_finder_i += 1
#pdb.set_trace()
if active_vertex ==find_most_SW(self.Vertex_list):
return
if __name__ == '__main__':
    # Pick one of the hard-coded test meshes (5 loads geometry from a DXF).
    test_case = 5
    save_mesh = True
    save_mesh_filename = 'testmeshyue.pickle'
    if test_case == 1:
        vertex_list = [(0,0), (1,0), (2,0), (0,1), (1,1), (2,1), (1,2), (2,2)]
        edge_list = [(0,1), (1,2), (0,3), (1,4), (2,5), (3,4), (4,5), (4,6), (5,7), (6,7)]
    elif test_case == 2:
        vertex_list = [(18, 6), (0,0), (7,0), (13,0), (18,1), (3,5), (5,5), (11,8)]
        edge_list = [(1,2), (2,3), (3,4), (1,5), (2,6), (3,7), (0,4), (5,6), (6,7), (0,7)]
    elif test_case == 3:
        vertex_list = [(0,0), (4,0), (9,0), (13,0), (5,3), (8, 3), (3, 5), (10, 5)]
        edge_list = [(0,6), (0,1), (1,2), (2,3), (1,4), (2,5), (4,5), (6,7), (3,7), (4, 6), (5, 7)]
    elif test_case == 4:
        vertex_list = [(0,0), (4,0), (9,0), (13,0), (5,3), (8, 3), (3, 5), (10, 5), (0,8), (3,8), \
            (10,8), (13,8), (-3,8), (-3,0), (16,8), (16,0), (-3,-2), (0,-2), (4,-2), (9,-2), \
            (13,-2), (16,-2)]
        edge_list = [(0,6), (0,1), (1,2), (2,3), (1,4), (2,5), (4,5), (6,7), (3,7), (4, 6),\
            (5, 7), (0,8), (6,9), (7,10), (3,11), (8,9), (9,10), (10,11), (8,12), (11,14),\
            (12,13), (14, 15), (13,16), (0,17), (18,1), (19,2), (20, 3), (15,21), (16,17), (17,18),\
            (18,19), (19,20), (20,21), (0,13), (3,15)]
    elif test_case == 5:
        dxf = DXFtoSegments.DXFGeometry('C:\cygwin64\home\Kerry\DXFtoMesh\Ampoule2.dxf')
        vertex_list,edge_list,bulge_list = dxf.cats2d_convert(len_scale=6)
    # BUG FIX: C2DMesh.__init__ accepts no `createC2Dmesh` keyword, so passing
    # it raised TypeError at startup; the argument has been dropped.
    mesh = C2DMesh(vertex_list, edge_list, plotting = False, plotting_pause = 0.5)
    if save_mesh:
        # NOTE(review): text mode 'w' works for pickle on Python 2 only;
        # Python 3 would require 'wb'.
        with open(save_mesh_filename, 'w') as ff:
            pickle.dump(mesh, ff)
| |
#!/usr/bin/env python
# coding: utf-8
# Author: Vova Zaytsev <zaytsev@usc.edu>
import os
import sys
import csv
import json
import logging
import argparse
import husky.db
import husky.eval
from husky.entity import Entity
from husky.fetchers import PageFetcher
from husky.extraction import EntityExtractor
from husky.extraction import EntityNormalizer
# Database file (inside the work dir) caching fetched HTML, keyed by URL.
DOCUMENTS_DB_PATH = "documents.ldb"
def clean_directory(path):
    """
    Create path if not exist otherwise recreates it.
    """
    import shutil
    if os.path.exists(path):
        # Use shutil.rmtree instead of `os.system("rm -rf %s" % path)`:
        # portable across platforms and not vulnerable to shell injection
        # through a crafted `path`.
        shutil.rmtree(path)
    os.mkdir(path)
def u(i_str):
    """
    Encode string values found in `data` into utf-8 unicode.
    """
    if i_str is None:
        return ""
    if isinstance(i_str, unicode):
        return i_str.encode("utf-8")
    try:
        # Already valid UTF-8 bytes: return unchanged.
        i_str.decode("utf-8")
    except Exception:
        # Otherwise assume latin-1 and transcode to UTF-8.
        return i_str.decode("latin-1").encode("utf-8")
    return i_str
def step_1_init_work_dir(args):
    """
    Create work directory and download links.

    Reads URLs from the first column of the gold CSV, fetches each page
    concurrently, and stores url -> html in the documents database.
    """
    clean_directory(args.work_dir)
    documents_db_path = os.path.join(args.work_dir, DOCUMENTS_DB_PATH)
    documents_db = husky.db.create(documents_db_path)
    fetcher = PageFetcher()
    with open(args.gold, "r") as i_gold:
        gold_entries = csv.reader(i_gold, delimiter=",", quotechar="\"")
        gold_entries.next()  # skip the CSV header row (Python 2 iterator API)
        url2html_dict = {entry[0]: None for entry in gold_entries}
    logging.info("Fetching %d documents" % len(url2html_dict))
    # fetch_urls fills in the dict values in place.
    fetcher.fetch_urls(url2html_dict, max_threads=args.max_threads)
    for url, html in url2html_dict.iteritems():
        documents_db.put(url, html)
def step_2_eval_titles(args):
    """Evaluate titles extraction.

    Compares gold titles against NLCD, Newspaper, and Google CSE
    extractions and writes per-URL results (with P/R/F in the header row)
    to eval_title.csv in the work dir.
    """
    o_eval_fp = os.path.join(args.work_dir, "eval_title.csv")
    documents_db_path = os.path.join(args.work_dir, DOCUMENTS_DB_PATH)
    documents_db = husky.db.open(documents_db_path)
    eval_data = []
    extractor = EntityExtractor()
    with open(args.cse, "rb") as cse_fl:
        cse = json.load(cse_fl)
    with open(args.gold, "rb") as i_gold:
        gold_entries = csv.reader(i_gold, delimiter=",", quotechar="\"")
        gold_entries.next()  # skip the CSV header row (Python 2 iterator API)
        gold_entries = list(gold_entries)
        for i, entry in enumerate(gold_entries):
            url = entry[0]
            cse_entry = cse.get(url)
            logging.info("Processing url: %r" % url)
            if cse_entry is None:
                logging.warn("URL #%d not found in CSE annotations: %s\nSkip." % (i, url))
                continue
            html = documents_db.get(url)
            if html is None or len(html) == 0:
                logging.error("URL #%d not found in HTML db: %s." % (i, url))
                continue
            try:
                article = extractor.parse_article(url, html)
            except Exception:
                logging.error("URL #%d error while parsing: %s." % (i, url))
                continue
            # Gold
            gold_title = entry[1]
            # CSE: take the shortest annotated title, if any.
            cse_title = min(cse_entry["title"], key=len) if len(cse_entry["title"]) > 0 else None
            # NLCD: fall back to extracting from the article when CSE has none.
            nlcd_title = cse_title
            if nlcd_title is None:
                nlcd_title = extractor.extract_titles(article, None, select_best=True)
            # Newspaper
            np_title = article.title
            eval_data.append((gold_title, nlcd_title, np_title, cse_title))
    gold_out, methods_out = husky.eval.compute_title_prf(eval_data)
    with open(o_eval_fp, "wb") as o_eval:
        eval_csv = csv.writer(o_eval, delimiter=",", quotechar="\"")
        eval_csv.writerow([
            "#",
            "URL",
            "GOLD",
            "NLCD PRF=%.2f;%.2f;%.2f" % methods_out[0][0],
            "NLCD ERROR",
            "NP PRF=%.2f;%.2f;%.2f" % methods_out[1][0],
            "NP ERROR",
            "CSE PRF=%.2f;%.2f;%.2f" % methods_out[2][0],
            "CSE ERROR",
        ])
        for i in xrange(len(gold_out)):
            eval_csv.writerow([
                str(i),
                gold_entries[i][0],
                u(gold_out[i]),
                u(methods_out[0][1][i][0]),
                u(methods_out[0][1][i][1]),
                u(methods_out[1][1][i][0]),
                u(methods_out[1][1][i][1]),
                u(methods_out[2][1][i][0]),
                u(methods_out[2][1][i][1]),
            ])
def step_3_eval_authors(args):
    """Evaluate authors extraction.

    Compares gold author lists against NLCD, Newspaper, and Google CSE
    extractions and writes per-URL results to eval_authors.csv.
    """
    o_eval_fp = os.path.join(args.work_dir, "eval_authors.csv")
    documents_db_path = os.path.join(args.work_dir, DOCUMENTS_DB_PATH)
    documents_db = husky.db.open(documents_db_path)
    eval_data = []
    extractor = EntityExtractor()
    normalizer = EntityNormalizer()
    with open(args.cse, "rb") as cse_fl:
        cse = json.load(cse_fl)
    with open(args.gold, "rb") as i_gold:
        gold_entries = csv.reader(i_gold, delimiter=",", quotechar="\"")
        gold_entries.next()  # skip the CSV header row (Python 2 iterator API)
        gold_entries = list(gold_entries)
        for i, entry in enumerate(gold_entries):
            url = entry[0]
            cse_entry = cse.get(url)
            logging.info("Processing url: %r" % url)
            if cse_entry is None:
                logging.warn("URL #%d not found in CSE annotations: %s\nSkip." % (i, url))
                continue
            html = documents_db.get(url)
            if html is None or len(html) == 0:
                logging.error("URL #%d not found in HTML db: %s." % (i, url))
                continue
            try:
                article = extractor.parse_article(url, html)
            except Exception:
                logging.error("URL #%d error while parsing: %s." % (i, url))
                continue
            # Gold
            gold_authors = entry[2]
            # CSE
            cse_authors = cse_entry["authors"]
            # NLCD: keep only normalized PERSON entities with AUTHOR relation.
            try:
                entities = extractor.extract_authors(article, annotation=None)
                authors = normalizer.normalize_authors(entities, article=article)
                nlcd_authors = list(set((a.name for a in authors
                                         if a.ent_type == Entity.TYPE.PER
                                         and a.ent_rel == Entity.REL.AUTHOR)))
                # Fall back to normalizing the CSE annotations when NLCD
                # finds no authors of its own.
                if len(nlcd_authors) == 0:
                    entities = [Entity(raw=c) for c in cse_authors]
                    authors = normalizer.normalize_authors(entities, article=article)
                    nlcd_authors = list(set((a.name for a in authors
                                             if a.ent_type == Entity.TYPE.PER
                                             and a.ent_rel == Entity.REL.AUTHOR)))
            except Exception:
                logging.warning("Error when extracting authors. %r" % url)
                nlcd_authors = []
            # Newspaper
            np_authors = article.authors
            eval_data.append((gold_authors, nlcd_authors, np_authors, cse_authors))
    gold_out, methods_out = husky.eval.compute_authors_prf(eval_data)
    with open(o_eval_fp, "wb") as o_eval:
        eval_csv = csv.writer(o_eval, delimiter=",", quotechar="\"")
        eval_csv.writerow([
            "#",
            "URL",
            "GOLD",
            "NLCD PRF=%.2f;%.2f;%.2f" % methods_out[0][0],
            "NLCD ERROR",
            "NP PRF=%.2f;%.2f;%.2f" % methods_out[1][0],
            "NP ERROR",
            "CSE PRF=%.2f;%.2f;%.2f" % methods_out[2][0],
            "CSE ERROR",
        ])
        for i in xrange(len(gold_out)):
            eval_csv.writerow([
                str(i),
                gold_entries[i][0],
                u(gold_out[i]),
                u(methods_out[0][1][i][0]),
                u(methods_out[0][1][i][1]),
                u(methods_out[1][1][i][0]),
                u(methods_out[1][1][i][1]),
                u(methods_out[2][1][i][0]),
                u(methods_out[2][1][i][1]),
            ])
def step_4_eval_source(args):
    """Evaluate sources extraction.

    Compares gold source lists against NLCD and Google CSE extractions
    (Newspaper has no source extractor, so "<N/A>" is used) and writes
    per-URL results to eval_sources.csv.
    """
    o_eval_fp = os.path.join(args.work_dir, "eval_sources.csv")
    documents_db_path = os.path.join(args.work_dir, DOCUMENTS_DB_PATH)
    documents_db = husky.db.open(documents_db_path)
    eval_data = []
    extractor = EntityExtractor()
    normalizer = EntityNormalizer()
    with open(args.cse, "rb") as cse_fl:
        cse = json.load(cse_fl)
    with open(args.gold, "rb") as i_gold:
        gold_entries = csv.reader(i_gold, delimiter=",", quotechar="\"")
        gold_entries.next()  # skip the CSV header row (Python 2 iterator API)
        gold_entries = list(gold_entries)
        for i, entry in enumerate(gold_entries):
            url = entry[0]
            cse_entry = cse.get(url)
            logging.info("Processing url: %r" % url)
            if cse_entry is None:
                logging.warn("URL #%d not found in CSE annotations: %s\nSkip." % (i, url))
                continue
            html = documents_db.get(url)
            if html is None or len(html) == 0:
                logging.error("URL #%d not found in HTML db: %s." % (i, url))
                continue
            try:
                article = extractor.parse_article(url, html)
            except Exception:
                logging.error("URL #%d error while parsing: %s." % (i, url))
                continue
            # Gold
            gold_sources = entry[3]
            # CSE
            cse_sources = cse_entry["source"]
            # NLCD: normalized entities with SOURCE relation ...
            try:
                entities = extractor.extract_authors(article, annotation=None)
                entities = normalizer.normalize_authors(entities, article=article)
                nlcd_sources = list(set((e.name for e in entities
                                         if e.ent_rel == Entity.REL.SOURCE)))
            except Exception:
                logging.warning("Error when extracting authors. %r" % url)
                nlcd_sources = []
            # ... plus pattern-extracted sources and the CSE annotations.
            nlcd_sources.extend(extractor.extract_sources(article, None, url))
            nlcd_sources.extend(cse_sources)
            # Newspaper
            np_sources = ["<N/A>"]
            eval_data.append((gold_sources, nlcd_sources, np_sources, cse_sources))
    gold_out, methods_out = husky.eval.compute_sources_prf(eval_data)
    with open(o_eval_fp, "wb") as o_eval:
        eval_csv = csv.writer(o_eval, delimiter=",", quotechar="\"")
        eval_csv.writerow([
            "#",
            "URL",
            "GOLD",
            "NLCD PRF=%.2f;%.2f;%.2f" % methods_out[0][0],
            "NLCD ERROR",
            "NP PRF=%.2f;%.2f;%.2f" % methods_out[1][0],
            "NP ERROR",
            "CSE PRF=%.2f;%.2f;%.2f" % methods_out[2][0],
            "CSE ERROR",
        ])
        for i in xrange(len(gold_out)):
            eval_csv.writerow([
                str(i),
                gold_entries[i][0],
                u(gold_out[i]),
                u(methods_out[0][1][i][0]),
                u(methods_out[0][1][i][1]),
                u(methods_out[1][1][i][0]),
                u(methods_out[1][1][i][1]),
                u(methods_out[2][1][i][0]),
                u(methods_out[2][1][i][1]),
            ])
def step_5_eval_dates(args):
    """Evaluate dates extraction.

    Compares gold dates against normalized CSE dates (NLCD currently just
    reuses the CSE dates; Newspaper has no date extractor) and writes
    per-URL results to eval_dates.csv.
    """
    o_eval_fp = os.path.join(args.work_dir, "eval_dates.csv")
    documents_db_path = os.path.join(args.work_dir, DOCUMENTS_DB_PATH)
    # NOTE(review): documents_db and extractor are created but never used in
    # this step -- confirm whether HTML-based date extraction was intended.
    documents_db = husky.db.open(documents_db_path)
    eval_data = []
    extractor = EntityExtractor()
    normalizer = EntityNormalizer()
    with open(args.cse, "rb") as cse_fl:
        cse = json.load(cse_fl)
    with open(args.gold, "rb") as i_gold:
        gold_entries = csv.reader(i_gold, delimiter=",", quotechar="\"")
        gold_entries.next()  # skip the CSV header row (Python 2 iterator API)
        gold_entries = list(gold_entries)
        for i, entry in enumerate(gold_entries):
            url = entry[0]
            cse_entry = cse.get(url)
            if cse_entry is None:
                logging.warn("URL #%d not found in CSE annotations: %s\nSkip." % (i, url))
                continue
            # Gold
            gold_dates = entry[4]
            # CSE
            cse_dates = normalizer.normalize_dates(cse_entry["dates"])
            # NLCD
            nlcd_dates = cse_dates
            # Newspaper
            np_dates = ["<N/A>"]
            eval_data.append((gold_dates, nlcd_dates, np_dates, cse_dates))
    gold_out, methods_out = husky.eval.compute_dates_prf(eval_data)
    with open(o_eval_fp, "wb") as o_eval:
        eval_csv = csv.writer(o_eval, delimiter=",", quotechar="\"")
        eval_csv.writerow([
            "#",
            "URL",
            "GOLD",
            "NLCD PRF=%.2f;%.2f;%.2f" % methods_out[0][0],
            "NLCD ERROR",
            "NP PRF=%.2f;%.2f;%.2f" % methods_out[1][0],
            "NP ERROR",
            "CSE PRF=%.2f;%.2f;%.2f" % methods_out[2][0],
            "CSE ERROR",
        ])
        for i in xrange(len(gold_out)):
            eval_csv.writerow([
                str(i),
                gold_entries[i][0],
                u(gold_out[i]),
                u(methods_out[0][1][i][0]),
                u(methods_out[0][1][i][1]),
                u(methods_out[1][1][i][0]),
                u(methods_out[1][1][i][1]),
                u(methods_out[2][1][i][0]),
                u(methods_out[2][1][i][1]),
            ])
# Ordered pipeline steps: (function, description).  The 1-based index is the
# step number used with --first-step / --last-step and --list-steps.
STEPS = (
    (step_1_init_work_dir, "Prepare data for evaluating."),
    (step_2_eval_titles, "Evaluate titles extraction."),
    (step_3_eval_authors, "Evaluate authors extraction."),
    (step_4_eval_source, "Evaluate sources extraction."),
    (step_5_eval_dates, "Evaluate dates extraction."),
)
if __name__ == "__main__":
    # CLI driver: parse options, configure logging, then run the selected
    # range of STEPS in order.
    argparser = argparse.ArgumentParser()
    argparser.add_argument("-v",
                           "--verbosity-level",
                           type=int,
                           default=1,
                           choices=(0, 1, 2))
    argparser.add_argument("--app-root",
                           type=str,
                           help="Directory containing processing package (e.g. `fenrir`).",
                           default=None)
    argparser.add_argument("--work-dir",
                           type=str,
                           help="Directory for storing temporary data for processing.",
                           default=None)
    argparser.add_argument("--nlcd-conf-file",
                           type=str,
                           help="NLCD JSON configuration file containing API credentials and other information.",
                           default=None)
    argparser.add_argument("--pipeline-root",
                           type=str,
                           help="Directory containing pipeline python scripts.",
                           default=None)
    argparser.add_argument("--first-step",
                           type=int,
                           help="First step of processing (all previous steps will be skipped).",
                           default=1)
    argparser.add_argument("--last-step",
                           type=int,
                           help="Last step of processing (all following steps will be ignored).",
                           default=10)
    argparser.add_argument("--n-cpus",
                           type=int,
                           help="Maximum number of CPUs used for computation tasks.",
                           default=1)
    argparser.add_argument("--max-threads",
                           type=int,
                           help="Maximum number of threads used for streaming tasks (for example, downloading).",
                           default=10)
    argparser.add_argument("--use-compression",
                           type=int,
                           help="Pipeline will use lz4 to compress high volume temporary data (e.g. html of pages).",
                           default=0)
    argparser.add_argument("--gold",
                           type=str,
                           help="Path to the gold standard file for dates normalization.",
                           default=None)
    argparser.add_argument("--cse",
                           type=str,
                           help="Path to JSON file with Google CSE annotations.",
                           default=None)
    argparser.add_argument("--eval-path",
                           type=str,
                           help="Path to the evaluation results for dates normalization.",
                           default=None)
    argparser.add_argument("--list-steps",
                           type=str,
                           help="Lists available steps.",
                           default=None)
    args = argparser.parse_args()
    # Map -v 0/1/2 onto logging levels.
    if args.verbosity_level == 0:
        logging.basicConfig(level=logging.NOTSET)
    if args.verbosity_level == 1:
        logging.basicConfig(level=logging.INFO, format="%(message)s")
    if args.verbosity_level == 2:
        logging.basicConfig(level=logging.DEBUG, format="%(message)s")
    # Turn-off third party loggers
    logging.getLogger("requests").setLevel(logging.WARNING)
    if args.list_steps is not None:
        # NOTE(review): after listing the steps the script keeps going and
        # runs the pipeline -- confirm whether an exit was intended here.
        sys.stdout.write("\nAvailable pipeline steps:\n")
        for i, (_, step_description) in enumerate(STEPS):
            sys.stdout.write("\t (%d) %s\n" % (i + 1, step_description))
        sys.stdout.write("\n\n")
    logging.info("\nRunning demo pipeline with the following options %r" % args)
    sys.path.append(args.app_root)
    # Preview which steps fall inside the [first-step, last-step] range.
    sys.stderr.write("\nThe following steps (*) will be executed:\n\n")
    for i, (_, step_description) in enumerate(STEPS):
        step_i = i + 1
        if args.first_step <= step_i <= args.last_step:
            active_step = "*"
        else:
            active_step = " "
        sys.stderr.write("\t %s (%d) %s\n" % (active_step, step_i, step_description))
    sys.stderr.write("\n\n")
    # Execute the selected steps in order.
    for i, (step_function, step_description) in enumerate(STEPS):
        step_i = i + 1
        if args.first_step <= step_i <= args.last_step:
            logging.info("Starting step #%d: '%s'" % (step_i, step_description))
            step_function(args)
            logging.info("\n")
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from future.utils import native
import flask_login
from flask_login import login_required, current_user, logout_user
from flask import flash
from wtforms import (
Form, PasswordField, StringField)
from wtforms.validators import InputRequired
from ldap3 import Server, Connection, Tls, LEVEL, SUBTREE, BASE
import ssl
from flask import url_for, redirect
from airflow import models
from airflow import configuration
from airflow.configuration import AirflowConfigException
from airflow.utils.db import provide_session
import traceback
import re
from airflow.utils.log.logging_mixin import LoggingMixin
# Flask-Login manager configured at import time; Airflow routes
# unauthenticated requests to the login() view defined below.
login_manager = flask_login.LoginManager()
login_manager.login_view = 'airflow.login'  # Calls login() below
login_manager.login_message = None

# Shared module logger provided by Airflow's logging mixin.
log = LoggingMixin().log
class AuthenticationError(Exception):
    """Raised when an LDAP bind or a user credential check fails."""
class LdapException(Exception):
    """Raised when the LDAP directory structure cannot be parsed/handled."""
def get_ldap_connection(dn=None, password=None):
    """Open and bind an LDAP connection.

    If an ``[ldap] cacert`` option is configured, the connection is made
    over SSL with certificate validation; otherwise it falls back to a
    plain connection.

    :param dn: bind DN (None for an anonymous bind).
    :param password: bind password.
    :return: a bound ldap3 Connection.
    :raises AuthenticationError: if the bind fails.
    """
    tls_configuration = None
    use_ssl = False
    try:
        cacert = configuration.conf.get("ldap", "cacert")
        tls_configuration = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=cacert)
        use_ssl = True
    except AirflowConfigException:
        # No cacert configured: deliberately fall back to an
        # unencrypted connection instead of failing.
        pass

    # Pass use_ssl/tls as keywords: ldap3's Server signature is
    # (host, port=None, use_ssl=False, ..., tls=None), so the original
    # positional call bound use_ssl to `port` and the Tls object to
    # `use_ssl`.
    server = Server(configuration.conf.get("ldap", "uri"),
                    use_ssl=use_ssl,
                    tls=tls_configuration)
    conn = Connection(server, native(dn), native(password))

    if not conn.bind():
        log.error("Cannot bind to ldap server: %s ", conn.last_error)
        raise AuthenticationError("Cannot bind to ldap server")

    return conn
def group_contains_user(conn, search_base, group_filter, user_name_attr, username):
    """Return True if *username* appears in the group selected by *group_filter*.

    :param conn: a bound ldap3 Connection.
    :param search_base: base DN to search under.
    :param group_filter: LDAP filter selecting the group entry.
    :param user_name_attr: group attribute listing member login names.
    :param username: login name to look for (case-insensitive).
    """
    search_filter = '(&({0}))'.format(group_filter)
    found = conn.search(native(search_base), native(search_filter),
                        attributes=[native(user_name_attr)])
    if not found:
        log.warning("Unable to find group for %s %s", search_base, search_filter)
        return False

    wanted = username.lower()
    for entry in conn.entries:
        member_names = getattr(entry, user_name_attr).values
        if wanted in (name.lower() for name in member_names):
            return True
    return False
def groups_user(conn, search_base, user_filter, user_name_att, username):
    """Return the names of the LDAP groups *username* belongs to.

    :param conn: a bound ldap3 Connection.
    :param search_base: base DN to search under.
    :param user_filter: LDAP filter selecting user entries.
    :param user_name_att: attribute holding the login name.
    :param username: login name to look up.
    :return: list of group common names (possibly empty).
    :raises AuthenticationError: if the user cannot be found at all.
    """
    search_filter = "(&({0})({1}={2}))".format(user_filter, user_name_att, username)
    try:
        memberof_attr = configuration.conf.get("ldap", "group_member_attr")
    except AirflowConfigException:
        # Catch only the config lookup failure (was a bare except:) and
        # fall back to the Active Directory default attribute name.
        memberof_attr = "memberOf"
    res = conn.search(native(search_base), native(search_filter),
                      attributes=[native(memberof_attr)])
    if not res:
        log.info("Cannot find user %s", username)
        raise AuthenticationError("Invalid username or password")

    if conn.response and memberof_attr not in conn.response[0]["attributes"]:
        log.warning("""Missing attribute "%s" when looked-up in Ldap database.
        The user does not seem to be a member of a group and therefore won't see any dag
        if the option filter_by_owner=True and owner_mode=ldapgroup are set""",
                    memberof_attr)
        return []

    user_groups = conn.response[0]["attributes"][memberof_attr]

    regex = re.compile("cn=([^,]*).*", re.IGNORECASE)
    groups_list = []
    try:
        # Group DNs look like "cn=<name>,ou=...": extract the cn value.
        # re.search() returns None for a non-matching DN, which raises
        # AttributeError — the original only caught IndexError, so a
        # single malformed DN crashed the login.
        groups_list = [regex.search(i).group(1) for i in user_groups]
    except (IndexError, AttributeError):
        log.warning("Parsing error when retrieving the user's group(s)."
                    " Check if the user belongs to at least one group"
                    " or if the user's groups name do not contain special characters")

    return groups_list
class LdapUser(models.User):
    """Airflow user backed by LDAP.

    Wraps a ``models.User`` row and resolves the superuser and
    data-profiler privileges plus the LDAP group membership at
    construction time, using the service (bind) account.
    """

    def __init__(self, user):
        self.user = user
        self.ldap_groups = []

        # Load and cache superuser and data_profiler settings.
        conn = get_ldap_connection(configuration.conf.get("ldap", "bind_user"),
                                   configuration.conf.get("ldap", "bind_password"))

        superuser_filter = None
        data_profiler_filter = None
        try:
            superuser_filter = configuration.conf.get("ldap", "superuser_filter")
        except AirflowConfigException:
            pass

        # Default-open: with no superuser_filter configured, every
        # authenticated user is treated as a superuser.
        if not superuser_filter:
            self.superuser = True
            log.debug("Missing configuration for superuser settings or empty. Skipping.")
        else:
            self.superuser = group_contains_user(conn,
                                                 configuration.conf.get("ldap", "basedn"),
                                                 superuser_filter,
                                                 configuration.conf.get("ldap",
                                                                        "user_name_attr"),
                                                 user.username)

        try:
            data_profiler_filter = configuration.conf.get("ldap", "data_profiler_filter")
        except AirflowConfigException:
            pass

        # Same default-open behaviour for the data-profiler permission.
        if not data_profiler_filter:
            self.data_profiler = True
            log.debug("Missing configuration for data profiler settings or empty. "
                      "Skipping.")
        else:
            self.data_profiler = group_contains_user(
                conn,
                configuration.conf.get("ldap", "basedn"),
                data_profiler_filter,
                configuration.conf.get("ldap",
                                       "user_name_attr"),
                user.username
            )

        # Load the ldap group(s) a user belongs to
        try:
            self.ldap_groups = groups_user(
                conn,
                configuration.conf.get("ldap", "basedn"),
                configuration.conf.get("ldap", "user_filter"),
                configuration.conf.get("ldap", "user_name_attr"),
                user.username
            )
        except AirflowConfigException:
            log.debug("Missing configuration for ldap settings. Skipping")

    @staticmethod
    def try_login(username, password):
        """Verify *username*/*password* against the LDAP directory.

        Searches for the user's DN using the service (bind) account,
        then re-binds as that DN with the supplied password.

        :raises AuthenticationError: on unknown user or bad credentials.
        :raises LdapException: when the directory structure cannot be parsed.
        """
        conn = get_ldap_connection(configuration.conf.get("ldap", "bind_user"),
                                   configuration.conf.get("ldap", "bind_password"))

        search_filter = "(&({0})({1}={2}))".format(
            configuration.conf.get("ldap", "user_filter"),
            configuration.conf.get("ldap", "user_name_attr"),
            username
        )

        search_scope = LEVEL
        if configuration.conf.has_option("ldap", "search_scope"):
            if configuration.conf.get("ldap", "search_scope") == "SUBTREE":
                search_scope = SUBTREE
            else:
                search_scope = LEVEL

        # todo: BASE or ONELEVEL?

        res = conn.search(native(configuration.conf.get("ldap", "basedn")),
                          native(search_filter),
                          search_scope=native(search_scope))

        # todo: use list or result?
        if not res:
            log.info("Cannot find user %s", username)
            raise AuthenticationError("Invalid username or password")

        entry = conn.response[0]

        conn.unbind()

        if 'dn' not in entry:
            # The search filter for the user did not return any values, so an
            # invalid user was used for credentials.
            raise AuthenticationError("Invalid username or password")

        try:
            # Re-bind as the located user; a failed bind raises
            # AuthenticationError inside get_ldap_connection().
            conn = get_ldap_connection(entry['dn'], password)
        except KeyError:
            log.error("""
            Unable to parse LDAP structure. If you're using Active Directory
            and not specifying an OU, you must set search_scope=SUBTREE in airflow.cfg.
            %s
            """ % traceback.format_exc())
            raise LdapException(
                "Could not parse LDAP structure. "
                "Try setting search_scope in airflow.cfg, or check logs"
            )

        if not conn:
            # NOTE(review): get_ldap_connection() raises rather than
            # returning a falsy value, so this branch looks unreachable
            # — confirm before relying on it.
            log.info("Password incorrect for user %s", username)
            raise AuthenticationError("Invalid username or password")

    @property
    def is_active(self):
        """Required by flask_login"""
        return True

    @property
    def is_authenticated(self):
        """Required by flask_login"""
        return True

    @property
    def is_anonymous(self):
        """Required by flask_login"""
        return False

    def get_id(self):
        """Returns the current user id as required by flask_login"""
        return self.user.get_id()

    def data_profiling(self):
        """Provides access to data profiling tools"""
        return self.data_profiler

    def is_superuser(self):
        """Access all the things"""
        return self.superuser
@login_manager.user_loader
@provide_session
def load_user(userid, session=None):
    """Reload the LdapUser for a session-stored user id (flask_login hook)."""
    log.debug("Loading user %s", userid)
    if not userid or userid == 'None':
        return None
    db_user = (session.query(models.User)
               .filter(models.User.id == int(userid))
               .first())
    return LdapUser(db_user)
@provide_session
def login(self, request, session=None):
    """Handle GET/POST of the Airflow login view using LDAP authentication.

    On successful authentication a matching ``models.User`` row is created
    on first login and the user is logged in via flask_login.  On failure
    the login page is re-rendered with a flash message.

    :param self: the Flask-Admin view instance (provides ``render``).
    :param request: the current Flask request.
    :param session: SQLAlchemy session injected by @provide_session.
    """
    if current_user.is_authenticated:
        flash("You are already logged in")
        return redirect(url_for('admin.index'))

    username = None
    password = None

    form = LoginForm(request.form)

    if request.method == 'POST' and form.validate():
        username = request.form.get("username")
        password = request.form.get("password")

    if not username or not password:
        # GET request, or an incomplete/invalid form: show the login page.
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)

    try:
        LdapUser.try_login(username, password)
        log.info("User %s successfully authenticated", username)

        user = session.query(models.User).filter(
            models.User.username == username).first()

        if not user:
            # First LDAP login: create the local user record.
            user = models.User(
                username=username,
                is_superuser=False)
            session.add(user)

        session.commit()
        session.merge(user)
        flask_login.login_user(LdapUser(user))
        session.commit()

        return redirect(request.args.get("next") or url_for("admin.index"))
    except (LdapException, AuthenticationError) as e:
        # isinstance (not type comparison) so subclasses are handled too.
        if isinstance(e, LdapException):
            flash(e, "error")
        else:
            flash("Incorrect login details")
        return self.render('airflow/login.html',
                           title="Airflow - Login",
                           form=form)
class LoginForm(Form):
    """Username/password form rendered by the login() view."""
    # Both fields are mandatory; validation failure re-renders the page.
    username = StringField('Username', [InputRequired()])
    password = PasswordField('Password', [InputRequired()])
| |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from nova.compute import api as compute_api
from nova.db import base
from nova import exception
from nova.network.api import refresh_cache
from nova.network import model as network_model
from nova.network import quantumv2
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
# Options for reaching the Quantum (now Neutron) API service; registered
# on the global CONF object below.
quantum_opts = [
    cfg.StrOpt('quantum_url',
               default='http://127.0.0.1:9696',
               help='URL for connecting to quantum'),
    cfg.IntOpt('quantum_url_timeout',
               default=30,
               help='timeout value for connecting to quantum in seconds'),
    cfg.StrOpt('quantum_admin_username',
               help='username for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_admin_password',
               help='password for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_admin_tenant_name',
               help='tenant name for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help='auth url for connecting to quantum in admin context'),
    cfg.StrOpt('quantum_auth_strategy',
               default='keystone',
               help='auth strategy for connecting to '
                    'quantum in admin context'),
]

CONF = cfg.CONF
CONF.register_opts(quantum_opts)
# Options defined elsewhere in nova that this module reads.
CONF.import_opt('node_availability_zone', 'nova.config')
CONF.import_opt('default_floating_pool', 'nova.network.manager')

LOG = logging.getLogger(__name__)

# Network attribute Quantum uses to flag external (floating-ip) networks.
NET_EXTERNAL = 'router:external'
class API(base.Base):
    """API for interacting with the quantum 2.x API."""
    security_group_api = compute_api.SecurityGroupAPI()

    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures."""

    def _get_available_networks(self, context, project_id,
                                net_ids=None):
        """Return a network list available for the tenant.

        The list contains networks owned by the tenant and public networks.
        If net_ids specified, it searches networks with requested IDs only.
        """
        quantum = quantumv2.get_client(context)

        # If user has specified to attach instance only to specific
        # networks, add them to **search_opts
        # (1) Retrieve non-public network list owned by the tenant.
        search_opts = {"tenant_id": project_id, 'shared': False}
        if net_ids:
            search_opts['id'] = net_ids
        nets = quantum.list_networks(**search_opts).get('networks', [])
        # (2) Retrieve public network list.
        search_opts = {'shared': True}
        if net_ids:
            search_opts['id'] = net_ids
        nets += quantum.list_networks(**search_opts).get('networks', [])

        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)

        return nets

    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate all network resources for the instance.

        :param context: request context.
        :param instance: instance dict; must carry a non-empty project_id.
        :param kwargs: may contain 'requested_networks', a list of
            (network_id, fixed_ip, port_id) tuples.
        :raises InvalidInput: if the instance has no project id.
        """
        quantum = quantumv2.get_client(context)
        LOG.debug(_('allocate_for_instance() for %s'),
                  instance['display_name'])
        if not instance['project_id']:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance['display_name'])
        requested_networks = kwargs.get('requested_networks')
        ports = {}
        fixed_ips = {}
        net_ids = []
        if requested_networks:
            for network_id, fixed_ip, port_id in requested_networks:
                if port_id:
                    port = quantum.show_port(port_id).get('port')
                    network_id = port['network_id']
                    ports[network_id] = port
                elif fixed_ip:
                    fixed_ips[network_id] = fixed_ip
                net_ids.append(network_id)

        nets = self._get_available_networks(context, instance['project_id'],
                                            net_ids)

        touched_port_ids = []
        created_port_ids = []
        for network in nets:
            network_id = network['id']
            zone = 'compute:%s' % CONF.node_availability_zone
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            try:
                port = ports.get(network_id)
                if port:
                    # Reuse the pre-created port the caller handed us.
                    quantum.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                else:
                    # BUG FIX: look up the fixed ip requested for *this*
                    # network; the previous code reused the stale loop
                    # variable `fixed_ip` left over from the
                    # requested_networks loop above.
                    fixed_ip = fixed_ips.get(network_id)
                    if fixed_ip:
                        port_req_body['port']['fixed_ips'] = [
                            {'ip_address': fixed_ip}]
                    port_req_body['port']['network_id'] = network_id
                    port_req_body['port']['admin_state_up'] = True
                    port_req_body['port']['tenant_id'] = instance['project_id']
                    created_port_ids.append(
                        quantum.create_port(port_req_body)['port']['id'])
            except Exception:
                # Roll back: detach the ports we touched and delete the
                # ports we created, then re-raise the original exception.
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        port_in_server = quantum.show_port(port_id).get('port')
                        if not port_in_server:
                            raise Exception(_('Port not found'))
                        port_req_body = {'port': {'device_id': None}}
                        quantum.update_port(port_id, port_req_body)

                    for port_id in created_port_ids:
                        try:
                            quantum.delete_port(port_id)
                        except Exception as ex:
                            msg = _("Fail to delete port %(portid)s with"
                                    " failure: %(exception)s")
                            LOG.debug(msg, {'portid': port_id,
                                            'exception': ex})
        self.trigger_security_group_members_refresh(context, instance)
        self.trigger_instance_add_security_group_refresh(context, instance)
        return self.get_instance_nw_info(context, instance, networks=nets)

    def deallocate_for_instance(self, context, instance, **kwargs):
        """Deallocate all network resources related to the instance."""
        LOG.debug(_('deallocate_for_instance() for %s'),
                  instance['display_name'])
        search_opts = {'device_id': instance['uuid']}
        data = quantumv2.get_client(context).list_ports(**search_opts)
        ports = data.get('ports', [])
        for port in ports:
            try:
                quantumv2.get_client(context).delete_port(port['id'])
            except Exception:
                # Best effort: log and keep deleting the remaining ports.
                LOG.exception(_("Failed to delete quantum port %(portid)s ")
                              % {'portid': port['id']})

        self.trigger_security_group_members_refresh(context, instance)
        self.trigger_instance_remove_security_group_refresh(context, instance)

    @refresh_cache
    def get_instance_nw_info(self, context, instance, networks=None):
        """Return network info for the instance (refreshing the cache)."""
        return self._get_instance_nw_info(context, instance, networks)

    def _get_instance_nw_info(self, context, instance, networks=None):
        """Build and hydrate the NetworkInfo model for the instance."""
        LOG.debug(_('get_instance_nw_info() for %s'),
                  instance['display_name'])
        nw_info = self._build_network_info_model(context, instance, networks)
        return network_model.NetworkInfo.hydrate(nw_info)

    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Add a fixed ip to the instance from specified network."""
        raise NotImplementedError()

    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Remove a fixed ip from the instance."""
        raise NotImplementedError()

    def validate_networks(self, context, requested_networks):
        """Validate that the tenant can use the requested networks.

        :raises PortNotFound: if a requested port does not exist.
        :raises PortInUse: if a requested port is already attached.
        :raises NetworkDuplicated: if a network is requested twice.
        :raises NetworkNotFound: if any requested network is unavailable.
        """
        LOG.debug(_('validate_networks() for %s'),
                  requested_networks)
        if not requested_networks:
            return
        net_ids = []
        for (net_id, _i, port_id) in requested_networks:
            if not port_id:
                net_ids.append(net_id)
                continue
            port = quantumv2.get_client(context).show_port(port_id).get('port')
            if not port:
                raise exception.PortNotFound(port_id=port_id)
            if port.get('device_id', None):
                raise exception.PortInUse(port_id=port_id)
            net_id = port['network_id']
            if net_id in net_ids:
                raise exception.NetworkDuplicated(network_id=net_id)
            net_ids.append(net_id)

        nets = self._get_available_networks(context, context.project_id,
                                            net_ids)
        if len(nets) != len(net_ids):
            requested_netid_set = set(net_ids)
            returned_netid_set = set([net['id'] for net in nets])
            lostid_set = requested_netid_set - returned_netid_set
            id_str = ', '.join(lostid_set)
            raise exception.NetworkNotFound(network_id=id_str)

    def _get_instance_uuids_by_ip(self, context, address):
        """Retrieve instance uuids associated with the given ip address.

        :returns: A list of dicts containing the uuids keyed by
                  'instance_uuid', e.g. [{'instance_uuid': uuid}, ...]
        """
        search_opts = {"fixed_ips": 'ip_address=%s' % address}
        data = quantumv2.get_client(context).list_ports(**search_opts)
        ports = data.get('ports', [])
        return [{'instance_uuid': port['device_id']} for port in ports
                if port['device_id']]

    def get_instance_uuids_by_ip_filter(self, context, filters):
        """Return a list of dicts in the form of
        [{'instance_uuid': uuid}] that matched the ip filter.
        """
        # filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
        ip = filters.get('ip')
        # we remove ^$\ in the ip filter
        if ip[0] == '^':
            ip = ip[1:]
        if ip[-1] == '$':
            ip = ip[:-1]
        ip = ip.replace('\\.', '.')
        return self._get_instance_uuids_by_ip(context, ip)

    def trigger_instance_add_security_group_refresh(self, context,
                                                    instance_ref):
        # NOTE(review): an unused elevated admin context was removed here;
        # the handler is invoked with the caller's context as before.
        for group in instance_ref['security_groups']:
            self.security_group_api.trigger_handler(
                'instance_add_security_group', context, instance_ref,
                group['name'])

    def trigger_instance_remove_security_group_refresh(self, context,
                                                       instance_ref):
        # NOTE(review): an unused elevated admin context was removed here;
        # the handler is invoked with the caller's context as before.
        for group in instance_ref['security_groups']:
            self.security_group_api.trigger_handler(
                'instance_remove_security_group', context, instance_ref,
                group['name'])

    def trigger_security_group_members_refresh(self, context, instance_ref):
        admin_context = context.elevated()
        group_ids = [group['id'] for group in instance_ref['security_groups']]
        self.security_group_api.trigger_members_refresh(admin_context,
                                                        group_ids)
        self.security_group_api.trigger_handler('security_group_members',
                                                admin_context, group_ids)

    def _get_port_id_by_fixed_address(self, client,
                                      instance, address):
        """Return the id of the instance's port that owns *address*.

        :raises FixedIpNotFoundForAddress: if no port has that fixed ip.
        """
        zone = 'compute:%s' % CONF.node_availability_zone
        search_opts = {'device_id': instance['uuid'],
                       'device_owner': zone}
        data = client.list_ports(**search_opts)
        ports = data['ports']
        port_id = None
        for p in ports:
            for ip in p['fixed_ips']:
                if ip['ip_address'] == address:
                    port_id = p['id']
                    break
        if not port_id:
            raise exception.FixedIpNotFoundForAddress(address=address)
        return port_id

    @refresh_cache
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associate a floating ip with a fixed ip."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = quantumv2.get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        client.update_floatingip(fip['id'], {'floatingip': param})

    def get_all(self, context):
        """Return all networks visible to the tenant."""
        client = quantumv2.get_client(context)
        return client.list_networks()

    def get(self, context, network_uuid):
        """Return the network with the given uuid."""
        client = quantumv2.get_client(context)
        return client.show_network(network_uuid)

    def delete(self, context, network_uuid):
        raise NotImplementedError()

    def disassociate(self, context, network_uuid):
        raise NotImplementedError()

    def get_fixed_ip(self, context, id):
        raise NotImplementedError()

    def get_fixed_ip_by_address(self, context, address):
        """Return the single instance-uuid mapping for a fixed ip address."""
        uuid_maps = self._get_instance_uuids_by_ip(context, address)
        if len(uuid_maps) == 1:
            return uuid_maps[0]
        elif not uuid_maps:
            raise exception.FixedIpNotFoundForAddress(address=address)
        else:
            raise exception.FixedIpAssociatedWithMultipleInstances(
                address=address)

    def _setup_net_dict(self, client, network_id):
        """Return {network_id: network} for a single network (or {})."""
        if not network_id:
            return {}
        pool = client.show_network(network_id)['network']
        return {pool['id']: pool}

    def _setup_port_dict(self, client, port_id):
        """Return {port_id: port} for a single port (or {})."""
        if not port_id:
            return {}
        port = client.show_port(port_id)['port']
        return {port['id']: port}

    def _setup_pools_dict(self, client):
        """Return all floating-ip pools keyed by network id."""
        pools = self._get_floating_ip_pools(client)
        return dict([(i['id'], i) for i in pools])

    def _setup_ports_dict(self, client, project_id=None):
        """Return (optionally project-scoped) ports keyed by port id."""
        search_opts = {'tenant_id': project_id} if project_id else {}
        ports = client.list_ports(**search_opts)['ports']
        return dict([(p['id'], p) for p in ports])

    def get_floating_ip(self, context, id):
        """Return the floating ip with the given id in nova's model."""
        client = quantumv2.get_client(context)
        fip = client.show_floatingip(id)['floatingip']
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def _get_floating_ip_pools(self, client, project_id=None):
        """Return external networks usable as floating-ip pools."""
        search_opts = {NET_EXTERNAL: True}
        if project_id:
            search_opts.update({'tenant_id': project_id})
        data = client.list_networks(**search_opts)
        return data['networks']

    def get_floating_ip_pools(self, context):
        """Return available floating ip pools (external networks)."""
        client = quantumv2.get_client(context)
        pools = self._get_floating_ip_pools(client)
        return [{'name': n['name'] or n['id']} for n in pools]

    def _format_floating_ip_model(self, fip, pool_dict, port_dict):
        """Translate a quantum floatingip dict into nova's floating ip model."""
        pool = pool_dict[fip['floating_network_id']]
        result = {'id': fip['id'],
                  'address': fip['floating_ip_address'],
                  'pool': pool['name'] or pool['id'],
                  'project_id': fip['tenant_id'],
                  # In Quantum v2, an exact fixed_ip_id does not exist.
                  'fixed_ip_id': fip['port_id'],
                  }
        # In Quantum v2 API fixed_ip_address and instance uuid
        # (= device_id) are known here, so pass it as a result.
        result['fixed_ip'] = {'address': fip['fixed_ip_address']}
        if fip['port_id']:
            instance_uuid = port_dict[fip['port_id']]['device_id']
            result['instance'] = {'uuid': instance_uuid}
        else:
            result['instance'] = None
        return result

    def get_floating_ip_by_address(self, context, address):
        """Return the floating ip with the given address in nova's model."""
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def get_floating_ips_by_project(self, context):
        """Return the project's floating ips in nova's model."""
        client = quantumv2.get_client(context)
        project_id = context.project_id
        fips = client.list_floatingips(tenant_id=project_id)['floatingips']
        pool_dict = self._setup_pools_dict(client)
        port_dict = self._setup_ports_dict(client, project_id)
        return [self._format_floating_ip_model(fip, pool_dict, port_dict)
                for fip in fips]

    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        return []

    def get_instance_id_by_floating_address(self, context, address):
        """Returns the instance id a floating ip's fixed ip is allocated to"""
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if not fip['port_id']:
            return None
        port = client.show_port(fip['port_id'])['port']
        return port['device_id']

    def get_vifs_by_instance(self, context, instance):
        raise NotImplementedError()

    def get_vif_by_mac_address(self, context, mac_address):
        raise NotImplementedError()

    def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
        """Resolve a pool name or uuid to the external network id.

        :raises FloatingIpPoolNotFound: if nothing matches.
        :raises NovaException: if the name is ambiguous.
        """
        search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
        if uuidutils.is_uuid_like(name_or_id):
            search_opts.update({'id': name_or_id})
        else:
            search_opts.update({'name': name_or_id})
        data = client.list_networks(**search_opts)
        nets = data['networks']

        if len(nets) == 1:
            return nets[0]['id']
        elif len(nets) == 0:
            raise exception.FloatingIpPoolNotFound()
        else:
            msg = (_("Multiple floating IP pools matches found for name '%s'")
                   % name_or_id)
            raise exception.NovaException(message=msg)

    def allocate_floating_ip(self, context, pool=None):
        """Add a floating ip to a project from a pool."""
        client = quantumv2.get_client(context)
        pool = pool or CONF.default_floating_pool
        pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)

        # TODO(amotoki): handle exception during create_floatingip()
        # At this timing it is ensured that a network for pool exists.
        # quota error may be returned.
        param = {'floatingip': {'floating_network_id': pool_id}}
        fip = client.create_floatingip(param)
        return fip['floatingip']['floating_ip_address']

    def _get_floating_ip_by_address(self, client, address):
        """Get floatingip from floating ip address"""
        data = client.list_floatingips(floating_ip_address=address)
        fips = data['floatingips']
        if len(fips) == 0:
            raise exception.FloatingIpNotFoundForAddress(address=address)
        elif len(fips) > 1:
            raise exception.FloatingIpMultipleFoundForAddress(address=address)
        return fips[0]

    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Remove a floating ip with the given address from a project."""
        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address range. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if fip['port_id']:
            raise exception.FloatingIpAssociated(address=address)
        client.delete_floatingip(fip['id'])

    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating ip from the instance."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could not
        # find why this parameter exists.
        client = quantumv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})

    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance"""
        # NOTE(wenjianhn): just pass to make migrate instance doesn't
        # raise for now.
        pass

    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance"""
        # NOTE(wenjianhn): just pass to make migrate instance doesn't
        # raise for now.
        pass

    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force add a network to the project."""
        raise NotImplementedError()

    def _build_network_info_model(self, context, instance, networks=None):
        """Build a list of VIF models for the instance's active ports."""
        search_opts = {'tenant_id': instance['project_id'],
                       'device_id': instance['uuid'], }
        data = quantumv2.get_client(context,
                                    admin=True).list_ports(**search_opts)
        ports = data.get('ports', [])
        if not networks:
            networks = self._get_available_networks(context,
                                                    instance['project_id'])
        else:
            # ensure ports are in preferred network order
            _ensure_requested_network_ordering(
                lambda x: x['network_id'],
                ports,
                [n['id'] for n in networks])

        nw_info = network_model.NetworkInfo()
        for port in ports:
            # BUG FIX: remember the matched network itself; the previous
            # code read `net['tenant_id']` after the loop, which referenced
            # a stale (or, for an empty `networks`, undefined) loop
            # variable when no network matched the port.
            matched_net = None
            for net in networks:
                if port['network_id'] == net['id']:
                    matched_net = net
                    break
            network_name = matched_net['name'] if matched_net else None

            network_IPs = [network_model.FixedIP(address=ip_address)
                           for ip_address in [ip['ip_address']
                                              for ip in port['fixed_ips']]]
            # TODO(gongysh) get floating_ips for each fixed_ip

            subnets = self._get_subnets_from_port(context, port)
            for subnet in subnets:
                subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
                                 if fixed_ip.is_in_subnet(subnet)]

            network = network_model.Network(
                id=port['network_id'],
                bridge='',  # Quantum ignores this field
                injected=CONF.flat_injected,
                label=network_name,
                tenant_id=matched_net['tenant_id'] if matched_net else None
            )
            network['subnets'] = subnets
            nw_info.append(network_model.VIF(
                id=port['id'],
                address=port['mac_address'],
                network=network,
                type=port.get('binding:vif_type')))
        return nw_info

    def _get_subnets_from_port(self, context, port):
        """Return the subnets for a given port."""
        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which is not
        # related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
        data = quantumv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []

        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                               address=subnet['gateway_ip'],
                               type='gateway'),
                           }

            # attempt to populate DHCP server field
            search_opts = {'network_id': subnet['network_id'],
                           'device_owner': 'network:dhcp'}
            data = quantumv2.get_client(context).list_ports(**search_opts)
            dhcp_ports = data.get('ports', [])
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break

            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))

            # TODO(gongysh) get the routes for this subnet
            subnets.append(subnet_object)
        return subnets

    def get_dns_domains(self, context):
        """Return a list of available dns domains.

        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()

    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def modify_dns_entry(self, context, name, address, domain):
        """Modify specified DNS entry for address."""
        raise NotImplementedError()

    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()

    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()

    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()

    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()

    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()

    def create_public_dns_domain(self, context, domain, project=None):
        """Create a public DNS domain with optional nova project."""
        raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handling of VM disk images.
"""
import os
import shutil
import jinja2
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from ironic.common import exception
from ironic.common.glance_service import service_utils as glance_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common import image_service as service
from ironic.common import paths
from ironic.common import utils
from ironic.openstack.common import imageutils
LOG = logging.getLogger(__name__)

# Configuration options controlling disk-image conversion and the
# bootloader templates used when building ISO images.
image_opts = [
    cfg.BoolOpt('force_raw_images',
                default=True,
                help=_('If True, convert backing images to "raw" disk image '
                       'format.')),
    cfg.StrOpt('isolinux_bin',
               default='/usr/lib/syslinux/isolinux.bin',
               help=_('Path to isolinux binary file.')),
    cfg.StrOpt('isolinux_config_template',
               default=paths.basedir_def('common/isolinux_config.template'),
               help=_('Template file for isolinux configuration file.')),
    cfg.StrOpt('grub_config_template',
               default=paths.basedir_def('common/grub_conf.template'),
               help=_('Template file for grub configuration file.')),
]

CONF = cfg.CONF
CONF.register_opts(image_opts)
def _create_root_fs(root_directory, files_info):
    """Creates a filesystem root in given directory.

    Given a mapping of absolute path of files to their relative paths
    within the filesystem, this method copies the files to their
    destination.

    :param root_directory: the filesystem root directory.
    :param files_info: A dict containing absolute path of file to be copied
        -> relative path within the vfat image. For example,
        {
        '/absolute/path/to/file' -> 'relative/path/within/root'
        ...
        }
    :raises: OSError, if creation of any directory failed.
    :raises: IOError, if copying any of the files failed.
    """
    import errno

    for src_file, path in files_info.items():
        target_file = os.path.join(root_directory, path)
        dirname = os.path.dirname(target_file)
        # EAFP: attempt the creation and tolerate only "already exists".
        # The previous check-then-create (os.path.exists + makedirs) was
        # racy: a concurrent caller could create the directory between the
        # check and the makedirs() call and crash us with EEXIST.
        try:
            os.makedirs(dirname)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        shutil.copyfile(src_file, target_file)
def _umount_without_raise(mount_dir):
    """Helper method to umount without raise.

    Best-effort unmount used on cleanup/error paths: a failed umount is
    deliberately swallowed so cleanup does not mask the error that is
    already being propagated to the caller.

    :param mount_dir: the mounted directory to unmount.
    """
    try:
        utils.umount(mount_dir)
    except processutils.ProcessExecutionError:
        pass
def create_vfat_image(output_file, files_info=None, parameters=None,
                      parameters_file='parameters.txt', fs_size_kib=100):
    """Creates the fat fs image on the desired file.

    This method copies the given files to a root directory (optional),
    writes the parameters specified to the parameters file within the
    root directory (optional), and then creates a vfat image of the root
    directory.

    :param output_file: The path to the file where the fat fs image needs
        to be created.
    :param files_info: A dict containing absolute path of file to be copied
        -> relative path within the vfat image. For example,
        {
        '/absolute/path/to/file' -> 'relative/path/within/root'
        ...
        }
    :param parameters: A dict containing key-value pairs of parameters.
    :param parameters_file: The filename for the parameters file.
    :param fs_size_kib: size of the vfat filesystem in KiB.
    :raises: ImageCreationFailed, if image creation failed while doing any
        of filesystem manipulation activities like creating dirs, mounting,
        creating filesystem, copying files, etc.
    """
    try:
        # Allocate a zero-filled backing file of the requested size.
        utils.dd('/dev/zero', output_file, 'count=1', "bs=%dKiB" % fs_size_kib)
    except processutils.ProcessExecutionError as e:
        raise exception.ImageCreationFailed(image_type='vfat', error=e)

    with utils.tempdir() as tmpdir:
        try:
            # The label helps ramdisks to find the partition containing
            # the parameters (by using /dev/disk/by-label/ir-vfd-dev).
            # NOTE: FAT filesystem label can be up to 11 characters long.
            utils.mkfs('vfat', output_file, label="ir-vfd-dev")
            # umask=0 so the files copied below are accessible on the
            # mounted filesystem.
            utils.mount(output_file, tmpdir, '-o', 'umask=0')
        except processutils.ProcessExecutionError as e:
            raise exception.ImageCreationFailed(image_type='vfat', error=e)

        try:
            if files_info:
                _create_root_fs(tmpdir, files_info)

            if parameters:
                # Write 'key=value' lines into the parameters file inside
                # the mounted image.
                parameters_file = os.path.join(tmpdir, parameters_file)
                params_list = ['%(key)s=%(val)s' % {'key': k, 'val': v}
                               for k, v in parameters.items()]
                file_contents = '\n'.join(params_list)
                utils.write_to_file(parameters_file, file_contents)
        except Exception as e:
            LOG.exception(_LE("vfat image creation failed. Error: %s"), e)
            raise exception.ImageCreationFailed(image_type='vfat', error=e)
        finally:
            # Always unmount; a failed unmount is treated as fatal because
            # it would leave the image in an inconsistent state.
            try:
                utils.umount(tmpdir)
            except processutils.ProcessExecutionError as e:
                raise exception.ImageCreationFailed(image_type='vfat', error=e)
def _generate_cfg(kernel_params, template, options):
    """Generates an isolinux or grub configuration file.

    Renders the given Jinja2 template with the supplied options plus the
    assembled kernel command line, and returns the rendered contents.

    :param kernel_params: a list of strings(each element being a string like
        'K=V' or 'K' or combination of them like 'K1=V1 K2 K3=V3') to be
        added as the kernel cmdline.
    :param template: the path of the config template file.
    :param options: a dictionary of keywords which need to be replaced in
        template file to generate a proper config file.  Not modified.
    :returns: a string containing the contents of the rendered
        configuration file.
    """
    kernel_params_str = ' '.join(kernel_params or [])

    tmpl_dir, tmpl_name = os.path.split(template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
    tmpl = env.get_template(tmpl_name)

    # Render from a copy so the caller's `options` dict is not mutated
    # (the previous implementation updated it in place as a side effect).
    render_ctx = dict(options)
    render_ctx['kernel_params'] = kernel_params_str

    return tmpl.render(render_ctx)
def create_isolinux_image_for_bios(output_file, kernel, ramdisk,
                                   kernel_params=None):
    """Creates an isolinux image on the specified file.

    Copies the provided kernel, ramdisk to a directory, generates the
    isolinux configuration file using the kernel parameters provided, and
    then generates a bootable ISO image.

    :param output_file: the path to the file where the iso image needs to be
        created.
    :param kernel: the kernel to use.
    :param ramdisk: the ramdisk to use.
    :param kernel_params: a list of strings(each element being a string like
        'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added
        as the kernel cmdline.
    :raises: ImageCreationFailed, if image creation failed while copying
        files or while running command to generate iso.
    """
    ISOLINUX_BIN = 'isolinux/isolinux.bin'
    ISOLINUX_CFG = 'isolinux/isolinux.cfg'

    # Paths as they will appear inside the ISO root.
    options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}

    with utils.tempdir() as tmpdir:
        # Stage kernel, ramdisk and the isolinux boot loader into the
        # temporary ISO root.
        files_info = {
            kernel: 'vmlinuz',
            ramdisk: 'initrd',
            CONF.isolinux_bin: ISOLINUX_BIN,
        }
        try:
            _create_root_fs(tmpdir, files_info)
        except (OSError, IOError) as e:
            LOG.exception(_LE("Creating the filesystem root failed."))
            raise exception.ImageCreationFailed(image_type='iso', error=e)

        cfg = _generate_cfg(kernel_params,
                            CONF.isolinux_config_template, options)

        isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
        utils.write_to_file(isolinux_cfg, cfg)

        try:
            # El Torito no-emulation boot via isolinux.bin (-no-emul-boot,
            # -boot-info-table) as used for BIOS CD boot.
            utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
                          '-cache-inodes', '-J', '-l', '-no-emul-boot',
                          '-boot-load-size', '4', '-boot-info-table',
                          '-b', ISOLINUX_BIN, '-o', output_file, tmpdir)
        except processutils.ProcessExecutionError as e:
            LOG.exception(_LE("Creating ISO image failed."))
            raise exception.ImageCreationFailed(image_type='iso', error=e)
def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
                                   kernel_params=None):
    """Creates an isolinux image on the specified file.

    Copies the provided kernel, ramdisk, efiboot.img to a directory, creates
    the path for grub config file, generates the isolinux configuration file
    using the kernel parameters provided, generates the grub configuration
    file using kernel parameters and then generates a bootable ISO image
    for uefi.

    :param output_file: the path to the file where the iso image needs to be
        created.
    :param deploy_iso: deploy iso used to initiate the deploy.
    :param kernel: the kernel to use.
    :param ramdisk: the ramdisk to use.
    :param kernel_params: a list of strings(each element being a string like
        'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added
        as the kernel cmdline.
    :raises: ImageCreationFailed, if image creation failed while copying
        files or while running command to generate iso.
    """
    ISOLINUX_BIN = 'isolinux/isolinux.bin'
    ISOLINUX_CFG = 'isolinux/isolinux.cfg'

    # Two boot loaders are configured: isolinux for legacy BIOS boot and
    # grub for UEFI boot.
    isolinux_options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}
    grub_options = {'linux': '/vmlinuz', 'initrd': '/initrd'}

    with utils.tempdir() as tmpdir:
        files_info = {
            kernel: 'vmlinuz',
            ramdisk: 'initrd',
            CONF.isolinux_bin: ISOLINUX_BIN,
        }

        # Open the deploy iso used to initiate deploy and copy the
        # efiboot.img i.e. boot loader to the current temporary
        # directory.
        with utils.tempdir() as mountdir:
            uefi_path_info, e_img_rel_path, grub_rel_path = (
                _mount_deploy_iso(deploy_iso, mountdir))

            # if either of these variables are not initialized then the
            # uefi efiboot.img cannot be created.
            files_info.update(uefi_path_info)
            try:
                _create_root_fs(tmpdir, files_info)
            except (OSError, IOError) as e:
                LOG.exception(_LE("Creating the filesystem root failed."))
                raise exception.ImageCreationFailed(image_type='iso', error=e)
            finally:
                # Keep the deploy ISO mounted only long enough to copy its
                # files into the staging root.
                _umount_without_raise(mountdir)

        cfg = _generate_cfg(kernel_params,
                            CONF.isolinux_config_template, isolinux_options)

        isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
        utils.write_to_file(isolinux_cfg, cfg)

        # Generate and copy grub config file.
        grub_cfg = os.path.join(tmpdir, grub_rel_path)
        grub_conf = _generate_cfg(kernel_params,
                                  CONF.grub_config_template, grub_options)
        utils.write_to_file(grub_cfg, grub_conf)

        # Create the boot_iso.
        try:
            # '-eltorito-alt-boot -e <efiboot.img>' adds a second (UEFI)
            # boot entry alongside the BIOS isolinux entry.
            utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
                          '-cache-inodes', '-J', '-l', '-no-emul-boot',
                          '-boot-load-size', '4', '-boot-info-table',
                          '-b', ISOLINUX_BIN, '-eltorito-alt-boot',
                          '-e', e_img_rel_path, '-no-emul-boot',
                          '-o', output_file, tmpdir)
        except processutils.ProcessExecutionError as e:
            LOG.exception(_LE("Creating ISO image failed."))
            raise exception.ImageCreationFailed(image_type='iso', error=e)
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info.

    A missing file is not an error: an empty QemuImgInfo is returned so
    callers can treat "no file" and "no parsed data" uniformly.
    """
    if os.path.exists(path):
        cmd_output, _err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
                                         'qemu-img', 'info', path)
        return imageutils.QemuImgInfo(cmd_output)
    return imageutils.QemuImgInfo()
def convert_image(source, dest, out_format, run_as_root=False):
    """Convert image to other format."""
    # Single direct call instead of building an intermediate cmd tuple.
    utils.execute('qemu-img', 'convert', '-O', out_format, source, dest,
                  run_as_root=run_as_root)
def fetch(context, image_href, path, force_raw=False):
    """Download an image to a local path, optionally converting it to raw.

    :param context: request context passed through to the image service.
    :param image_href: URL or glance uuid of the image to download.
    :param path: local destination path.
    :param force_raw: if True, convert the downloaded image in place to
        raw format via image_to_raw().
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance.  Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    image_service = service.get_image_service(image_href,
                                              context=context)
    # Lazy %-style logging arguments (consistent with the rest of this
    # module, e.g. get_temp_url_for_glance_image): the message is only
    # formatted when DEBUG logging is actually enabled.
    LOG.debug("Using %(image_service)s to download image %(image_href)s.",
              {'image_service': image_service.__class__,
               'image_href': image_href})

    # remove_path_on_error deletes the partial file if anything below fails.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(image_href, image_file)

        if force_raw:
            image_to_raw(image_href, path, "%s.part" % path)
def image_to_raw(image_href, path, path_tmp):
    """Convert the image at path_tmp to raw format and move it to path.

    If the image is already raw it is simply renamed into place.  Images
    with a backing file are rejected outright.

    :param image_href: image reference, used only for error reporting.
    :param path: final destination path for the raw image.
    :param path_tmp: path of the downloaded image to inspect/convert.
    :raises: ImageUnacceptable if the format cannot be parsed or the image
        has a backing file.
    :raises: ImageConvertFailed if qemu-img did not produce a raw image.
    """
    with fileutils.remove_path_on_error(path_tmp):
        data = qemu_img_info(path_tmp)

        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_href)

        # A backing file could point anywhere on the host filesystem, so
        # refuse such images.
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_href,
                reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
                       {'fmt': fmt, 'backing_file': backing_file})

        if fmt != "raw":
            staged = "%s.converted" % path
            # Lazy %-style logging args (consistent with the rest of the
            # module) instead of eagerly %-formatting the message.
            LOG.debug("%(image)s was %(format)s, converting to raw",
                      {'image': image_href, 'format': fmt})
            with fileutils.remove_path_on_error(staged):
                convert_image(path_tmp, staged, 'raw')
                os.unlink(path_tmp)

                # Re-inspect to verify the conversion really produced raw.
                data = qemu_img_info(staged)
                if data.file_format != "raw":
                    raise exception.ImageConvertFailed(
                        image_id=image_href,
                        reason=_("Converted to raw, but format is "
                                 "now %s") % data.file_format)

                os.rename(staged, path)
        else:
            os.rename(path_tmp, path)
def download_size(context, image_href, image_service=None):
    """Return the size (in bytes) the image service reports for an image."""
    svc = image_service or service.get_image_service(image_href,
                                                     context=context)
    return svc.show(image_href)['size']
def converted_size(path):
    """Get size of converted raw image.

    The size of image converted to raw format can be growing up to the
    virtual size of the image.

    :param path: path to the image file.
    :returns: virtual size of the image or 0 if conversion not needed.
    """
    return qemu_img_info(path).virtual_size
def get_image_properties(context, image_href, properties="all"):
    """Returns the values of several properties of an image.

    :param context: context
    :param image_href: href of the image
    :param properties: the properties whose values are required.
        This argument is optional, default value is "all", so if not
        specified all properties will be returned.
    :returns: a dict of the values of the properties. A property not on the
        glance metadata will have a value of None.
    """
    img_service = service.get_image_service(image_href, context=context)
    all_properties = img_service.show(image_href)['properties']

    if properties == "all":
        return all_properties

    # Missing properties map to None via dict.get().
    return dict((prop, all_properties.get(prop)) for prop in properties)
def get_temp_url_for_glance_image(context, image_uuid):
    """Returns the tmp url for a glance image.

    :param context: context
    :param image_uuid: the UUID of the image in glance
    :returns: the tmp url for the glance image.
    """
    # Glance API version 2 is required for getting direct_url of the image.
    glance_svc = service.GlanceImageService(version=2, context=context)
    image_info = glance_svc.show(image_uuid)
    LOG.debug('Got image info: %(info)s for image %(image_uuid)s.',
              {'info': image_info, 'image_uuid': image_uuid})
    return glance_svc.swift_temp_url(image_info)
def create_boot_iso(context, output_filename, kernel_href,
                    ramdisk_href, deploy_iso_href, root_uuid=None,
                    kernel_params=None, boot_mode=None):
    """Creates a bootable ISO image for a node.

    Given the hrefs for kernel, ramdisk, root partition's UUID and
    kernel cmdline arguments, this method fetches the kernel and ramdisk,
    and builds a bootable ISO image that can be used to boot up the
    baremetal node.

    :param context: context
    :param output_filename: the absolute path of the output ISO file
    :param kernel_href: URL or glance uuid of the kernel to use
    :param ramdisk_href: URL or glance uuid of the ramdisk to use
    :param deploy_iso_href: URL or glance uuid of the deploy iso used
    :param root_uuid: uuid of the root filesystem (optional)
    :param kernel_params: a string containing whitespace separated values
        kernel cmdline arguments of the form K=V or K (optional).
    :param boot_mode: the boot mode in which the deploy is to happen.
    :raises: ImageCreationFailed, if creating boot ISO failed.
    """
    with utils.tempdir() as tmpdir:
        # Download kernel/ramdisk into the temp dir, keeping the last path
        # component of each href as the local file name.
        kernel_path = os.path.join(tmpdir, kernel_href.split('/')[-1])
        ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
        fetch(context, kernel_href, kernel_path)
        fetch(context, ramdisk_href, ramdisk_path)

        params = []
        if root_uuid:
            params.append('root=UUID=%s' % root_uuid)
        if kernel_params:
            params.append(kernel_params)

        if boot_mode == 'uefi':
            # UEFI additionally needs the deploy ISO, from which
            # efiboot.img and the grub config layout are harvested.
            deploy_iso = os.path.join(tmpdir, deploy_iso_href.split('/')[-1])
            fetch(context, deploy_iso_href, deploy_iso)
            create_isolinux_image_for_uefi(output_filename,
                                           deploy_iso,
                                           kernel_path,
                                           ramdisk_path,
                                           params)
        else:
            create_isolinux_image_for_bios(output_filename,
                                           kernel_path,
                                           ramdisk_path,
                                           params)
def is_whole_disk_image(ctx, instance_info):
    """Find out if the image is a partition image or a whole disk image.

    :param ctx: an admin context
    :param instance_info: a node's instance info dict
    :returns: True for whole disk images and False for partition images
        and None on no image_source or Error.
    """
    image_source = instance_info.get('image_source')
    if not image_source:
        return

    if not glance_utils.is_glance_image(image_source):
        # Non glance image ref: whole-disk when neither a kernel nor a
        # ramdisk accompanies the image.
        return not (instance_info.get('kernel') or
                    instance_info.get('ramdisk'))

    try:
        props = get_image_properties(ctx, image_source)
    except Exception:
        return
    # Whole-disk glance images carry neither kernel_id nor ramdisk_id.
    return not (props.get('kernel_id') or props.get('ramdisk_id'))
def _mount_deploy_iso(deploy_iso, mountdir):
    """This function opens up the deploy iso used for deploy.

    Loop-mounts the ISO and walks it looking for efiboot.img and grub.cfg,
    whose locations differ between OS vendors.  The ISO is left mounted on
    success; it is the caller's responsibility to unmount `mountdir`.

    :param deploy_iso: path to the deploy iso where its
        contents are fetched to.
    :param mountdir: directory to loop-mount the ISO on.
    :raises: ImageCreationFailed if mount fails or the required files are
        not found.
    :returns: a tuple consisting of - 1. a dictionary containing
        the values as required by create_isolinux_image,
        2. efiboot.img relative path, and
        3. grub.cfg relative path.
    """
    e_img_rel_path = None
    e_img_path = None
    grub_rel_path = None
    grub_path = None

    try:
        utils.mount(deploy_iso, mountdir, '-o', 'loop')
    except processutils.ProcessExecutionError as e:
        LOG.exception(_LE("mounting the deploy iso failed."))
        raise exception.ImageCreationFailed(image_type='iso', error=e)

    try:
        # NOTE: the loop variable `dir` shadows the builtin of the same
        # name; kept as-is since this is a documentation-only change.
        for (dir, subdir, files) in os.walk(mountdir):
            if 'efiboot.img' in files:
                e_img_path = os.path.join(dir, 'efiboot.img')
                e_img_rel_path = os.path.relpath(e_img_path,
                                                 mountdir)
            if 'grub.cfg' in files:
                grub_path = os.path.join(dir, 'grub.cfg')
                grub_rel_path = os.path.relpath(grub_path,
                                                mountdir)
    except (OSError, IOError) as e:
        LOG.exception(_LE("examining the deploy iso failed."))
        _umount_without_raise(mountdir)
        raise exception.ImageCreationFailed(image_type='iso', error=e)

    # check if the variables are assigned some values or not during
    # walk of the mountdir.
    if not (e_img_path and e_img_rel_path and grub_path and grub_rel_path):
        error = (_("Deploy iso didn't contain efiboot.img or grub.cfg"))
        _umount_without_raise(mountdir)
        raise exception.ImageCreationFailed(image_type='iso', error=error)

    uefi_path_info = {e_img_path: e_img_rel_path,
                      grub_path: grub_rel_path}

    # Returning a tuple as it makes the code simpler and clean.
    # uefi_path_info: is needed by the caller for _create_root_fs to create
    # appropriate directory structures for uefi boot iso.
    # grub_rel_path: is needed to copy the new grub.cfg generated using
    # generate_cfg() to the same directory path structure where it was
    # present in deploy iso. This path varies for different OS vendors.
    # e_img_rel_path: is required by mkisofs to generate boot iso.
    return uefi_path_info, e_img_rel_path, grub_rel_path
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of web2py Web Framework (Copyrighted, 2007-2014).
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>.
License: LGPL v3
Login will be done via Web2py's CAS application, instead of web2py's
login form.
Include in your model (eg db.py)::
auth.define_tables(username=True)
from gluon.contrib.login_methods.saml2_auth import Saml2Auth
import os
auth.settings.login_form=Saml2Auth(
config_file = os.path.join(request.folder,'private','sp_conf'),
maps=dict(
username=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
email=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
user_id=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0]))
you must have private/sp_conf.py, the pysaml2 sp configuration file. For example:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT
import os.path
import requests
import tempfile
BASEDIR = os.path.abspath(os.path.dirname(__file__))
# Web2py SP url and application name
HOST = 'http://127.0.0.1:8000'
APP = 'sp'
# To load the IDP metadata...
IDP_METADATA = 'http://127.0.0.1:8088/metadata'
def full_path(local_file):
return os.path.join(BASEDIR, local_file)
CONFIG = {
# your entity id, usually your subdomain plus the url to the metadata view.
'entityid': '%s/%s/default/metadata' % (HOST, APP),
'service': {
'sp' : {
'name': 'MYSP',
'endpoints': {
'assertion_consumer_service': [
('%s/%s/default/user/login' % (HOST, APP), BINDING_HTTP_REDIRECT),
('%s/%s/default/user/login' % (HOST, APP), BINDING_HTTP_POST),
],
},
},
},
# Your private and public key.
'key_file': full_path('pki/mykey.pem'),
'cert_file': full_path('pki/mycert.pem'),
# where the remote metadata is stored
'metadata': {
"remote": [{
"url": IDP_METADATA,
"cert":full_path('pki/mycert.pem')
}]
},
}
"""
from saml2 import BINDING_HTTP_REDIRECT, BINDING_HTTP_POST
from saml2.client import Saml2Client
from gluon.utils import web2py_uuid
from gluon import current, redirect, URL
import os, types
def obj2dict(obj, processed=None):
    """Recursively convert any object into dict/list/scalar structures.

    Scalars pass through, lists/tuples become lists, objects with a
    ``__dict__`` become dicts (skipping underscore-prefixed keys and
    function-typed values), cycles collapse to the marker '<reference>',
    and anything else (including plain dicts, which never take the
    ``__dict__`` branch -- a long-standing quirk kept for compatibility)
    is returned as its repr().

    :param processed: set of id()s already visited, used for cycle
        detection; created fresh per top-level call.
    """
    processed = processed if processed is not None else set()
    if obj is None:
        return None

    # Scalar pass-through.  `long`/`unicode` only exist on Python 2; the
    # original unconditional reference made this function raise NameError
    # on Python 3, so they are added to the tuple only when present.
    scalar_types = (int, float, bool, str)
    try:
        scalar_types += (long, unicode)  # noqa: F821 -- Python 2 only
    except NameError:
        pass
    if isinstance(obj, scalar_types):
        return obj

    if id(obj) in processed:
        return '<reference>'
    processed.add(id(obj))

    if isinstance(obj, (list, tuple)):
        return [obj2dict(item, processed) for item in obj]

    if not isinstance(obj, dict) and hasattr(obj, '__dict__'):
        obj = obj.__dict__
    else:
        return repr(obj)

    return dict((key, obj2dict(value, processed)) for key, value in obj.items()
                if not key.startswith('_') and
                not type(value) in (types.FunctionType,
                                    types.LambdaType,
                                    types.BuiltinFunctionType,
                                    types.BuiltinMethodType))
def saml2_handler(session, request, config_filename=None):
    """Drive one leg of a SAML2 SSO exchange for the current request.

    On the outgoing leg (no SAMLResponse present) builds an AuthnRequest,
    stashes the outstanding request id in the session and returns
    ``{'url': <IdP redirect URL>}``.  On the return leg parses the IdP's
    SAMLResponse and returns ``{'response': ...}`` or, on failure,
    ``{'error': <traceback text>}``.

    :param session: web2py session (used to persist outstanding queries).
    :param request: web2py request.
    :param config_filename: path to the pysaml2 SP config; defaults to
        <request.folder>/private/sp_conf.
    """
    config_filename = config_filename or os.path.join(
        request.folder, 'private', 'sp_conf')
    client = Saml2Client(config_file=config_filename)

    idps = client.metadata.with_descriptor("idpsso")
    # next(iter(...)) instead of idps.keys()[0]: dict views are not
    # indexable on Python 3, and this form works on Python 2 as well.
    entityid = next(iter(idps))

    bindings = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST]
    binding, destination = client.pick_binding(
        "single_sign_on_service", bindings, "idpsso", entity_id=entityid)
    # Match the binding to how the request actually arrived.
    if request.env.request_method == 'GET':
        binding = BINDING_HTTP_REDIRECT
    elif request.env.request_method == 'POST':
        binding = BINDING_HTTP_POST

    if not request.vars.SAMLResponse:
        # Outgoing leg: create the AuthnRequest and remember its id so the
        # response can be matched against it later.
        req_id, req = client.create_authn_request(destination, binding=binding)
        relay_state = web2py_uuid().replace('-', '')
        session.saml_outstanding_queries = {req_id: request.url}
        session.saml_req_id = req_id
        http_args = client.apply_binding(binding, str(req), destination,
                                         relay_state=relay_state)
        return {'url': dict(http_args["headers"])['Location']}
    else:
        # Return leg: validate the IdP response against the outstanding
        # queries saved on the way out.  (Unused locals for RelayState and
        # the saved request id were dropped.)
        res = {}
        try:
            data = client.parse_authn_request_response(
                request.vars.SAMLResponse, binding,
                session.saml_outstanding_queries)
            res['response'] = data if data else {}
        except Exception:
            import traceback
            res['error'] = traceback.format_exc()
        return res
class Saml2Auth(object):
    """web2py ``auth.settings.login_form`` plugin delegating login to SAML2.

    ``maps`` translates the IdP's attribute dict into the fields web2py
    needs (username/email/user_id); each value is a callable receiving
    that attribute dict.
    """

    def __init__(self, config_file=None, maps=dict(
        username=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
        email=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
        user_id=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
    ), logout_url=None, change_password_url=None):
        # NOTE: the mutable default for `maps` is part of the public
        # interface; it is never mutated here, so it is kept as-is.
        self.config_file = config_file
        self.maps = maps
        # URL for redirecting users to when they sign out
        self.saml_logout_url = logout_url
        # URL to let users change their password in the IDP system
        self.saml_change_password_url = change_password_url

    def login_url(self, next="/"):
        """Run the SAML2 exchange; returns `next` once attributes are stored."""
        d = saml2_handler(current.session, current.request)
        if 'url' in d:
            # Outgoing leg: send the user to the IdP.
            redirect(d['url'])
        elif 'error' in d:
            current.session.flash = d['error']
            redirect(URL('default', 'index'))
        elif 'response' in d:
            # d['response'].assertions[0].attribute_statement[0].attribute
            # is a list of pysaml2 Attribute objects, e.g.
            # {'name': '...windowsaccountname', 'attribute_value': [...]}.
            try:
                attributes = d['response'].assertions[0].attribute_statement[0].attribute
            except Exception:
                # Some pysaml2 versions expose a single `assertion` rather
                # than an `assertions` list.  The original bare `except:`
                # was narrowed to `except Exception:` so SystemExit and
                # KeyboardInterrupt are not swallowed.
                attributes = d['response'].assertion.attribute_statement[0].attribute
            current.session.saml2_info = dict(
                (a.name, [i.text for i in a.attribute_value])
                for a in attributes)
        return next

    def logout_url(self, next="/"):
        """Clear the local SAML session state and redirect to IdP logout."""
        current.session.saml2_info = None
        current.session.auth = None
        self._SAML_logout()
        return next

    def change_password_url(self, next="/"):
        """Redirect the user to the IdP's change-password page."""
        self._SAML_change_password()
        return next

    def get_user(self):
        """Map the session's SAML attributes to a web2py user dict (or None)."""
        user = current.session.saml2_info
        if user:
            d = {'source': 'web2py saml2'}
            for key in self.maps:
                d[key] = self.maps[key](user)
            return d
        return None

    def _SAML_logout(self):
        """Redirect to the configured SAML logout page."""
        redirect(self.saml_logout_url)

    def _SAML_change_password(self):
        """Redirect to the configured SAML change-password page."""
        redirect(self.saml_change_password_url)
| |
#!/bin/python
import glob
import os
import re
import shutil
import traceback
import datetime
import requests
import sys
import time
import constants
import graphutils
import utils
# Feature toggles parsed from command-line flags (and, for SLACK, the
# environment).  Only the parsing is visible here; the consumers live
# elsewhere in this script.
SLACK = '-enable-slack-bot' in sys.argv and 'SLACK_BOT_TOKEN' in os.environ
QUICK = '-quick' in sys.argv
CLOUD_TEST_ONLY = '-cloud-test-only' in sys.argv
NOREPORT = '-no-report' in sys.argv
# Annotated history of events known to affect the benchmark numbers.
# Each entry is (timestamp, short title, long description); timestamps are
# either 'YYYY-MM-DD' or include a time component.
KNOWN_CHANGES = [
    ('2016-01-22 17:43',
     'SOLR-8582: /update/json/docs is 4x slower than /update for indexing a list of json docs',
     """
SOLR-8582: Fixed memory leak in JsonRecordReader affecting /update/json/docs. Large payloads cause OOM.
Brings performance on par with /update for large json lists.
"""),
    ('2016-01-23 20:11:11',
     'ConcurrentHttpSolrClient, 8 threads, batchSize=100, queueSize=200, 4g client heap. Fixed minor bug in indexer.',
     """
Changed indexer to use ConcurrentHttpSolrClient instead of HttpSolrClient. Dropped indexing threads
from 16 to 8. Client heap size increased to 4g from 2g. Fixed bug in indexer which caused last batch to not be indexed.
"""),
    ('2016-01-23 21:35',
     'ConcurrentHttpSolrClient now uses binary request writer instead of default xml writer',
     """
ConcurrentHttpSolrClient now uses binary request writer instead of default xml writer. Also we explicitly set
request writer, response writer and poll time=0 for ConcurrentHttpSolrClient
"""
     ),
    ('2016-01-23 23:20',
     'Client threads increased to 10 from 8',
     """
Client threads increased to 10 from 8
"""),
    ('2016-01-24 06:05',
     'Client threads decreased from 10 to 9',
     """
Client threads decreased from 10 to 9
"""),
    ('2016-01-25 05:33',
     'Limit client feeder threads to 1 when using ConcurrentUpdateSolrClient',
     """
When using ConcurrentUpdateSolrClient we limit the feeder threads that read wiki text from file to just
1 thread. The SolrJ client continues to use 9 threads to send data to Solr.
"""),
    ('2016-03-16',
     'SOLR-8740: use docValues by default',
     """
docValues are now enabled by default for most non-text (string, date, and numeric) fields in
the schema templates
"""),
    ('2016-08-29',
     'SOLR-9449: Example schemas do not index _version_ field anymore because the field has DocValues enabled already',
     """
SOLR-8740 had enabled doc values for long types by default. Since then, the _version_ field was both indexed
and had doc values. SOLR-9449 stopped indexing the _version_ field since doc values are already enabled.
"""),
    ('2016-08-31',
     'SOLR-9452: JsonRecordReader should not deep copy document before handler.handle()',
     """
JsonRecordReader used to make a deep copy of the document map which was only required for very specialized
methods. This deep copy has been removed to optimize the common case. This change only affects JSON indexing
and therefore only the IMDB benchmark.
"""),
    ('2017-01-07', 'SOLR-9854: Collect metrics for index merges and index store IO',
     """
Using API for metrics management developed in SOLR-4735 we should also start collecting metrics for major aspects
of IndexWriter operation, such as read / write IO rates, number of minor and major merges and IO during
these operations, etc. This will provide a better insight into resource consumption and load at the IO level
"""),
    ('2017-02-19', 'SOLR-10130: Serious performance degradation in Solr 6.4.1 due to the new metrics collection',
     """
New metrics collection system in MetricsDirectoryFactory added in SOLR-9854 caused a major slowdown.
"""),
    ('2018-05-30 20:48:51', 'Set swappiness to 0 (was 60)', 'vm.swappiness was set to 0 from its default value of 60'),
    # NOTE(review): this final entry has only two elements (no long
    # description) while every other entry has three -- confirm that all
    # consumers unpack these tuples defensively.
    ('2019-06-26', 'Upgrade JVM to openjdk version "11.0.3" 2019-04-16. Relates to LUCENE-8738')
]
class LuceneSolrCheckout:
    """Manages a git checkout of Lucene/Solr at a pinned or latest revision."""

    def __init__(self, checkoutDir, revision='LATEST'):
        self.checkoutDir = checkoutDir
        self.revision = revision

    def checkout(self, runLogFile):
        """Clone (if the directory is empty) and update to the target revision.

        All git/ant output is appended to runLogFile.
        """
        utils.info(
            'Attempting to checkout Lucene/Solr revision: %s into directory: %s' % (
                self.revision, self.checkoutDir))
        if not os.path.exists(self.checkoutDir):
            os.makedirs(self.checkoutDir)
        entries = os.listdir(self.checkoutDir)
        prev_dir = os.getcwd()
        try:
            os.chdir(self.checkoutDir)
            if len(entries) == 0:
                # Fresh directory: clone.  The original code had identical
                # clone commands in both the LATEST and pinned-revision
                # branches; the duplication is collapsed here because the
                # revision is selected afterwards by updateToRevision().
                utils.runCommand(
                    '%s clone --progress %s . >> %s 2>&1' % (
                        constants.GIT_EXE, constants.GIT_REPO, runLogFile))
                self.updateToRevision(runLogFile)
                # Remove stale ivy jars so ant can bootstrap a clean one.
                try:
                    utils.runCommand('rm -r ~/.ant/lib/ivy-*.jar')
                except Exception:
                    # Narrowed from a bare `except:` so SystemExit and
                    # KeyboardInterrupt are not swallowed.
                    print('Unable to remove previous ivy-2.3.0.jar')
                utils.runCommand('%s ivy-bootstrap' % constants.ANT_EXE)
            else:
                self.updateToRevision(runLogFile)
        finally:
            os.chdir(prev_dir)

    def updateToRevision(self, runLogFile):
        """Restore a pristine working tree and check out the target revision."""
        # resets any staged changes (there shouldn't be any though)
        utils.runCommand('%s reset --hard' % constants.GIT_EXE)
        # clean ANY files not tracked in the repo -- this effectively
        # restores pristine state
        utils.runCommand('%s clean -xfd .' % constants.GIT_EXE)
        # Both paths start from origin/master (hoisted out of the original
        # duplicated branches), then either pull the latest or pin to the
        # requested revision.
        utils.runCommand('%s checkout origin/master >> %s 2>&1' % (constants.GIT_EXE, runLogFile))
        if self.revision == 'LATEST':
            utils.runCommand('%s pull origin master >> %s 2>&1' % (constants.GIT_EXE, runLogFile))
        else:
            utils.runCommand('%s checkout %s >> %s 2>&1' % (constants.GIT_EXE, self.revision, runLogFile))

    def build(self, runLogFile):
        """Run the ant package build and return the path to the single .tgz."""
        prev_dir = os.getcwd()
        try:
            os.chdir('%s' % self.checkoutDir)
            utils.runCommand('%s clean clean-jars >> %s 2>&1' % (constants.ANT_EXE, runLogFile))
            os.chdir('%s/solr' % self.checkoutDir)
            utils.runCommand('%s create-package >> %s 2>&1' % (constants.ANT_EXE, runLogFile))
            packaged = os.path.join(os.getcwd(), "package")
            files = glob.glob(os.path.join(packaged, '*.tgz'))
            if len(files) == 0:
                raise RuntimeError('No tgz file found at %s' % packaged)
            elif len(files) > 1:
                raise RuntimeError('More than 1 tgz file found at %s' % packaged)
            else:
                return files[0]
        finally:
            os.chdir(prev_dir)

    def get_git_rev(self):
        """Return (sha, commit datetime) of the checkout's current HEAD."""
        prev_dir = os.getcwd()
        try:
            os.chdir(self.checkoutDir)
            s = utils.run_get_output([constants.GIT_EXE, 'show', '-s', '--format=%H,%ci'])
            sha, date = s.split(',')
            date_parts = date.split(' ')
            return sha, datetime.datetime.strptime('%s %s' % (date_parts[0], date_parts[1]), '%Y-%m-%d %H:%M:%S')
        finally:
            os.chdir(prev_dir)
class SolrServer:
    """Driver for an extracted Solr distribution.

    Knows how to extract the tgz, start/stop the server, create
    collections, and query the admin APIs of a running instance.
    """

    def __init__(self, tgz, extract_dir, name='', host='localhost', port='8983',
                 memory=None,
                 zk_host=None, server_dir=None, solr_home=None,
                 example=None, jvm_args=None, cloud_mode=False):
        self.tgz = tgz
        self.extract_dir = extract_dir
        self.name = name
        self.host = host
        self.port = port
        self.memory = memory
        self.zk_host = zk_host
        self.server_dir = server_dir
        self.solr_home = solr_home
        self.example = example
        self.jvm_args = jvm_args
        # cloud mode is forced on whenever a zk host has been specified
        self.cloud_mode = cloud_mode if self.zk_host is None else True

    def extract(self, runLogFile):
        """Unpack the Solr tgz into a freshly recreated extract_dir."""
        if os.path.exists(self.extract_dir):
            shutil.rmtree(self.extract_dir)
        os.makedirs(self.extract_dir)
        utils.runCommand(
            'tar xvf %s -C %s --strip-components=1 >> %s 2>&1' % (self.tgz, self.extract_dir, runLogFile))

    def start(self, runLogFile):
        """Start Solr via bin/solr with the configured options."""
        prev_dir = os.getcwd()
        try:
            os.chdir(self.extract_dir)
            cmd = ['%s/bin/solr' % self.extract_dir, 'start', '-p', self.port]
            if self.jvm_args is not None:
                cmd.extend(self.jvm_args)
            if self.host is not None:
                cmd.extend(['-h', self.host])
            if self.memory is not None:
                cmd.extend(['-m', self.memory])
            if self.cloud_mode:
                cmd.extend(['-c'])
            if self.zk_host is not None:
                cmd.extend(['-z', self.zk_host])
            if self.server_dir is not None:
                cmd.extend(['-d', self.server_dir])
            if self.solr_home is not None:
                cmd.extend(['-s', self.solr_home])
            if self.example is not None:
                cmd.extend(['-e', self.example])
            utils.info('Running solr with command: %s' % ' '.join(cmd))
            # NOTE(review): utils.runComand (sic) -- presumably the helper
            # is defined with this spelling in utils; confirm before
            # renaming.
            utils.runComand('solr server', cmd, '%s' % runLogFile)
        finally:
            os.chdir(prev_dir)

    def create_collection(self, runLogFile, collection, num_shards='1', replication_factor='1',
                          config='_default'):
        """Create a collection on the running server via bin/solr."""
        prev_dir = os.getcwd()
        try:
            os.chdir(self.extract_dir)
            cmd = ['%s/bin/solr' % self.extract_dir, 'create_collection', '-p', self.port,
                   '-c', collection, '-shards', num_shards, '-replicationFactor', replication_factor,
                   '-d', config]
            utils.info('Creating collection with command: %s' % ' '.join(cmd))
            utils.runComand('solr create_collection', cmd, '%s' % runLogFile)
        finally:
            os.chdir(prev_dir)

    def stop(self):
        """Stop the Solr instance listening on self.port."""
        utils.runCommand('%s/bin/solr stop -p %s' % (self.extract_dir, self.port))

    def get_version(self):
        """Return (solr version, impl detail) from the system info API."""
        r = requests.get('http://%s:%s/solr/admin/info/system?wt=json' % (self.host, self.port))
        solr = r.json()['lucene']['solr-impl-version'].split(' ')
        return solr[0], solr[1]

    def get_num_found(self, collection):
        """Return numFound for a match-all query against `collection`."""
        r = requests.get('http://%s:%s/solr/%s/select?q=*:*&rows=0&wt=json' % (self.host, self.port, collection))
        solr = r.json()['response']['numFound']
        return int(solr)

    def get_jars(self):
        """Return dist/*.jar plus dist/solrj-lib/*.jar under extract_dir."""
        dist = '%s/dist/*.jar' % self.extract_dir
        jars = glob.glob(dist)
        jars.extend(glob.glob('%s/dist/solrj-lib/*.jar' % self.extract_dir))
        return jars

    def get_metrics(self):
        """Return the raw response object from the metrics admin API."""
        r = requests.get('http://%s:%s/solr/admin/metrics?wt=json&indent=on' % (self.host, self.port))
        return r

    def get_cluster_state(self):
        """Return the CLUSTERSTATUS response body as text.

        Bug fix: the original URL read 'action=clusterstatuswt=json' --
        the missing '&' fused the `action` and `wt` query parameters, so
        Solr received action='clusterstatuswt=json' instead of a
        CLUSTERSTATUS request with JSON output.
        """
        r = requests.get('http://%s:%s/solr/admin/collections?action=clusterstatus&wt=json&indent=on' % (self.host, self.port))
        return r.text
def run_simple_bench(start, tgz, runLogFile, perfFile):
    """Index the IMDB data set with bin/post against a schemaless example.

    Returns (bytesIndexed, docsIndexed, secondsTaken) and appends one CSV
    row to *perfFile* unless NOREPORT is set.  The server is always stopped
    in the finally block.  Raises RuntimeError if the indexed doc count
    does not match constants.IMDB_NUM_DOCS.
    """
    server = SolrServer(tgz, '%s/simple' % constants.BENCH_DIR, example='schemaless', memory='2g')
    server.extract(runLogFile)
    try:
        server.start(runLogFile)
        time.sleep(10)
        solrMajorVersion, solrImplVersion = server.get_version()
        cmd = ['%s/bin/post' % server.extract_dir, '-c', constants.SOLR_COLLECTION_NAME, constants.IMDB_DATA_FILE]
        logFile = '%s' % runLogFile
        utils.info('Running simple bench. Logging at: %s' % logFile)
        utils.info('Executing: %s' % ' '.join(cmd))
        t0 = time.time()
        utils.runComand('binpost', cmd, logFile)
        t1 = time.time() - t0
        log_metrics(logFile, server, 'simple_bench')
        log_sys_stats(logFile, 'simple_bench')
        bytesIndexed = os.stat(constants.IMDB_DATA_FILE).st_size
        # Bug fix: numFound comes from the running server (get_num_found is
        # a SolrServer method, as used by every other bench), not utils.
        docsIndexed = server.get_num_found(constants.SOLR_COLLECTION_NAME)
        if docsIndexed != constants.IMDB_NUM_DOCS:
            raise RuntimeError(
                'Indexed num_docs do not match expected %d != found %d' % (constants.IMDB_NUM_DOCS, docsIndexed))
        print(' %.1f s' % (t1))
        if not NOREPORT:
            with open(perfFile, 'a+') as f:
                timeStampLoggable = '%04d-%02d-%02d %02d:%02d:%02d' % (
                    start.year, start.month, start.day, start.hour, start.minute, start.second)
                f.write('%s,%d,%d,%.1f,%s,%s\n' % (
                    timeStampLoggable, bytesIndexed, docsIndexed, t1, solrMajorVersion, solrImplVersion))
        return bytesIndexed, docsIndexed, t1
    finally:
        server.stop()
        time.sleep(10)
class BenchResults:
    """Parse a WikiIndexer log into throughput numbers and JVM GC stats.

    Stand-alone runs emit flat stat lines which land in self.times /
    self.garbage / self.peak; cloud runs prefix each stat line with a node
    label ('<node> - ...') and those land in self.node_data[node].

    Fixes vs. original: regexes are raw strings (the old '\\s'/'\\d'
    escapes are invalid-escape DeprecationWarnings in Python 3),
    dict.has_key (removed in Python 3) is replaced with 'in', and the
    log file is read via a context manager instead of a leaked handle.
    """
    reBytesIndexed = re.compile(r'^Indexer: net bytes indexed (.*)$', re.MULTILINE)
    reIndexingTime = re.compile(r'^Indexer: finished \((.*) msec\)$', re.MULTILINE)
    # Time in JIT compilation: 54284 ms
    reTimeIn = re.compile(r'^\s*Time in (.*?): (\d+) ms')
    reTimeInLabel = re.compile(r'^[\t]*(.*) - Time in (.*?): (\d+) ms')
    # Garbage Generated in Young Generation: 39757.8 MiB
    reGarbageIn = re.compile(r'^\s*Garbage Generated in (.*?): (.*) MiB$')
    reGarbageInLabel = re.compile(r'^[\t]*(.*) - Garbage Generated in (.*?): (.*) MiB$')
    # Peak usage in Young Generation: 341.375 MiB
    rePeakUsage = re.compile(r'^\s*Peak usage in (.*?): (.*) MiB')
    rePeakUsageLabel = re.compile(r'^[\t]*(.*) - Peak usage in (.*?): (.*) MiB')
    # Average System Load: 4.0068359375
    reAvgSysLoad = re.compile(r'^\s*Average System Load: (.*)')
    reAvgSysLoadLabel = re.compile(r'^[\t]*(.*) - Average System Load: (.*)')
    # Average CPU Time: 6.1978397/400
    reAvgCpuTime = re.compile(r'^\s*Average CPU Time: (.*)')
    reAvgCpuTimeLabel = re.compile(r'^[\t]*(.*) - Average CPU Time: (.*)')
    # Average CPU Load: 1.1392051484117345
    reAvgCpuLoad = re.compile(r'^\s*Average CPU Load: (.*)')
    reAvgCpuLoadLabel = re.compile(r'^[\t]*(.*) - Average CPU Load: (.*)')

    def __init__(self, logFile, server, time_taken):
        """Parse *logFile* and query *server* for the final doc count.

        time_taken is the wall-clock seconds measured by the caller.
        """
        self.timeTaken = time_taken
        with open(logFile) as fp:
            s = fp.read()
        self.bytesIndexed = int(self.reBytesIndexed.search(s).group(1))
        self.indexTimeSec = int(self.reIndexingTime.search(s).group(1)) / 1000.0
        # extract GC times
        self.node_data = {}
        self.times = {}
        self.garbage = {}
        self.peak = {}
        with open(logFile) as f:
            for line in f.readlines():
                # each stat has two forms: flat (single node) and labeled
                # ('<node> - ...', cloud runs); try flat first, then labeled
                m = self.reTimeIn.search(line)
                if m is not None:
                    self.times[m.group(1)] = float(m.group(2)) / 1000.
                else:
                    m = self.reTimeInLabel.search(line)
                    if m is not None:
                        self.init_node_data(m.group(1))
                        self.node_data[m.group(1)]['times'][m.group(2)] = float(m.group(3)) / 1000.
                m = self.reGarbageIn.search(line)
                if m is not None:
                    self.garbage[m.group(1)] = float(m.group(2))
                else:
                    m = self.reGarbageInLabel.search(line)
                    if m is not None:
                        self.init_node_data(m.group(1))
                        self.node_data[m.group(1)]['garbage'][m.group(2)] = float(m.group(3))
                m = self.rePeakUsage.search(line)
                if m is not None:
                    self.peak[m.group(1)] = float(m.group(2))
                else:
                    m = self.rePeakUsageLabel.search(line)
                    if m is not None:
                        self.init_node_data(m.group(1))
                        self.node_data[m.group(1)]['peak'][m.group(2)] = float(m.group(3))
                m = self.reAvgSysLoad.search(line)
                if m is not None:
                    self.avg_sys_load = m.group(1)
                else:
                    m = self.reAvgSysLoadLabel.search(line)
                    if m is not None:
                        self.init_node_data(m.group(1))
                        self.node_data[m.group(1)]['avg_sys_load'] = m.group(2)
                m = self.reAvgCpuTime.search(line)
                if m is not None:
                    self.avg_cpu_time = m.group(1)
                else:
                    m = self.reAvgCpuTimeLabel.search(line)
                    if m is not None:
                        self.init_node_data(m.group(1))
                        self.node_data[m.group(1)]['avg_cpu_time'] = m.group(2)
                m = self.reAvgCpuLoad.search(line)
                if m is not None:
                    self.avg_cpu_load = m.group(1)
                else:
                    m = self.reAvgCpuLoadLabel.search(line)
                    if m is not None:
                        self.init_node_data(m.group(1))
                        self.node_data[m.group(1)]['avg_cpu_load'] = m.group(2)
        utils.info(' took %.1f sec by client' % self.indexTimeSec)
        utils.info(' took %.1f sec total' % self.timeTaken)
        self.docsIndexed = server.get_num_found(constants.SOLR_COLLECTION_NAME)

    def init_node_data(self, node):
        # ensure the per-node stats skeleton exists; dict.has_key was
        # removed in Python 3, so use the 'in' operator instead
        if node not in self.node_data:
            self.node_data[node] = {'times': {}, 'garbage': {}, 'peak': {}}

    def __str__(self):
        """Human-readable summary, per node when node data was found."""
        s = """Documents indexed: %d
Bytes indexed: %.1f
Time taken by client: %.1f sec
Time taken (total): %.1f sec\n""" % (self.docsIndexed, self.bytesIndexed, self.indexTimeSec, self.timeTaken)
        if len(self.node_data) != 0:
            for k in self.node_data:
                s += 'Printing Stats for %s node\n' % k
                times = self.node_data[k]['times']
                garbage = self.node_data[k]['garbage']
                peak = self.node_data[k]['peak']
                s += self.get_stats_strings(times, garbage, peak)
                s += '\tAverage System Load: %s\n' % (self.node_data[k]['avg_sys_load'])
                s += '\tAverage CPU Time: %s\n' % (self.node_data[k]['avg_cpu_time'])
                s += '\tAverage CPU Load: %s\n' % (self.node_data[k]['avg_cpu_load'])
        else:
            s += self.get_stats_strings(self.times, self.garbage, self.peak)
            s += '\tAverage System Load: %s\n' % self.avg_sys_load
            s += '\tAverage CPU Time: %s\n' % self.avg_cpu_time
            s += '\tAverage CPU Load: %s\n' % self.avg_cpu_load
        return s

    def get_stats_strings(self, times, garbage, peak):
        """Format the times/garbage/peak dicts as tab-indented lines."""
        s = ''
        for v in times:
            s += '\tTime in %s: %.1f ms\n' % (v, times[v])
        for v in garbage:
            s += '\tGarbage Generated in %s: %.1f MiB\n' % (v, garbage[v])
        for v in peak:
            s += '\tPeak memory usage in %s: %.1f MiB\n' % (v, peak[v])
        return s

    def get_simple_results(self):
        # bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak
        return self.bytesIndexed, self.indexTimeSec, self.docsIndexed, self.times, self.garbage, self.peak
class JavaBench:
    """Compiles and runs the small Java indexing benchmark drivers."""

    def __init__(self, benchDir):
        self.benchDir = benchDir

    def src_dir(self):
        """Directory holding the benchmark's Java sources."""
        return os.path.join(self.benchDir, "src/java")

    def src_files(self):
        """Paths of the Java source files to compile, in a fixed order."""
        names = (
            'Args.java',
            'IndexThreads.java',
            'LineFileDocs.java',
            'StatisticsHelper.java',
            'WikiIndexer.java',
        )
        base = self.src_dir()
        return ['%s/org/apache/solr/perf/%s' % (base, name) for name in names]

    def build_dir(self):
        """Directory that receives the compiled classes."""
        return os.path.join(self.benchDir, 'build')

    def compile(self, server, runLogFile):
        """javac the benchmark sources against the server's jars."""
        out_dir = self.build_dir()
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        javac_cmd = ['javac', '-d', out_dir, '-classpath', ':'.join(server.get_jars())]
        javac_cmd.extend(self.src_files())
        utils.info('Running: %s' % ' '.join(javac_cmd))
        utils.runComand('javac', javac_cmd, runLogFile)

    def get_run_command(self, server, javaExeClass, cmdArgs):
        """Assemble the java invocation for *javaExeClass* with *cmdArgs*."""
        java_cmd = ['java']
        java_cmd.extend(constants.CLIENT_JVM_PARAMS)
        java_cmd.append('-cp')
        java_cmd.append('%s:%s' % (self.build_dir(), ':'.join(server.get_jars())))
        java_cmd.append(javaExeClass)
        java_cmd.extend(cmdArgs)
        return java_cmd

    def run(self, testName, server, javaExeClass, cmdArgs, logFile):
        """Run one benchmark, parse its log, and append it to *logFile*.

        Raw output first goes to /tmp/<testName>.log so BenchResults can
        parse it; it is then concatenated onto *logFile*.
        """
        run_cmd = self.get_run_command(server, javaExeClass, cmdArgs)
        utils.info('Running %s bench. Logging at %s' % (testName, logFile))
        utils.info('Executing: %s' % ' '.join(run_cmd))
        tmpLogFile = '/tmp/%s.log' % testName
        if os.path.exists(tmpLogFile):
            os.remove(tmpLogFile)
        started = time.time()
        utils.runComand(testName, run_cmd, tmpLogFile)
        elapsed = time.time() - started
        results = BenchResults(tmpLogFile, server, elapsed)
        print(results)
        utils.runCommand('cat %s >> %s' % (tmpLogFile, logFile))
        return results
def run_wiki_schemaless_bench(start, tgz, runLogFile, perfFile, gcFile):
    """Index the 1KB-doc wiki data set against a pure schemaless example.

    Unlike the *_schema benches no Schema API calls are made, the perf CSV
    row is written unconditionally (no NOREPORT check -- presumably
    intentional; verify against the other benches), and nothing is
    returned.  The server is always stopped in the finally block.
    """
    server = SolrServer(tgz, '%s/wiki_schemaless' % constants.BENCH_DIR, example='schemaless', memory='4g')
    server.extract(runLogFile)
    try:
        bench = JavaBench(os.getcwd())
        bench.compile(server, runLogFile)
        server.start(runLogFile)
        # give Solr time to come up before querying its version
        time.sleep(10)
        solrMajorVersion, solrImplVersion = server.get_version()
        solrUrl = 'http://%s:%s/solr/gettingstarted' % (server.host, server.port)
        logFile = '%s' % runLogFile
        results = bench.run('wiki-1k-schemaless', server,
                            'org.apache.solr.perf.WikiIndexer',
                            [
                                # '-useHttpSolrClient', '-solrUrl', solrUrl,
                                '-useConcurrentUpdateSolrClient', '-solrUrl', solrUrl,
                                '-lineDocsFile', constants.WIKI_1K_DATA_FILE,
                                '-docCountLimit', '-1',
                                '-threadCount', '9',
                                '-batchSize', '100'], logFile)
        bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak = results.get_simple_results()
        log_metrics(logFile, server, 'wiki_schemaless_bench')
        log_sys_stats(logFile, 'wiki_schemaless_bench')
        # sanity check: fail loudly when the indexed doc count is off
        if docsIndexed != constants.WIKI_1K_NUM_DOCS:
            raise RuntimeError(
                'Indexed num_docs do not match expected %d != found %d' % (constants.WIKI_1K_NUM_DOCS, docsIndexed))
        timeStampLoggable = '%04d-%02d-%02d %02d:%02d:%02d' % (
            start.year, start.month, start.day, start.hour, start.minute, start.second)
        with open(perfFile, 'a+') as f:
            f.write('%s,%d,%d,%.1f,%s,%s\n' % (
                timeStampLoggable, bytesIndexed, docsIndexed, indexTimeSec, solrMajorVersion, solrImplVersion))
        write_gc_file(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, times, garbage, peak)
    finally:
        server.stop()
        time.sleep(10)
def run_wiki_1k_schema_bench(start, tgz, runLogFile, perfFile, gcFile):
    """Index the ~1KB-doc wiki data set with an explicit schema.

    Starts a schemaless example, then adds the real fields via the Schema
    API before indexing.  Under QUICK the smaller 4k data set is used.
    Returns (bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak)
    on success, or None if the bench raised (the traceback is printed and
    swallowed, preserving the original best-effort behavior).
    """
    # we start in schemaless mode but use the schema api to add the right fields
    jmx_args = ['-Dcom.sun.management.jmxremote',
                '-Dcom.sun.management.jmxremote.local.only=true',
                '-Dcom.sun.management.jmxremote.port=9999',
                '-Dcom.sun.management.jmxremote.rmi.port=9999',
                '-Dcom.sun.management.jmxremote.authenticate=false',
                '-Dcom.sun.management.jmxremote.ssl=false']
    server = SolrServer(tgz, '%s/wiki-1k-schema' % constants.BENCH_DIR, example='schemaless', memory='4g', jvm_args=jmx_args)
    server.extract(runLogFile)
    try:
        bench = JavaBench(os.getcwd())
        bench.compile(server, runLogFile)
        server.start(runLogFile)
        time.sleep(10)
        solrMajorVersion, solrImplVersion = server.get_version()
        solrUrl = 'http://%s:%s/solr/gettingstarted' % (server.host, server.port)
        utils.info('Updating schema')
        schemaApiUrl = '%s/schema' % solrUrl
        r = requests.post(schemaApiUrl,
                          data='{"add-field":{"name":"title","type":"string","stored":false, "indexed":true },'
                               '"add-field":{"name":"titleTokenized","type":"text_en","stored":true, "indexed":true },'
                               '"add-field":{"name":"body","type":"text_en","stored":false, "indexed":true },'
                               '"add-field":{"name":"date","type":"pdate","stored":true, "indexed":true },'
                               '"add-field":{"name":"timesecnum","type":"pint","stored":false, "indexed":true },'
                               '"add-copy-field":{"source":"title","dest":[ "titleTokenized"]}}')
        print(r.json())
        logFile = '%s' % runLogFile
        inputFile = constants.WIKI_1K_DATA_FILE
        expectedDocs = constants.WIKI_1K_NUM_DOCS
        if QUICK:
            inputFile = constants.WIKI_4K_DATA_FILE
            expectedDocs = constants.WIKI_4k_NUM_DOCS
        results = bench.run('wiki-1k-schema', server,
                            'org.apache.solr.perf.WikiIndexer',
                            [
                                # '-useHttpSolrClient', '-solrUrl', solrUrl,
                                '-useConcurrentUpdateSolrClient', '-solrUrl', solrUrl,
                                '-lineDocsFile', inputFile,
                                '-docCountLimit', '-1',
                                '-threadCount', '9',
                                '-batchSize', '100'], logFile)
        bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak = results.get_simple_results()
        log_metrics(logFile, server, 'wiki_1k_schema_bench')
        log_sys_stats(logFile, 'wiki_1k_schema_bench')
        if docsIndexed != expectedDocs:
            # bug fix: report the count we actually expected (it differs
            # from WIKI_1K_NUM_DOCS when QUICK switches to the 4k data set)
            raise RuntimeError(
                'Indexed num_docs do not match expected %d != found %d' % (expectedDocs, docsIndexed))
        timeStampLoggable = '%04d-%02d-%02d %02d:%02d:%02d' % (
            start.year, start.month, start.day, start.hour, start.minute, start.second)
        if not NOREPORT:
            with open(perfFile, 'a+') as f:
                f.write('%s,%d,%d,%.1f,%s,%s\n' % (
                    timeStampLoggable, bytesIndexed, docsIndexed, indexTimeSec, solrMajorVersion, solrImplVersion))
        write_gc_file(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, times, garbage, peak)
        return bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; the bench stays best-effort otherwise
        print('Exception %s' % traceback.format_exc())
    finally:
        server.stop()
        time.sleep(10)
def log_sys_stats(logfile, bench_name):
    """Append vmstat and iostat snapshots to *logfile* for *bench_name*."""
    vmstat_out = utils.run_get_output(['vmstat'])
    iostat_out = utils.run_get_output(['iostat'])
    with open(logfile, 'a+') as log:
        log.write('--- SYSTEM STATS AFTER %s ---\n' % bench_name)
        log.write(vmstat_out)
        log.write('\n')
        log.write(iostat_out)
def log_metrics(logFile, server, bench_name):
    """Append the server's metrics dump to *logFile* if the request succeeded.

    Non-200 responses are silently skipped (best-effort logging).
    """
    response = server.get_metrics()
    if response.status_code != requests.codes.ok:
        return
    with open(logFile, 'a+') as log:
        log.write('--- BEGIN SOLR METRICS AFTER %s ---\n' % bench_name)
        log.write(response.text)
        log.write('--- END SOLR METRICS AFTER %s ---\n' % bench_name)
def run_wiki_4k_schema_bench(start, tgz, runLogFile, perfFile, gcFile):
    """Index the ~4KB-doc wiki data set with an explicit schema.

    Same flow as run_wiki_1k_schema_bench but over the 4k data set and
    without a QUICK variant.  Returns (bytesIndexed, indexTimeSec,
    docsIndexed, times, garbage, peak) on success, or None if the bench
    raised (the traceback is printed and swallowed).
    """
    # we start in schemaless mode but use the schema api to add the right fields
    jmx_args = ['-Dcom.sun.management.jmxremote',
                '-Dcom.sun.management.jmxremote.local.only=true',
                '-Dcom.sun.management.jmxremote.port=9999',
                '-Dcom.sun.management.jmxremote.rmi.port=9999',
                '-Dcom.sun.management.jmxremote.authenticate=false',
                '-Dcom.sun.management.jmxremote.ssl=false']
    server = SolrServer(tgz, '%s/wiki-4k-schema' % constants.BENCH_DIR, example='schemaless', memory='4g',
                        jvm_args=jmx_args)
    server.extract(runLogFile)
    try:
        bench = JavaBench(os.getcwd())
        bench.compile(server, runLogFile)
        server.start(runLogFile)
        time.sleep(10)
        solrMajorVersion, solrImplVersion = server.get_version()
        solrUrl = 'http://%s:%s/solr/gettingstarted' % (server.host, server.port)
        utils.info('Updating schema')
        schemaApiUrl = '%s/schema' % solrUrl
        r = requests.post(schemaApiUrl,
                          data='{"add-field":{"name":"title","type":"string","stored":false, "indexed":true },'
                               '"add-field":{"name":"titleTokenized","type":"text_en","stored":true, "indexed":true },'
                               '"add-field":{"name":"body","type":"text_en","stored":false, "indexed":true },'
                               '"add-field":{"name":"date","type":"pdate","stored":true, "indexed":true },'
                               '"add-field":{"name":"timesecnum","type":"pint","stored":false, "indexed":true },'
                               '"add-copy-field":{"source":"title","dest":[ "titleTokenized"]}}')
        print(r.json())
        logFile = '%s' % runLogFile
        results = bench.run('wiki-4k-schema', server,
                            'org.apache.solr.perf.WikiIndexer',
                            [
                                # '-useHttpSolrClient', '-solrUrl', solrUrl,
                                '-useConcurrentUpdateSolrClient', '-solrUrl', solrUrl,
                                '-lineDocsFile', constants.WIKI_4K_DATA_FILE,
                                '-docCountLimit', '-1',
                                '-threadCount', '9',
                                '-batchSize', '100'], logFile)
        bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak = results.get_simple_results()
        log_metrics(logFile, server, 'wiki_4k_schema_bench')
        log_sys_stats(logFile, 'wiki_4k_schema_bench')
        if docsIndexed != constants.WIKI_4k_NUM_DOCS:
            raise RuntimeError(
                'Indexed num_docs do not match expected %d != found %d' % (constants.WIKI_4k_NUM_DOCS, docsIndexed))
        timeStampLoggable = '%04d-%02d-%02d %02d:%02d:%02d' % (
            start.year, start.month, start.day, start.hour, start.minute, start.second)
        if not NOREPORT:
            with open(perfFile, 'a+') as f:
                f.write('%s,%d,%d,%.1f,%s,%s\n' % (
                    timeStampLoggable, bytesIndexed, docsIndexed, indexTimeSec, solrMajorVersion, solrImplVersion))
        write_gc_file(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, times, garbage, peak)
        return bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; the bench stays best-effort otherwise
        print('Exception %s' % traceback.format_exc())
    finally:
        server.stop()
        time.sleep(10)
def create_collection_2x1(server, runLogFile):
    """Create the 'gettingstarted' collection as 2 shards x 1 replica."""
    utils.info('Creating collection 2x1')
    server.create_collection(runLogFile, 'gettingstarted',
                             num_shards='2', replication_factor='1')
def create_collection_1x2(server, runLogFile):
    """Create the 'gettingstarted' collection as 1 shard x 2 replicas."""
    utils.info('Creating collection 1x2')
    server.create_collection(runLogFile, 'gettingstarted',
                             num_shards='1', replication_factor='2')
def run_wiki_1k_schema_cloud_bench(start, tgz, runLogFile, perfFile, gcFile, collection_function):
    """Two-node SolrCloud indexing bench over the 1KB-doc wiki data set.

    Node 1 (port 8983) embeds ZooKeeper on localhost:9983; node 2 (port
    8984) joins it.  *collection_function* decides the collection layout
    (see create_collection_2x1 / create_collection_1x2).  Returns the
    BenchResults on success, or None if anything raised -- the bare except
    below prints the traceback and swallows the error.
    """
    # we start in schemaless mode but use the schema api to add the right fields
    jmx_args = ['-Dcom.sun.management.jmxremote',
                '-Dcom.sun.management.jmxremote.local.only=true',
                '-Dcom.sun.management.jmxremote.port=9999',
                '-Dcom.sun.management.jmxremote.rmi.port=9999',
                '-Dcom.sun.management.jmxremote.authenticate=false',
                '-Dcom.sun.management.jmxremote.ssl=false']
    server = SolrServer(tgz, '%s/wiki-1k-schema_cloud_node1' % constants.BENCH_DIR, name = '1', memory='4g', jvm_args=jmx_args, cloud_mode=True)
    server.extract(runLogFile)
    # second node gets its own JMX ports so both JVMs can be monitored
    jmx_args2 = ['-Dcom.sun.management.jmxremote',
                 '-Dcom.sun.management.jmxremote.local.only=true',
                 '-Dcom.sun.management.jmxremote.port=10000',
                 '-Dcom.sun.management.jmxremote.rmi.port=10000',
                 '-Dcom.sun.management.jmxremote.authenticate=false',
                 '-Dcom.sun.management.jmxremote.ssl=false']
    server2 = SolrServer(tgz, '%s/wiki-1k-schema_cloud_node2' % constants.BENCH_DIR_2, name='2', zk_host='localhost:9983', memory='4g', port='8984', jvm_args = jmx_args2, cloud_mode=True)
    server2.extract(runLogFile)
    try:
        bench = JavaBench(os.getcwd())
        bench.compile(server, runLogFile)
        utils.info('Starting server 1 at port 8983')
        server.start(runLogFile)
        time.sleep(10)
        utils.info('Starting server 2 at port 8984')
        server2.start(runLogFile)
        time.sleep(10)
        collection_function(server, runLogFile)
        solrMajorVersion, solrImplVersion = server.get_version()
        solrUrl = 'http://%s:%s/solr/gettingstarted' % (server.host, server.port)
        utils.info('Updating schema')
        schemaApiUrl = '%s/schema' % solrUrl
        r = requests.post(schemaApiUrl,
                          data='{"add-field":{"name":"title","type":"string","stored":false, "indexed":true },'
                               '"add-field":{"name":"titleTokenized","type":"text_en","stored":true, "indexed":true },'
                               '"add-field":{"name":"body","type":"text_en","stored":false, "indexed":true },'
                               '"add-field":{"name":"date","type":"pdate","stored":true, "indexed":true },'
                               '"add-field":{"name":"timesecnum","type":"pint","stored":false, "indexed":true },'
                               '"add-copy-field":{"source":"title","dest":[ "titleTokenized"]}}')
        print(r.json())
        logFile = '%s' % runLogFile
        inputFile = constants.WIKI_1K_DATA_FILE
        expectedDocs = constants.WIKI_1K_NUM_DOCS
        if QUICK:
            inputFile = constants.WIKI_4K_DATA_FILE
            expectedDocs = constants.WIKI_4k_NUM_DOCS
        results = bench.run('wiki-1k-schema_cloud', server,
                            'org.apache.solr.perf.WikiIndexer',
                            [
                                '-useCloudSolrClient',
                                '-zkHost', 'localhost:9983',
                                '-collection', 'gettingstarted',
                                '-lineDocsFile', inputFile,
                                '-docCountLimit', '-1',
                                '-threadCount', '9',
                                '-batchSize', '100'], logFile)
        # bytesIndexed, indexTimeSec, docsIndexed, times, garbage, peak = results.get_simple_results()
        bytesIndexed, indexTimeSec, docsIndexed = [results.bytesIndexed, results.indexTimeSec, results.docsIndexed]
        log_metrics(logFile, server, 'wiki_1k_schema_cloud_bench_8983')
        log_metrics(logFile, server2, 'wiki_1k_schema_cloud_bench_8984')
        log_sys_stats(logFile, 'wiki_1k_schema_cloud')
        if docsIndexed != expectedDocs:
            # NOTE(review): the message interpolates WIKI_1K_NUM_DOCS even
            # when QUICK means expectedDocs is the 4k count -- likely a bug
            raise RuntimeError(
                'Indexed num_docs do not match expected %d != found %d' % (constants.WIKI_1K_NUM_DOCS, docsIndexed))
        timeStampLoggable = '%04d-%02d-%02d %02d:%02d:%02d' % (
            start.year, start.month, start.day, start.hour, start.minute, start.second)
        if not NOREPORT:
            with open(perfFile, 'a+') as f:
                f.write('%s,%d,%d,%.1f,%s,%s\n' % (
                    timeStampLoggable, bytesIndexed, docsIndexed, indexTimeSec, solrMajorVersion, solrImplVersion))
            write_gc_file_cloud(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, results.node_data)
        return results
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        print('Exception %s' % traceback.format_exc())
    finally:
        # stop node 2 best-effort first, then node 1; node 2 may have
        # failed to start, hence the inner try/except
        try:
            server2.stop()
            time.sleep(10)
        except:
            pass
        server.stop()
        time.sleep(10)
def write_gc_file_cloud(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, node_data):
    """Append one CSV row of per-node GC stats to *gcFile*.

    Nodes and the keys within each of the 'times', 'garbage' and 'peak'
    dicts are emitted in sorted order so the column layout is stable
    across runs.
    """
    row = '%s,%s,%s' % (timeStampLoggable, solrMajorVersion, solrImplVersion)
    for node in sorted(node_data):
        stats = node_data[node]
        for section in ('times', 'garbage', 'peak'):
            values = stats[section]
            for key in sorted(values):
                row += ',%f' % values[key]
    with open(gcFile, 'a+') as out:
        out.write(row)
        out.write('\n')
def write_gc_file(gcFile, timeStampLoggable, solrMajorVersion, solrImplVersion, times, garbage, peak):
    """Append one CSV row of single-node GC stats to *gcFile*.

    Column order: timestamp, major version, impl version, then the values
    of *times*, *garbage* and *peak*, each in sorted-key order.  Only the
    peak section omits a trailing comma (kept for format compatibility).
    """
    with open(gcFile, 'a+') as out:
        out.write('%s,%s,%s,' % (timeStampLoggable, solrMajorVersion, solrImplVersion))
        for key in sorted(times):
            out.write('%f,' % times[key])
        for key in sorted(garbage):
            out.write('%f,' % garbage[key])
        out.write(','.join('%f' % peak[key] for key in sorted(peak)))
        out.write('\n')
def main():
    """Drive the nightly Solr benchmark run.

    Builds (or reuses, with -tgz) a Solr distribution, runs the simple,
    wiki-1k, wiki-4k and cloud benchmarks, appends their CSV perf/GC data
    under LOG_BASE_DIR, regenerates the indexing HTML report, and posts
    start/finish notices to Slack when SLACK is enabled.

    Recognized argv flags: -clean-build, -revision <sha>, -logFile <path>,
    -tgz <path>, -log-by-commit-date.
    """
    utils.info('Running solr benchmarks with parameter: %s' % sys.argv)
    t0 = time.time()
    # start every run from clean bench working directories
    if os.path.exists(constants.BENCH_DIR):
        shutil.rmtree(constants.BENCH_DIR)
    os.makedirs(constants.BENCH_DIR)
    if os.path.exists(constants.BENCH_DIR_2):
        shutil.rmtree(constants.BENCH_DIR_2)
    os.makedirs(constants.BENCH_DIR_2)
    if not os.path.exists(constants.NIGHTLY_REPORTS_DIR):
        os.makedirs(constants.NIGHTLY_REPORTS_DIR)
    if '-clean-build' in sys.argv:
        if os.path.exists(constants.CHECKOUT_DIR):
            print('Deleting directory: %s' % constants.CHECKOUT_DIR)
            shutil.rmtree(constants.CHECKOUT_DIR)
        if os.path.exists(constants.ANT_LIB_DIR):
            print('Deleting directory: %s' % constants.ANT_LIB_DIR)
            shutil.rmtree(constants.ANT_LIB_DIR)
        if os.path.exists(constants.IVY_LIB_CACHE):
            print('Deleting directory: %s' % constants.IVY_LIB_CACHE)
            shutil.rmtree(constants.IVY_LIB_CACHE)
    solr = None
    if '-revision' in sys.argv:
        index = sys.argv.index('-revision')
        revision = sys.argv[index + 1]
        solr = LuceneSolrCheckout(constants.CHECKOUT_DIR, revision)
    else:
        solr = LuceneSolrCheckout(constants.CHECKOUT_DIR)
    start = datetime.datetime.now()
    timeStamp = '%04d.%02d.%02d.%02d.%02d.%02d' % (
        start.year, start.month, start.day, start.hour, start.minute, start.second)
    print('Current date/time: %s' % timeStamp)
    if SLACK:
        # best-effort start notification; failures never abort the bench
        try:
            slackUrl = os.environ.get('SLACK_URL')
            slackChannel = os.environ.get('SLACK_CHANNEL')
            slackToken = os.environ.get('SLACK_BOT_TOKEN')
            r = requests.post('%s?token=%s&channel=%s' % (slackUrl, slackToken, slackChannel),
                              'Solr performance test started at %s' % timeStamp)
            print(r)
        except Exception:
            print('Unable to send message to slackbot')
    runLogDir = '%s/%s' % (constants.LOG_BASE_DIR, timeStamp)
    runLogFile = '%s/output.txt' % runLogDir
    if '-logFile' in sys.argv:
        index = sys.argv.index('-logFile')
        runLogFile = sys.argv[index + 1]
    else:
        os.makedirs(runLogDir)
    print('Logging to %s' % runLogFile)
    if '-tgz' in sys.argv:
        # bug fix: this previously looked up '-logFile' here and therefore
        # read the wrong argv entry as the tarball path
        index = sys.argv.index('-tgz')
        tgz = sys.argv[index + 1]
    else:
        solr.checkout(runLogFile)
        sha, git_date = solr.get_git_rev()
        if '-log-by-commit-date' in sys.argv:
            start = git_date
            timeStamp = '%04d.%02d.%02d.%02d.%02d.%02d' % (
                git_date.year, git_date.month, git_date.day, git_date.hour, git_date.minute, git_date.second)
        tgz = solr.build(runLogFile)
        utils.info('Solr tgz file created at: %s' % tgz)
    log_sys_stats(runLogFile, 'startup')
    implVersion = ''
    if not CLOUD_TEST_ONLY:
        simplePerfFile = '%s/simpleIndexer.perfdata.txt' % constants.LOG_BASE_DIR
        simpleBytesIndexed, simpleDocsIndexed, simpleTimeTaken = run_simple_bench(start, tgz, runLogFile, simplePerfFile)
        simpleIndexChartData = []
        annotations = []
        if os.path.isfile(simplePerfFile):
            with open(simplePerfFile, 'r') as f:
                lines = [line.rstrip('\n') for line in f]
                for l in lines:
                    timeStamp, bytesIndexed, docsIndexed, timeTaken, solrMajorVersion, solrImplVersion = l.split(',')
                    implVersion = solrImplVersion
                    simpleIndexChartData.append(
                        '%s,%.1f' % (timeStamp, (int(bytesIndexed) / (1024 * 1024.)) / float(timeTaken)))
                    # bug fix: iterate over a copy -- the loop removes
                    # entries from KNOWN_CHANGES while iterating it
                    for date, desc, fullDesc in list(KNOWN_CHANGES):
                        if timeStamp.startswith(date):
                            print('add annot for simple %s' % desc)
                            annotations.append((date, timeStamp, desc, fullDesc))
                            KNOWN_CHANGES.remove((date, desc, fullDesc))
        simpleIndexChartData.sort()
        simpleIndexChartData.insert(0, 'Date,MB/sec')
    if not CLOUD_TEST_ONLY:
        wiki1kSchemaPerfFile = '%s/wiki_1k_schema.perfdata.txt' % constants.LOG_BASE_DIR
        wiki1kSchemaGcFile = '%s/wiki_1k_schema.gc.txt' % constants.LOG_BASE_DIR
        wiki1kBytesIndexed, wiki1kIndexTimeSec, wiki1kDocsIndexed, \
        wiki1kTimes, wiki1kGarbage, wiki1kPeak = run_wiki_1k_schema_bench(start, tgz, runLogFile, wiki1kSchemaPerfFile,
                                                                          wiki1kSchemaGcFile)
        wiki1kSchemaIndexChartData = []
        wiki1kSchemaIndexDocsSecChartData = []
        wiki1kSchemaGcTimesChartData = []
        wiki1kSchemaGcGarbageChartData = []
        wiki1kSchemaGcPeakChartData = []
        populate_gc_data(wiki1kSchemaGcFile, wiki1kSchemaGcGarbageChartData, wiki1kSchemaGcPeakChartData,
                         wiki1kSchemaGcTimesChartData)
        if os.path.isfile(wiki1kSchemaPerfFile):
            with open(wiki1kSchemaPerfFile, 'r') as f:
                lines = [line.rstrip('\n') for line in f]
                for l in lines:
                    timeStamp, bytesIndexed, docsIndexed, timeTaken, solrMajorVersion, solrImplVersion = l.split(',')
                    implVersion = solrImplVersion
                    wiki1kSchemaIndexChartData.append(
                        '%s,%.1f' % (timeStamp, (int(bytesIndexed) / (1024 * 1024 * 1024.)) / (float(timeTaken) / 3600.)))
                    wiki1kSchemaIndexDocsSecChartData.append(
                        '%s,%.1f' % (timeStamp, (int(docsIndexed) / 1000) / float(timeTaken)))
        wiki1kSchemaIndexChartData.sort()
        wiki1kSchemaIndexChartData.insert(0, 'Date,GB/hour')
        wiki1kSchemaIndexDocsSecChartData.sort()
        wiki1kSchemaIndexDocsSecChartData.insert(0, 'Date,K docs/sec')
    if not CLOUD_TEST_ONLY:
        wiki4kSchemaPerfFile = '%s/wiki_4k_schema.perfdata.txt' % constants.LOG_BASE_DIR
        wiki4kGcFile = '%s/wiki_4k_schema.gc.txt' % constants.LOG_BASE_DIR
        wiki4kBytesIndexed, wiki4kIndexTimeSec, wiki4kDocsIndexed, \
        wiki4kTimes, wiki4kGarbage, wiki4kPeak = run_wiki_4k_schema_bench(start, tgz, runLogFile, wiki4kSchemaPerfFile, wiki4kGcFile)
        wiki4kSchemaIndexChartData = []
        wiki4kSchemaIndexDocsSecChartData = []
        wiki4kSchemaGcTimesChartData = []
        wiki4kSchemaGcGarbageChartData = []
        wiki4kSchemaGcPeakChartData = []
        populate_gc_data(wiki4kGcFile, wiki4kSchemaGcGarbageChartData, wiki4kSchemaGcPeakChartData,
                         wiki4kSchemaGcTimesChartData)
        if os.path.isfile(wiki4kSchemaPerfFile):
            with open(wiki4kSchemaPerfFile, 'r') as f:
                lines = [line.rstrip('\n') for line in f]
                for l in lines:
                    timeStamp, bytesIndexed, docsIndexed, timeTaken, solrMajorVersion, solrImplVersion = l.split(',')
                    implVersion = solrImplVersion
                    wiki4kSchemaIndexChartData.append(
                        '%s,%.1f' % (timeStamp, (int(bytesIndexed) / (1024 * 1024 * 1024.)) / (float(timeTaken) / 3600.)))
                    wiki4kSchemaIndexDocsSecChartData.append('%s,%.1f' % (timeStamp, (int(docsIndexed) / 1000) / float(timeTaken)))
        wiki4kSchemaIndexChartData.sort()
        wiki4kSchemaIndexChartData.insert(0, 'Date,GB/hour')
        wiki4kSchemaIndexDocsSecChartData.sort()
        wiki4kSchemaIndexDocsSecChartData.insert(0, 'Date,K docs/sec')
    if not CLOUD_TEST_ONLY:
        wiki1kSchemaCloudPerfFile = '%s/wiki_1k_schema_cloud.perfdata.txt' % constants.LOG_BASE_DIR
        wiki1kCloudGcFile = '%s/wiki_1k_schema_cloud.gc.txt' % constants.LOG_BASE_DIR
        results = run_wiki_1k_schema_cloud_bench(start, tgz, runLogFile,
                                                 wiki1kSchemaCloudPerfFile,
                                                 wiki1kCloudGcFile,
                                                 create_collection_2x1)
        wiki1kCloudBytesIndexed, wiki1kCloudIndexTimeSec, wiki1kCloudDocsIndexed = [results.bytesIndexed, results.indexTimeSec, results.docsIndexed]
        wiki1kCloudGcTimesChartData = []
        wiki1kCloudGcGarbageChartData = []
        wiki1kCloudGcPeakChartData = []
        populate_cloud_gc_data(wiki1kCloudGcFile, results.node_data, wiki1kCloudGcTimesChartData, wiki1kCloudGcGarbageChartData, wiki1kCloudGcPeakChartData)
        wiki1kCloudIndexChartData = []
        wiki1kCloudIndexDocsSecChartData = []
        if os.path.isfile(wiki1kSchemaCloudPerfFile):
            with open(wiki1kSchemaCloudPerfFile, 'r') as f:
                lines = [line.rstrip('\n') for line in f]
                for l in lines:
                    timeStamp, bytesIndexed, docsIndexed, timeTaken, solrMajorVersion, solrImplVersion = l.split(',')
                    implVersion = solrImplVersion
                    wiki1kCloudIndexChartData.append(
                        '%s,%.1f' % (timeStamp, (int(bytesIndexed) / (1024 * 1024 * 1024.)) / (float(timeTaken) / 3600.)))
                    wiki1kCloudIndexDocsSecChartData.append(
                        '%s,%.1f' % (timeStamp, (int(docsIndexed) / 1000) / float(timeTaken)))
        wiki1kCloudIndexChartData.sort()
        wiki1kCloudIndexChartData.insert(0, 'Date,GB/hour')
        wiki1kCloudIndexDocsSecChartData.sort()
        wiki1kCloudIndexDocsSecChartData.insert(0, 'Date,K docs/sec')
    wiki1kSchemaCloud1x2PerfFile = '%s/wiki_1k_schema_cloud1x2.perfdata.txt' % constants.LOG_BASE_DIR
    wiki1kCloud1x2GcFile = '%s/wiki_1k_schema_cloud1x2.gc.txt' % constants.LOG_BASE_DIR
    results = run_wiki_1k_schema_cloud_bench(start, tgz,
                                             runLogFile,
                                             wiki1kSchemaCloud1x2PerfFile,
                                             wiki1kCloud1x2GcFile,
                                             create_collection_1x2)
    wiki1kCloud1x2BytesIndexed, wiki1kCloudIndexTimeSec, wiki1kCloudDocsIndexed = [results.bytesIndexed, results.indexTimeSec, results.docsIndexed]
    wiki1kCloud1x2IndexChartData = []
    wiki1kCloud1x2IndexDocsSecChartData = []
    wiki1kCloud1x2GcTimesChartData = []
    wiki1kCloud1x2GcGarbageChartData = []
    wiki1kCloud1x2GcPeakChartData = []
    populate_cloud_gc_data(wiki1kCloud1x2GcFile, results.node_data, wiki1kCloud1x2GcTimesChartData, wiki1kCloud1x2GcGarbageChartData, wiki1kCloud1x2GcPeakChartData)
    if os.path.isfile(wiki1kSchemaCloud1x2PerfFile):
        with open(wiki1kSchemaCloud1x2PerfFile, 'r') as f:
            lines = [line.rstrip('\n') for line in f]
            for l in lines:
                timeStamp, bytesIndexed, docsIndexed, timeTaken, solrMajorVersion, solrImplVersion = l.split(',')
                implVersion = solrImplVersion
                wiki1kCloud1x2IndexChartData.append(
                    '%s,%.1f' % (timeStamp, (int(bytesIndexed) / (1024 * 1024 * 1024.)) / (float(timeTaken) / 3600.)))
                wiki1kCloud1x2IndexDocsSecChartData.append(
                    '%s,%.1f' % (timeStamp, (int(docsIndexed) / 1000) / float(timeTaken)))
    wiki1kCloud1x2IndexChartData.sort()
    wiki1kCloud1x2IndexChartData.insert(0, 'Date,GB/hour')
    wiki1kCloud1x2IndexDocsSecChartData.sort()
    wiki1kCloud1x2IndexDocsSecChartData.insert(0, 'Date,K docs/sec')
    # NOTE(review): with CLOUD_TEST_ONLY set and NOREPORT unset this still
    # runs, but the non-cloud chart variables above are then undefined --
    # and the 'or' here looks like it was meant to be 'and'; confirm intent
    if not NOREPORT or not CLOUD_TEST_ONLY:
        graphutils.writeIndexingHTML(annotations,
                                     [simpleIndexChartData,
                                      wiki1kSchemaIndexChartData, wiki1kSchemaIndexDocsSecChartData,
                                      wiki1kSchemaGcTimesChartData, wiki1kSchemaGcGarbageChartData,
                                      wiki1kSchemaGcPeakChartData,
                                      wiki4kSchemaIndexChartData, wiki4kSchemaIndexDocsSecChartData,
                                      wiki4kSchemaGcTimesChartData, wiki4kSchemaGcGarbageChartData,
                                      wiki4kSchemaGcPeakChartData,
                                      wiki1kCloudIndexChartData, wiki1kCloudIndexDocsSecChartData,
                                      wiki1kCloudGcTimesChartData, wiki1kCloudGcGarbageChartData,
                                      wiki1kCloudGcPeakChartData,
                                      wiki1kCloud1x2IndexChartData, wiki1kCloud1x2IndexDocsSecChartData,
                                      wiki1kCloud1x2GcTimesChartData, wiki1kCloud1x2GcGarbageChartData,
                                      wiki1kCloud1x2GcPeakChartData])
    totalBenchTime = time.time() - t0
    utils.info('Total bench time: %d seconds' % totalBenchTime)
    if '-logFile' not in sys.argv and '-log-by-commit-date' in sys.argv:
        # find updated runLogDir
        timeStamp = '%04d.%02d.%02d.%02d.%02d.%02d' % (
            git_date.year, git_date.month, git_date.day, git_date.hour, git_date.minute, git_date.second)
        newRunLogDir = '%s/%s' % (constants.LOG_BASE_DIR, timeStamp)
        print('Moving logs from %s to %s' % (runLogDir, newRunLogDir))
        shutil.move(runLogDir, newRunLogDir)
    if SLACK:
        # best-effort completion notification with summary throughputs
        try:
            slackUrl = os.environ.get('SLACK_URL')
            slackChannel = os.environ.get('SLACK_CHANNEL')
            slackToken = os.environ.get('SLACK_BOT_TOKEN')
            message = 'Solr performance test on git sha %s completed in %d seconds:\n' \
                      '\t Start: %s\n' \
                      '\t simple: %.1f json MB/sec\n' \
                      '\t wiki_1k_schema: %.1f GB/hour %.1f k docs/sec\n' \
                      '\t wiki_1k_schema_cloud: %.1f GB/hour %.1f k docs/sec\n' \
                      '\t See complete report at: %s' \
                      % (implVersion, totalBenchTime, timeStamp,
                         (int(simpleBytesIndexed) / (1024 * 1024.)) / float(simpleTimeTaken),
                         (int(wiki1kBytesIndexed) / (1024 * 1024 * 1024.)) / (float(wiki1kIndexTimeSec) / 3600.),
                         (int(wiki1kDocsIndexed) / 1000) / float(wiki1kIndexTimeSec),
                         (int(wiki1kCloudBytesIndexed) / (1024 * 1024 * 1024.)) / (
                                 float(wiki1kCloudIndexTimeSec) / 3600.),
                         (int(wiki1kCloudDocsIndexed) / 1000) / float(wiki1kCloudIndexTimeSec),
                         os.environ.get('SLACK_REPORT_URL'))
            print('Sending message to slackbot: \n\t\t%s' % message)
            r = requests.post('%s?token=%s&channel=%s' % (slackUrl, slackToken, slackChannel), message)
            print('slackbot request posted:')
            print(r)
        except Exception:
            print('Unable to send request to slackbot')
def populate_gc_data(gcFile, gcGarbageChartData, gcPeakChartData, gcTimesChartData):
  """Parse a single-node GC metrics file and fill the three chart-data lists.

  Each line of *gcFile* is expected to carry 12 comma-separated fields:
  timestamp, Solr major version, Solr impl version, JIT time, old-gen GC
  time, young-gen GC time, old/survivor/young garbage, old/survivor/young
  peak. One formatted CSV row per input line is appended to each output
  list; the lists are then sorted (the zero-padded timestamp format sorts
  chronologically as text) and a header row is inserted at index 0.
  Headers are inserted even when *gcFile* does not exist.
  """
  if os.path.isfile(gcFile):
    with open(gcFile, 'r') as fh:
      for raw in fh:
        fields = raw.rstrip('\n').split(',')
        # fields[1:3] (Solr major/impl version) are present but unused here.
        ts = fields[0]
        jit, old_gc, young_gc = fields[3], fields[4], fields[5]
        old_garbage, survivor_garbage, young_garbage = fields[6], fields[7], fields[8]
        old_peak, survivor_peak, young_peak = fields[9], fields[10], fields[11]
        gcTimesChartData.append(
            '%s,%.4f,%.4f,%.4f' % (ts, float(jit), float(young_gc), float(old_gc)))
        # Divide by 1024 to match the GB header (input presumably MB -- TODO confirm).
        gcGarbageChartData.append('%s,%.4f,%.4f,%.4f' % (
            ts, float(young_garbage) / 1024., float(survivor_garbage) / 1024.,
            float(old_garbage) / 1024.))
        gcPeakChartData.append(
            '%s,%.4f,%.4f,%.4f' % (ts, float(young_peak), float(survivor_peak), float(old_peak)))
  # Sort first so the header ends up on top regardless of input order.
  gcTimesChartData.sort()
  gcTimesChartData.insert(0, 'Date,JIT (ms), Young GC (ms), Old GC (ms)')
  gcGarbageChartData.sort()
  gcGarbageChartData.insert(0, 'Date,Young Garbage (GB),Survivor Garbage (GB),Old Garbage (GB)')
  gcPeakChartData.sort()
  gcPeakChartData.insert(0, 'Date,Young Peak (MB),Survivor Peak (MB),Old Peak (MB)')
def populate_cloud_gc_data(gcFile, node_data, gcTimesChartData, gcGarbageChartData, gcPeakChartData):
  """Parse a two-node cloud GC metrics file and fill the three chart-data lists.

  Each line of *gcFile* carries 21 comma-separated fields: timestamp, Solr
  major version, Solr impl version, then nine GC figures for node 1 and the
  same nine for node 2 (JIT ms, old-gen GC ms, young-gen GC ms,
  old/survivor/young garbage, old/survivor/young peak).

  Args:
    gcFile: path to the cloud GC log; silently skipped if absent.
    node_data: iterable of node names; the two lexicographically smallest
      label the header columns.
    gcTimesChartData, gcGarbageChartData, gcPeakChartData: output lists.
      One CSV row per input line is appended to each, the lists are sorted
      (the zero-padded timestamps sort chronologically as text), and a
      header row is inserted at index 0 even when the file is missing.
  """
  if os.path.isfile(gcFile):
    with open(gcFile, 'r') as f:
      lines = [line.rstrip('\n') for line in f]
    for l in lines:
      x = l.split(',')
      timeStamp = x[0]
      # x[1:3] (Solr major/impl version) are present but unused here.
      # Bounded slices (x[3:12], x[12:21]) tolerate trailing extra fields,
      # where the original x[12:] unpack would raise on them.
      jit1, oldGC1, youngGC1, oldGarb1, survGarb1, youngGarb1, \
          oldPeak1, survPeak1, youngPeak1 = map(float, x[3:12])
      jit2, oldGC2, youngGC2, oldGarb2, survGarb2, youngGarb2, \
          oldPeak2, survPeak2, youngPeak2 = map(float, x[12:21])
      gcTimesChartData.append('%s,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f' % (
          timeStamp, jit1, youngGC1, oldGC1, jit2, youngGC2, oldGC2))
      # Divide by 1024 to match the GB header (input presumably MB -- TODO confirm).
      gcGarbageChartData.append('%s,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f' % (
          timeStamp,
          youngGarb1 / 1024., survGarb1 / 1024., oldGarb1 / 1024.,
          youngGarb2 / 1024., survGarb2 / 1024., oldGarb2 / 1024.))
      gcPeakChartData.append('%s,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f' % (
          timeStamp,
          youngPeak1, survPeak1, oldPeak1,
          youngPeak2, survPeak2, oldPeak2))
  # Sort node names once (the original called sorted() twice for the same result).
  node1, node2 = sorted(node_data)[:2]
  gcTimesChartData.sort()
  gcTimesChartData.insert(0, 'Date, %s JIT (ms), %s Young GC (ms), %s Old GC (ms), %s JIT (ms), %s Young GC (ms), %s Old GC (ms)' % (node1, node1, node1, node2, node2, node2))
  gcGarbageChartData.sort()
  gcGarbageChartData.insert(0, 'Date, %s Young Garbage (GB), %s Survivor Garbage (GB), %s Old Garbage (GB), %s Young Garbage (GB), %s Survivor Garbage (GB), %s Old Garbage (GB)' % (node1, node1, node1, node2, node2, node2))
  gcPeakChartData.sort()
  gcPeakChartData.insert(0, 'Date, %s Young Peak (MB), %s Survivor Peak (MB), %s Old Peak (MB), %s Young Peak (MB), %s Survivor Peak (MB), %s Old Peak (MB)' % (node1, node1, node1, node2, node2, node2))
# Script entry point: run the benchmark driver defined earlier in this file.
if __name__ == '__main__':
  main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.