gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import functools
import glob
import gzip
import os
import sys
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.utils import parse_apps_and_model_labels
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils.functional import cached_property
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
READ_STDIN = '-'
class Command(BaseCommand):
    # Management command implementing ``manage.py loaddata``.
    help = 'Installs the named fixture(s) in the database.'
    missing_args_message = (
        "No database fixture specified. Please provide the path of at least "
        "one fixture in the command line."
    )
    def add_arguments(self, parser):
        """Register the command-line options accepted by loaddata."""
        parser.add_argument('args', metavar='fixture', nargs='+', help='Fixture labels.')
        parser.add_argument(
            '--database', default=DEFAULT_DB_ALIAS,
            help='Nominates a specific database to load fixtures into. Defaults to the "default" database.',
        )
        parser.add_argument(
            '--app', dest='app_label',
            help='Only look for fixtures in the specified app.',
        )
        parser.add_argument(
            '--ignorenonexistent', '-i', action='store_true', dest='ignore',
            help='Ignores entries in the serialized data for fields that do not '
                 'currently exist on the model.',
        )
        parser.add_argument(
            '-e', '--exclude', action='append', default=[],
            help='An app_label or app_label.ModelName to exclude. Can be used multiple times.',
        )
        parser.add_argument(
            '--format',
            help='Format of serialized data when reading from stdin.',
        )
    def handle(self, *fixture_labels, **options):
        """Load every requested fixture inside a single atomic transaction."""
        self.ignore = options['ignore']
        self.using = options['database']
        self.app_label = options['app_label']
        self.verbosity = options['verbosity']
        self.excluded_models, self.excluded_apps = parse_apps_and_model_labels(options['exclude'])
        self.format = options['format']
        with transaction.atomic(using=self.using):
            self.loaddata(fixture_labels)
        # Close the DB connection -- unless we're still in a transaction. This
        # is required as a workaround for an edge case in MySQL: if the same
        # connection is used to create tables, load data, and query, the query
        # can return incorrect results. See Django #7572, MySQL #37735.
        if transaction.get_autocommit(self.using):
            connections[self.using].close()
    def loaddata(self, fixture_labels):
        """Locate, deserialize and install all fixtures for the given labels."""
        connection = connections[self.using]
        # Keep a count of the installed objects and fixtures
        self.fixture_count = 0
        self.loaded_object_count = 0
        self.fixture_object_count = 0
        self.models = set()
        self.serialization_formats = serializers.get_public_serializer_formats()
        # Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
        # Maps a compression suffix to an (opener, mode) pair; the 'stdin'
        # opener ignores its arguments and returns sys.stdin.
        self.compression_formats = {
            None: (open, 'rb'),
            'gz': (gzip.GzipFile, 'rb'),
            'zip': (SingleZipReader, 'r'),
            'stdin': (lambda *args: sys.stdin, None),
        }
        if has_bz2:
            self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
        # Django's test suite repeatedly tries to load initial_data fixtures
        # from apps that don't have any fixtures. Because disabling constraint
        # checks can be expensive on some database (especially MSSQL), bail
        # out early if no fixtures are found.
        for fixture_label in fixture_labels:
            if self.find_fixtures(fixture_label):
                break
        else:
            return
        with connection.constraint_checks_disabled():
            self.objs_with_deferred_fields = []
            for fixture_label in fixture_labels:
                self.load_label(fixture_label)
            # Forward references (e.g. natural keys pointing at objects loaded
            # later) are resolved only after every fixture has been loaded.
            for obj in self.objs_with_deferred_fields:
                obj.save_deferred_fields(using=self.using)
        # Since we disabled constraint checks, we must manually check for
        # any invalid keys that might have been added
        table_names = [model._meta.db_table for model in self.models]
        try:
            connection.check_constraints(table_names=table_names)
        except Exception as e:
            e.args = ("Problem installing fixtures: %s" % e,)
            raise
        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if self.loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
            if sequence_sql:
                if self.verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                with connection.cursor() as cursor:
                    for line in sequence_sql:
                        cursor.execute(line)
        if self.verbosity >= 1:
            if self.fixture_object_count == self.loaded_object_count:
                self.stdout.write(
                    "Installed %d object(s) from %d fixture(s)"
                    % (self.loaded_object_count, self.fixture_count)
                )
            else:
                self.stdout.write(
                    "Installed %d object(s) (of %d) from %d fixture(s)"
                    % (self.loaded_object_count, self.fixture_object_count, self.fixture_count)
                )
    def load_label(self, fixture_label):
        """Load fixtures files for a given label."""
        show_progress = self.verbosity >= 3
        for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
            _, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
            open_method, mode = self.compression_formats[cmp_fmt]
            fixture = open_method(fixture_file, mode)
            try:
                self.fixture_count += 1
                objects_in_fixture = 0
                loaded_objects_in_fixture = 0
                if self.verbosity >= 2:
                    self.stdout.write(
                        "Installing %s fixture '%s' from %s."
                        % (ser_fmt, fixture_name, humanize(fixture_dir))
                    )
                objects = serializers.deserialize(
                    ser_fmt, fixture, using=self.using, ignorenonexistent=self.ignore,
                    handle_forward_references=True,
                )
                for obj in objects:
                    objects_in_fixture += 1
                    # Skip objects explicitly excluded via --exclude.
                    if (obj.object._meta.app_config in self.excluded_apps or
                            type(obj.object) in self.excluded_models):
                        continue
                    # Only save objects that the database router allows to
                    # migrate to the target database.
                    if router.allow_migrate_model(self.using, obj.object.__class__):
                        loaded_objects_in_fixture += 1
                        self.models.add(obj.object.__class__)
                        try:
                            obj.save(using=self.using)
                            if show_progress:
                                self.stdout.write(
                                    '\rProcessed %i object(s).' % loaded_objects_in_fixture,
                                    ending=''
                                )
                        # psycopg2 raises ValueError if data contains NUL chars.
                        except (DatabaseError, IntegrityError, ValueError) as e:
                            e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
                                'app_label': obj.object._meta.app_label,
                                'object_name': obj.object._meta.object_name,
                                'pk': obj.object.pk,
                                'error_msg': e,
                            },)
                            raise
                    # Objects with unresolved forward references are saved
                    # again in loaddata() once everything has been loaded.
                    if obj.deferred_fields:
                        self.objs_with_deferred_fields.append(obj)
                if objects and show_progress:
                    self.stdout.write()  # Add a newline after progress indicator.
                self.loaded_object_count += loaded_objects_in_fixture
                self.fixture_object_count += objects_in_fixture
            except Exception as e:
                if not isinstance(e, CommandError):
                    e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
                raise
            finally:
                fixture.close()
            # Warn if the fixture we loaded contains 0 objects.
            if objects_in_fixture == 0:
                warnings.warn(
                    "No fixture data found for '%s'. (File format may be "
                    "invalid.)" % fixture_name,
                    RuntimeWarning
                )
    # NOTE(review): lru_cache on an instance method keys the cache on `self`
    # and keeps the instance alive for the cache's lifetime. A Command
    # instance only lives for one invocation, so this appears intentional —
    # confirm before reusing Command instances.
    @functools.lru_cache(maxsize=None)
    def find_fixtures(self, fixture_label):
        """Find fixture files for a given label."""
        if fixture_label == READ_STDIN:
            return [(READ_STDIN, None, READ_STDIN)]
        fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
        databases = [self.using, None]
        cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
        ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
        if self.verbosity >= 2:
            self.stdout.write("Loading '%s' fixtures..." % fixture_name)
        if os.path.isabs(fixture_name):
            fixture_dirs = [os.path.dirname(fixture_name)]
            fixture_name = os.path.basename(fixture_name)
        else:
            fixture_dirs = self.fixture_dirs
            if os.path.sep in os.path.normpath(fixture_name):
                fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
                                for dir_ in fixture_dirs]
                fixture_name = os.path.basename(fixture_name)
        # Candidate file names: name[.database][.serialization_fmt][.compression_fmt]
        suffixes = (
            '.'.join(ext for ext in combo if ext)
            for combo in product(databases, ser_fmts, cmp_fmts)
        )
        targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes}
        fixture_files = []
        for fixture_dir in fixture_dirs:
            if self.verbosity >= 2:
                self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
            fixture_files_in_dir = []
            path = os.path.join(fixture_dir, fixture_name)
            for candidate in glob.iglob(glob.escape(path) + '*'):
                if os.path.basename(candidate) in targets:
                    # Save the fixture_dir and fixture_name for future error messages.
                    fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
            if self.verbosity >= 2 and not fixture_files_in_dir:
                self.stdout.write("No fixture '%s' in %s." %
                                  (fixture_name, humanize(fixture_dir)))
            # Check kept for backwards-compatibility; it isn't clear why
            # duplicates are only allowed in different directories.
            if len(fixture_files_in_dir) > 1:
                raise CommandError(
                    "Multiple fixtures named '%s' in %s. Aborting." %
                    (fixture_name, humanize(fixture_dir)))
            fixture_files.extend(fixture_files_in_dir)
        if not fixture_files:
            raise CommandError("No fixture named '%s' found." % fixture_name)
        return fixture_files
    @cached_property
    def fixture_dirs(self):
        """
        Return a list of fixture directories.
        The list contains the 'fixtures' subdirectory of each installed
        application, if it exists, the directories in FIXTURE_DIRS, and the
        current directory.
        """
        dirs = []
        fixture_dirs = settings.FIXTURE_DIRS
        if len(fixture_dirs) != len(set(fixture_dirs)):
            raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
        for app_config in apps.get_app_configs():
            app_label = app_config.label
            app_dir = os.path.join(app_config.path, 'fixtures')
            if app_dir in fixture_dirs:
                raise ImproperlyConfigured(
                    "'%s' is a default fixture directory for the '%s' app "
                    "and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
                )
            if self.app_label and app_label != self.app_label:
                continue
            if os.path.isdir(app_dir):
                dirs.append(app_dir)
        dirs.extend(fixture_dirs)
        # '' resolves to the current working directory.
        dirs.append('')
        return [os.path.realpath(d) for d in dirs]
    def parse_name(self, fixture_name):
        """
        Split fixture name into a (name, serialization format, compression
        format) tuple; unknown parts are returned as None.
        """
        if fixture_name == READ_STDIN:
            if not self.format:
                raise CommandError('--format must be specified when reading from stdin.')
            return READ_STDIN, self.format, 'stdin'
        parts = fixture_name.rsplit('.', 2)
        if len(parts) > 1 and parts[-1] in self.compression_formats:
            cmp_fmt = parts[-1]
            parts = parts[:-1]
        else:
            cmp_fmt = None
        if len(parts) > 1:
            if parts[-1] in self.serialization_formats:
                ser_fmt = parts[-1]
                parts = parts[:-1]
            else:
                raise CommandError(
                    "Problem installing fixture '%s': %s is not a known "
                    "serialization format." % ('.'.join(parts[:-1]), parts[-1]))
        else:
            ser_fmt = None
        name = '.'.join(parts)
        return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
    """A ZipFile that reads fixture data from a single-member zip archive."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A fixture archive with more (or fewer) than one member is ambiguous.
        if len(self.namelist()) != 1:
            raise ValueError("Zip-compressed fixtures must contain one file.")
    def read(self):
        # Delegate to ZipFile.read() for the archive's only member.
        (member,) = self.namelist()
        return super().read(member)
def humanize(dirname):
    """Return *dirname* quoted for display, or 'absolute path' if it is empty."""
    if dirname:
        return "'%s'" % dirname
    return 'absolute path'
| |
import time
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.utils.crypto import salted_hmac, constant_time_compare
from ratings import cookies, exceptions
from ratings.forms.widgets import SliderWidget, StarWidget
class VoteForm(forms.Form):
    """
    Form class to handle voting of content objects.
    You can customize the app giving a custom form class, following
    some rules:
    - the form must define the *content_type* and *object_pk* fields
    - the form's *__init__* method must take as first and second positional
    arguments the target object getting voted and the ratings key
    - the form must define the *get_vote* method, getting the request and
    a boolean *allow_anonymous* and returning an unsaved instance of
    the vote model
    - the form must define the *delete* method, getting the request and
    returning True if the form requests the deletion of the vote
    """
    # rating data
    content_type = forms.CharField(widget=forms.HiddenInput)
    object_pk = forms.CharField(widget=forms.HiddenInput)
    key = forms.RegexField(regex=r'^[\w.+-]+$', widget=forms.HiddenInput,
        required=False)
    # security data (mirrors django.contrib.comments-style anti-tampering)
    timestamp = forms.IntegerField(widget=forms.HiddenInput)
    security_hash = forms.CharField(min_length=40, max_length=40,
        widget=forms.HiddenInput)
    honeypot = forms.CharField(required=False, widget=forms.HiddenInput)
    def __init__(self, target_object, key, score_range=None, score_step=None,
        can_delete_vote=None, data=None, initial=None):
        # Keep voting configuration on the instance; the security fields'
        # initial values are generated from the target object and key.
        self.target_object = target_object
        self.key = key
        self.score_range = score_range
        self.score_step = score_step
        self.can_delete_vote = can_delete_vote
        if initial is None:
            initial = {}
        initial.update(self.generate_security_data())
        super(VoteForm, self).__init__(data=data, initial=initial)
        # The score field is built dynamically so subclasses can swap widgets.
        self.fields['score'] = self.get_score_field(score_range, score_step,
            can_delete_vote)
    # FACTORY METHODS
    def get_score_field(self, score_range, score_step, can_delete_vote):
        """
        Return the score field.
        Subclasses may override this method in order to change
        the field used to store score value.
        """
        # A step with a non-zero fractional part (e.g. '0.5') implies floats;
        # an integral step (e.g. '1' or '1.0') keeps an IntegerField.
        try:
            _, decimals = str(score_step).split('.')
        except ValueError:
            field = forms.IntegerField
        else:
            field = forms.FloatField if int(decimals) else forms.IntegerField
        widget = self.get_score_widget(score_range, score_step, can_delete_vote)
        return field(widget=widget, label=u'')
    def get_score_widget(self, score_range, score_step, can_delete_vote):
        """
        Return the score widget.
        Subclasses may override this method in order to change
        the widget used to display score input.
        """
        return forms.TextInput
    # SECURITY
    def clean_security_hash(self):
        """
        Check the security hash.
        """
        # Recompute the HMAC from the submitted (raw) data and compare it in
        # constant time against the submitted hash.
        security_hash_dict = {
            'content_type' : self.data.get('content_type', ''),
            'object_pk' : self.data.get('object_pk', ''),
            'key': self.data.get('key', ''),
            'timestamp' : self.data.get('timestamp', ''),
        }
        expected_hash = self.generate_security_hash(**security_hash_dict)
        actual_hash = self.cleaned_data['security_hash']
        if not constant_time_compare(expected_hash, actual_hash):
            raise forms.ValidationError('Security hash check failed.')
        return actual_hash
    def clean_timestamp(self):
        """
        Make sure the timestamp isn't too far (> 2 hours) in the past.
        """
        timestamp = self.cleaned_data['timestamp']
        if time.time() - timestamp > (2 * 60 * 60):
            raise forms.ValidationError('Timestamp check failed')
        return timestamp
    def clean_honeypot(self):
        """
        Check that nothing's been entered into the honeypot.
        """
        value = self.cleaned_data['honeypot']
        if value:
            raise forms.ValidationError('Your vote is spam. Shame on you!')
        return value
    def generate_security_data(self):
        """
        Generate a dict of security data for *initial* data.
        """
        timestamp = int(time.time())
        security_dict = {
            'content_type': str(self.target_object._meta),
            'object_pk': str(self.target_object._get_pk_val()),
            'key': str(self.key),
            'timestamp': str(timestamp),
            'security_hash': self.initial_security_hash(timestamp),
        }
        return security_dict
    def initial_security_hash(self, timestamp):
        """
        Generate the initial security hash from *self.target_object*
        and a (unix) timestamp.
        """
        initial_security_dict = {
            'content_type' : str(self.target_object._meta),
            'object_pk' : str(self.target_object._get_pk_val()),
            'key': str(self.key),
            'timestamp' : str(timestamp),
        }
        return self.generate_security_hash(**initial_security_dict)
    def generate_security_hash(self, content_type, object_pk, key, timestamp):
        """
        Generate a HMAC security hash from the provided info.
        """
        key_salt = 'ratings.forms.VoteForm'
        value = '-'.join((content_type, object_pk, key, timestamp))
        return salted_hmac(key_salt, value).hexdigest()
    # VOTE
    def clean_score(self):
        """
        If *score_range* was given to the form, then check if the
        score is in range.
        Again, if *score_step* was given, then check if the score is valid
        for that step.
        """
        score = self.cleaned_data['score']
        self._delete_vote = False
        # a 0 score means the user wants to delete his vote
        if score == 0:
            if not self.can_delete_vote:
                raise forms.ValidationError('Vote deletion is not allowed')
            self._delete_vote = True
            return score
        # score range, if given we have to check score is in that range
        if self.score_range:
            if not (self.score_range[0] <= score <= self.score_range[1]):
                raise forms.ValidationError('Score is not in range')
        # check score steps
        if self.score_step:
            try:
                _, decimals = str(self.score_step).split('.')
            except ValueError:
                decimal_places = 0
            else:
                decimal_places = len(decimals) if int(decimals) else 0
            if not decimal_places and int(score) != score:
                raise forms.ValidationError('Score is not in steps')
            # Scale to integers before the modulo so float steps (e.g. 0.5)
            # can be checked exactly.
            factor = 10 ** decimal_places
            if int(score * factor) % int(self.score_step * factor):
                raise forms.ValidationError('Score is not in steps')
        return score
    def get_vote_model(self):
        """
        Return the vote model used to rate an object.
        """
        from ratings import models
        return models.Vote
    def get_vote_data(self, request, allow_anonymous):
        """
        Return two dicts of data to be used to look for a vote and to create
        a vote.
        Subclasses in custom ratings apps that override *get_vote_model* can
        override this method too to add extra fields into a custom vote model.
        If the first dict is None, then the lookup is not performed.
        """
        content_type = ContentType.objects.get_for_model(self.target_object)
        ip_address = request.META.get('REMOTE_ADDR')
        lookups = {
            'content_type': content_type,
            'object_id': self.target_object.pk,
            'key': self.cleaned_data['key'],
        }
        data = lookups.copy()
        data.update({
            'score': self.cleaned_data['score'],
            'ip_address': ip_address,
        })
        if allow_anonymous:
            # votes are handled by cookies
            if not ip_address:
                raise exceptions.DataError('Invalid ip address')
            cookie_name = cookies.get_name(self.target_object, self.key)
            cookie_value = request.COOKIES.get(cookie_name)
            if cookie_value:
                # the user maybe voted this object (it has a cookie)
                lookups.update({'cookie': cookie_value, 'user__isnull':True})
                data['cookie'] = cookie_value
            else:
                lookups = None
                data['cookie'] = cookies.get_value(ip_address)
        # NOTE(review): is_authenticated is *called* here (pre-Django 1.10
        # method API; it became a property in 1.10) — confirm target version.
        elif request.user.is_authenticated():
            # votes are handled by database (django users)
            lookups.update({'user': request.user, 'cookie__isnull': True})
            data['user'] = request.user
        else:
            # something went very wrong: if anonymous votes are not allowed
            # and the user is not authenticated the view should have blocked
            # the voting process
            raise exceptions.DataError('Anonymous user cannot vote.')
        return lookups, data
    def get_vote(self, request, allow_anonymous):
        """
        Return an unsaved vote object based on the information in this form.
        Assumes that the form is already validated and will throw a
        ValueError if not.
        The vote can be a brand new vote or a changed vote. If the vote is
        just created then the instance's id will be None.
        """
        if not self.is_valid():
            raise ValueError('get_vote may only be called on valid forms')
        # get vote model and data
        model = self.get_vote_model()
        lookups, data = self.get_vote_data(request, allow_anonymous)
        if lookups is None:
            return model(**data)
        try:
            # trying to get an existing vote
            vote = model.objects.get(**lookups)
        except model.DoesNotExist:
            # create a brand new vote
            vote = model(**data)
        else:
            # change data for existing vote
            vote.score = data['score']
            vote.ip_address = data['ip_address']
        return vote
    # DELETE
    def delete(self, request):
        """
        Return True if the form requests to delete the vote.
        """
        # Set as a side effect of clean_score() (score == 0 means delete).
        return self._delete_vote
class SliderVoteForm(VoteForm):
    """
    Handle voting using a slider widget.
    In order to use this form you must load the jQuery.ui slider
    javascript.
    This form triggers the following javascript events:
    - *slider_change* with the vote value as argument
    (fired when the user changes his vote)
    - *slider_delete* without arguments
    (fired when the user deletes his vote)
    It's easy to bind these events using jQuery, e.g.::
    $(document).bind('slider_change', function(event, value) {
    alert('New vote: ' + value);
    });
    """
    def get_score_widget(self, score_range, score_step, can_delete_vote):
        """Return a SliderWidget bound to the target object and ratings key."""
        return SliderWidget(score_range[0], score_range[1], score_step,
            instance=self.target_object, can_delete_vote=can_delete_vote, key=self.key)
class StarVoteForm(VoteForm):
    """
    Handle voting using a star widget.
    In order to use this form you must download the
    jQuery Star Rating Plugin available at
    http://www.fyneworks.com/jquery/star-rating/#tab-Download
    and then load the required javascripts and css, e.g.::
    <link href="/path/to/jquery.rating.css" rel="stylesheet" type="text/css" />
    <script type="text/javascript" src="/path/to/jquery.MetaData.js"></script>
    <script type="text/javascript" src="/path/to/jquery.rating.js"></script>
    This form triggers the following javascript events:
    - *star_change* with the vote value as argument
    (fired when the user changes his vote)
    - *star_delete* without arguments
    (fired when the user deletes his vote)
    It's easy to bind these events using jQuery, e.g.::
    $(document).bind('star_change', function(event, value) {
    alert('New vote: ' + value);
    });
    """
    def get_score_widget(self, score_range, score_step, can_delete_vote):
        """Return a StarWidget bound to the target object and ratings key."""
        return StarWidget(score_range[0], score_range[1], score_step,
            instance=self.target_object, can_delete_vote=can_delete_vote, key=self.key)
| |
from itertools import product
from collections import defaultdict, OrderedDict
import h5py
import numpy
import tables
from six.moves import zip, range
from fuel.datasets import Dataset
from fuel.utils import do_not_pickle_attributes
@do_not_pickle_attributes('nodes', 'h5file')
class PytablesDataset(Dataset):
    """A pytables dataset.
    An HDF5 Dataset which was created with pytables. The dataset should
    have the following structure: `/<data_node>/paths/to/sources`. In
    order to have train/validation/test split you may want to open
    several datasets with different data nodes or source paths. It is
    also possible to use start and stop arguments to split your dataset.
    Parameters
    ----------
    sources : tuple of strings
        Sources which the dataset returns.
    start : int
        Start index. Optional, by default is 0.
    stop : int
        Stop index. Optional, if is not provided, will be set to the
        number of rows of the first source.
    data_node : str
        Parent data node in HDF5 file, all path are relative to this node.
    sources_in_file : tuple of strings
        Names of nodes in HDF5 file which contain sources. Should the same
        length as `sources`.
        Optional, if not set will be equal to `sources`.
    """
    def __init__(self, path, sources, start=0, stop=None, data_node='Data',
                 sources_in_file=None):
        # Record the configuration and open the file eagerly so that
        # `num_examples` is known before Dataset.__init__ runs.
        if sources_in_file is None:
            sources_in_file = sources
        self.sources_in_file = sources_in_file
        self.provides_sources = sources
        self.path = path
        self.data_node = data_node
        self.start = start
        self.stop = stop
        self.nodes = None
        self.open_file(path)
        super(PytablesDataset, self).__init__(self.provides_sources)
    def open_file(self, path):
        """Open the pytables file read-only and resolve the source nodes."""
        self.h5file = tables.open_file(path, mode="r")
        node = self.h5file.get_node('/', self.data_node)
        self.nodes = [getattr(node, source) for source in self.sources_in_file]
        # Default stop is the row count of the first source.
        if self.stop is None:
            self.stop = self.nodes[0].nrows
        self.num_examples = self.stop - self.start
    def load(self):
        """Re-open the file; called when unpickled (see do_not_pickle_attributes)."""
        self.open_file(self.path)
    def close_file(self):
        """Close the HDF5 file and drop the cached handle/node attributes."""
        self.h5file.close()
        # NOTE(review): deletes the underscore-backed storage presumably
        # created by do_not_pickle_attributes for 'h5file'/'nodes' — confirm.
        del self._h5file
        del self._nodes
    def get_data(self, state=None, request=None):
        """ Returns data from HDF5 dataset.
        .. note:: The best performance if `request` is a slice.
        """
        # Requests are expressed relative to `start`; shift them into the
        # underlying node's coordinates before indexing.
        if isinstance(request, slice):
            request = slice(request.start + self.start,
                            request.stop + self.start, request.step)
            data = [node[request] for node in self.nodes]
        elif isinstance(request, list):
            request = [index + self.start for index in request]
            data = [node[request, ...] for node in self.nodes]
        else:
            raise ValueError
        return data
@do_not_pickle_attributes('data_sources', 'external_file_handle',
'source_shapes')
class H5PYDataset(Dataset):
"""An h5py-fueled HDF5 dataset.
This dataset class assumes a particular file layout:
* Data sources reside in the root group, and their names define the
source names.
* Data sources are not explicitly split. Instead, splits are defined
in the `split` attribute of the root group. It's expected to be a
1D numpy array of compound ``dtype`` with six fields, organized as
follows:
1. ``split`` : string identifier for the split name
2. ``source`` : string identifier for the source name
3. ``start`` : start index (inclusive) of the split in the source
array
4. ``stop`` : stop index (exclusive) of the split in the source
array
5. ``available`` : boolean, ``False`` is this split is not available
for this source
6. ``comment`` : comment string
Parameters
----------
file_or_path : :class:`h5py.File` or str
HDF5 file handle, or path to the HDF5 file.
which_set : str
Which split to use.
subset : slice, optional
A slice of data *within the context of the split* to use. Defaults
to `None`, in which case the whole split is used. **Note:
at the moment, `slice.step` must be either 1 or `None`.**
load_in_memory : bool, optional
Whether to load the data in main memory. Defaults to `False`.
driver : str, optional
Low-level driver to use. Defaults to `None`. See h5py
documentation for a complete list of available options.
sort_indices : bool, optional
Whether to explicitly sort requested indices when data is
requested in the form of a list of indices. Defaults to `True`.
This flag can be set to `False` for greater performance. In
that case, it is the user's responsibility to make sure that
indices are ordered.
"""
interface_version = '0.2'
_ref_counts = defaultdict(int)
_file_handles = {}
    def __init__(self, file_or_path, which_set, subset=None,
                 load_in_memory=False, driver=None, sort_indices=True,
                 **kwargs):
        # Accept either an already-open h5py.File or a filesystem path; an
        # external handle is never closed by this class.
        if isinstance(file_or_path, h5py.File):
            self.path = file_or_path.filename
            self.external_file_handle = file_or_path
        else:
            self.path = file_or_path
            self.external_file_handle = None
        self.driver = driver
        self.sort_indices = sort_indices
        # `available_splits` reads the file's `split` attribute, so
        # path/handle/driver must be set before this check.
        if which_set not in self.available_splits:
            raise ValueError(
                "'{}' split is not provided by this ".format(which_set) +
                "dataset. Available splits are " +
                "{}.".format(self.available_splits))
        self.which_set = which_set
        subset = subset if subset else slice(None)
        # Only contiguous subsets are supported.
        if subset.step not in (1, None):
            raise ValueError("subset.step must be either 1 or None")
        self._subset_template = subset
        self.load_in_memory = load_in_memory
        kwargs.setdefault('axis_labels', self.load_axis_labels())
        super(H5PYDataset, self).__init__(**kwargs)
@staticmethod
def create_split_array(split_dict):
"""Create a valid array for the `split` attribute of the root node.
Parameters
----------
split_dict : dict
Maps split names to dict. Those dict map source names to
tuples. Those tuples contain two or three elements:
the start index, the stop index and (optionally) a comment.
If a particular split/source combination isn't present
in the split dict, it's considered as unavailable and the
`available` element will be set to `False` it its split array
entry.
"""
# Determine maximum split, source and string lengths
split_len = max(len(split) for split in split_dict)
sources = set()
comment_len = 1
for split in split_dict.values():
sources |= set(split.keys())
for val in split.values():
if len(val) == 3:
comment_len = max([comment_len, len(val[-1])])
sources = sorted(list(sources))
source_len = max(len(source) for source in sources)
# Instantiate empty split array
split_array = numpy.empty(
len(split_dict) * len(sources),
dtype=numpy.dtype([
('split', 'a', split_len),
('source', 'a', source_len),
('start', numpy.int64, 1), ('stop', numpy.int64, 1),
('available', numpy.bool, 1),
('comment', 'a', comment_len)]))
# Fill split array
for i, (split, source) in enumerate(product(split_dict, sources)):
if source in split_dict[split]:
start, stop = split_dict[split][source][:2]
available = True
# Workaround for bug when pickling an empty string
comment = '.'
if len(split_dict[split][source]) == 3:
comment = split_dict[split][source][2]
if not comment:
comment = '.'
else:
(start, stop, available, comment) = (0, 0, False, '.')
# Workaround for H5PY being unable to store unicode type
split_array[i]['split'] = split.encode('utf8')
split_array[i]['source'] = source.encode('utf8')
split_array[i]['start'] = start
split_array[i]['stop'] = stop
split_array[i]['available'] = available
split_array[i]['comment'] = comment.encode('utf8')
return split_array
@staticmethod
def parse_split_array(split_array):
split_dict = OrderedDict()
for row in split_array:
split, source, start, stop, available, comment = row
split = split.decode('utf8')
source = source.decode('utf8')
comment = comment.decode('utf8')
if available:
if split not in split_dict:
split_dict[split] = OrderedDict()
split_dict[split][source] = (start, stop, comment)
return split_dict
@staticmethod
def unsorted_fancy_index(request, indexable):
"""Safe unsorted list indexing.
Some objects, such as h5py datasets, only support list indexing
if the list is sorted.
This static method adds support for unsorted list indexing by
sorting the requested indices, accessing the corresponding
elements and re-shuffling the result.
Parameters
----------
request : list of int
Unsorted list of example indices.
indexable : any fancy-indexable object
Indexable we'd like to do unsorted fancy indexing on.
"""
if len(request) > 1:
indices = numpy.argsort(request)
data = numpy.empty(shape=(len(request),) + indexable.shape[1:],
dtype=indexable.dtype)
data[indices] = indexable[numpy.array(request)[indices], ...]
else:
data = indexable[request]
return data
    @property
    def split_dict(self):
        # Lazily parse (and cache) the file's `split` attribute into a
        # nested OrderedDict; the file handle is opened and closed around
        # the single attribute read.
        if not hasattr(self, '_split_dict'):
            self._out_of_memory_open()
            handle = self._file_handle
            split_array = handle.attrs['split']
            self._split_dict = H5PYDataset.parse_split_array(split_array)
            self._out_of_memory_close()
        return self._split_dict
    def load_axis_labels(self):
        """Read per-source axis labels from the HDF5 dimension scales."""
        self._out_of_memory_open()
        handle = self._file_handle
        axis_labels = {}
        for source_name in handle:
            if source_name in self.vlen_sources:
                # Variable-length sources store their per-example shape
                # labels in the first dimension's 'shape_labels' scale.
                axis_labels[source_name] = (
                    (handle[source_name].dims[0].label,) +
                    tuple(label.decode('utf8') for label in
                          handle[source_name].dims[0]['shape_labels']))
            else:
                axis_labels[source_name] = tuple(
                    dim.label for dim in handle[source_name].dims)
        self._out_of_memory_close()
        return axis_labels
@property
def available_splits(self):
return tuple(self.split_dict.keys())
@property
def provides_sources(self):
return tuple(self.split_dict[self.which_set].keys())
    @property
    def vlen_sources(self):
        # Lazily determine (and cache) which sources are variable-length:
        # those whose first dimension carries a 'shapes' dimension scale.
        if not hasattr(self, '_vlen_sources'):
            self._out_of_memory_open()
            handle = self._file_handle
            vlen_sources = []
            for source_name in self.sources:
                source = handle[source_name]
                if len(source.dims) > 0 and 'shapes' in source.dims[0]:
                    if len(source.dims) > 1:
                        raise ValueError('Variable-length sources must have '
                                         'only one dimension.')
                    vlen_sources.append(source_name)
            self._vlen_sources = tuple(vlen_sources)
            self._out_of_memory_close()
        return self._vlen_sources
    @property
    def subsets(self):
        # Lazily compute (and cache) one absolute slice per source by
        # combining the user-supplied subset template with the split's
        # (start, stop) boundaries; all sources must yield the same length.
        if not hasattr(self, '_subsets'):
            subsets = [self._subset_template for source in self.sources]
            num_examples = None
            for i, source_name in enumerate(self.sources):
                start, stop = self.split_dict[self.which_set][source_name][:2]
                subset = subsets[i]
                # Template bounds, when given, override the split bounds.
                subset = slice(
                    start if subset.start is None else subset.start,
                    stop if subset.stop is None else subset.stop,
                    subset.step)
                subsets[i] = subset
                if num_examples is None:
                    num_examples = subset.stop - subset.start
                if num_examples != subset.stop - subset.start:
                    raise ValueError("sources have different lengths")
            self._subsets = subsets
        return self._subsets
    def load(self):
        """Populate in-memory data (if requested); called on load/unpickle."""
        # Older pickles may lack the attribute; default to no external handle.
        if not hasattr(self, '_external_file_handle'):
            self.external_file_handle = None
        self._out_of_memory_open()
        handle = self._file_handle
        if self.load_in_memory:
            # Materialize each source's subset (and the per-example shapes of
            # variable-length sources) into memory.
            self.data_sources = tuple(
                handle[source_name][subset] for source_name, subset in
                zip(self.sources, self.subsets))
            self.source_shapes = tuple(
                handle[source_name].dims[0]['shapes'][subset]
                if source_name in self.vlen_sources else None
                for source_name, subset in zip(self.sources, self.subsets))
        else:
            self.data_sources = None
            self.source_shapes = None
        self._out_of_memory_close()
@property
def num_examples(self):
return self.subsets[0].stop - self.subsets[0].start
def open(self):
return None if self.load_in_memory else self._out_of_memory_open()
def _out_of_memory_open(self):
    """Open the HDF5 file unless an external handle was supplied.

    Handles are shared per path through the ``_file_handles`` cache;
    ``_ref_counts`` tracks how many users each handle has so the file
    is only closed when the last user releases it.
    """
    if not self._external_file_handle:
        if self.path not in self._file_handles:
            handle = h5py.File(
                name=self.path, mode="r", driver=self.driver)
            self._file_handles[self.path] = handle
        # Count this opener even when the handle was already cached.
        # NOTE(review): assumes ``_ref_counts`` defaults missing paths
        # to 0 (e.g. a defaultdict(int)) -- confirm where it's defined.
        self._ref_counts[self.path] += 1
def close(self, state):
    """Release the state returned by ``open``.

    For out-of-memory datasets this drops one reference to the shared
    file handle; for in-memory datasets it is a no-op.
    """
    if self.load_in_memory:
        return
    self._out_of_memory_close()
def _out_of_memory_close(self):
    """Drop one reference to the shared handle; close it at zero.

    Mirrors ``_out_of_memory_open``: when the reference count for this
    path reaches zero, the cached h5py handle is closed and both cache
    entries are removed.  External handles are never touched.
    """
    if not self._external_file_handle:
        self._ref_counts[self.path] -= 1
        if not self._ref_counts[self.path]:
            del self._ref_counts[self.path]
            self._file_handles[self.path].close()
            del self._file_handles[self.path]
@property
def _file_handle(self):
    """The open h5py handle to read from.

    Prefers an externally supplied handle; otherwise looks up the
    shared per-path cache.  Raises ``IOError`` when neither exists.
    """
    if self._external_file_handle:
        return self._external_file_handle
    if self.path in self._file_handles:
        return self._file_handles[self.path]
    raise IOError('no open handle for file {}'.format(self.path))
def get_data(self, state=None, request=None):
    """Fetch the requested examples, reshaping variable-length rows.

    Dispatches to the in-memory or on-disk reader, then restores each
    variable-length example to its recorded shape.
    """
    getter = (self._in_memory_get_data if self.load_in_memory
              else self._out_of_memory_get_data)
    data, shapes = getter(state, request)
    for source_index, source_shapes in enumerate(shapes):
        if source_shapes is None:
            continue
        source = data[source_index]
        for example_index, shape in enumerate(source_shapes):
            source[example_index] = source[example_index].reshape(shape)
    return tuple(data)
def _in_memory_get_data(self, state=None, request=None):
    """Slice the in-memory sources.

    Requires a ``request`` and no ``state`` (in-memory access is
    stateless).  Returns ``(data, shapes)`` with ``shapes`` entries of
    ``None`` for fixed-shape sources.
    """
    if state is not None or request is None:
        raise ValueError
    data = [source[request] for source in self.data_sources]
    shapes = [None if shape is None else shape[request]
              for shape in self.source_shapes]
    return data, shapes
def _out_of_memory_get_data(self, state=None, request=None):
    """Read the requested examples directly from the HDF5 file.

    ``request`` is a slice or list of indices in split-relative
    coordinates; it is shifted by each source's subset start before
    indexing the file.  Returns ``(data, shapes)`` where ``shapes[i]``
    is ``None`` for fixed-shape sources.
    """
    data = []
    shapes = []
    handle = self._file_handle
    for source_name, subset in zip(self.sources, self.subsets):
        if isinstance(request, slice):
            # NOTE(review): assumes request.start and request.stop are
            # not None -- confirm callers always pass bounded slices.
            req = slice(request.start + subset.start,
                        request.stop + subset.start, request.step)
            val = handle[source_name][req]
            if source_name in self.vlen_sources:
                shape = handle[
                    source_name].dims[0]['shapes'][req]
            else:
                shape = None
        elif isinstance(request, list):
            # Offset every index into file coordinates.
            req = [index + subset.start for index in request]
            if self.sort_indices:
                # presumably because h5py fancy indexing requires
                # indices in increasing order -- confirm
                val = self.unsorted_fancy_index(req, handle[source_name])
                if source_name in self.vlen_sources:
                    shape = self.unsorted_fancy_index(
                        req, handle[source_name].dims[0]['shapes'])
                else:
                    shape = None
            else:
                val = handle[source_name][req]
                if source_name in self.vlen_sources:
                    shape = handle[
                        source_name].dims[0]['shapes'][req]
                else:
                    shape = None
        else:
            raise ValueError
        data.append(val)
        shapes.append(shape)
    return data, shapes
| |
"""Manages the creation and deployment of desired services configuration."""
# coding=utf-8
#
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from time import time
import f5_cccl.exceptions as exc
from f5_cccl.service.config_reader import ServiceConfigReader
from f5_cccl.service.validation import ServiceConfigValidator
from f5_cccl.resource.ltm.node import ApiNode
from f5_cccl.utils.route_domain import (
encoded_normalize_address_with_route_domain)
from f5_cccl.utils.route_domain import split_ip_with_route_domain
LOGGER = logging.getLogger(__name__)
# Check for upgrade issues on first pass only
class ServiceConfigDeployer(object):
    """CCCL config deployer class.

    Computes the difference between the desired service configuration
    and the current BIG-IP partition state, then creates, updates, and
    deletes resources until the task queues drain or no more progress
    can be made.
    """
    # Class-level flag: legacy-name cleanup should only run on the
    # first deploy pass of the process (currently disabled -- see
    # deploy_ltm).
    first_pass = True

    def __init__(self, bigip_proxy):
        """Initialize the config deployer."""
        self._bigip = bigip_proxy

    # pylint: disable=too-many-locals
    def _get_resource_tasks(self, existing, desired):
        """Get the list of resources to create, delete, update.
        Here, the term 'manage' means absolute control by CCCL.
        The term 'unmanaged' means the end-user has control over
        the resource except for fields we must change. We don't
        undo end-user changes (unless it conflicts with CCCL
        requested changes) and we don't delete the resource.

        Returns a 4-tuple:
        (create_list, update_list, delete_list, unmanaged_list).
        """
        # Partition the existing resources by their whitelist flag.
        unmanaged = {
            name: resource for name, resource in list(existing.items())
            if resource.whitelist is True
        }
        managed = {
            name: resource for name, resource in list(existing.items())
            if resource.whitelist is False
        }
        desired_set = set(desired)
        existing_set = set(existing)
        unmanaged_set = set(unmanaged)
        managed_set = set(managed)
        # Create any managed resource that doesn't currently exist
        create_list = [
            desired[resource] for resource in
            desired_set - existing_set
        ]
        # Update managed resources that diff between desired and actual
        update_list = [
            desired[resource] for resource in desired_set & managed_set
            if desired[resource] != managed[resource]
        ]
        # Merge unmanaged resources with desired if needed
        for resource in unmanaged_set:
            update_resource = self._merge_resource(
                resource, desired, unmanaged)
            if update_resource:
                update_list.append(update_resource)
        # Delete any managed resource that isn't still desired
        delete_list = [
            managed[resource] for resource in
            managed_set - desired_set
        ]
        # These resources, and the resource they reference,
        # should not be deleted
        unmanaged_list = [
            unmanaged[resource] for resource in unmanaged_set
        ]
        return (create_list, update_list, delete_list, unmanaged_list)

    def _merge_resource(self, resource, desired, unmanaged):
        """Merge desired settings with existing settings.
        If there are any differences, return the merged object.
        Returns None when the merge produced no changes.
        """
        unmanaged_resource = unmanaged[resource]  # this always exists
        desired_resource = desired.get(resource)
        if desired_resource is None:
            desired_data = {}
        else:
            desired_data = desired_resource.data
        # determine if any changes occurred after merging
        if unmanaged_resource.merge(desired_data):
            return unmanaged_resource
        return None

    def _create_resources(self, create_list):
        """Iterate over the resources and call create method.

        Returns the list of resources whose creation failed and should
        be retried on a later pass.
        """
        LOGGER.debug("Creating %d resources...", len(create_list))
        retry_list = list()
        for resource in create_list:
            try:
                start_time = time()
                resource.create(self._bigip.mgmt_root())
                LOGGER.debug("Created %s in %.5f seconds.",
                             resource.name, (time() - start_time))
            except exc.F5CcclResourceConflictError:
                # Already present on the BIG-IP; not an error.
                LOGGER.warning(
                    "Resource /%s/%s already exists, skipping task...",
                    resource.partition, resource.name)
            except (exc.F5CcclResourceCreateError,
                    exc.F5CcclError) as e:
                LOGGER.error(str(e))
                LOGGER.error(
                    "Resource /%s/%s creation error, requeuing task...",
                    resource.partition, resource.name)
                retry_list.append(resource)
        return retry_list

    def _update_resources(self, update_list):
        """Iterate over the resources and call update method.

        Returns the list of resources whose update failed and should
        be retried on a later pass.
        """
        LOGGER.debug("Updating %d resources...", len(update_list))
        retry_list = list()
        for resource in update_list:
            try:
                start_time = time()
                resource.update(self._bigip.mgmt_root())
                LOGGER.debug("Updated %s in %.5f seconds.",
                             resource.name, (time() - start_time))
            except exc.F5CcclResourceNotFoundError as e:
                # Gone from the BIG-IP; nothing left to update.
                LOGGER.warning(
                    "Resource /%s/%s does not exist, skipping task...",
                    resource.partition, resource.name)
            except (exc.F5CcclResourceUpdateError,
                    exc.F5CcclResourceRequestError,
                    exc.F5CcclError) as e:
                LOGGER.error(str(e))
                LOGGER.error(
                    "Resource /%s/%s update error, requeuing task...",
                    resource.partition, resource.name)
                retry_list.append(resource)
        return retry_list

    def _delete_resources(self, delete_list, retry=True):
        """Iterate over the resources and call delete method.

        Returns the list of resources whose delete failed; when
        ``retry`` is False failures are logged but not requeued.
        """
        LOGGER.debug("Deleting %d resources...", len(delete_list))
        retry_list = list()
        for resource in delete_list:
            try:
                start_time = time()
                resource.delete(self._bigip.mgmt_root())
                LOGGER.debug("Deleted %s in %.5f seconds.",
                             resource.name, (time() - start_time))
            except exc.F5CcclResourceNotFoundError:
                # Already gone; treat the delete as successful.
                LOGGER.warning(
                    "Resource /%s/%s does not exist, skipping task...",
                    resource.partition, resource.name)
            except (exc.F5CcclResourceDeleteError,
                    exc.F5CcclResourceRequestError,
                    exc.F5CcclError) as e:
                LOGGER.error(str(e))
                if retry:
                    LOGGER.error(
                        "Resource /%s/%s delete error, requeuing task...",
                        resource.partition, resource.name)
                    retry_list.append(resource)
        return retry_list

    def _get_monitor_tasks(self, desired_config):
        """Get CRUD tasks for all monitors.

        Aggregates tasks across every supported health-monitor type.
        The 'unmanaged' element of each per-type result is dropped;
        monitor whitelisting is handled via
        ignore_unmanaged_references.
        """
        create_monitors = list()
        delete_monitors = list()
        update_monitors = list()
        for hm_type in ['http', 'https', 'tcp', 'icmp', 'udp']:
            existing = self._bigip.get_monitors(hm_type)
            config_key = "{}_monitors".format(hm_type)
            desired = desired_config.get(config_key, dict())
            (create_hm, update_hm, delete_hm) = (
                self._get_resource_tasks(existing, desired)[0:3])
            create_monitors += create_hm
            update_monitors += update_hm
            delete_monitors += delete_hm
        return (create_monitors, update_monitors, delete_monitors)

    def _get_user_tunnel_tasks(self, desired):
        """Get the update tasks for user-created fdb tunnels.

        User-created tunnels are never created or deleted by CCCL;
        only updates to tunnels that already exist are returned.
        """
        all_tunnels = self._bigip.get_fdb_tunnels(all_tunnels=True)
        # Get only the tunnels we desire
        update_list = set(desired) & set(all_tunnels)
        update_list = [
            desired[resource] for resource in update_list
            if desired[resource] != all_tunnels[resource]
        ]
        return update_list

    # pylint: disable=too-many-locals
    def _desired_nodes(self, default_route_domain):
        """Desired nodes is inferred from the active pool members.

        For each pool member, find the existing node(s) with a matching
        (route-domain-normalized) address and build the node resource
        we want to keep, forced to the enabled/up admin state.
        """
        desired_nodes = dict()
        nodes = self._bigip.get_nodes()
        pools = self._bigip.get_pools(True)
        for pool in pools:
            for member in pools[pool].members:
                # '%3A' is the URL-encoded ':' separating address:port.
                pool_addr = member.name.split('%3A')[0]
                pool_addr_rd = encoded_normalize_address_with_route_domain(
                    pool_addr, default_route_domain, True, False)
                # make a copy to iterate over, then delete from 'nodes'
                # NOTE(review): nothing is actually deleted from
                # 'nodes' in this loop -- the copy appears defensive
                # only; confirm before simplifying.
                node_list = list(nodes.keys())
                for key in node_list:
                    node_addr = nodes[key].data['address']
                    node_addr_rd = encoded_normalize_address_with_route_domain(
                        node_addr, default_route_domain, False, False)
                    if node_addr_rd == pool_addr_rd:
                        node = {'name': key,
                                'partition': nodes[key].partition,
                                'address': node_addr_rd,
                                'default_route_domain': default_route_domain,
                                'state': 'user-up',
                                'session': 'user-enabled'}
                        desired_node = ApiNode(**node)
                        desired_nodes[desired_node.name] = desired_node
        return desired_nodes

    # pylint: disable=too-many-locals
    def _pre_deploy_legacy_ltm_cleanup(self):
        """Remove legacy named resources (pre Route Domain support)
        We now create node resources with names that include the route
        domain whether the end user specified them or not. This prevents
        inconsistent behavior when the default route domain is changed for
        the managed partition.
        This function can be removed when the cccl version >= 2.0
        """
        # Detect legacy names (nodes do not include the route domain)
        self._bigip.refresh_ltm()
        existing_nodes = self._bigip.get_nodes()
        node_list = list(existing_nodes.keys())
        # for/else: if every node name already carries a route domain
        # (loop completes without break), there is nothing to clean up.
        for node_name in node_list:
            route_domain = split_ip_with_route_domain(node_name)[1]
            if route_domain is None:
                break
        else:
            return
        existing_iapps = self._bigip.get_app_svcs()
        existing_virtuals = self._bigip.get_virtuals()
        existing_policies = self._bigip.get_l7policies()
        existing_irules = self._bigip.get_irules()
        existing_internal_data_groups = self._bigip.get_internal_data_groups()
        existing_pools = self._bigip.get_pools()
        # Passing an empty 'desired' dict turns every managed resource
        # into a delete task (index [2] of the task tuple).
        delete_iapps = self._get_resource_tasks(existing_iapps, {})[2]
        delete_virtuals = self._get_resource_tasks(existing_virtuals, {})[2]
        delete_policies = self._get_resource_tasks(existing_policies, {})[2]
        delete_irules = self._get_resource_tasks(existing_irules, {})[2]
        delete_internal_data_groups = self._get_resource_tasks(
            existing_internal_data_groups, {})[2]
        delete_pools = self._get_resource_tasks(existing_pools, {})[2]
        delete_monitors = self._get_monitor_tasks({})[2]
        delete_nodes = self._get_resource_tasks(existing_nodes, {})[2]
        delete_tasks = delete_iapps + delete_virtuals + delete_policies + \
            delete_irules + delete_internal_data_groups + delete_pools + \
            delete_monitors + delete_nodes
        taskq_len = len(delete_tasks)
        finished = False
        LOGGER.debug("Removing legacy resources...")
        while not finished:
            LOGGER.debug("Legacy cleanup service task queue length: %d",
                         taskq_len)
            # Must remove all resources that depend on nodes (vs, pools, ???)
            delete_tasks = self._delete_resources(delete_tasks)
            tasks_remaining = len(delete_tasks)
            # Did the task queue shrink?
            if tasks_remaining >= taskq_len or tasks_remaining == 0:
                # No, we have stopped making progress.
                finished = True
            # Reset the taskq length.
            taskq_len = tasks_remaining

    def _post_deploy(self, desired_config, default_route_domain):
        """Perform post-deployment service tasks/cleanup.
        Remove superfluous resources that could not be inferred from the
        desired config.
        """
        LOGGER.debug("Perform post-deploy service tasks...")
        self._bigip.refresh_ltm()
        # Delete/update nodes (no creation)
        LOGGER.debug("Post-process nodes.")
        existing = self._bigip.get_nodes()
        desired = self._desired_nodes(default_route_domain)
        (update_nodes, delete_nodes) = \
            self._get_resource_tasks(existing, desired)[1:3]
        self._update_resources(update_nodes)
        self._delete_resources(delete_nodes)
        # Delete extraneous virtual addresses
        LOGGER.debug("Remove superfluous virtual addresses.")
        desired = desired_config.get('virtual_addresses', dict())
        (referenced, unreferenced) = (
            self._bigip.get_virtual_address_references()
        )
        delete_vaddrs = self._get_resource_tasks(unreferenced, desired)[2]
        self._delete_resources(delete_vaddrs)
        # Get the set of virtual addresses that are created by virtuals
        # but not in the set of desired virtual addresses.
        update_vaddrs = list()
        auto_created = self._get_resource_tasks(referenced, desired)[2]
        for vaddr in auto_created:
            # Auto-created virtual addresses are kept, but must be
            # re-enabled if they were disabled.
            if vaddr.data['enabled'] == "no":
                vaddr.data['enabled'] = "yes"
                update_vaddrs.append(vaddr)
        self._update_resources(update_vaddrs)

    def deploy_ltm(  # pylint: disable=too-many-locals,too-many-statements
            self, desired_config, default_route_domain):
        """Deploy the managed partition with the desired LTM config.
        :param desired_config: A dictionary with the configuration
        to be applied to the bigip managed partition.
        :returns: The number of tasks that could not be completed.
        """
        # Remove legacy resources (pre RD-named resources) before deploying
        # new configuration
        # Fix: Customer issue- Hotfix
        # if ServiceConfigDeployer.first_pass:
        #     ServiceConfigDeployer.first_pass = False
        #     self._pre_deploy_legacy_ltm_cleanup()
        self._bigip.refresh_ltm()
        # Get the list of virtual address tasks
        LOGGER.debug("Getting virtual address tasks...")
        existing = self._bigip.get_virtual_addresses()
        desired = desired_config.get('virtual_addresses', dict())
        (create_vaddrs, update_vaddrs) = (
            self._get_resource_tasks(existing, desired))[0:2]
        # Get the list of virtual server tasks
        LOGGER.debug("Getting virtual server tasks...")
        existing_virtuals = self._bigip.get_virtuals()
        desired = desired_config.get('virtuals', dict())
        (create_virtuals, update_virtuals, delete_virtuals,
         unmanaged_virtuals) = (
             self._get_resource_tasks(existing_virtuals, desired)[0:4])
        # Get the list of pool tasks
        LOGGER.debug("Getting pool tasks...")
        existing_pools = self._bigip.get_pools()
        desired = desired_config.get('pools', dict())
        (create_pools, update_pools, delete_pools, unmanaged_pools) = (
            self._get_resource_tasks(existing_pools, desired)[0:4])
        # Get the list of irule tasks
        LOGGER.debug("Getting iRule tasks...")
        existing = self._bigip.get_irules()
        desired = desired_config.get('irules', dict())
        (create_irules, update_irules, delete_irules) = (
            self._get_resource_tasks(existing, desired)[0:3])
        # Get the list of internal data group tasks
        LOGGER.debug("Getting InternalDataGroup tasks...")
        existing = self._bigip.get_internal_data_groups()
        desired = desired_config.get('internaldatagroups', dict())
        (create_internal_data_groups, update_internal_data_groups,
         delete_internal_data_groups) = (
             self._get_resource_tasks(existing, desired)[0:3])
        # Get the list of policy tasks
        LOGGER.debug("Getting policy tasks...")
        existing = self._bigip.get_l7policies()
        desired = desired_config.get('l7policies', dict())
        (create_policies, update_policies, delete_policies) = (
            self._get_resource_tasks(existing, desired)[0:3])
        # Get the list of iapp tasks
        LOGGER.debug("Getting iApp tasks...")
        existing_iapps = self._bigip.get_app_svcs()
        desired = desired_config.get('iapps', dict())
        (create_iapps, update_iapps, delete_iapps) = (
            self._get_resource_tasks(existing_iapps, desired)[0:3])
        # Get the list of monitor tasks
        LOGGER.debug("Getting monitor tasks...")
        (create_monitors, update_monitors, delete_monitors) = (
            self._get_monitor_tasks(desired_config))
        # Trim resources from being deleted if they are referenced from
        # a whitelisted resource (an 'unmanaged' resource)
        ignore_unmanaged_references(unmanaged_virtuals, unmanaged_pools,
                                    delete_policies, delete_irules,
                                    delete_pools, delete_monitors,
                                    delete_internal_data_groups)
        LOGGER.debug("Building task lists...")
        # Order matters: dependencies (vaddrs, monitors, pools, rules)
        # are created before the virtuals/iapps that reference them,
        # and deleted in roughly the reverse order.
        create_tasks = create_vaddrs + create_monitors + \
            create_pools + create_internal_data_groups + create_irules + \
            create_policies + create_virtuals + create_iapps
        update_tasks = update_vaddrs + update_monitors + \
            update_pools + update_internal_data_groups + update_irules + \
            update_policies + update_virtuals + update_iapps
        delete_tasks = delete_iapps + delete_virtuals + delete_policies + \
            delete_irules + delete_internal_data_groups + delete_pools + \
            delete_monitors
        taskq_len = len(create_tasks) + len(update_tasks) + len(delete_tasks)
        taskq_len = self._run_tasks(
            taskq_len, create_tasks, update_tasks, delete_tasks)
        self._post_deploy(desired_config, default_route_domain)
        return taskq_len

    def deploy_net(self, desired_config):  # pylint: disable=too-many-locals
        """Deploy the managed partition with the desired NET config.
        :param desired_config: A dictionary with the configuration
        to be applied to the bigip managed partition.
        :returns: The number of tasks that could not be completed.
        """
        self._bigip.refresh_net()
        # Get the list of arp tasks
        LOGGER.debug("Getting arp tasks...")
        existing = self._bigip.get_arps()
        desired = desired_config.get('arps', dict())
        # Chained assignment: all three names alias one shared empty
        # list; safe because they are only reassigned below, never
        # mutated in place.
        create_arps = update_arps = delete_arps = list()
        # To avoid recreating ARPs
        if len(desired) > 0:
            (create_arps, update_arps, delete_arps) = (
                self._get_resource_tasks(existing, desired)[0:3])
        # Get the list of tunnel tasks
        LOGGER.debug("Getting tunnel tasks...")
        existing = self._bigip.get_fdb_tunnels()
        desired = desired_config.get('fdbTunnels', dict())
        (create_tunnels, update_tunnels, delete_tunnels) = (
            self._get_resource_tasks(existing, desired)[0:3])
        # If there are pre-existing (user-created) tunnels that we are
        # managing, we want to only update these tunnels.
        LOGGER.debug("Getting pre-existing tunnel update tasks...")
        desired = desired_config.get('userFdbTunnels', dict())
        update_existing_tunnels = self._get_user_tunnel_tasks(desired)
        LOGGER.debug("Building task lists...")
        create_tasks = create_arps + create_tunnels
        update_tasks = update_arps + update_tunnels + update_existing_tunnels
        delete_tasks = delete_arps + delete_tunnels
        taskq_len = len(create_tasks) + len(update_tasks) + len(delete_tasks)
        return self._run_tasks(
            taskq_len, create_tasks, update_tasks, delete_tasks)

    def _run_tasks(self, taskq_len, create_tasks, update_tasks, delete_tasks):
        """Create, update, and delete the necessary resources.

        Returns the number of tasks still outstanding when progress
        stopped (0 on full success).
        """
        # 'finished' indicates that the task queue is empty, or there is
        # no way to continue to make progress. If there are errors in
        # deploying any resource, it is saved in the queue until another
        # pass can be made to deploy the configuration. When we have
        # gone through the queue on a pass without shrinking the task
        # queue, it is determined that progress has stopped and the
        # loop is exited with work remaining.
        finished = False
        while not finished:
            LOGGER.debug("Service task queue length: %d", taskq_len)
            # Iterate over the list of resources to create
            create_tasks = self._create_resources(create_tasks)
            # Iterate over the list of resources to update
            update_tasks = self._update_resources(update_tasks)
            # Iterate over the list of resources to delete
            delete_tasks = self._delete_resources(delete_tasks)
            tasks_remaining = (
                len(create_tasks) + len(update_tasks) + len(delete_tasks))
            # Did the task queue shrink?
            if tasks_remaining >= taskq_len or tasks_remaining == 0:
                # No, we have stopped making progress.
                finished = True
            # Reset the taskq length.
            taskq_len = tasks_remaining
        return taskq_len
# pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches
def ignore_unmanaged_references(unmanaged_virtuals, unmanaged_pools,
                                delete_policies, delete_irules,
                                delete_pools, delete_monitors,
                                delete_data_groups):
    """Ignore any referenced resource for whitelisted virtuals and pools
    This is necessary for situations where a CCCL created resource is
    eventually whitelisted. If and when the resource is no
    longer controlled by CCCL, it will not be able to determine which
    properties were merged (and need to be backed out).
    ISSUE: This function cannot detect data groups used by irules

    The delete_* list arguments are pruned in place; nothing is
    returned.
    """
    ignore_policies = set()
    ignore_irules = set()
    ignore_pools = set()
    ignore_monitors = set()
    ignore_data_groups = set()
    # Start off getting a list of referenced resources
    for virtual in unmanaged_virtuals:
        for rule in virtual.data['rules']:
            ignore_irules.add(rule)
        for policy in virtual.data['policies']:
            ignore_policies.add("/{}/{}".format(policy['partition'],
                                                policy['name']))
    # Any pool referenced by a retained policy action must be retained.
    for policy in delete_policies:
        if policy.full_path() in ignore_policies:
            for rule in policy.data['rules']:
                if 'actions' in rule:
                    for action in rule['actions']:
                        if 'pool' in action:
                            # pool name contains partition already
                            ignore_pools.add(action['pool'])
    # For irules we examine the irule content for a reference to
    # any of the data groups we want to delete (not ideal). If we
    # find a mention of the data group, we don't delete it.
    for irule in delete_irules:
        if irule.full_path() in ignore_irules:
            for data_group in delete_data_groups:
                if data_group.name in irule.data['apiAnonymous']:
                    ignore_data_groups.add(data_group.full_path())
    # For the pools we are not going to delete (either unmanaged, or
    # referenced by an unmanaged virtual server), we must not delete
    # the referenced health monitor.
    for pool in unmanaged_pools:
        ignore_monitors.update(pool.monitors())
    for pool in delete_pools:
        if pool.full_path() in ignore_pools:
            ignore_monitors.update(pool.monitors())

    # Remove from the delete list any resource still used by the
    # whitelisted virtuals
    def _prune_resources(resource_name, resource_list, ignore_resources):
        # Single-pass in-place filter; the caller's list object must be
        # mutated (via slice assignment), not rebound.  Replaces the
        # previous O(n^2) delete-and-restart scan with the same logging
        # per pruned resource.
        kept = []
        for resource in resource_list:
            if resource.full_path() in ignore_resources:
                LOGGER.debug("Pruning %s resource %s from delete list",
                             resource_name, resource.full_path())
            else:
                kept.append(resource)
        resource_list[:] = kept

    _prune_resources("policy", delete_policies, ignore_policies)
    _prune_resources("irule", delete_irules, ignore_irules)
    _prune_resources("pool", delete_pools, ignore_pools)
    _prune_resources("monitor", delete_monitors, ignore_monitors)
    _prune_resources("data_group", delete_data_groups, ignore_data_groups)
class ServiceManager(object):
    """CCCL apply config implementation class."""

    def __init__(self, bigip_proxy, partition, schema):
        """Initialize the ServiceManager.

        Args:
            bigip_proxy: BigIPProxy object, f5_cccl.bigip.BigIPProxy.
            partition: The managed partition.
            schema: Schema that defines the structure of a service
                configuration.

        Raises:
            F5CcclError: Error initializing the validator or reading
                the API schema.
        """
        self._bigip = bigip_proxy
        self._partition = partition
        self._config_validator = ServiceConfigValidator(schema)
        self._config_reader = ServiceConfigReader(partition)
        self._service_deployer = ServiceConfigDeployer(bigip_proxy)

    def get_partition(self):
        """Get the name of the managed partition."""
        return self._partition

    def apply_ltm_config(self, service_config, user_agent):
        """Apply the desired LTM service configuration.

        Validates, reads, and deploys ``service_config`` against the
        managed partition.

        Returns:
            The number of resources that were not successfully deployed.

        Raises:
            F5CcclValidationError: the configuration does not conform
                to the API schema.
        """
        LOGGER.debug("apply_ltm_config start")
        begin = time()
        # Schema validation happens before anything touches the BIG-IP.
        self._config_validator.validate(service_config)
        route_domain = self._bigip.get_default_route_domain()
        desired = self._config_reader.read_ltm_config(
            service_config, route_domain, user_agent)
        result = self._service_deployer.deploy_ltm(desired, route_domain)
        LOGGER.debug(
            "apply_ltm_config took %.5f seconds.", (time() - begin))
        return result

    def apply_net_config(self, service_config):
        """Apply the desired NET service configuration.

        Validates, reads, and deploys ``service_config`` against the
        managed partition.

        Returns:
            The number of resources that were not successfully deployed.

        Raises:
            F5CcclValidationError: the configuration does not conform
                to the API schema.
        """
        LOGGER.debug("apply_net_config start")
        begin = time()
        self._config_validator.validate(service_config)
        route_domain = self._bigip.get_default_route_domain()
        desired = self._config_reader.read_net_config(
            service_config, route_domain)
        result = self._service_deployer.deploy_net(desired)
        LOGGER.debug(
            "apply_net_config took %.5f seconds.", (time() - begin))
        return result
| |
from __future__ import unicode_literals
import datetime
from dateutil.parser import parse
from decimal import Decimal
import re
try :
from django.utils import importlib
except ImportError:
import importlib
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils import datetime_safe
from django.utils import six
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
    """Sentinel used to distinguish "no default given" from ``None``."""

    def __str__(self):
        return 'No default provided.'
# Loose ISO-8601 matchers used by the date/datetime fields.  Raw strings
# keep the \d escapes literal for re and avoid the invalid-escape
# DeprecationWarning that non-raw '\d' triggers in modern Python.
DATE_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the ApiField variants.
class ApiField(object):
    """The base implementation of a field used by the resources."""
    dehydrated_type = 'string'
    help_text = ''

    def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None, use_in='all'):
        """
        Sets up the field. This is generally called when the containing
        ``Resource`` is initialized.

        Optionally accepts an ``attribute``, which should be a string of
        either an instance attribute or callable off the object during the
        ``dehydrate`` or push data onto an object during the ``hydrate``.
        Defaults to ``None``, meaning data will be manually accessed.

        Optionally accepts a ``default``, which provides default data when the
        object being ``dehydrated``/``hydrated`` has no data on the field.
        Defaults to ``NOT_PROVIDED``.

        Optionally accepts a ``null``, which indicates whether or not a
        ``None`` is allowable data on the field. Defaults to ``False``.

        Optionally accepts a ``blank``, which indicates whether or not
        data may be omitted on the field. Defaults to ``False``.

        Optionally accepts a ``readonly``, which indicates whether the field
        is used during the ``hydrate`` or not. Defaults to ``False``.

        Optionally accepts a ``unique``, which indicates if the field is a
        unique identifier for the object.

        Optionally accepts ``help_text``, which lets you provide a
        human-readable description of the field exposed at the schema level.
        Defaults to the per-Field definition.

        Optionally accepts ``use_in``. This may be one of ``list``, ``detail``
        ``all`` or a callable which accepts a ``bundle`` and returns
        ``True`` or ``False``. Indicates whether this field will be included
        during dehydration of a list of objects or a single object. If ``use_in``
        is a callable, and returns ``True``, the field will be included during
        dehydration.
        Defaults to ``all``.
        """
        # Track what the index thinks this field is called.
        self.instance_name = None
        self._resource = None
        self.attribute = attribute
        self._default = default
        self.null = null
        self.blank = blank
        self.readonly = readonly
        self.value = None
        self.unique = unique
        # Silently fall back to 'all' for unrecognized non-callable
        # use_in values.
        self.use_in = 'all'
        if use_in in ['all', 'detail', 'list'] or callable(use_in):
            self.use_in = use_in
        if help_text:
            self.help_text = help_text

    def contribute_to_class(self, cls, name):
        # Do the least we can here so that we don't hate ourselves in the
        # morning.
        self.instance_name = name
        self._resource = cls

    def has_default(self):
        """Returns a boolean of whether this field has a default value."""
        return self._default is not NOT_PROVIDED

    @property
    def default(self):
        """Returns the default value for the field."""
        if callable(self._default):
            return self._default()
        return self._default

    def dehydrate(self, bundle, for_list=True):
        """
        Takes data from the provided object and prepares it for the
        resource.
        """
        if self.attribute is not None:
            # Check for `__` in the field for looking through the relation.
            attrs = self.attribute.split('__')
            current_object = bundle.obj
            for attr in attrs:
                previous_object = current_object
                current_object = getattr(current_object, attr, None)
                if current_object is None:
                    if self.has_default():
                        # NOTE(review): uses ``self._default`` directly,
                        # so a callable default is NOT invoked here
                        # (unlike ``self.default`` used below) -- confirm
                        # intent before changing.
                        current_object = self._default
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    elif self.null:
                        current_object = None
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
                        break
                    else:
                        raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))
            if callable(current_object):
                current_object = current_object()
            return self.convert(current_object)
        if self.has_default():
            return self.convert(self.default)
        else:
            return None

    def convert(self, value):
        """
        Handles conversion between the data found and the type of the field.
        Extending classes should override this method and provide correct
        data coercion.
        """
        return value

    def hydrate(self, bundle):
        """
        Takes data stored in the bundle for the field and returns it. Used for
        taking simple data and building a instance object.
        """
        if self.readonly:
            return None
        # PEP 8: 'not in' instead of 'not ... in'.
        if self.instance_name not in bundle.data:
            if getattr(self, 'is_related', False) and not getattr(self, 'is_m2m', False):
                # We've got an FK (or alike field) & a possible parent object.
                # Check for it.
                if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
                    return bundle.related_obj
            if self.blank:
                return None
            elif self.attribute and getattr(bundle.obj, self.attribute, None):
                # NOTE(review): truthiness test -- falsy existing values
                # (0, '', False) fall through to the branches below;
                # confirm that is the intended behavior.
                return getattr(bundle.obj, self.attribute)
            elif self.instance_name and hasattr(bundle.obj, self.instance_name):
                return getattr(bundle.obj, self.instance_name)
            elif self.has_default():
                if callable(self._default):
                    return self._default()
                return self._default
            elif self.null:
                return None
            else:
                raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
        return bundle.data[self.instance_name]
class CharField(ApiField):
    """
    A text field of arbitrary length.
    Covers both ``models.CharField`` and ``models.TextField``.
    """
    dehydrated_type = 'string'
    help_text = 'Unicode string data. Ex: "Hello World"'

    def convert(self, value):
        # Preserve None; coerce everything else to a unicode string.
        return None if value is None else six.text_type(value)
class FileField(ApiField):
    """
    A file-related field.
    Covers both ``models.FileField`` and ``models.ImageField``.
    """
    dehydrated_type = 'string'
    help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'

    def convert(self, value):
        if value is None:
            return None
        # Prefer the ``url`` attribute of a ``File``; fall back to the
        # raw value when it's a plain string or an overridden default.
        try:
            url = getattr(value, 'url', value)
        except ValueError:
            # Accessing ``.url`` on a file with no backing storage
            # raises ValueError; treat that as "no URL".
            return None
        return url
class IntegerField(ApiField):
    """
    An integer field.

    Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
    ``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
    """
    dehydrated_type = 'integer'
    help_text = 'Integer data. Ex: 2673'
    def convert(self, value):
        # ``None`` is preserved so null columns round-trip.
        return None if value is None else int(value)
class FloatField(ApiField):
    """
    A floating point field.
    """
    dehydrated_type = 'float'
    help_text = 'Floating point numeric data. Ex: 26.73'
    def convert(self, value):
        # ``None`` is preserved so null columns round-trip.
        return None if value is None else float(value)
class DecimalField(ApiField):
    """
    A decimal field.
    """
    dehydrated_type = 'decimal'
    help_text = 'Fixed precision numeric data. Ex: 26.73'
    def convert(self, value):
        # ``None`` is preserved; everything else becomes a ``Decimal``.
        return None if value is None else Decimal(value)
    def hydrate(self, bundle):
        hydrated = super(DecimalField, self).hydrate(bundle)
        # Coerce truthy non-Decimal inputs (e.g. strings from JSON).
        if hydrated and not isinstance(hydrated, Decimal):
            hydrated = Decimal(hydrated)
        return hydrated
class BooleanField(ApiField):
    """
    A boolean field.

    Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
    """
    dehydrated_type = 'boolean'
    help_text = 'Boolean data. Ex: True'
    def convert(self, value):
        # ``None`` is preserved so null booleans round-trip.
        return None if value is None else bool(value)
class ListField(ApiField):
    """
    A list field.
    """
    dehydrated_type = 'list'
    help_text = "A list of data. Ex: ['abc', 26.73, 8]"
    def convert(self, value):
        # ``None`` is preserved; any other iterable becomes a list.
        return None if value is None else list(value)
class DictField(ApiField):
    """
    A dictionary field.
    """
    dehydrated_type = 'dict'
    help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
    def convert(self, value):
        # ``None`` is preserved; any other mapping becomes a dict.
        return None if value is None else dict(value)
class DateField(ApiField):
    """
    A date field.
    """
    dehydrated_type = 'date'
    help_text = 'A date as a string. Ex: "2010-11-10"'
    def convert(self, value):
        # Turn a "YYYY-MM-DD"-style string into a date; pass through
        # non-string values (including ``None``) unchanged.
        if value is None:
            return None
        if isinstance(value, six.string_types):
            match = DATE_REGEX.search(value)
            if match:
                data = match.groupdict()
                return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
            else:
                raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
        return value
    def hydrate(self, bundle):
        value = super(DateField, self).hydrate(bundle)
        # Anything without a ``year`` attribute is not yet a date/datetime.
        if value and not hasattr(value, 'year'):
            try:
                # Try to rip a date/datetime out of it.
                value = make_aware(parse(value))
                if hasattr(value, 'hour'):
                    # A full datetime was parsed; keep only the date part.
                    value = value.date()
            except ValueError:
                # Leave unparseable values untouched for later validation.
                pass
        return value
class DateTimeField(ApiField):
    """
    A datetime field.
    """
    dehydrated_type = 'datetime'
    help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'
    def convert(self, value):
        # Turn an ISO-8601-style string into an aware datetime; pass through
        # non-string values (including ``None``) unchanged.
        if value is None:
            return None
        if isinstance(value, six.string_types):
            match = DATETIME_REGEX.search(value)
            if match:
                data = match.groupdict()
                return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))
            else:
                raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
        return value
    def hydrate(self, bundle):
        value = super(DateTimeField, self).hydrate(bundle)
        # Anything without a ``year`` attribute is not yet a date/datetime.
        if value and not hasattr(value, 'year'):
            if isinstance(value, six.string_types):
                try:
                    # Try to rip a date/datetime out of it.
                    value = make_aware(parse(value))
                except (ValueError, TypeError):
                    raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
            else:
                raise ApiFieldError("Datetime provided to '%s' field must be a string: %s" % (self.instance_name, value))
        return value
class RelatedField(ApiField):
    """
    Provides access to data that is related within the database.
    The ``RelatedField`` base class is not intended for direct use but provides
    functionality that ``ToOneField`` and ``ToManyField`` build upon.
    The contents of this field actually point to another ``Resource``,
    rather than the related object. This allows the field to represent its data
    in different ways.
    The abstractions based around this are "leaky" in that, unlike the other
    fields provided by ``tastypie``, these fields don't handle arbitrary objects
    very well. The subclasses use Django's ORM layer to make things go, though
    there is no ORM-specific code at this level.
    """
    dehydrated_type = 'related'
    is_related = True
    self_referential = False
    help_text = 'A related resource. Can be either a URI or set of nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None, use_in='all', full_list=True, full_detail=True):
        """
        Builds the field and prepares it to access to related data.
        The ``to`` argument should point to a ``Resource`` class, NOT
        to a ``Model``. Required.
        The ``attribute`` argument should specify what field/callable points to
        the related data on the instance object. Required.
        Optionally accepts a ``related_name`` argument. Currently unused, as
        unlike Django's ORM layer, reverse relations between ``Resource``
        classes are not automatically created. Defaults to ``None``.
        Optionally accepts a ``null``, which indicated whether or not a
        ``None`` is allowable data on the field. Defaults to ``False``.
        Optionally accepts a ``blank``, which indicated whether or not
        data may be omitted on the field. Defaults to ``False``.
        Optionally accepts a ``readonly``, which indicates whether the field
        is used during the ``hydrate`` or not. Defaults to ``False``.
        Optionally accepts a ``full``, which indicates how the related
        ``Resource`` will appear post-``dehydrate``. If ``False``, the
        related ``Resource`` will appear as a URL to the endpoint of that
        resource. If ``True``, the result of the sub-resource's
        ``dehydrate`` will be included in full.
        Optionally accepts a ``unique``, which indicates if the field is a
        unique identifier for the object.
        Optionally accepts ``help_text``, which lets you provide a
        human-readable description of the field exposed at the schema level.
        Defaults to the per-Field definition.
        Optionally accepts ``use_in``. This may be one of ``list``, ``detail``
        ``all`` or a callable which accepts a ``bundle`` and returns
        ``True`` or ``False``. Indicates wheather this field will be included
        during dehydration of a list of objects or a single object. If ``use_in``
        is a callable, and returns ``True``, the field will be included during
        dehydration.
        Defaults to ``all``.
        Optionally accepts a ``full_list``, which indicated whether or not
        data should be fully dehydrated when the request is for a list of
        resources. Accepts ``True``, ``False`` or a callable that accepts
        a bundle and returns ``True`` or ``False``. Depends on ``full``
        being ``True``. Defaults to ``True``.
        Optionally accepts a ``full_detail``, which indicated whether or not
        data should be fully dehydrated when then request is for a single
        resource. Accepts ``True``, ``False`` or a callable that accepts a
        bundle and returns ``True`` or ``False``.Depends on ``full``
        being ``True``. Defaults to ``True``.
        """
        self.instance_name = None
        self._resource = None
        self.to = to
        self.attribute = attribute
        self.related_name = related_name
        self._default = default
        self.null = null
        self.blank = blank
        self.readonly = readonly
        self.full = full
        self.api_name = None
        self.resource_name = None
        self.unique = unique
        self._to_class = None
        # Default to 'all'; only overwritten below if ``use_in`` is one of
        # the recognized values or a callable.
        self.use_in = 'all'
        self.full_list = full_list
        self.full_detail = full_detail
        if use_in in ['all', 'detail', 'list'] or callable(use_in):
            self.use_in = use_in
        if self.to == 'self':
            # Self-referential relation; resolved in contribute_to_class.
            self.self_referential = True
            self._to_class = self.__class__
        if help_text:
            self.help_text = help_text
    def contribute_to_class(self, cls, name):
        # Hook called when the owning Resource class is constructed.
        super(RelatedField, self).contribute_to_class(cls, name)
        # Check if we're self-referential and hook it up.
        # We can't do this quite like Django because there's no ``AppCache``
        # here (which I think we should avoid as long as possible).
        if self.self_referential or self.to == 'self':
            self._to_class = cls
    def get_related_resource(self, related_instance):
        """
        Instaniates the related resource.
        """
        related_resource = self.to_class()
        # Fix the ``api_name`` if it's not present.
        if related_resource._meta.api_name is None:
            if self._resource and not self._resource._meta.api_name is None:
                related_resource._meta.api_name = self._resource._meta.api_name
        # Try to be efficient about DB queries.
        related_resource.instance = related_instance
        return related_resource
    @property
    def to_class(self):
        # We need to be lazy here, because when the metaclass constructs the
        # Resources, other classes may not exist yet.
        # That said, memoize this so we never have to relookup/reimport.
        if self._to_class:
            return self._to_class
        if not isinstance(self.to, six.string_types):
            self._to_class = self.to
            return self._to_class
        # It's a string. Let's figure it out.
        if '.' in self.to:
            # Try to import.
            module_bits = self.to.split('.')
            module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
            module = importlib.import_module(module_path)
        else:
            # We've got a bare class name here, which won't work (No AppCache
            # to rely on). Try to throw a useful error.
            raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
        self._to_class = getattr(module, class_name, None)
        if self._to_class is None:
            raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
        return self._to_class
    def dehydrate_related(self, bundle, related_resource, for_list=True):
        """
        Based on the ``full_resource``, returns either the endpoint or the data
        from ``full_dehydrate`` for the related resource.
        """
        should_dehydrate_full_resource = self.should_full_dehydrate(bundle, for_list=for_list)
        if not should_dehydrate_full_resource:
            # Be a good netizen.
            return related_resource.get_resource_uri(bundle)
        else:
            # ZOMG extra data and big payloads.
            bundle = related_resource.build_bundle(
                obj=related_resource.instance,
                request=bundle.request,
                objects_saved=bundle.objects_saved
            )
            return related_resource.full_dehydrate(bundle)
    def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
        """
        Given a URI is provided, the related resource is attempted to be
        loaded based on the identifiers in the URI.
        """
        err_msg = "Could not find the provided %s object via resource URI '%s'." % (fk_resource._meta.resource_name, uri,)
        if not uri:
            raise ApiFieldError(err_msg)
        try:
            obj = fk_resource.get_via_uri(uri, request=request)
            bundle = fk_resource.build_bundle(
                obj=obj,
                request=request
            )
            return fk_resource.full_dehydrate(bundle)
        except ObjectDoesNotExist:
            raise ApiFieldError(err_msg)
    def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
        """
        Given a dictionary-like structure is provided, a fresh related
        resource is created using that data.
        """
        # Try to hydrate the data provided.
        data = dict_strip_unicode_keys(data)
        fk_bundle = fk_resource.build_bundle(
            data=data,
            request=request
        )
        if related_obj:
            fk_bundle.related_obj = related_obj
            fk_bundle.related_name = related_name
        # Keys that can uniquely identify an existing object: 'pk' or any
        # field the related resource marks as unique.
        unique_keys = dict((k, v) for k, v in data.items() if k == 'pk' or (hasattr(fk_resource, k) and getattr(fk_resource, k).unique))
        # If we have no unique keys, we shouldn't go look for some resource that
        # happens to match other kwargs. In the case of a create, it might be the
        # completely wrong resource.
        # We also need to check to see if updates are allowed on the FK resource.
        if unique_keys and fk_resource.can_update():
            try:
                return fk_resource.obj_update(fk_bundle, skip_errors=True, **data)
            except (NotFound, TypeError):
                try:
                    # Attempt lookup by primary key
                    return fk_resource.obj_update(fk_bundle, skip_errors=True, **unique_keys)
                except NotFound:
                    pass
            except MultipleObjectsReturned:
                pass
        # If we shouldn't update a resource, or we couldn't find a matching
        # resource we'll just return a populated bundle instead
        # of mistakenly updating something that should be read-only.
        fk_bundle = fk_resource.full_hydrate(fk_bundle)
        fk_resource.is_valid(fk_bundle)
        return fk_bundle
    def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
        """
        Given an object with a ``pk`` attribute, the related resource
        is attempted to be loaded via that PK.
        """
        bundle = fk_resource.build_bundle(
            obj=obj,
            request=request
        )
        return fk_resource.full_dehydrate(bundle)
    def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
        """
        Returns a bundle of data built by the related resource, usually via
        ``hydrate`` with the data provided.
        Accepts either a URI, a data dictionary (or dictionary-like structure)
        or an object with a ``pk``.
        """
        self.fk_resource = self.to_class()
        kwargs = {
            'request': request,
            'related_obj': related_obj,
            'related_name': related_name,
        }
        # Dispatch on the shape of ``value``.
        if isinstance(value, Bundle):
            # Already hydrated, probably nested bundles. Just return.
            return value
        elif isinstance(value, six.string_types):
            # We got a URI. Load the object and assign it.
            return self.resource_from_uri(self.fk_resource, value, **kwargs)
        elif hasattr(value, 'items'):
            # We've got a data dictionary.
            # Since this leads to creation, this is the only one of these
            # methods that might care about "parent" data.
            return self.resource_from_data(self.fk_resource, value, **kwargs)
        elif hasattr(value, 'pk'):
            # We've got an object with a primary key.
            return self.resource_from_pk(self.fk_resource, value, **kwargs)
        else:
            raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
    def should_full_dehydrate(self, bundle, for_list):
        """
        Based on the ``full``, ``list_full`` and ``detail_full`` returns ``True`` or ``False``
        indicating weather the resource should be fully dehydrated.
        """
        should_dehydrate_full_resource = False
        if self.full:
            is_details_view = not for_list
            # full_detail / full_list may be plain booleans or callables
            # taking the bundle.
            if is_details_view:
                if (not callable(self.full_detail) and self.full_detail) or (callable(self.full_detail) and self.full_detail(bundle)):
                    should_dehydrate_full_resource = True
            else:
                if (not callable(self.full_list) and self.full_list) or (callable(self.full_list) and self.full_list(bundle)):
                    should_dehydrate_full_resource = True
        return should_dehydrate_full_resource
class ToOneField(RelatedField):
    """
    Provides access to related data via foreign key.

    This subclass requires Django's ORM layer to work properly.
    """
    help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
                 null=False, blank=False, readonly=False, full=False,
                 unique=False, help_text=None, use_in='all', full_list=True, full_detail=True):
        super(ToOneField, self).__init__(
            to, attribute, related_name=related_name, default=default,
            null=null, blank=blank, readonly=readonly, full=full,
            unique=unique, help_text=help_text, use_in=use_in,
            full_list=full_list, full_detail=full_detail
        )
        self.fk_resource = None
    def dehydrate(self, bundle, for_list=True):
        """Resolve the related object and dehydrate it (URI or full data)."""
        foreign_obj = None
        # Initialize up front so the error path below can always reference
        # them, mirroring ToManyField.dehydrate. Previously these were left
        # unbound (NameError) when ``attribute`` was neither a string nor a
        # callable, or when the callable branch hit the error path.
        previous_obj = bundle.obj
        attr = self.attribute
        if isinstance(self.attribute, six.string_types):
            # Walk '__'-separated attribute paths one hop at a time.
            attrs = self.attribute.split('__')
            foreign_obj = bundle.obj
            for attr in attrs:
                previous_obj = foreign_obj
                try:
                    foreign_obj = getattr(foreign_obj, attr, None)
                except ObjectDoesNotExist:
                    foreign_obj = None
        elif callable(self.attribute):
            foreign_obj = self.attribute(bundle)
        if not foreign_obj:
            if not self.null:
                if callable(self.attribute):
                    raise ApiFieldError("The related resource for resource %s could not be found." % (previous_obj))
                else:
                    raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
            return None
        self.fk_resource = self.get_related_resource(foreign_obj)
        fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
        return self.dehydrate_related(fk_bundle, self.fk_resource, for_list=for_list)
    def hydrate(self, bundle):
        """Build the related bundle from the incoming data, if any."""
        value = super(ToOneField, self).hydrate(bundle)
        if value is None:
            return value
        return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
    """
    Identical to ``ToOneField``; provided as a convenience for those who
    prefer naming that mirrors ``django.db.models``.
    """
    pass
class OneToOneField(ToOneField):
    """
    Identical to ``ToOneField``; provided as a convenience for those who
    prefer naming that mirrors ``django.db.models``.
    """
    pass
class ToManyField(RelatedField):
    """
    Provides access to related data via a join table.
    This subclass requires Django's ORM layer to work properly.
    Note that the ``hydrate`` portions of this field are quite different than
    any other field. ``hydrate_m2m`` actually handles the data and relations.
    This is due to the way Django implements M2M relationships.
    """
    is_m2m = True
    help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
    def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
                 null=False, blank=False, readonly=False, full=False,
                 unique=False, help_text=None, use_in='all', full_list=True, full_detail=True):
        super(ToManyField, self).__init__(
            to, attribute, related_name=related_name, default=default,
            null=null, blank=blank, readonly=readonly, full=full,
            unique=unique, help_text=help_text, use_in=use_in,
            full_list=full_list, full_detail=full_detail
        )
        self.m2m_bundles = []
    def dehydrate(self, bundle, for_list=True):
        # An unsaved object (no pk) cannot have M2M data attached to it.
        if not bundle.obj or not bundle.obj.pk:
            if not self.null:
                raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
            return []
        the_m2ms = None
        previous_obj = bundle.obj
        attr = self.attribute
        if isinstance(self.attribute, six.string_types):
            # Walk '__'-separated attribute paths one hop at a time.
            attrs = self.attribute.split('__')
            the_m2ms = bundle.obj
            for attr in attrs:
                previous_obj = the_m2ms
                try:
                    the_m2ms = getattr(the_m2ms, attr, None)
                except ObjectDoesNotExist:
                    the_m2ms = None
                if not the_m2ms:
                    break
        elif callable(self.attribute):
            the_m2ms = self.attribute(bundle)
        if not the_m2ms:
            if not self.null:
                raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
            return []
        self.m2m_resources = []
        m2m_dehydrated = []
        # TODO: Also model-specific and leaky. Relies on there being a
        # ``Manager`` there.
        for m2m in the_m2ms.all():
            m2m_resource = self.get_related_resource(m2m)
            m2m_bundle = Bundle(obj=m2m, request=bundle.request)
            self.m2m_resources.append(m2m_resource)
            m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, for_list=for_list))
        return m2m_dehydrated
    def hydrate(self, bundle):
        # Intentionally a no-op: M2M data is handled by ``hydrate_m2m`` once
        # the parent object exists.
        pass
    def hydrate_m2m(self, bundle):
        if self.readonly:
            return None
        if bundle.data.get(self.instance_name) is None:
            if self.blank:
                return []
            elif self.null:
                return []
            else:
                raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
        m2m_hydrated = []
        for value in bundle.data.get(self.instance_name):
            if value is None:
                continue
            kwargs = {
                'request': bundle.request,
            }
            if self.related_name:
                # Pass the parent object along so reverse relations can be
                # wired up by the related resource.
                kwargs['related_obj'] = bundle.obj
                kwargs['related_name'] = self.related_name
            m2m_hydrated.append(self.build_related_resource(value, **kwargs))
        return m2m_hydrated
class ManyToManyField(ToManyField):
    """
    Identical to ``ToManyField``; provided as a convenience for those who
    prefer naming that mirrors ``django.db.models``.
    """
    pass
class OneToManyField(ToManyField):
    """
    Identical to ``ToManyField``; provided as a convenience for those who
    prefer naming that mirrors ``django.db.models``.
    """
    pass
class TimeField(ApiField):
    """
    A time field.
    """
    dehydrated_type = 'time'
    help_text = 'A time as string. Ex: "20:05:23"'
    def dehydrate(self, obj, for_list=True):
        # NOTE(review): the base dehydrate is called without ``for_list`` —
        # confirm ApiField.dehydrate's signature tolerates that.
        return self.convert(super(TimeField, self).dehydrate(obj))
    def convert(self, value):
        # Strings are parsed into ``datetime.time``; other values (including
        # ``None``) pass through unchanged.
        if isinstance(value, six.string_types):
            return self.to_time(value)
        return value
    def to_time(self, s):
        # Parse *s* with dateutil's parser and keep only the time portion
        # (microseconds are dropped). Parse failures become ApiFieldError.
        try:
            dt = parse(s)
        except (ValueError, TypeError) as e:
            raise ApiFieldError(str(e))
        else:
            return datetime.time(dt.hour, dt.minute, dt.second)
    def hydrate(self, bundle):
        value = super(TimeField, self).hydrate(bundle)
        if value and not isinstance(value, datetime.time):
            value = self.to_time(value)
        return value
| |
import argparse
from os.path import join
import re
from subprocess import CalledProcessError, check_output, STDOUT
import sys
from packaging.version import Version as V
# Optional colored output: with colorama the helpers wrap text in ANSI
# style/color codes; without it they degrade to identity functions.
try:
    import colorama
    def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
    def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
    def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
    def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
    def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
    def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
    def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
    # Windows terminals need colorama's init() to interpret ANSI codes.
    # (Previously written as ``sys.platform == "win32" and colorama.init()``,
    # an expression used purely for its side effect.)
    if sys.platform == "win32":
        colorama.init()
except ImportError:
    def bright(text): return text
    def dim(text): return text
    def white(text): return text
    def blue(text): return text
    def red(text): return text
    def green(text): return text
    def yellow(text): return text
class config(object):
    """Mutable holder for release state: version strings and check problems.

    The version setters validate input against the Bokeh version grammar
    and raise ValueError for malformed values.
    """
    ANY_VERSION = re.compile(r"^(\d+\.\d+\.\d+)((?:dev|rc)\d+)?$")
    # A full release is exactly X.Y.Z. (The previous pattern made the whole
    # group optional — ``^(\d+\.\d+\.\d+)?$`` — so the empty string was
    # accepted as a valid full version.)
    FULL_VERSION = re.compile(r"^(\d+\.\d+\.\d+)$")
    def __init__(self):
        self._new_version = None
        self._last_any_version = None
        self._last_full_version = None
        self._problems = []
    @property
    def new_version(self): return self._new_version
    @new_version.setter
    def new_version(self, v):
        m = self.ANY_VERSION.match(v)
        if not m: raise ValueError("Invalid Bokeh version %r" % v)
        self._new_version = v
    @property
    def last_any_version(self): return self._last_any_version
    @last_any_version.setter
    def last_any_version(self, v):
        m = self.ANY_VERSION.match(v)
        if not m: raise ValueError("Invalid Bokeh version %r" % v)
        self._last_any_version = v
    @property
    def last_full_version(self): return self._last_full_version
    @last_full_version.setter
    def last_full_version(self, v):
        m = self.FULL_VERSION.match(v)
        if not m: raise ValueError("Invalid Bokeh version %r" % v)
        self._last_full_version = v
    @property
    def version_type(self):
        # Classify by marker substring; a version never has both 'rc'/'dev'.
        if "rc" in self._new_version: return "RELEASE CANDIDATE"
        elif "dev" in self._new_version: return "DEV BUILD"
        else: return "FULL RELEASE"
    @property
    def release_branch(self):
        return "release_%s" % self.new_version
    @property
    def problems(self):
        # Failure messages accumulated by failed().
        return self._problems
    @property
    def top_dir(self):
        # Root of the git checkout, resolved lazily via git itself.
        return run("git rev-parse --show-toplevel")
# Module-level singleton holding the state for this deploy run.
CONFIG = config()
#--------------------------------------
#
# Utility functions
#
#--------------------------------------
def run(cmd):
    """Execute *cmd* (a string or an argv list) and return its stripped output.

    stderr is folded into stdout; raises CalledProcessError on failure.
    """
    argv = cmd.split() if isinstance(cmd, str) else cmd
    return check_output(argv, stderr=STDOUT).decode('utf-8').strip()
#--------------------------------------
#
# UI functions
#
#--------------------------------------
def banner(color, msg):
    """Print *msg* centered inside a full-width '=' rule, styled via *color*."""
    print()
    print(color('=' * 80))
    print(color("{:^80}".format(msg)))
    print(color('=' * 80 + "\n"))
def passed(msg):
    """Report a successful check."""
    print(dim(green("[PASS] ")) + msg)
def failed(msg, details=None):
    """Report a failed check, show optional detail lines, and record the
    failure in CONFIG.problems."""
    print(red("[FAIL] ") + msg)
    if details:
        print()
        for line in details:
            print("    " + dim(red(line)))
        print()
    CONFIG.problems.append(msg)
def abort(checkout_master=True):
    """Print an abort notice and exit with status 1.

    When *checkout_master* is true, first return the working tree to the
    master branch. ``check_git`` passes False because git itself may be
    unusable at that point — previously the flag was ignored and the
    checkout always ran.
    """
    print()
    print(bright(red("!!! The deploy has been aborted.")))
    print()
    print(bright(red("!!! NO REMOTE ACTIONS have been taken --- local checkout may be dirty")))
    print()
    if checkout_master:
        run("git checkout master")
    sys.exit(1)
def confirm(msg):
    """Prompt until the user answers 'y' or 'n'; on 'n', restore master and abort.

    The previous loop condition (``resp not in "yn"``) used substring
    matching, so the answer "yn" was accepted and then silently treated as
    a yes; an exact-match tuple test fixes that.
    """
    resp = None
    while resp not in ("y", "n"):
        resp = input(bright(yellow(msg)) + bright(" (y/n): "))
    if resp == "n":
        run("git checkout master")
        abort()
#--------------------------------------
#
# Check functions
#
#--------------------------------------
def check_py3():
    """Verify the script is running under Python 3."""
    if sys.version_info.major != 3:
        failed("This script requires Python 3.x")
    else:
        passed("Running Python 3.x")
def check_git():
    """Verify the 'git' command is available; fatal (no checkout) if not."""
    try:
        run("which git")
    except CalledProcessError:
        failed("Command 'git' is missing")
        # git is unusable, so don't try to 'git checkout master' on the way out.
        abort(checkout_master=False)
    else:
        passed("Command 'git' is available")
def check_maintainers():
    """Check that the committer's email appears in the MAINTAINERS file.

    Aborts when the email cannot be determined or is not listed.
    """
    try:
        email = run("git config --get user.email")
    except CalledProcessError:
        failed("Could not determine Git config user.email")
        abort()
    filename = join(CONFIG.top_dir, "MAINTAINERS")
    # Context manager so the file handle is closed (it was previously leaked).
    with open(filename) as f:
        known = any(email == line.strip() for line in f)
    if known:
        passed("Git config user.email %r found in MAINTAINERS file" % email)
    else:
        failed("User config user.email %r NOT found in MAINTAINERS file" % email)
        print()
        print(bright(yellow("    This probably means you should not try to run this script")))
        abort()
def check_repo():
    """Confirm execution happens inside the bokeh/bokeh git repository."""
    try:
        run("git status")
    except CalledProcessError:
        failed("Executing outside of a git repository")
        abort()
    try:
        remote = run("git config --get remote.origin.url")
        if "bokeh/bokeh" in remote:
            # Message previously read "inside the the bokeh/bokeh repository".
            passed("Executing inside the bokeh/bokeh repository")
        else:
            failed("Executing OUTSIDE the bokeh/bokeh repository")
            abort()
    except CalledProcessError:
        failed("Could not determine Git config remote.origin.url")
        abort()
def check_checkout():
    # Verify the local checkout: on master, clean working tree, and in sync
    # with the GitHub remote. Aborts on any git plumbing failure.
    try:
        branch = run("git rev-parse --abbrev-ref HEAD")
        if branch == "master":
            passed("Working on master branch")
        else:
            failed("NOT working on master branch %r" % branch)
            abort()
        # Any porcelain output means uncommitted or untracked changes.
        extras = run("git status --porcelain").split("\n")
        extras = [x for x in extras if x != '']
        if extras:
            failed("Local checkout is NOT clean", extras)
        else:
            passed("Local checkout is clean")
        try:
            run("git remote update")
            local = run("git rev-parse @")
            remote = run("git rev-parse @{u}")
            base = run("git merge-base @ @{u}")
            if local == remote:
                passed("Checkout is up to date with GitHub")
            else:
                # Compare HEAD/upstream against their merge base to tell
                # pull-needed, push-needed, and diverged states apart.
                if local == base: status = "NEED TO PULL"
                elif remote == base: status = "NEED TO PUSH"
                else: status = "DIVERGED"
                failed("Checkout is NOT up to date with GitHub (%s)" % status)
        except CalledProcessError:
            failed("Could not check whether local and GitHub are up to date")
            abort()
    except CalledProcessError:
        failed("Could not check the checkout state")
        abort()
def check_tags():
    """Validate tag state: the new version must be untagged, and the latest
    any/full versions are detected from existing tags (newest first)."""
    try:
        out = run("git for-each-ref --sort=-taggerdate --format '%(tag)' refs/tags")
        tags = [x.strip("'\"") for x in out.split("\n")]
        if CONFIG.new_version in tags:
            failed("There is already an existing tag for new version %r" % CONFIG.new_version)
            abort()
        else:
            passed("New version %r does not already have a tag" % CONFIG.new_version)
        try:
            CONFIG.last_any_version = tags[0]
            passed("Detected valid last dev/rc/full version %r" % CONFIG.last_any_version)
        except ValueError:
            # Report the offending tag itself: when the setter rejects the
            # value, CONFIG.last_any_version was never assigned (it is None).
            failed("Last dev/rc/full version %r is not a valid Bokeh version!" % tags[0])
            abort()
        full_tags = [tag for tag in tags if ('rc' not in tag and 'dev' not in tag)]
        try:
            CONFIG.last_full_version = full_tags[0]
            passed("Detected valid last full release version %r" % CONFIG.last_full_version)
        except ValueError:
            # Same reasoning: report the rejected candidate, not the
            # never-assigned CONFIG property.
            failed("Last full release version %r is not a valid Bokeh version!" % full_tags[0])
            abort()
    except CalledProcessError:
        failed("Could not detect last version tags")
        abort()
def check_version_order():
    """Ensure the new version sorts after the most recent existing tag."""
    new, last = CONFIG.new_version, CONFIG.last_any_version
    if V(new) > V(last):
        passed("New version %r is newer than last version %r" % (new, last))
    else:
        failed("New version %r is NOT newer than last version %r" % (new, last))
def check_release_branch():
    """Ensure the release branch does not already exist locally."""
    existing = run("git branch --list %s" % CONFIG.release_branch)
    if not existing:
        passed("Release branch %r does not already exist" % CONFIG.release_branch)
    else:
        failed("Release branch %r ALREADY exists" % CONFIG.release_branch)
def check_issues():
    """Check issue labels for BEP-1 compliance via the issues.py helper."""
    try:
        run("python issues.py -c -p %s" % CONFIG.last_full_version)
    except CalledProcessError as e:
        out = e.output.decode('utf-8')
        # GitHub rate limiting is reported distinctly from real failures.
        if "HTTP Error 403: Forbidden" in out:
            failed("Issues cannot be checked right now due to GitHub rate limiting")
        else:
            failed("Issue labels are NOT BEP-1 compliant", out.split("\n"))
    else:
        passed("Issue labels are BEP-1 compliant")
#--------------------------------------
#
# Update functions
#
#--------------------------------------
def commit(filename, version):
    """``git add`` *filename* and commit it with an 'Updating for version'
    message; report (but do not abort) on failure."""
    path = join(CONFIG.top_dir, filename)
    try:
        run("git add %s" % path)
    except CalledProcessError as e:
        # split("\n") — previously the typo "/n", which never split the
        # error text into display lines.
        failed("Could not git add %r" % filename, str(e).split("\n"))
        return
    try:
        run(["git", "commit", "-m", "'Updating for version %s'" % version])
    except CalledProcessError as e:
        failed("Could not git commit %r" % filename, str(e).split("\n"))
        return
    passed("Committed file %r" % filename)
def update_bokehjs_versions():
    # Rewrite the BokehJS version strings in-place and commit each file
    # that was successfully updated.
    filenames = [
        'bokehjs/src/coffee/version.coffee',
        'bokehjs/package.json',
    ]
    # Matches e.g. ``version": "x.y.z"`` or ``release = 'x.y.z'`` for the
    # previous version, capturing the delimiters so they can be re-emitted
    # around the new version by re.sub below.
    pat = r"(release|version)([\" ][:=] [\"\'])" + CONFIG.last_any_version + "([\"\'])"
    for filename in filenames:
        path = join(CONFIG.top_dir, filename)
        with open(path) as f:
            text = f.read()
        match = re.search(pat, text)
        if not match:
            failed("Unable to find version string for %r in file %r" % (CONFIG.last_any_version, filename))
            continue
        text = re.sub(pat, r'\g<1>\g<2>%s\g<3>' % CONFIG.new_version, text)
        try:
            with open(path, 'w') as f:
                f.write(text)
        except Exception as e:
            failed("Unable to write new version to file %r" % filename, str(e).split("\n"))
        else:
            passed("Updated version from %r to %r in file %r" % (CONFIG.last_any_version, CONFIG.new_version, filename))
            commit(filename, CONFIG.new_version)
def update_docs_versions():
    # Append the new version to the docs' version list and commit the change.
    # Update all_versions.txt
    filename = 'sphinx/source/all_versions.txt'
    path = join(CONFIG.top_dir, filename)
    try:
        with open(path, 'a') as f:
            f.write("{version}\n".format(version=CONFIG.new_version))
    except Exception as e:
        failed("Could not write new version to file %r" % filename, str(e).split("\n"))
    else:
        passed("Appended version %r to %r" % (CONFIG.new_version, filename))
        commit(filename, CONFIG.new_version)
def update_changelog():
    """Regenerate CHANGELOG entries via issues.py and commit the result."""
    try:
        run("python issues.py -p %s -r %s" % (CONFIG.last_full_version, CONFIG.new_version))
        passed("Updated CHANGELOG with new closed issues")
        filename = join(CONFIG.top_dir, "CHANGELOG")
        commit(filename, CONFIG.new_version)
    except CalledProcessError as e:
        out = e.output.decode('utf-8')
        # GitHub rate limiting is reported distinctly from real failures.
        if "HTTP Error 403: Forbidden" in out:
            failed("CHANGELOG cannot be updated right now due to GitHub rate limiting")
        else:
            failed("CHANGELOG update failed", out.split("\n"))
def merge_and_push():
    """Merge the release branch into master, push, tag the release, and push
    the tag.

    Returns True on full success, False on any fatal failure. Non-fatal
    cleanup problems (branch deletion, final checkout) are reported but do
    not stop the release.
    """
    try:
        run("git checkout master")
        passed("Checked out master branch")
    except Exception as e:
        failed("[FATAL] COULD NOT CHECK OUT MASTER BRANCH: %s" % e)
        return False
    try:
        run(["git", "merge", "--no-ff", CONFIG.release_branch, "-m", "'Merge branch %s'" % CONFIG.release_branch])
        passed("Merged release branch into master branch")
    except Exception as e:
        failed("[FATAL] COULD NOT MERGE RELEASE BRANCH TO MASTER: %s" % e)
        return False
    try:
        # use --no-verify to prevent git hook that might ask for confirmation
        run("git push --no-verify origin master")
        passed("Pushed master branch to GitHub")
    except Exception as e:
        failed("[FATAL] COULD NOT PUSH MASTER TO ORIGIN: %s" % e)
        return False
    try:
        run(["git", "branch", "-d", CONFIG.release_branch])
        passed("Deleted release branch")
    except Exception as e:
        # Previously referenced ``out`` here, which is unbound when run()
        # raises — the resulting NameError masked the real failure.
        failed("[NON-FATAL] Could not delete release branch", str(e).split("\n"))
    try:
        run(["git", "tag", "-a", CONFIG.new_version, "-m", "Release %s" % CONFIG.new_version])
        passed("Tagged release %r" % CONFIG.new_version)
    except Exception as e:
        failed("[FATAL] COULD NOT TAG RELEASE: %s" % e)
        return False
    try:
        # use --no-verify to prevent git hook that might ask for confirmation
        run(["git", "push", "--no-verify", "origin", CONFIG.new_version])
        passed("Pushed tag %r to GitHub" % CONFIG.new_version)
    except Exception as e:
        # Message previously (wrongly) claimed the *master* push failed.
        failed("[FATAL] COULD NOT PUSH TAG TO ORIGIN: %s" % e)
        return False
    try:
        run("git checkout master")
        passed("Returned to master branch")
    except Exception as e:
        # Same unbound-``out`` fix as the branch-deletion handler above.
        failed("[NON-FATAL] Could not return to master branch", str(e).split("\n"))
    return True
def show_updates():
    """Print a colorized diff of everything changed on the release branch."""
    print()
    print("!!! Here is a diff of the changes made on the release branch:")
    print()
    for diff_line in run("git diff --minimal master").split("\n"):
        print(blue(" %s" % diff_line))
    print()
#--------------------------------------
#
# Main
#
#--------------------------------------
if __name__ == '__main__':
    # Command-line entry point: validate, modify, confirm, then merge/push
    # a Bokeh release.
    parser = argparse.ArgumentParser(description='Deploy a Bokeh release.')
    parser.add_argument('version',
                        type=str,
                        nargs=1,
                        help='The new version number for this release')
    args = parser.parse_args()

    new_version = args.version[0]

    banner(blue, "{:^80}".format("You are starting a Bokeh release deployment for %r" % new_version))

    # pre-checks ------------------------------------------------------------

    print("!!! Running pre-checks for release deploy\n")

    check_py3()
    check_git()
    check_maintainers()
    check_repo()
    check_checkout()

    try:
        # assigning CONFIG.new_version is expected to validate the version
        # string and raise ValueError if it is not a valid Bokeh version
        CONFIG.new_version = new_version
        passed("New version %r is a valid Bokeh version (%s)" % (CONFIG.new_version, bright(CONFIG.version_type)))
    except ValueError:
        # BUG FIX: use the local value here -- CONFIG.new_version was never
        # set when the assignment itself raised ValueError.
        failed("Version %r is NOT a valid Bokeh version" % new_version)
        abort()

    check_tags()
    check_version_order()
    check_release_branch()

    if V(CONFIG.new_version).is_prerelease:
        print(blue("[SKIP] ") + "Not checking issues for BEP-1 compliance for pre-releases")
    else:
        check_issues()

    if CONFIG.problems:
        print(red("\n!!! Some pre-checks have failed:\n"))
        for p in CONFIG.problems:
            print(" - " + yellow(p))
        abort()

    print(green("\n!!! All pre-checks have passed\n"))

    confirm("Would you like to continue to file modifications?")

    print(blue("\n" + '-'*80 + "\n"))

    # modifications ---------------------------------------------------------

    try:
        run("git checkout -b %s" % CONFIG.release_branch)
        passed("Checked out release branch %r" % CONFIG.release_branch)
    except CalledProcessError as e:
        # BUG FIX: previously split on the literal two characters "/n"
        # instead of the newline escape "\n".
        failed("Could not check out release branch %r" % CONFIG.release_branch, str(e).split("\n"))
        abort()

    update_bokehjs_versions()

    if V(CONFIG.new_version).is_prerelease:
        print(blue("[SKIP] ") + "Not updating docs version or change log for pre-releases")
    else:
        update_docs_versions()
        update_changelog()

    if CONFIG.problems:
        print(red("\n!!! Some updates have failed:\n"))
        for p in CONFIG.problems:
            print(" - " + yellow(p))
        abort()

    # confirmation ----------------------------------------------------------

    show_updates()

    confirm("Merge release branch and push these changes? [LAST CHANCE TO ABORT]")

    success = merge_and_push()

    if success:
        if CONFIG.problems:
            print(blue("\n!!! Some NON-FATAL problems occurred:\n"))
            for p in CONFIG.problems:
                print(" - " + yellow(p))
        print()
        banner(blue, "{:^80}".format("Bokeh %r release deployment: SUCCESS" % CONFIG.new_version))
    else:
        if CONFIG.problems:
            print(red("\n!!! Some FATAL problems occurred:\n"))
            for p in CONFIG.problems:
                print(" - " + yellow(p))
        print()
        print(bright(red("!!! REMOTE ACTIONS MAY HAVE BEEN TAKEN --- local AND remote branches may be dirty")))
        print()
        banner(red, "{:^80}".format("Bokeh %r release deployment: FAILURE" % CONFIG.new_version))
        sys.exit(1)
| |
'''
Created on 12/12/2017
@author: Liza L. Lemos <lllemos@inf.ufrgs.br>
'''
from environment import Environment
import traci
import sumolib
from xml.dom import minidom
import sys, os
import subprocess
import atexit
from contextlib import contextmanager
import time
from array import array
import numpy as np
import datetime
import math
class SUMOTrafficLights(Environment):
    """SUMO-based traffic-light control environment (Python 2 / TraCI).

    Wraps a SUMO simulation driven through TraCI as a reinforcement-learning
    environment: each traffic light is an independent learner that chooses
    between keeping or changing its current green phase (see the MDP
    description below).
    """

    def __init__(self, cfg_file, port=8813, use_gui=False):
        # cfg_file: path to the SUMO .sumocfg configuration file
        # port: TraCI port (forwarded to __create_env; not used there --
        #       TODO confirm whether it is still needed)
        # use_gui: run 'sumo-gui' instead of headless 'sumo' when True
        super(SUMOTrafficLights, self).__init__()
        self.__create_env(cfg_file, port, use_gui)

    '''
    Create the environment as a MDP. The MDP is modeled as follows:
    * for each traffic light:
    * the STATE is defined as a vector [current phase, elapsed time of current phase, queue length for each phase]
    * for simplicity, the elapsed time is discretized in intervals of 5s
    * and, the queue length is calculated according to the occupation of the link.
    * The occupation is discretized in 4 intervals (equally distributed)
    * The number of ACTIONS is equal to the number of phases
    * Currentlly, there are only two phases thus the actions are either keep green time at the current phase or
    * allow green to another phase. As usual, we call these actions 'keep' and 'change'
    * At each junction, REWARD is defined as the difference between the current and the previous average queue length (AQL)
    * at the approaching lanes, i.e., for each traffic light the reward is defined as $R(s,a,s')= AQL_{s} - AQL_{s'}$.
    * the transitions between states are deterministic
    '''
    def __create_env(self, cfg_file, port, use_gui):
        # Build the state/action table and the per-traffic-light structures.
        #check for SUMO's binaries
        if use_gui:
            self._sumo_binary = sumolib.checkBinary('sumo-gui')
        else:
            self._sumo_binary = sumolib.checkBinary('sumo')

        #register SUMO/TraCI parameters
        self.__cfg_file = cfg_file
        # the net file path is resolved relative to the .sumocfg location,
        # reading the 'net-file' entry from the configuration XML
        self.__net_file = self.__cfg_file[:self.__cfg_file.rfind("/")+1] + minidom.parse(self.__cfg_file).getElementsByTagName('net-file')[0].attributes['value'].value

        #read the network file
        self.__net = sumolib.net.readNet(self.__net_file)

        self.__env = {}
        d = ['keep', 'change']
        # d[0] = 'keep'
        # d[1] = 'change'
        # to each state the actions are the same
        # self.__env[state] has 160 possible variations
        # [idPhase, elapsed time, queue NS, queue EW] = [2, 5, 4, 4]
        # 2 * 5 * 4 * 4 = 160
        # idPhase: 2 phases - NS, EW
        # elapsed time: 30s that are discretize in 5 intervals
        # queue: 0 to 100% discretize in 4 intervals
        # Note: to change the number of phases, it is necessary to change the number of states, e.g. 3 phases: [3, 5, 4, 4, 4]
        # it is also necessary to change the method change_trafficlight
        for x in range(0, 160):
            self.__env[x] = d

        #create the set of traffic ligths
        self.__create_trafficlights()
        self.__create_edges()

    def __create_trafficlights(self):
        # Populate self.__trafficlights with one timing-state record per
        # 'traffic_light' junction found in the network file.
        # set of all traffic lights in the simulation
        # each element in __trafficlights correspond to another in __learners
        self.__trafficlights = {}

        # process all trafficlights entries
        junctions_parse = minidom.parse(self.__net_file).getElementsByTagName('junction')
        for element in junctions_parse:
            if element.getAttribute('type') == "traffic_light":
                # Python 2: encode the unicode id to a byte string
                tlID = element.getAttribute('id').encode('utf-8')
                # create the entry in the dictionary
                self.__trafficlights[tlID] = {
                    'greenTime': 0,
                    'nextGreen': -1,
                    'yellowTime': -1,
                    'redTime': -1
                }

    def reset_episode(self):
        """Start a new episode: launch SUMO via TraCI and reset the per
        traffic-light timing counters."""
        super(SUMOTrafficLights, self).reset_episode()

        # initialise TraCI
        traci.start([self._sumo_binary , "-c", self.__cfg_file])

        # reset traffic lights attributes
        for tlID in self.get_trafficlights_ID_list():
            self.__trafficlights[tlID]['greenTime'] = 0
            self.__trafficlights[tlID]['nextGreen'] = -1
            self.__trafficlights[tlID]['yellowTime'] = -1
            self.__trafficlights[tlID]['redTime'] = -1

    # define the edges/lanes that are controled for each traffic light
    # the function getControlledLanes() from TRACI, returned the names of lanes doubled
    # that's way is listed here
    def __create_edges(self):
        # Hard-coded lane ids for a 3x3 grid of junctions (ids 0..8), split
        # into the north-south and east-west approaches of each junction.
        self._edgesNS = {}
        self._edgesEW = {}

        self._edgesNS[0] = ['0Ni_0', '0Ni_1', '0Si_0', '0Si_1']
        self._edgesEW[0] = ['0Wi_0', '0Wi_1', '0Ei_0', '0Ei_1']

        self._edgesNS[1] = ['1Ni_0', '1Ni_1', '1Si_0', '1Si_1']
        self._edgesEW[1] = ['1Wi_0', '1Wi_1', '1Ei_0', '1Ei_1']

        self._edgesNS[2] = ['2Ni_0', '2Ni_1', '2Si_0', '2Si_1']
        self._edgesEW[2] = ['2Wi_0', '2Wi_1', '2Ei_0', '2Ei_1']

        self._edgesNS[3] = ['3Ni_0', '3Ni_1', '3Si_0', '3Si_1']
        self._edgesEW[3] = ['3Wi_0', '3Wi_1', '3Ei_0', '3Ei_1']

        self._edgesNS[4] = ['4Ni_0', '4Ni_1', '4Si_0', '4Si_1']
        self._edgesEW[4] = ['4Wi_0', '4Wi_1', '4Ei_0', '4Ei_1']

        self._edgesNS[5] = ['5Ni_0', '5Ni_1', '5Si_0', '5Si_1']
        self._edgesEW[5] = ['5Wi_0', '5Wi_1', '5Ei_0', '5Ei_1']

        self._edgesNS[6] = ['6Ni_0', '6Ni_1', '6Si_0', '6Si_1']
        self._edgesEW[6] = ['6Wi_0', '6Wi_1', '6Ei_0', '6Ei_1']

        self._edgesNS[7] = ['7Ni_0', '7Ni_1', '7Si_0', '7Si_1']
        self._edgesEW[7] = ['7Wi_0', '7Wi_1', '7Ei_0', '7Ei_1']

        self._edgesNS[8] = ['8Ni_0', '8Ni_1', '8Si_0', '8Si_1']
        self._edgesEW[8] = ['8Wi_0', '8Wi_1', '8Ei_0', '8Ei_1']

    # calculates the capacity for each queue of each traffic light
    def __init_edges_capacity(self):
        # Capacity = total approach length / (vehicle length + minGap),
        # i.e. how many stopped vehicles fit on the approach lanes.
        self._edgesNScapacity = {}
        self._edgesEWcapacity = {}
        for tlID in self.get_trafficlights_ID_list():
            #~ print '----'
            #~ print 'tlID', tlID
            lengthNS = 0
            lengthWE = 0
            for lane in self._edgesNS[int(tlID)]:
                lengthNS += traci.lane.getLength(lane)
            for lane in self._edgesEW[int(tlID)]:
                lengthWE += traci.lane.getLength(lane)
            lengthNS = lengthNS/7.5 # vehicle length 5m + 2.5m (minGap)
            lengthWE = lengthWE/7.5
            self._edgesNScapacity[int(tlID)] = lengthNS
            self._edgesEWcapacity[int(tlID)] = lengthWE

    # https://sourceforge.net/p/sumo/mailman/message/35824947/
    # It's necessary set a new logic, because we need more duration time.
    # in SUMO the duration of the phases are set in .net file.
    # but if in .net the phase duration is set to 30s and if we want 40s, the simulator will change phase in 30s
    # thus, we set the duration with a high value
    # also, the yellow and all red phase duration can be set here
    # if prefer, this can be changed in .net file 'tllogic' tag
    # obs: the duration is set in ms
    def __create_tlogic(self):
        # Install a custom 6-phase program (green/yellow/all-red for each of
        # the two directions) on every traffic light.
        phases = []
        phases.append(traci._trafficlights.Phase(200000, 200000, 200000, "GGGgrrrrGGGgrrrr")) # N-S
        phases.append(traci._trafficlights.Phase(2000, 2000, 2000, "YYYYrrrrYYYYrrrr"))
        phases.append(traci._trafficlights.Phase(1000, 1000, 1000, "rrrrrrrrrrrrrrrr"))
        phases.append(traci._trafficlights.Phase(200000, 200000,200000, "rrrrGGGgrrrrGGGg")) # E-W
        phases.append(traci._trafficlights.Phase(2000, 2000, 2000, "rrrrYYYYrrrrYYYY"))
        phases.append(traci._trafficlights.Phase(1000, 1000, 1000, "rrrrrrrrrrrrrrrr"))
        logic = traci._trafficlights.Logic("new-program", 0, 0, 0, phases)

        for tlID in self.get_trafficlights_ID_list():
            traci.trafficlights.setCompleteRedYellowGreenDefinition(tlID,logic)

    def get_trafficlights_ID_list(self):
        # return a list with the traffic lights' IDs
        return self.__trafficlights.keys()

    # commands to be performed upon normal termination
    def __close_connection(self):
        traci.close() # stop TraCI
        sys.stdout.flush() # clear standard output

    def get_state_actions(self, state):
        """Return the list of actions available in the given encoded state
        (always ['keep', 'change'] in the current two-phase setup)."""
        self.__check_env()
        # print state
        # print self.__env[state]
        return self.__env[state]

    # check whether the environment is ready to run
    def __check_env(self):
        # check whether the environment data structure was defined
        if not self.__env:
            raise Exception("The traffic lights must be set before running!")

    # discretize the queue occupation in 4 classes equally distributed
    def discretize_queue(self, queue):
        # queue: occupation percentage (0-100); result is clamped to class 3
        q_class = math.ceil((queue)/25)
        if queue >= 75:
            q_class = 3
        # percentage
        #~ if queue < 25:
        #~ q_class = 0 # 0 - 25%
        #~ if queue >= 25 and queue < 50:
        #~ q_class = 1 # 25 - 50%
        #~ if queue >= 50 and queue < 75:
        #~ q_class = 2 # 50 - 75%
        #~ if queue >= 75:
        #~ q_class = 3 # 75 - 100%
        return int(q_class)

    #http://stackoverflow.com/questions/759296/converting-a-decimal-to-a-mixed-radix-base-number
    def mixed_radix_encode(self, idPhase, duration, queueNS, queueEW):
        """Encode [idPhase, duration, queueNS, queueEW] into a single state
        index in [0, 160) using mixed-radix bases [2, 5, 4, 4]."""
        factors = [2, 5, 4, 4]
        queueNS = self.discretize_queue(queueNS)
        queueEW = self.discretize_queue(queueEW)

        # the total elapsed time is 30s that are discretize in intervals
        # discretize the duration time (elapsed time) in intervals of 5s (interv_action_selection), except the first interval
        # the fisrt interval is 0 - minGreenTime
        if duration > 0 and duration <= 10: # minGreenTime
            duration = 0
        if duration > 10 and duration <= 15:
            duration = 1
        if duration > 15 and duration <= 20:
            duration = 2
        if duration > 20 and duration <= 25:
            duration = 3
        if duration > 25:
            duration = 4

        # idPhase = 0 (NS green), idPhase = 3 (EW green),
        # but for the mixed radix conversion idPhase can only assume 0 or 1
        if idPhase == 3:
            idPhase = 1

        # mixed radix conversion
        values = [idPhase, duration, queueNS, queueEW]
        res = 0
        for i in range(4):
            res = res * factors[i] + values[i]
        return res

    # decode a mixed radix conversion
    def mixed_radix_decode(self, value):
        # NOTE(review): debugging helper -- computes the digit vector but
        # only prints it; `res` is never returned to the caller.
        print 'value', value
        factors = [2, 5, 4, 4]
        res = [0,0,0,0]
        for i in reversed(range(4)):
            res[i] = value % factors[i]
            value = value / factors[i]
        print 'reverse %s' % (res)

    # change the traffic light phase
    # set yellow phase and save the next green
    def change_trafficlight(self, tlID):
        # moving to the yellow phase (1 or 4); the all-red phase follows per
        # the program installed in __create_tlogic, then 'nextGreen' is used
        if traci.trafficlights.getPhase(tlID) == 0: # NS phase
            traci.trafficlights.setPhase(tlID, 1)
            self.__trafficlights[tlID]["nextGreen"] = 3
        elif traci.trafficlights.getPhase(tlID) == 3: # EW phase
            traci.trafficlights.setPhase(tlID, 4)
            self.__trafficlights[tlID]["nextGreen"] = 0

    # obs: traci.trafficlights.getPhaseDuration(tlID)
    # it is the time defined in .net file, not the current elapsed time
    def update_phaseTime(self, string, tlID):
        # increment the named timing counter ('greenTime', 'yellowTime', ...)
        self.__trafficlights[tlID][string] += 1

    #for states
    def calculate_queue_size(self, tlID):
        """Return [NS ids, EW ids]: vehicle ids currently on the NS and EW
        approach lanes of the given traffic light (no speed filtering)."""
        minSpeed = 2.8 # 10km/h - 2.78m/s  (NOTE: unused in this method)
        allVehicles = traci.vehicle.getIDList()
        for vehID in allVehicles:
            traci.vehicle.subscribe(vehID, [traci.constants.VAR_LANE_ID, traci.constants.VAR_SPEED])
        info_veh = traci.vehicle.getSubscriptionResults()
        # VAR_LANE_ID = 81
        # VAR_SPEED = 64 Returns the speed of the named vehicle within the last step [m/s]; error value: -1001
        qNS = []
        qEW = []
        if len(info_veh) > 0:
            for x in info_veh.keys():
                if info_veh[x][81] in self._edgesNS[int(tlID)]:
                    qNS.append(x)
                if info_veh[x][81] in self._edgesEW[int(tlID)]:
                    qEW.append(x)
        return [qNS, qEW]

    #for the reward
    def calculate_stopped_queue_length(self, tlID):
        """Return [NS count, EW count]: number of vehicles slower than
        minSpeed (considered stopped) on each approach of the light."""
        minSpeed = 2.8 # 10km/h - 2.78m/s
        allVehicles = traci.vehicle.getIDList()
        for vehID in allVehicles:
            traci.vehicle.subscribe(vehID, [traci.constants.VAR_LANE_ID, traci.constants.VAR_SPEED])
        info_veh = traci.vehicle.getSubscriptionResults()
        # VAR_LANE_ID = 81
        # VAR_SPEED = 64 Returns the speed of the named vehicle within the last step [m/s]; error value: -1001
        qNS = []
        qEW = []
        if len(info_veh) > 0:
            for x in info_veh.keys():
                if info_veh[x][64] <= minSpeed:
                    if info_veh[x][81] in self._edgesNS[int(tlID)]:
                        qNS.append(x)
                    if info_veh[x][81] in self._edgesEW[int(tlID)]:
                        qEW.append(x)
        return [len(qNS), len(qEW)]

    def calculate_new_state(self, tlID):
        """Build the encoded MDP state for the given traffic light from its
        current phase, elapsed green time and queue occupations."""
        # 1) index of the current phase
        idPhase = traci.trafficlights.getPhase(tlID)

        # 2) the elapsed time in the current phase
        # obs: duration = traci.trafficlights.getPhaseDuration(tlID)
        # its the time defined in .net file, not the current elapsed time
        duration = self.__trafficlights[tlID]["greenTime"]

        # 3) queue size
        qNS_list, qEW_list = self.calculate_queue_size(tlID)
        qNS = len(qNS_list)
        qEW = len(qEW_list)

        # vehicle / capacity
        qNS_occupation = 0
        qEW_occupation = 0
        if qNS > 0:
            qNS_occupation = (qNS*100)/self._edgesNScapacity[int(tlID)]
        if qEW > 0:
            qEW_occupation = (qEW*100)/self._edgesEWcapacity[int(tlID)]

        new_state = self.mixed_radix_encode(idPhase, duration ,qNS_occupation, qEW_occupation)
        return new_state

    def run_episode(self, max_steps=-1, arq_tl='saida_tl.txt', exp=None):
        """Run one full SUMO episode.

        :param max_steps: simulation horizon in seconds (-1 = until all
            vehicles leave the network); converted to ms for TraCI
        :param arq_tl: metrics output -- passed to metrics() which calls
            .writelines() on it, so callers presumably pass an open file
            object despite the filename-looking default (TODO confirm)
        :param exp: exploration strategy; its update_epsilon_manually() is
            called periodically because this is not an episodic task
        """
        self.__check_env()
        start = time.time()  # NOTE(review): currently unused

        max_steps *= 1000 # traci returns steps in ms, not s
        self._has_episode_ended = False
        self._episodes += 1
        self.reset_episode()

        self.__init_edges_capacity() # initialize the queue capacity of each traffic light
        self.__create_tlogic()

        #----------------------------------------------------------------------------------
        current_time = 0

        previousNSqueue = [0] * len(self.get_trafficlights_ID_list())
        previousEWqueue = [0] * len(self.get_trafficlights_ID_list())
        currentNSqueue = [0] * len(self.get_trafficlights_ID_list())
        currentEWqueue = [0] * len(self.get_trafficlights_ID_list())
        new_state = [0] * len(self.get_trafficlights_ID_list())
        state = [0] * len(self.get_trafficlights_ID_list())
        choose = [0] * len(self.get_trafficlights_ID_list()) # flag: if choose an action

        maxGreenTime = 180 # maximum green time, to prevent starvation
        minGreenTime = 10
        interv_action_selection = 5 # interval for action selection
        update_epsilon = maxGreenTime * 2 # maxGreenTime *2: to assure that the traffic ligth pass at least one time in each phase

        # main loop
        while ((max_steps > -1 and traci.simulation.getCurrentTime() < max_steps) or max_steps <= -1) and (traci.simulation.getMinExpectedNumber() > 0 or traci.simulation.getArrivedNumber() > 0):
            queueNS = [0] * len(self.get_trafficlights_ID_list())
            queueEW = [0] * len(self.get_trafficlights_ID_list())
            learner_state_action = {}

            for tlID in self.get_trafficlights_ID_list():
                # A) LEARNER ACTION
                # each traffic light makes a decision at each interv_action_selection (5s)
                if self.__trafficlights[tlID]["greenTime"] > 9 and (self.__trafficlights[tlID]["greenTime"] % interv_action_selection) == 0 :
                    new_state[int(tlID)] = self.calculate_new_state(tlID)
                    state[int(tlID)], action = self._learners[tlID].act_last(new_state[int(tlID)])
                    learner_state_action[tlID] = [state[int(tlID)], action]
                    # if green time is equal or more than maxGreenTime, change phase
                    if self.__trafficlights[tlID]["greenTime"] >= maxGreenTime:
                        learner_state_action[tlID] = [state[int(tlID)], 'change']
                    choose[int(tlID)] = True # flag: if choose an action
                else:
                    choose[int(tlID)] = False

            # run a single simulation step
            traci.simulationStep()
            current_time = traci.simulation.getCurrentTime()/1000

            # update epsilon manually - traffic lights are not a episodic task
            # maxGreenTime *2: to assure that the traffic ligth pass at least one time in each phase
            if update_epsilon == current_time:
                update_epsilon = update_epsilon + (maxGreenTime*2)
                exp.update_epsilon_manually()

            # before start needs 'change' or 'keep' the phase according to the selected action
            for tlID in self.get_trafficlights_ID_list():
                # green phase: idPhase = 0 or 3 (when have two phases)
                # if yellow or all red phase - do nothing
                if traci.trafficlights.getPhase(tlID) == 0 or traci.trafficlights.getPhase(tlID) == 3:
                    self.update_phaseTime('greenTime', tlID)

                    # if choose == True: run the action (change, keep)
                    # else: just calculate the queue length (reward will be the average queue length)
                    if choose[int(tlID)] == True:
                        # B) RUN ACTION
                        if learner_state_action[tlID][1] == 'change': #TODO: more phases
                            self.__trafficlights[tlID]["greenTime"] = 0
                            # this method must set yellow phase and save the next green phase
                            self.change_trafficlight(tlID)
                        else: # if action = 'keep' just calculte queue size
                            # calculate queue size
                            queueNS[int(tlID)], queueEW[int(tlID)] = self.calculate_stopped_queue_length(tlID)
                            currentNSqueue[int(tlID)] += queueNS[int(tlID)]
                            currentEWqueue[int(tlID)] += queueEW[int(tlID)]
                    else:
                        # calculate queue size
                        queueNS[int(tlID)], queueEW[int(tlID)] = self.calculate_stopped_queue_length(tlID)
                        currentNSqueue[int(tlID)] += queueNS[int(tlID)]
                        currentEWqueue[int(tlID)] += queueEW[int(tlID)]

                    # if it will select action in the next step,
                    # in the previous you need to calculate the feedback and update Q-table
                    if self.__trafficlights[tlID]["greenTime"] > (minGreenTime - 1) and (self.__trafficlights[tlID]["greenTime"] % interv_action_selection) == 0 and current_time > 13:
                        # if current_time: it can enter in the beggining - 13 = 10 (minGreenTime) + 2 (yellow) + 1 (allRed)
                        # calculate the average queue length
                        if self.__trafficlights[tlID]["greenTime"] == minGreenTime: # action 'change': stay minGreenTime before select new action
                            aver_currentNSqueue = currentNSqueue[int(tlID)]/float(minGreenTime)
                            aver_currentEWqueue = currentEWqueue[int(tlID)]/float(minGreenTime)
                        else: # action 'keep': stay interv_action_selection before select new action
                            aver_currentNSqueue = currentNSqueue[int(tlID)]/float(interv_action_selection)
                            aver_currentEWqueue = currentEWqueue[int(tlID)]/float(interv_action_selection)

                        # C) CALCULATE REWARD
                        trafficlight_to_proces_feedback = {}
                        # we define the reward as the difference between the previous and current average queue length (AQL)
                        # at the junction $R(s,a,s')= AQL_{s} - AQL_{s'}$
                        reward = ((aver_currentEWqueue + aver_currentNSqueue) - (previousEWqueue[int(tlID)] + previousNSqueue[int(tlID)]))
                        reward *= -1

                        # D) PROCESS FEEDBACK
                        trafficlight_to_proces_feedback[tlID] = [
                            reward,
                            new_state[int(tlID)],
                            state[int(tlID)]
                        ]
                        self.__process_trafficlights_feedback(trafficlight_to_proces_feedback)

                        # update previous queue
                        previousNSqueue[int(tlID)] = aver_currentNSqueue
                        previousEWqueue[int(tlID)] = aver_currentEWqueue
                        # clean current queue
                        currentNSqueue[int(tlID)] = 0
                        currentEWqueue[int(tlID)] = 0

            self.metrics(arq_tl, current_time)

        self.__close_connection()
        self._has_episode_ended = True

    def __process_trafficlights_feedback(self, traffic_lights):
        # Forward each light's (reward, new_state, state) tuple to its learner.
        # feedback_last
        for tlID in traffic_lights.keys():
            self._learners[str(tlID)].feedback_last(traffic_lights[tlID][0], traffic_lights[tlID][1], traffic_lights[tlID][2])

    def metrics(self, arquivo, current_time):
        """Append one CSV line with per-light stopped-vehicle counts, the
        average queue over all lights, and the total vehicle count.

        ``arquivo`` must support .writelines() -- presumably an open file
        object (TODO confirm against caller).
        """
        minSpeed = 2.8 # 10km/h - 2.78m/s
        # using subcriptions
        allVehicles = traci.vehicle.getIDList()
        for vehID in allVehicles:
            traci.vehicle.subscribe(vehID, [traci.constants.VAR_LANE_ID, traci.constants.VAR_SPEED])
        lanes = traci.vehicle.getSubscriptionResults()
        # VAR_LANE_ID = 81
        # VAR_SPEED = 64 Returns the speed of the named vehicle within the last step [m/s]; error value: -1001
        # VAR_WAITING_TIME = 122 Returns the waiting time [s]
        cont_veh_per_tl = [0] * len(self.get_trafficlights_ID_list())
        if len(lanes) > 0:
            for x in lanes.keys():
                for tlID in self.get_trafficlights_ID_list():
                    if lanes[x][64] <= minSpeed:
                        if (lanes[x][81] in self._edgesNS[int(tlID)]) or (lanes[x][81] in self._edgesEW[int(tlID)]):
                            cont_veh_per_tl[int(tlID)] += 1

        # save in a file
        # how many vehicles were in queue in each timestep
        average_queue = 0
        for tlID in self.get_trafficlights_ID_list():
            average_queue = average_queue + cont_veh_per_tl[int(tlID)]
        average_queue = average_queue/float(len(self.__trafficlights))

        arquivo.writelines('%d,%s,%.1f,%d\n' % (current_time, str(cont_veh_per_tl)[1:-1], average_queue, len(allVehicles)))

    def run_step(self):
        # stepwise execution is not supported; use run_episode instead
        raise Exception('run_step is not available in %s class' % self)
        return

    def has_episode_ended(self):
        # True once run_episode has finished its main loop
        return self._has_episode_ended

    def __calc_reward(self, state, action, new_state):
        # rewards are computed inline in run_episode, not through this hook
        raise Exception('__calc_reward is not available in %s class' % self)
        return
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from swift import gettext_ as _
from six.moves.urllib.parse import unquote
from swift.common.utils import public, csv_append, Timestamp
from swift.common.constraints import check_metadata
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, set_info_cache, clear_info_cache
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPNotFound, HTTPServerError
class ContainerController(Controller):
    """WSGI controller for container requests"""
    server_type = 'Container'

    # Ensure these are all lowercase
    pass_through_headers = ['x-container-read', 'x-container-write',
                            'x-container-sync-key', 'x-container-sync-to',
                            'x-versions-location']

    def __init__(self, app, account_name, container_name, **kwargs):
        """
        :param app: the proxy server application
        :param account_name: URL-quoted account name
        :param container_name: URL-quoted container name
        """
        super(ContainerController, self).__init__(app)
        self.account_name = unquote(account_name)
        self.container_name = unquote(container_name)

    def _x_remove_headers(self):
        """Return the list of 'x-remove-...' headers this controller honors
        for clearing container metadata."""
        st = self.server_type.lower()
        return ['x-remove-%s-read' % st,
                'x-remove-%s-write' % st,
                'x-remove-versions-location',
                'x-remove-%s-sync-key' % st,
                'x-remove-%s-sync-to' % st]

    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).

        :param req: incoming request
        """
        policy_name = req.headers.get('X-Storage-Policy')
        if not policy_name:
            # no policy requested; caller falls back to the default
            return
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid %s '%s'"
                                       % ('X-Storage-Policy', policy_name)))
        if policy.is_deprecated:
            body = 'Storage Policy %r is deprecated' % (policy.name)
            raise HTTPBadRequest(request=req, body=body)
        return int(policy)

    def clean_acls(self, req):
        """Validate/normalize ACL headers via the auth middleware's
        'swift.clean_acl' callback; return an error response on bad ACLs,
        else None."""
        if 'swift.clean_acl' in req.environ:
            for header in ('x-container-read', 'x-container-write'):
                if header in req.headers:
                    try:
                        req.headers[header] = \
                            req.environ['swift.clean_acl'](header,
                                                           req.headers[header])
                    except ValueError as err:
                        return HTTPBadRequest(request=req, body=str(err))
        return None

    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests."""
        ai = self.account_info(self.account_name, req)
        # ai[1] is the account's container count; falsy means no account
        if not ai[1]:
            if 'swift.authorize' in req.environ:
                aresp = req.environ['swift.authorize'](req)
                if aresp:
                    # Don't cache this. It doesn't reflect the state of the
                    # container, just that the user can't access it.
                    return aresp
            # Don't cache this. The lack of account will be cached, and that
            # is sufficient.
            return HTTPNotFound(request=req)
        part = self.app.container_ring.get_part(
            self.account_name, self.container_name)
        concurrency = self.app.container_ring.replica_count \
            if self.app.concurrent_gets else 1
        node_iter = self.app.iter_nodes(self.app.container_ring, part)
        # always ask the backend for json listings
        params = req.params
        params['format'] = 'json'
        req.params = params
        resp = self.GETorHEAD_base(
            req, _('Container'), node_iter, part,
            req.swift_entity_path, concurrency)
        # Cache this. We just made a request to a storage node and got
        # up-to-date information for the container.
        resp.headers['X-Backend-Recheck-Container-Existence'] = str(
            self.app.recheck_container_existence)
        set_info_cache(self.app, req.environ, self.account_name,
                       self.container_name, resp)
        if 'swift.authorize' in req.environ:
            req.acl = resp.headers.get('x-container-read')
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                # Don't cache this. It doesn't reflect the state of the
                # container, just that the user can't access it.
                return aresp
        if not req.environ.get('swift_owner', False):
            # strip owner-only headers for non-owner requests
            for key in self.app.swift_owner_headers:
                if key in resp.headers:
                    del resp.headers[key]
        return resp

    @public
    @delay_denial
    @cors_validation
    def GET(self, req):
        """Handler for HTTP GET requests."""
        return self.GETorHEAD(req)

    @public
    @delay_denial
    @cors_validation
    def HEAD(self, req):
        """Handler for HTTP HEAD requests."""
        return self.GETorHEAD(req)

    @public
    @cors_validation
    def PUT(self, req):
        """HTTP PUT request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        policy_index = self._convert_policy_to_index(req)
        if not req.environ.get('swift_owner'):
            # non-owners may not set owner-only headers
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        length_limit = self.get_name_length_limit()
        if len(self.container_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Container name length of %d longer than %d' % \
                        (len(self.container_name), length_limit)
            return resp
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts and self.app.account_autocreate:
            # account doesn't exist yet; try to create it implicitly
            if not self.autocreate_account(req, self.account_name):
                return HTTPServerError(request=req)
            account_partition, accounts, container_count = \
                self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        if 0 < self.app.max_containers_per_account <= container_count and \
                self.account_name not in self.app.max_containers_whitelist:
            # over quota: only allow the PUT if the container already exists
            container_info = \
                self.container_info(self.account_name, self.container_name,
                                    req)
            if not is_success(container_info.get('status')):
                resp = HTTPForbidden(request=req)
                resp.body = 'Reached container limit of %s' % \
                    self.app.max_containers_per_account
                return resp
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts,
                                         policy_index)
        resp = self.make_requests(
            req, self.app.container_ring,
            container_partition, 'PUT', req.swift_entity_path, headers)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        return resp

    @public
    @cors_validation
    def POST(self, req):
        """HTTP POST request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        if not req.environ.get('swift_owner'):
            # non-owners may not set owner-only headers
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'POST',
            req.swift_entity_path, [headers] * len(containers))
        return resp

    @public
    @cors_validation
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'DELETE',
            req.swift_entity_path, headers)
        # Indicates no server had the container
        if resp.status_int == HTTP_ACCEPTED:
            return HTTPNotFound(request=req)
        return resp

    def _backend_requests(self, req, n_outgoing, account_partition, accounts,
                          policy_index=None):
        """Build one header dict per outgoing backend container request,
        spreading the account host/device update targets across them.

        :param req: incoming client request
        :param n_outgoing: number of container replicas to contact
        :param account_partition: partition of the owning account
        :param accounts: account nodes that need container-count updates
        :param policy_index: storage policy index, or None for the default
        """
        additional = {'X-Timestamp': Timestamp.now().internal}
        if policy_index is None:
            additional['X-Backend-Storage-Policy-Default'] = \
                int(POLICIES.default)
        else:
            additional['X-Backend-Storage-Policy-Index'] = str(policy_index)
        headers = [self.generate_request_headers(req, transfer=True,
                                                 additional=additional)
                   for _junk in range(n_outgoing)]

        for i, account in enumerate(accounts):
            # round-robin the account update targets over the headers
            i = i % len(headers)

            headers[i]['X-Account-Partition'] = account_partition
            headers[i]['X-Account-Host'] = csv_append(
                headers[i].get('X-Account-Host'),
                '%(ip)s:%(port)s' % account)
            headers[i]['X-Account-Device'] = csv_append(
                headers[i].get('X-Account-Device'),
                account['device'])

        return headers
| |
#! /usr/bin/env python
from __future__ import unicode_literals
import datetime
import json
import unittest
from test_lib.datablob_factory import DataBlobFactory
from lib.constants import DATA_BLOB as d
from lib.constants import HTTP_CODE as http_code
from lib.constants import NFL as nfl
from lib.constants import SCOREBOARD as sb
from models.score import Score
from models.score import ScoreModel
from models.score import _ScoreDatastore as ScoreDatastore
from models.score import _ScoreMemcache as ScoreMemcache
from models.score import _ScoreSource as ScoreSource
from models.score import _ScoreFilter as ScoreFilter
from google.appengine.api import memcache
from google.appengine.ext import testbed
from test_lib.mock_service import UrlFetchMock
class TestScoreModel(unittest.TestCase):
    """Verify the default field values of a freshly constructed ScoreModel."""

    def test_initialization(self):
        """Every documented field exists and carries its default value."""
        score_model = ScoreModel()
        expected_defaults = {
            'year': 999,
            'week': 999,
            'away_name': "",
            'away_score': 0,
            'home_name': "",
            'home_score': 0,
            'game_clock': "00:00",
            'game_day': "Sun",
            'game_status': "",
            'game_tag': "",
            'game_time': "00:00",
            'game_id': 0,
            'spread_odds': 0.000,
            'spread_margin': 0.000,
        }

        self.assertIsNotNone(
            score_model,
            "Initialization")

        for field, default in expected_defaults.items():
            self.assertTrue(
                hasattr(score_model, field),
                "Has " + field)
            self.assertEqual(
                getattr(score_model, field),
                default,
                "Default " + field)

        # Test fields with non-default values
        self.assertTrue(
            hasattr(score_model, 'timestamp'),
            "Has timestamp")
class TestScoreAbstract(unittest.TestCase):
    """The abstract Score base class must leave its template methods
    unimplemented until a concrete subclass provides them."""

    def setUp(self):
        self.score = Score()
        self.week = 1

    def tearDown(self):
        pass

    def test_initial(self):
        self.assertIsNone(
            self.score.next,
            "Testing initial state")

    def test_fetch_is_not_implemented(self):
        # BUG FIX: removed a stray trailing comma that wrapped the call
        # statement in a pointless one-element tuple expression.
        with self.assertRaises(NotImplementedError):
            self.score.fetch(self.week)

    def test_protected_fetch_is_unimplemented(self):
        with self.assertRaises(NotImplementedError):
            self.score._fetch_score(self.week)

    def test_save_is_not_implemented(self):
        data = {}
        # BUG FIX: removed a stray trailing comma (same as fetch test above).
        with self.assertRaises(NotImplementedError):
            self.score.save(self.week, data)

    def test_protected_save_is_unimplemented(self):
        data = {}
        with self.assertRaises(NotImplementedError):
            self.score._save_score(self.week, data)
class TestScoreMemcache(unittest.TestCase):
    """Unit tests for _ScoreMemcache, the memcache-backed Score handler.

    Each test runs against a fresh in-memory memcache stub provided by
    the App Engine testbed.
    """
    def setUp(self):
        # Stand up an isolated memcache stub per test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_memcache_stub()
        self.score_memcache = ScoreMemcache()
        self.factory = DataBlobFactory()
        # NOTE(review): '%s' is a platform-specific strftime code (epoch
        # seconds on glibc) — confirm it works on every target platform.
        self.timestamp = int(datetime.datetime.now().strftime('%s'))
        # Pseudo-random week number so consecutive runs don't collide.
        self.week = self.timestamp % 1000 + 50
    def tearDown(self):
        self.testbed.deactivate()
    def test_super_class(self):
        """ScoreMemcache participates in the Score chain hierarchy."""
        self.assertTrue(
            issubclass(ScoreMemcache, Score),
            "ScoreMemcache is a subclass of Score")
    def test_missing_next_chain(self):
        """A bare ScoreMemcache has no successor configured."""
        self.assertIsNone(
            self.score_memcache.next,
            "Default doesn't have next in chain")
    def test_fetch_basic(self):
        """Fetch misses before data is cached and hits afterwards."""
        data = self.factory.generate_data(
            timestamp=self.timestamp,
            week=self.week)
        # Tag format must match _ScoreMemcache.__tag (see test_tag_creation).
        tag = "SCORES_S2016W" + unicode(self.week)
        result = {}
        result = self.score_memcache.fetch(self.week)
        self.assertIsNone(
            result,
            "Memcache miss")
        memcache.add(tag, json.dumps(data))
        result = self.score_memcache.fetch(self.week)
        self.assertIsNotNone(
            result,
            "Memcache hit")
        self.assertEqual(
            result[d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result[d.NFL_GAME_ID],
            data['data'][d.NFL_GAME_ID],
            "NFL game ID matches")
    def test_save(self):
        """Saving one game writes a JSON blob under the week's tag."""
        data = [
            self.factory.generate_data(week=self.week)
        ]
        result = 0
        result_dict = {}
        result_str = ""
        tag = "SCORES_S2016W" + unicode(self.week)
        result = self.score_memcache.save(self.week, data)
        self.assertEqual(
            result,
            1,
            "Memcached exactly 1 item")
        # Verify the raw cache entry directly, bypassing fetch().
        result_str = memcache.get(tag)
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result_dict['data'][0][d.NFL_GAME_ID],
            data[0][d.NFL_GAME_ID],
            "NFL game ID matches")
    def test_tag_creation(self):
        """The private __tag helper builds the expected cache key."""
        tag = "SCORES_S2016W" + unicode(self.week)
        # Name-mangled access to the private helper under test.
        result_str = self.score_memcache._ScoreMemcache__tag(self.week)
        self.assertEqual(
            result_str,
            tag,
            "Tag creation")
    def test_save_multiple(self):
        """Saving several games reports each one as cached."""
        data = [
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week+1),
            self.factory.generate_data(week=self.week+2)
        ]
        result = 0
        result = self.score_memcache.save(self.week, data)
        self.assertEqual(
            result,
            3,
            "Memcached exactly 3 item")
    @unittest.skip("Should not have to check for input type")
    def test_save_non_list(self):
        """
        Non-list data is passed in to save
        """
        data = self.factory.generate_data(week=self.week)
        result = 0
        result_dict = {}
        result_str = ""
        tag = "SCORES_S2016W" + unicode(self.week)
        result = self.score_memcache.save(self.week, data)
        self.assertEqual(
            result,
            1,
            "Memcached exactly 1 item")
        result_str = memcache.get(tag)
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result_dict['data'][d.NFL_GAME_ID],
            data[d.NFL_GAME_ID],
            "NFL game ID matches")
    def test_save_incremental(self):
        """
        Subsequent saves should add or replace information, not delete.
        """
        data = self.factory.generate_data(week=self.week)
        result = 0
        result_dict = {}
        result_str = ""
        tag = "SCORES_S2016W" + unicode(self.week)
        update_data = {}
        result = self.score_memcache.save(self.week, [data])
        self.assertEqual(
            result,
            1,
            "Memcached exactly 1 item")
        result_str = memcache.get(tag)
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result_dict['data'][0][d.NFL_GAME_ID],
            data[d.NFL_GAME_ID],
            "NFL game ID matches")
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.HOME_SCORE],
            data[d.HOME_SCORE],
            "Home score is the same")
        self.assertEqual(
            result_dict['data'][0][d.HOME_NAME],
            data[d.HOME_NAME],
            "Home name is the same")
        # Partial update: only week, game id and a bumped home score.
        update_data = {
            d.GAME_WEEK: self.week,
            d.NFL_GAME_ID: data[d.NFL_GAME_ID],
            d.HOME_SCORE: data[d.HOME_SCORE] + 7
        }
        result = self.score_memcache.save(self.week, [update_data])
        self.assertEqual(
            result,
            1,
            "Memcached exactly 1 item")
        result_str = memcache.get(tag)
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result_dict['data'][0][d.NFL_GAME_ID],
            update_data[d.NFL_GAME_ID],
            "NFL game ID matches")
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.HOME_SCORE],
            update_data[d.HOME_SCORE],
            "Home score is the same")
        # Fields absent from the update must survive from the first save.
        self.assertIsNotNone(
            result_dict['data'][0][d.HOME_NAME],
            "Home name exists in result")
        self.assertEqual(
            result_dict['data'][0][d.HOME_NAME],
            data[d.HOME_NAME],
            "Home name is the same")
    def test_validate_data(self):
        """
        Verify helper function validates data
        """
        data = self.factory.generate_data(week=self.week)
        multiple_data = []
        result = None
        result = self.score_memcache._ScoreMemcache__validate_data([data])
        for key in data:
            self.assertEqual(
                data[key],
                result[0][key],
                key + " matches")
        # Unknown keys should be stripped by validation.
        data['GreateNess'] = 'awaits'
        result = self.score_memcache._ScoreMemcache__validate_data([data])
        self.assertFalse(
            "GreateNess" in result[0],
            "Extraneous data omitted")
        mult_data = [
            data,
            self.factory.generate_data(week=self.week)
        ]
        result = self.score_memcache._ScoreMemcache__validate_data(mult_data)
        for index, game in enumerate(result):
            for key in game:
                self.assertEqual(
                    mult_data[index][key],
                    game[key],
                    key + " matches")
    def test_sync_with_scores_no_data(self):
        """
        Validate the helper function updates when there isn't
        any data in Memcache
        """
        data = [
            self.factory.generate_data(week=self.week)
        ]
        result = None
        result = self.score_memcache._ScoreMemcache__sync_with_scores(
            self.week,
            data)
        self.assertIsNotNone(
            result,
            "sync_with_scores returned a value")
        self.assertEqual(
            len(result),
            len(data),
            "Result contains the correct amount of elements")
        for index, game in enumerate(data):
            for key in data[index]:
                self.assertEqual(
                    data[index][key],
                    result[index][key],
                    key + " matches")
    def test_sync_with_scores(self):
        """
        Validate the helper function updates
        """
        data = [
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week)
        ]
        to_update = 1
        update_data = data[to_update]
        result = None
        result_int = 0
        # Prepopulate memcache
        result_int = self.score_memcache.save(self.week, data)
        self.assertEqual(
            len(data),
            result_int,
            "Correct number of elements saved")
        # Mutate only one game, then sync just that game back in.
        update_data[d.HOME_SCORE] += 14
        result = self.score_memcache._ScoreMemcache__sync_with_scores(
            self.week,
            [update_data])
        self.assertIsNotNone(
            result,
            "sync_with_scores returned a value")
        self.assertEqual(
            len(result),
            len(data),
            "Result contains the correct amount of elements")
        for index, game in enumerate(data):
            if index != to_update:
                for key in data[index]:
                    self.assertEqual(
                        data[index][key],
                        result[index][key],
                        key + " matches for index " + unicode(index))
            else:
                for key in update_data:
                    self.assertEqual(
                        update_data[key],
                        result[index][key],
                        key + " matches for index " + unicode(index))
    def test_save_additional_field(self):
        """
        Validate memcache updates when a field previously not recorded
        (i.e., spread-related data) is given to be added.
        """
        data = self.factory.generate_data(week=self.week)
        result = 0
        result_dict = {}
        result_str = ""
        spread_margin = data[d.SPREAD_MARGIN]
        spread_odds = data[d.SPREAD_ODDS]
        tag = "SCORES_S2016W" + unicode(self.week)
        update_data = {}
        # Remove fields to be added during this test
        del data[d.SPREAD_MARGIN]
        del data[d.SPREAD_ODDS]
        result = self.score_memcache.save(self.week, [data])
        self.assertEqual(
            result,
            1,
            "Memcached exactly 1 item")
        # Validate preloaded data in memcache
        result_str = memcache.get(tag)
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result_dict['data'][0][d.NFL_GAME_ID],
            data[d.NFL_GAME_ID],
            "NFL game ID matches")
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.HOME_SCORE],
            data[d.HOME_SCORE],
            "Home score is the same")
        self.assertEqual(
            result_dict['data'][0][d.HOME_NAME],
            data[d.HOME_NAME],
            "Home name is the same")
        # Second save reintroduces the spread fields.
        update_data = {
            d.GAME_WEEK: self.week,
            d.NFL_GAME_ID: data[d.NFL_GAME_ID],
            d.HOME_SCORE: data[d.HOME_SCORE],
            d.SPREAD_ODDS: spread_odds,
            d.SPREAD_MARGIN: spread_margin
        }
        result = self.score_memcache.save(self.week, [update_data])
        self.assertEqual(
            result,
            1,
            "Memcached exactly 1 item")
        result_str = memcache.get(tag)
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.GAME_WEEK],
            self.week,
            "Week is the same")
        self.assertEqual(
            result_dict['data'][0][d.NFL_GAME_ID],
            update_data[d.NFL_GAME_ID],
            "NFL game ID matches")
        result_dict = json.loads(result_str)
        self.assertEqual(
            result_dict['data'][0][d.HOME_SCORE],
            update_data[d.HOME_SCORE],
            "Home score is the same")
        self.assertIsNotNone(
            result_dict['data'][0][d.HOME_NAME],
            "Home name exists in result")
        self.assertEqual(
            result_dict['data'][0][d.HOME_NAME],
            data[d.HOME_NAME],
            "Home name is the same")
        self.assertEqual(
            result_dict['data'][0][d.SPREAD_MARGIN],
            spread_margin,
            "Spread margin is the same")
        self.assertEqual(
            result_dict['data'][0][d.SPREAD_ODDS],
            spread_odds,
            "Spread odds are the same")
class TestScoreDatastore(unittest.TestCase):
    """Unit tests for _ScoreDatastore, the datastore-backed Score handler.

    Each test runs against a fresh in-memory datastore v3 stub provided
    by the App Engine testbed.
    """
    def setUp(self):
        # Stand up an isolated datastore stub per test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.score_datastore = ScoreDatastore()
        self.factory = DataBlobFactory()
        # NOTE(review): '%s' is a platform-specific strftime code (epoch
        # seconds on glibc) — confirm it works on every target platform.
        self.timestamp = int(datetime.datetime.now().strftime('%s'))
        # Pseudo-random week number so consecutive runs don't collide.
        self.week = self.timestamp % 1000 + 50
    def tearDown(self):
        self.testbed.deactivate()
    def test_init(self):
        """ScoreDatastore subclasses Score and starts without a chain."""
        # Test super class
        self.assertTrue(
            issubclass(ScoreDatastore, Score),
            "ScoreMemcache is a subclass of Score")
        # Validate there's nothing in the chain by default
        self.assertIsNone(
            self.score_datastore.next,
            "Default doesn't have next in chain")
    def test_fetch_basic(self):
        """
        Test basic fetch operation
        """
        data = self.factory.generate_data(week=self.week)
        result = 0
        # Preload one entity directly via the model, bypassing save().
        self.assertTrue(
            ScoreModel(**data).put(),
            "Put data into database")
        result = self.score_datastore.fetch(self.week)
        self.assertIsNotNone(
            result,
            "Datastore got us a result")
        self.assertEqual(
            len(result),
            1,
            "Only 1 game in the datastore")
        self.assertEqual(
            result[0][d.HOME_SCORE],
            data[d.HOME_SCORE],
            "Home score is the same")
        self.assertEqual(
            result[0][d.AWAY_SCORE],
            data[d.AWAY_SCORE],
            "Aways score is the same")
        self.assertEqual(
            result[0][d.NFL_GAME_ID],
            data[d.NFL_GAME_ID],
            "NFL game ID matches")
        self.assertEqual(
            result[0][d.GAME_WEEK],
            data[d.GAME_WEEK],
            "Season week matches")
    def test_save_basic(self):
        """
        Test basic save operation
        """
        data = self.factory.generate_data(week=self.week)
        result = 0
        result_arr = []
        result = self.score_datastore.save(self.week, [data])
        self.assertEqual(
            result,
            1,
            "Saved exactly 1 entry")
        # Fetch limit of 2 so an unexpected second entity is detected.
        result_arr = ScoreModel().all().fetch(2)
        self.assertEqual(
            len(result_arr),
            1,
            "Fetch exactly 1 entry")
        self.assertEqual(
            result_arr[0].away_score,
            data[d.AWAY_SCORE],
            "Away score is the same")
        self.assertEqual(
            result_arr[0].home_score,
            data[d.HOME_SCORE],
            "Home score is the same")
        self.assertEqual(
            result_arr[0].game_id,
            data[d.NFL_GAME_ID],
            "NFL game ID matches")
        self.assertEqual(
            result_arr[0].week,
            data[d.GAME_WEEK],
            "Season week matches")
        self.assertIsNotNone(
            result_arr[0].timestamp,
            "Timestamp is present")
    def test_save_updates(self):
        """
        Saving when a pre-existing entry is present leads to the data
        being updated instead.
        """
        data = self.factory.generate_data(week=self.week)
        increment_score = 120
        result = 0
        result_arr = []
        timestamp = None
        # Preload data into the datastore
        self.assertTrue(
            ScoreModel(**data).put(),
            "Preload successful")
        timestamp = ScoreModel().all().fetch(1)[0].timestamp
        data[d.HOME_SCORE] += increment_score
        result = self.score_datastore.save(self.week, [data])
        self.assertEqual(
            result,
            1,
            "Saved exactly 1 entry")
        result_arr = ScoreModel().all().fetch(2)
        self.assertEqual(
            len(result_arr),
            1,
            "Fetch exactly 1 entry")
        self.assertEqual(
            result_arr[0].home_score,
            data[d.HOME_SCORE],
            "Data was updated")
        self.assertTrue(
            timestamp < result_arr[0].timestamp,
            "Timestamp was updated")
    def test_save_multiple_updates(self):
        """
        Multiple saves, when pre-existing entries are present, lead
        to data being updated instead
        """
        data = [
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week)
        ]
        increment_score = 120
        result = 0
        result_arr = []
        # Preload data
        ScoreModel(**data[0]).put()
        ScoreModel(**data[1]).put()
        ScoreModel(**data[2]).put()
        # Modify data
        data[0][d.HOME_SCORE] += increment_score
        data[1][d.AWAY_SCORE] += increment_score
        result = self.score_datastore.save(self.week, data)
        self.assertEqual(
            result,
            3,
            "Saved exactly 3 entries [" + unicode(result) + "]")
        result_arr = ScoreModel().all().fetch(4)
        self.assertEqual(
            len(result_arr),
            3,
            "Fetch exactly 3 entries")
        self.assertEqual(
            result_arr[0].home_score,
            data[0][d.HOME_SCORE],
            "Data was updated")
        self.assertEqual(
            result_arr[1].away_score,
            data[1][d.AWAY_SCORE],
            "Data was updated")
        self.assertEqual(
            result_arr[2].home_score,
            data[2][d.HOME_SCORE],
            "Data is untouched")
    def test_saves_and_updates(self):
        """
        Saving with a mix of data that's pre-existing and new
        """
        data = [
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week),
            self.factory.generate_data(week=self.week)
        ]
        increment_score = 120
        result = 0
        result_arr = []
        timestamp_dynamic = None
        timestamp_static = None
        # Order the data by increasing nfl_game_id
        data[1][d.NFL_GAME_ID] += 10000
        data[2][d.NFL_GAME_ID] += 30000
        # Preload data
        ScoreModel(**data[0]).put()
        ScoreModel(**data[2]).put()
        result_arr = ScoreModel().all().order("game_id").fetch(2)
        timestamp_dynamic = result_arr[0].timestamp  # 0'th data entry
        timestamp_static = result_arr[1].timestamp  # 2nd data entry
        # Modify data
        data[0][d.HOME_SCORE] += increment_score
        data[1][d.AWAY_SCORE] += increment_score
        # NOTE(review): week literal 123456 here differs from self.week
        # used elsewhere — presumably intentional (save keys off game_id,
        # not week)? Confirm against _ScoreDatastore.save.
        result = self.score_datastore.save(123456, data)
        self.assertEqual(
            result,
            3,
            "Saved exactly 3 entries")
        result_arr = ScoreModel().all().order("game_id").fetch(4)
        self.assertEqual(
            len(result_arr),
            3,
            "Fetch exactly 3 entries")
        self.assertEqual(
            result_arr[0].home_score,
            data[0][d.HOME_SCORE],
            "Home score was updated")
        self.assertTrue(
            timestamp_dynamic < result_arr[0].timestamp,
            "Game 0 timestamp was updated")
        self.assertEqual(
            result_arr[1].away_score,
            data[1][d.AWAY_SCORE],
            "Game 1 is ok")
        self.assertEqual(
            result_arr[2].home_score,
            data[2][d.HOME_SCORE],
            "Data is untouched")
        # NOTE(review): message says "unchanged" but the assertion checks
        # the timestamp DID advance (auto_now refresh on re-put?) — the
        # message text looks stale; confirm intent before changing.
        self.assertTrue(
            timestamp_static < result_arr[2].timestamp,
            "Game 2 timestamp is unchanged")
    def test_stale_data_threshold(self):
        """
        Test against the threshold property for considering data as stale
        """
        # Name-mangled access to the private class constant.
        self.assertEqual(
            getattr(ScoreDatastore, "_ScoreDatastore__THRESHOLD"),
            300,
            "Threshold is at 300")
    @unittest.skip("Timestamp is always unmodifiable via auto_now option")
    def test_stale_data_fetch(self):
        """
        Verify that the datastore rejects data that is considered stale.
        """
        data = self.factory.generate_data(week=self.week)
        threshold = datetime.timedelta(
            seconds=getattr(ScoreDatastore, '_ScoreDatastore__THRESHOLD'))
        data[d.TIMESTAMP] = datetime.datetime.utcnow() - threshold
        result_arr = []
        ScoreModel(**data).put()
        result_arr = ScoreModel().all().fetch(2)
        self.assertEqual(
            len(result_arr),
            1,
            "One entry pre-loaded in the datastore")
        result_arr = self.score_datastore.fetch(self.week)
        self.assertEqual(
            len(result_arr),
            0,
            "Datastore returned an empty list")
class TestScoreSource(unittest.TestCase):
    """Tests for _ScoreSource, which pulls scores from the remote feed
    through a mocked urlfetch service."""
    def setUp(self):
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_urlfetch_stub()
        # Create the mocked service & inject it into the testbed
        self.fetch_mock = UrlFetchMock()
        self.testbed._register_stub(testbed.URLFETCH_SERVICE_NAME, self.fetch_mock)
        self.score_source = ScoreSource()
        self.factory = DataBlobFactory()
        self.timestamp = int(datetime.datetime.now().strftime('%s'))
        self.week = self.timestamp % 1000 + 50
        self.content_str = (
            "{\"ss\":[[\"Sun\",\"8:00\",\"Pregame\",,\"SF\",,"
            "\"BAL\",,,,\"56093\",,\"PRE0\",\"2013\"]]}").encode("UTF-8")
    def tearDown(self):
        self.testbed.deactivate()
    def _prime_mock(self, status_code):
        """Point the urlfetch mock at the scoreboard URL with the
        canned payload and the given HTTP status code."""
        self.fetch_mock.set_return_values(
            content=self.content_str,
            final_url=(sb.URL_REG).encode("UTF-8"),
            status_code=status_code)
    def test_init(self):
        """ScoreSource subclasses Score and starts without a successor."""
        self.assertTrue(
            issubclass(ScoreSource, Score),
            "ScoreMemcache is a subclass of Score")
        self.assertIsNone(
            self.score_source.next,
            "Default doesn't have next in chain")
    def test_fetch(self):
        """A 200 response yields parsed score data."""
        self._prime_mock(200)
        result = self.score_source.fetch(self.week)
        self.assertIsNotNone(
            result,
            "Source fetch was successful")
    def test_fetch_not_ok(self):
        """Any non-200 status is treated as a failed fetch."""
        self._prime_mock(201)
        result = self.score_source.fetch(self.week)
        self.assertIsNone(
            result,
            "Source fetch was (purposefully) unsuccessful.")
    def test_save(self):
        """
        Verify save returns the number of elements requested to be
        saved.
        """
        data = [
            self.factory.generate_data(1234),
            self.factory.generate_data(1235),
            self.factory.generate_data(1236),
        ]
        self.assertEqual(
            self.score_source.save(self.week, data),
            len(data),
            "Received correct quantity")
class TestScoreFilter(unittest.TestCase):
    """Tests for _ScoreFilter, which normalizes week numbers before
    delegating to the next Score handler in the chain."""
    class TestMockScore(Score):
        """Chain terminator returning canned data, so the filter's
        delegation can be observed without real backends."""
        def __init__(self, test_data):
            self.test_data = test_data
            super(TestScoreFilter.TestMockScore, self).__init__()
        def _fetch_score(self, week):
            # Disabled week assertion kept for reference:
            #self.test_data['unit_test'].assertEqual(self.test_data['expected_week'],week,'week matches')
            return self.test_data['expected_result']
        def _save_score(self, week, data):
            #self.test_data['unit_test'].assertEqual(self.test_data['expected_week'],week,'week matches')
            return self.test_data['expected_result']
    def setUp(self):
        self.factory = DataBlobFactory()
        self.timestamp = int(datetime.datetime.now().strftime('%s'))
        # Pseudo-random week kept under the preseason prefix range.
        self.week = self.timestamp % 40 + 50
        self.test_data = {
            "unit_test": self,
            "expected_week": self.week + nfl.WEEK_PREFIX['REG'],
            "expected_result": self.factory.generate_data(week=self.week)
        }
        self.score_filter = ScoreFilter(self.TestMockScore(self.test_data))
    def test_init(self):
        """ScoreFilter subclasses Score and starts without a successor."""
        score_filter = ScoreFilter()
        # Test super class.  Message fixed: it previously said
        # "ScoreMemcache is a subclass of Score" (copy-paste error).
        self.assertTrue(
            issubclass(ScoreFilter, Score),
            "ScoreFilter is a subclass of Score")
        # Validate there's nothing in the chain by default
        self.assertIsNone(
            score_filter.next,
            "Default doesn't have next in chain")
    def test_fetch_basic(self):
        """Fetch passes through the chain and leaves the week intact."""
        expected_result = self.test_data['expected_result']
        result = self.score_filter.fetch(self.week)
        for key in expected_result:
            self.assertTrue(key in result, 'Result has the correct key')
            self.assertEqual(expected_result[key],
                             result[key],
                             'Result values match')
        self.assertEqual(self.week,
                         result['week'],
                         'Week seems unmodified')
    def test_fetch_fail_if_not_chained(self):
        """Without a successor the filter has nothing to fetch from."""
        score_filter = ScoreFilter()
        self.assertIsNone(score_filter.fetch(self.week),
                          'Fetch (expectedly) returned nothing')
    def test_save_basic(self):
        """Save passes through the chain and leaves the week intact."""
        expected_result = self.test_data['expected_result']
        result = self.score_filter.save(self.week, expected_result)
        for key in expected_result:
            self.assertTrue(key in result, 'Result has the correct key')
            self.assertEqual(expected_result[key],
                             result[key],
                             'Result values match')
        self.assertEqual(self.week,
                         result['week'],
                         'Week seems unmodified')
    def test_fetch_week_untouched_if_beyond_preseason_threshold(self):
        """A week already carrying the preseason prefix is not rewritten."""
        week = nfl.WEEK_PREFIX['PRE'] + self.week
        self.test_data['expected_week'] = week
        expected_result = self.test_data['expected_result']
        score_filter = ScoreFilter(self.TestMockScore(self.test_data))
        # Bug fix: fetch on the locally-built filter.  Previously this
        # called self.score_filter, leaving the local score_filter unused
        # (inconsistent with the save variant of this test below).
        result = score_filter.fetch(week)
        for key in expected_result:
            self.assertTrue(key in result, 'Result has the correct key')
            self.assertEqual(expected_result[key],
                             result[key],
                             'Result values match')
    def test_save_week_untouched_if_beyond_preseason_threshold(self):
        """A week already carrying the preseason prefix is not rewritten."""
        week = nfl.WEEK_PREFIX['PRE'] + self.week
        self.test_data['expected_week'] = week
        expected_result = self.test_data['expected_result']
        score_filter = ScoreFilter(self.TestMockScore(self.test_data))
        result = score_filter.save(week, expected_result)
        for key in expected_result:
            self.assertTrue(key in result, 'Result has the correct key')
            self.assertEqual(expected_result[key],
                             result[key],
                             'Result values match')
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
from system_test import TestCase, Qdrouterd, DIR, main_module
from system_test import unittest
import proton
from proton import SSLDomain, Delivery
from proton.utils import BlockingConnection
from qpid_dispatch_internal.compat import BINARY
class QdSSLUseridTest(TestCase):
    """System test: qdrouterd derives a connection user id from the
    client certificate per each sslProfile's uidFormat, with optional
    uid-to-display-name mapping files.

    One router is started with many sslProfiles/listeners; each test
    connects to the listener whose profile exercises a given format.
    """
    @staticmethod
    def ssl_file(name):
        # Resolve a certificate/key file shipped under ssl_certs/.
        return os.path.join(DIR, 'ssl_certs', name)
    @classmethod
    def setUpClass(cls):
        # Build and launch the router once for the whole test class.
        super(QdSSLUseridTest, cls).setUpClass()
        ssl_profile1_json = os.path.join(DIR, 'displayname_files', 'profile_names1.json')
        ssl_profile2_json = os.path.join(DIR, 'displayname_files', 'profile_names2.json')
        policy_config_path = os.path.join(DIR, 'policy-4')
        config = Qdrouterd.Config([
            ('router', {'id': 'QDR', 'workerThreads': 1}),
            ('policy', {'maxConnections': 20, 'policyDir': policy_config_path, 'enableVhostPolicy': 'true'}),
            # sha1
            ('sslProfile', {'name': 'server-ssl1',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '1',
                            'password': 'server-password'}),
            # sha256
            ('sslProfile', {'name': 'server-ssl2',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '2',
                            'password': 'server-password'}),
            # sha512
            ('sslProfile', {'name': 'server-ssl3',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '5',
                            'password': 'server-password'}),
            # sha256 combination
            ('sslProfile', {'name': 'server-ssl4',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '2noucs',
                            'password': 'server-password'}),
            # sha1 combination
            ('sslProfile', {'name': 'server-ssl5',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '1cs',
                            'password': 'server-password'}),
            # sha512 combination
            ('sslProfile', {'name': 'server-ssl6',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': 'cs5',
                            'password': 'server-password'}),
            # no fingerprint field
            ('sslProfile', {'name': 'server-ssl7',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': 'nsuco',
                            'password': 'server-password'}),
            # no fingerprint field variation
            ('sslProfile', {'name': 'server-ssl8',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': 'scounl',
                            'password': 'server-password'}),
            # no uidFormat
            ('sslProfile', {'name': 'server-ssl9',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'password': 'server-password'}),
            # one component of uidFormat is invalid (x), the unrecognized component will be ignored,
            # this will be treated like 'uidFormat': '1'
            ('sslProfile', {'name': 'server-ssl10',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '1x',
                            'uidNameMappingFile': ssl_profile2_json,
                            'password': 'server-password'}),
            # All components in the uidFormat are unrecognized, pn_get_transport_user will be returned
            ('sslProfile', {'name': 'server-ssl11',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': 'abxd',
                            'password': 'server-password'}),
            ('sslProfile', {'name': 'server-ssl12',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '1',
                            'uidNameMappingFile': ssl_profile1_json,
                            'password': 'server-password'}),
            # should translate a display name
            ('sslProfile', {'name': 'server-ssl13',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '2',
                            # displayNameFile has been deprecated. We are using it here to test backward compatibility.
                            'displayNameFile': ssl_profile2_json,
                            'password': 'server-password'}),
            ('sslProfile', {'name': 'server-ssl14',
                            'caCertFile': cls.ssl_file('ca-certificate.pem'),
                            'certFile': cls.ssl_file('server-certificate.pem'),
                            'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                            'uidFormat': '1',
                            'uidNameMappingFile': ssl_profile1_json,
                            'password': 'server-password'}),
            # One listener per profile; listener index N maps to profile
            # server-ssl(N+1), so self.address(i) selects a uidFormat.
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl1', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl2', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl3', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl4', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl5', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl6', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl7', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl8', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl9', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl10', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl11', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            # peer is not being authenticated here. the user must be "anonymous" which is what pn_transport_get_user
            # returns
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl12', 'authenticatePeer': 'no',
                          'requireSsl': 'yes', 'saslMechanisms': 'ANONYMOUS'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl13', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'sslProfile': 'server-ssl14', 'authenticatePeer': 'yes',
                          'requireSsl': 'yes', 'saslMechanisms': 'EXTERNAL'}),
            ('listener', {'port': cls.tester.get_port(), 'authenticatePeer': 'no'})
        ])
        cls.router = cls.tester.qdrouterd('ssl-test-router', config, wait=True)
    def address(self, index):
        # Address of the index'th listener configured above.
        return self.router.addresses[index]
    def create_ssl_domain(self, ssl_options_dict, mode=SSLDomain.MODE_CLIENT):
        """Return proton.SSLDomain from command line options or None if no SSL options specified.
        @param opts: Parsed options including connection_options()
        """
        certificate, key, trustfile, password = ssl_options_dict.get('ssl-certificate'), \
            ssl_options_dict.get('ssl-key'), \
            ssl_options_dict.get('ssl-trustfile'), \
            ssl_options_dict.get('ssl-password')
        if not (certificate or trustfile):
            return None
        domain = SSLDomain(mode)
        if trustfile:
            domain.set_trusted_ca_db(str(trustfile))
            domain.set_peer_authentication(SSLDomain.VERIFY_PEER, str(trustfile))
        if certificate:
            domain.set_credentials(str(certificate), str(key), str(password))
        return domain
class QdSSLUseridProxy(QdSSLUseridTest):
    """System tests: the router must reject messages whose user_id
    property does not match the SSL-derived connection user id."""
    def test_message_user_id_proxy_bad_name_disallowed(self):
        """A message carrying a mismatched user_id is REJECTED."""
        ssl_opts = dict()
        ssl_opts['ssl-trustfile'] = self.ssl_file('ca-certificate.pem')
        ssl_opts['ssl-certificate'] = self.ssl_file('client-certificate.pem')
        ssl_opts['ssl-key'] = self.ssl_file('client-private-key.pem')
        ssl_opts['ssl-password'] = 'client-password'
        # create the SSL domain object
        domain = self.create_ssl_domain(ssl_opts)
        # Send a message with bad user_id. This message should be rejected.
        # Connection has user_id 'user13'.
        addr = self.address(13).replace("amqp", "amqps")
        blocking_connection = BlockingConnection(addr, ssl_domain=domain)
        blocking_sender = blocking_connection.create_sender("$management")
        request = proton.Message()
        request.user_id = BINARY("bad-user-id")
        result = Delivery.ACCEPTED
        try:
            delivery = blocking_sender.send(request, timeout=10)
            result = delivery.remote_state
        except proton.utils.SendException as e:
            # A rejected delivery may surface as a SendException carrying
            # the terminal state.
            result = e.state
        self.assertTrue(result == Delivery.REJECTED,
                        "Router accepted a message with user_id that did not match connection user_id")
    def test_message_user_id_proxy_zzz_credit_handled(self):
        # Test for DISPATCH-519. Make sure the REJECTED messages result
        # in the client receiving credit.
        # ('zzz' in the name makes this run last under alphabetical
        # test ordering.)
        credit_limit = 250  # router issues 250 credits
        ssl_opts = dict()
        ssl_opts['ssl-trustfile'] = self.ssl_file('ca-certificate.pem')
        ssl_opts['ssl-certificate'] = self.ssl_file('client-certificate.pem')
        ssl_opts['ssl-key'] = self.ssl_file('client-private-key.pem')
        ssl_opts['ssl-password'] = 'client-password'
        # create the SSL domain object
        domain = self.create_ssl_domain(ssl_opts)
        # Send a message with bad user_id. This message should be rejected.
        # Connection has user_id 'user13'.
        addr = self.address(13).replace("amqp", "amqps")
        blocking_connection = BlockingConnection(addr, ssl_domain=domain)
        blocking_sender = blocking_connection.create_sender("$management")
        request = proton.Message()
        request.user_id = BINARY("bad-user-id")
        # Sending more messages than the initial credit window proves
        # credit is replenished even though every message is rejected.
        for i in range(0, credit_limit + 1):
            result = Delivery.ACCEPTED
            try:
                delivery = blocking_sender.send(request, timeout=10)
                result = delivery.remote_state
            except proton.utils.SendException as e:
                result = e.state
            except proton.utils.Timeout as e:
                self.fail("Timed out waiting for send credit")
            self.assertTrue(result == Delivery.REJECTED,
                            "Router accepted a message with user_id that did not match connection user_id")
# Allow running this module directly as a system-test entry point.
if __name__ == '__main__':
    unittest.main(main_module())
| |
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
# http://aws.amazon.com/apache2.0/
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import struct
import unicodedata
import colorama
from awscli.compat import six
def get_text_length(text):
    """Return the display width of *text* in terminal columns.

    ``len()`` counts characters, not columns, so each character's width is
    looked up with ``unicodedata.east_asian_width``.  Characters classified
    as A (Ambiguous), F (Fullwidth) or W (Wide) occupy two columns;
    everything else occupies one.
    """
    widths = (2 if unicodedata.east_asian_width(char) in 'WFA' else 1
              for char in six.text_type(text))
    return sum(widths)
def determine_terminal_width(default_width=80):
    """Return the terminal width in columns, or *default_width* if unknown."""
    # termios/fcntl are POSIX-only; on other platforms fall back immediately.
    try:
        from fcntl import ioctl
        from termios import TIOCGWINSZ
    except ImportError:
        return default_width
    try:
        # TIOCGWINSZ fills four shorts: rows, cols, xpixels, ypixels.
        packed = ioctl(sys.stdout, TIOCGWINSZ, '\000' * 8)
        _, width = struct.unpack('hhhh', packed)[0:2]
    except Exception:
        # Not a tty, or the ioctl failed -- width cannot be determined.
        return default_width
    return width
def is_a_tty():
    """Return True when stdout is attached to a terminal, else False."""
    # Any failure (closed stream, object without fileno(), ...) means "no".
    try:
        fd = sys.stdout.fileno()
        return os.isatty(fd)
    except Exception:
        return False
def center_text(text, length=80, left_edge='|', right_edge='|',
                text_length=None):
    """Center *text* between edge characters inside *length* columns.

    ``text_length`` may be passed explicitly when the printable width of
    *text* differs from its literal width (useful when it contains ANSI
    escape codes); otherwise it is computed with get_text_length().
    """
    # postcondition: get_text_length(returned_text) == length
    if text_length is None:
        text_length = get_text_length(text)
    left_pad = (length // 2) - (text_length // 2) - 1
    consumed = get_text_length(left_edge) + left_pad + text_length
    right_pad = length - get_text_length(right_edge) - consumed
    return ''.join([left_edge, ' ' * left_pad, text,
                    ' ' * right_pad, right_edge])
def align_left(text, length, left_edge='|', right_edge='|', text_length=None,
               left_padding=2):
    """Left align *text* within *length* columns between edge characters.

    ``text_length`` may be passed explicitly when the printable width of
    *text* differs from its literal width (e.g. ANSI color codes).
    ``left_padding`` spaces follow the left edge when they fit; otherwise
    the padding is dropped.  Remaining space is padded before the right
    edge.
    """
    # postcondition: get_text_length(returned_text) == length
    if text_length is None:
        text_length = get_text_length(text)
    computed_length = (
        text_length + left_padding +
        get_text_length(left_edge) + get_text_length(right_edge))
    # Drop the padding entirely when it would overflow the target width.
    if length - computed_length >= 0:
        padding = left_padding
    else:
        padding = 0
    output = [left_edge, ' ' * padding, text]
    # Bug fix: edges are now measured with get_text_length() instead of
    # len(), so the postcondition also holds for wide edge characters
    # (matching how center_text() accounts for its edges).
    length_so_far = get_text_length(left_edge) + padding + text_length
    output.append(' ' * (length - length_so_far - get_text_length(right_edge)))
    output.append(right_edge)
    return ''.join(output)
def convert_to_vertical_table(sections):
    """Invert, in place, any section that has exactly one data row.

    A single-row horizontal table such as::

        header1 | header2 | header3
        val1    | val2    | val3

    becomes a vertical header/value table::

        header1 | val1
        header2 | val2
        header3 | val3
    """
    for index, section in enumerate(sections):
        if len(section.rows) != 1 or not section.headers:
            continue
        vertical = Section()
        vertical.title = section.title
        vertical.indent_level = section.indent_level
        for header, value in zip(section.headers, section.rows[0]):
            vertical.add_row([header, value])
        sections[index] = vertical
class IndentedStream(object):
    """Wrap a stream so every write is framed by indentation characters.

    ``indent_level`` copies of ``left_indent_char`` are emitted before the
    payload; when the payload ends with a newline, ``indent_level`` copies
    of ``right_indent_char`` are inserted before that newline so the right
    border lines up too.  All other attribute access is delegated to the
    wrapped stream.
    """

    def __init__(self, stream, indent_level, left_indent_char='|',
                 right_indent_char='|'):
        self._stream = stream
        self._indent_level = indent_level
        self._left_indent_char = left_indent_char
        self._right_indent_char = right_indent_char

    def write(self, text):
        # Left border first, then the payload.
        self._stream.write(self._left_indent_char * self._indent_level)
        if not text.endswith('\n'):
            self._stream.write(text)
            return
        # Re-insert the newline after the right border.
        self._stream.write(text[:-1])
        self._stream.write(self._right_indent_char * self._indent_level)
        self._stream.write('\n')

    def __getattr__(self, attr):
        # Everything else falls through to the wrapped stream.
        return getattr(self._stream, attr)
class Styler(object):
    """No-op styling strategy: every hook returns its input unchanged.

    Subclasses override individual hooks to decorate parts of a table.
    """

    def style_title(self, text):
        """Style a section title."""
        return text

    def style_header_column(self, text):
        """Style a column-header cell."""
        return text

    def style_row_element(self, text):
        """Style a data cell."""
        return text

    def style_indentation_char(self, text):
        """Style the indentation/border character."""
        return text
class ColorizedStyler(Styler):
    """Styler that colorizes table output with colorama ANSI sequences."""

    def __init__(self):
        # `autoreset` allows us to not have to sent reset sequences for every
        # string. `strip` lets us preserve color when redirecting.
        colorama.init(autoreset=True, strip=False)

    def style_title(self, text):
        # Originally bold + underline; currently passed through unchanged.
        return text

    def style_header_column(self, text):
        # Originally underline; currently passed through unchanged.
        return text

    def style_row_element(self, text):
        # Bright blue data cells.
        return ''.join((colorama.Style.BRIGHT, colorama.Fore.BLUE,
                        text, colorama.Style.RESET_ALL))

    def style_indentation_char(self, text):
        # Dim yellow borders.
        return ''.join((colorama.Style.DIM, colorama.Fore.YELLOW,
                        text, colorama.Style.RESET_ALL))
class MultiTable(object):
    """Render one or more ``Section``s as a bordered text table.

    When the widest section would overflow the terminal (and
    ``auto_reformat`` is enabled) single-row sections are flipped into
    vertical header/value tables so they fit.

    :param terminal_width: total rendering width; auto-detected via
        determine_terminal_width() when None.
    :param initial_section: when True, start with an empty Section so
        add_title()/add_row() can be called immediately.
    :param column_separator: character drawn between row cells.
    :param terminal: unused; kept for backward compatibility.
    :param styler: Styler used for coloring; defaults to
        ColorizedStyler() on a tty and the no-op Styler() otherwise.
    :param auto_reformat: whether over-wide tables may be reformatted.
    """

    def __init__(self, terminal_width=None, initial_section=True,
                 column_separator='|', terminal=None,
                 styler=None, auto_reformat=True):
        self._auto_reformat = auto_reformat
        if initial_section:
            self._current_section = Section()
            self._sections = [self._current_section]
        else:
            self._current_section = None
            self._sections = []
        if styler is None:
            # Move out to factory.
            if is_a_tty():
                self._styler = ColorizedStyler()
            else:
                self._styler = Styler()
        else:
            self._styler = styler
        self._rendering_index = 0
        self._column_separator = column_separator
        if terminal_width is None:
            self._terminal_width = determine_terminal_width()
        else:
            # Bug fix: an explicitly supplied terminal_width used to be
            # ignored, leaving self._terminal_width unset and making
            # render() fail with AttributeError.
            self._terminal_width = terminal_width

    def add_title(self, title):
        """Set the title of the current section."""
        self._current_section.add_title(title)

    def add_row_header(self, headers):
        """Set the column headers of the current section."""
        self._current_section.add_header(headers)

    def add_row(self, row_elements):
        """Append a data row to the current section."""
        self._current_section.add_row(row_elements)

    def new_section(self, title, indent_level=0):
        """Start a new section and make it current."""
        self._current_section = Section()
        self._sections.append(self._current_section)
        self._current_section.add_title(title)
        self._current_section.indent_level = indent_level

    def render(self, stream):
        """Write the whole table to *stream*."""
        max_width = self._calculate_max_width()
        should_convert_table = self._determine_conversion_needed(max_width)
        if should_convert_table:
            convert_to_vertical_table(self._sections)
            # Conversion changes the section shapes, so re-measure.
            max_width = self._calculate_max_width()
        stream.write('-' * max_width + '\n')
        for section in self._sections:
            self._render_section(section, max_width, stream)

    def _determine_conversion_needed(self, max_width):
        # If we don't know the width of the controlling terminal,
        # then we don't try to resize the table.
        if max_width > self._terminal_width:
            return self._auto_reformat

    def _calculate_max_width(self):
        # Width of the widest section, including its borders and
        # indentation on both sides.
        max_width = max(s.total_width(padding=4, with_border=True,
                                      outer_padding=s.indent_level)
                        for s in self._sections)
        return max_width

    def _render_section(self, section, max_width, stream):
        stream = IndentedStream(stream, section.indent_level,
                                self._styler.style_indentation_char('|'),
                                self._styler.style_indentation_char('|'))
        # The indentation characters consume width on both sides.
        max_width -= (section.indent_level * 2)
        self._render_title(section, max_width, stream)
        self._render_column_titles(section, max_width, stream)
        self._render_rows(section, max_width, stream)

    def _render_title(self, section, max_width, stream):
        # The title consists of:
        # title        : | This is the title |
        # bottom_border: ----------------------------
        if section.title:
            title = self._styler.style_title(section.title)
            stream.write(center_text(title, max_width, '|', '|',
                                     get_text_length(section.title)) + '\n')
            if not section.headers and not section.rows:
                # Title-only section: close it with a bottom border.
                stream.write('+%s+' % ('-' * (max_width - 2)) + '\n')

    def _render_column_titles(self, section, max_width, stream):
        if not section.headers:
            return
        # In order to render the column titles we need to know
        # the width of each of the columns.
        widths = section.calculate_column_widths(padding=4,
                                                 max_width=max_width)
        # The first cell needs both left and right edges '| foo |'
        # while subsequent cells only need right edges ' foo |'.
        # (Built as a list and joined, per the old TODO.)
        parts = []
        first = True
        for width, header in zip(widths, section.headers):
            stylized_header = self._styler.style_header_column(header)
            if first:
                left_edge = '|'
                first = False
            else:
                left_edge = ''
            parts.append(center_text(text=stylized_header, length=width,
                                     left_edge=left_edge, right_edge='|',
                                     text_length=get_text_length(header)))
        self._write_line_break(stream, widths)
        stream.write(''.join(parts) + '\n')

    def _write_line_break(self, stream, widths):
        # Write out something like:
        # +-------+---------+---------+
        parts = []
        first = True
        for width in widths:
            if first:
                parts.append('+%s+' % ('-' * (width - 2)))
                first = False
            else:
                parts.append('%s+' % ('-' * (width - 1)))
        parts.append('\n')
        stream.write(''.join(parts))

    def _render_rows(self, section, max_width, stream):
        if not section.rows:
            return
        widths = section.calculate_column_widths(padding=4,
                                                 max_width=max_width)
        if not widths:
            return
        self._write_line_break(stream, widths)
        for row in section.rows:
            # Built as a list and joined, per the old TODO.
            parts = []
            first = True
            for width, element in zip(widths, row):
                if first:
                    left_edge = '|'
                    first = False
                else:
                    left_edge = ''
                stylized = self._styler.style_row_element(element)
                parts.append(align_left(text=stylized, length=width,
                                        left_edge=left_edge,
                                        right_edge=self._column_separator,
                                        text_length=get_text_length(element)))
            stream.write(''.join(parts) + '\n')
        self._write_line_break(stream, widths)
class Section(object):
    """A titled block of a MultiTable: optional headers plus data rows."""

    def __init__(self):
        self.title = ''
        self.headers = []
        self.rows = []
        self.indent_level = 0
        # Column count, fixed by the first header row or data row added.
        self._num_cols = None
        # Widest content seen so far for each column (display width).
        self._max_widths = []

    def __repr__(self):
        return ("Section(title=%s, headers=%s, indent_level=%s, num_rows=%s)" %
                (self.title, self.headers, self.indent_level, len(self.rows)))

    def calculate_column_widths(self, padding=0, max_width=None):
        """Return per-column widths, optionally scaled to ``max_width``.

        Each stored maximum content width is widened by ``padding``.  When
        ``max_width`` is given, the padded widths are proportionally scaled
        and then nudged one column at a time until they sum exactly to
        ``max_width`` (postcondition: sum(widths) == max_width).
        """
        padded = [width + padding for width in self._max_widths]
        if max_width is None or not padded:
            return padded
        # Proportional scaling, then correct any rounding drift.
        factor = max_width / float(sum(padded))
        scaled = [int(round(factor * width)) for width in padded]
        off_by = sum(scaled) - max_width
        while off_by != 0:
            # Shrink columns left-to-right when too wide; grow them
            # right-to-left when too narrow.
            indices = range(len(scaled))
            if off_by < 0:
                indices = reversed(indices)
            for i in indices:
                if off_by > 0:
                    scaled[i] -= 1
                    off_by -= 1
                else:
                    scaled[i] += 1
                    off_by += 1
                if off_by == 0:
                    break
        return scaled

    def total_width(self, padding=0, with_border=False, outer_padding=0):
        """Return the full rendered width of this section."""
        border_padding = 2  # one border character on each side
        total = sum(width + padding
                    for width in self.calculate_column_widths())
        if with_border:
            total += border_padding
        total += outer_padding + outer_padding
        # Never narrower than the title plus borders and outer padding.
        title_width = (get_text_length(self.title) + border_padding +
                       outer_padding + outer_padding)
        return max(title_width, total)

    def add_title(self, title):
        self.title = title

    def add_header(self, headers):
        self._update_max_widths(headers)
        if self._num_cols is None:
            self._num_cols = len(headers)
        self.headers = self._format_headers(headers)

    def _format_headers(self, headers):
        # Hook for subclasses; headers pass through unchanged.
        return headers

    def add_row(self, row):
        if self._num_cols is None:
            self._num_cols = len(row)
        if len(row) != self._num_cols:
            raise ValueError("Row should have %s elements, instead "
                             "it has %s" % (self._num_cols, len(row)))
        row = self._format_row(row)
        self.rows.append(row)
        self._update_max_widths(row)

    def _format_row(self, row):
        # Coerce every cell to text so width measurement is uniform.
        return [six.text_type(cell) for cell in row]

    def _update_max_widths(self, row):
        if not self._max_widths:
            self._max_widths = [get_text_length(cell) for cell in row]
        else:
            for i, cell in enumerate(row):
                self._max_widths[i] = max(get_text_length(cell),
                                          self._max_widths[i])
| |
from django.db import connection
from django.conf import settings
import numpy as np
import yaml
import simplejson as json
from operator import itemgetter
import requests
import logging
logger = logging.getLogger(__name__)
def custom_sql(query, params):
    """Execute *query* with bound *params* and return all fetched rows.

    Uses the default Django database connection.  The cursor is always
    closed, which the original version failed to do (cursor leak).
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query, params)
        return cursor.fetchall()
    finally:
        cursor.close()
def sql_command(query, params):
    """Execute *query* with bound *params* for its side effects only.

    Returns nothing.  The cursor is closed even when execute() raises
    (it previously leaked).
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query, params)
    finally:
        cursor.close()
def get_assay_meta(assay_1):
    """Fetch display metadata for the assay with ChEMBL id *assay_1*.

    Returns a dict with keys: 'components' (UniProt accessions of the
    target's component sequences), 'pref_name', 'pubmed', 'description',
    and curation fields 'comment', 'timestamp', 'submitter' taken from a
    single pfam_maps row.

    NOTE(review): the second query assumes at least one pfam_maps row
    exists for this assay; otherwise ``metas[0]`` raises IndexError --
    confirm callers only pass mapped assays.
    """
    #assay_1 = assay_page.keys()[0]
    metas = custom_sql("""
    SELECT DISTINCT ass.chembl_id, dcs.pubmed_id, cs.accession, td.pref_name,
    ass.description
    FROM activities act
    JOIN docs dcs
    ON act.doc_id = dcs.doc_id
    JOIN assays ass
    ON ass.assay_id = act.assay_id
    JOIN target_dictionary td
    ON td.tid = ass.tid
    JOIN target_components tc
    ON tc.tid = td.tid
    JOIN component_sequences cs
    ON tc.component_id = cs.component_id
    WHERE ass.chembl_id = %s
    """, [assay_1])
    # NOTE(review): these three dicts are never populated or read below;
    # they look like dead locals but are kept to avoid any drift.
    component_d = {}
    pref_name_d = {}
    pubmed_d = {}
    assay_page = {}
    for meta in metas:
        assay_id = str(meta[0])
        pubmed_id = meta[1]
        uniprot = meta[2]
        pref_name = meta[3]
        description = meta[4]
        # Accumulate every component accession; the scalar fields are
        # overwritten on each row, so the last row wins for those.
        try:
            assay_page['components'].append(uniprot)
        except KeyError:
            assay_page['components'] = [uniprot]
        assay_page['pref_name']= pref_name
        assay_page['pubmed'] = pubmed_id
        assay_page['description'] = description
    # One arbitrary curation record (LIMIT 1) supplies the comment fields.
    metas = custom_sql("""
    SELECT ass.chembl_id, pm.comment, pm.timestamp, pm.submitter
    FROM activities act
    JOIN assays ass
    ON ass.assay_id = act.assay_id
    JOIN pfam_maps pm
    ON pm.activity_id = act.activity_id
    WHERE ass.chembl_id = %s LIMIT 1
    """, [assay_1])
    assay_id = metas[0][0]
    assay_page['comment'] = metas[0][1]
    assay_page['timestamp'] = metas[0][2]
    assay_page['submitter'] = metas[0][3]
    return assay_page
def get_pfam_arch(assay_page):
    """Attach Pfam domain-graphic payloads for each component.

    For every UniProt accession in ``assay_page['components']`` the Pfam
    graphic endpoint is queried; responses that are not valid JSON are
    replaced by an empty list.  Payloads accumulate, in component order,
    under ``assay_page['pfam_archs']``.  Returns the mutated dict.
    """
    for uniprot in assay_page['components']:
        logger.info(uniprot)
        response = requests.get('http://pfam.xfam.org/protein/%s/graphic' % uniprot)
        logger.info(response.status_code)
        doms = response.content
        try:
            # Parse only to validate; the raw payload is what gets stored.
            json.loads(doms)
        except ValueError:
            logger.warning('No graphic for %s', uniprot)
            doms = []
        assay_page.setdefault('pfam_archs', []).append(doms)
    return assay_page
def perc(x,y):
    """Return *x* formatted with thousands separators, together with
    ``100 * x / y`` formatted to two decimal places.
    """
    formatted = "{:,}".format(x)
    percentage = "{0:.2f}".format(100 * np.true_divide(x, y))
    return formatted, percentage
def doi2json(doi):
    """Look up citation metadata for *doi* via the EuropePMC REST service.

    Returns the first search hit as a dict; on a non-200 response or when
    the result list is empty, falls back to ``{'doi': doi}``.
    """
    url = "http://www.ebi.ac.uk/europepmc/webservices/rest/search/query=%s&format=json" % doi
    response = requests.get(url)
    if response.status_code != 200:
        return {'doi':doi}
    payload = json.loads(response.text)
    try:
        # only taking the first result should be fine with DOIs...
        return payload['resultList']['result'][0]
    except IndexError:
        return {'doi':doi}
def standardize_acts(acts):
    """Standardize raw activity rows onto a shared p-scale and filter.

    Each element of *acts* is a tuple ``(standard_value, standard_units,
    standard_type, act_id, molregno, accession)``:

    * ``Ki``/``Kd``/``IC50``/``EC50``/``AC50`` in nM are converted to the
      negative log10 molar form (``pKi`` etc.).
    * ``log Ki``/``log Kd``/``log IC50``/``log EC50``/``logAC50`` with no
      units are negated onto the same p-scale.
    * ``pKi``/``pKd`` values are shifted by Kalliokoski's adjustment from
      ``settings.GLOBAL_SETTINGS['ki_adjust']``.

    Rows whose value cannot be parsed as a float are skipped, and only
    recognized types with a standardized value >= 3 are kept.

    Returns ``(std_acts, lkp)``: a list of ``(molregno, standard_value,
    accession, act_id)`` tuples, and a dict mapping molregno to its count
    of retained activities.
    """
    std_acts = []
    lkp = {}
    for data in acts:
        try:
            standard_value = float(data[0])
        except (TypeError, ValueError):
            # Bug fix: non-numeric strings used to raise an uncaught
            # ValueError; they are now skipped like None values.
            continue
        pass_filter = False  # set when standard_type is recognized
        standard_units = data[1]
        standard_type = data[2]
        act_id = data[3]
        molregno = data[4]
        accession = data[5]
        # nM concentrations -> negative log10 molar (p-scaling).
        if standard_type in ['Ki', 'Kd', 'IC50', 'EC50', 'AC50'] and standard_units == 'nM':
            standard_value = -(np.log10(standard_value) - 9)
            standard_type = 'p' + standard_type
            pass_filter = True
        # Already log-scaled, unitless values: just negate (p-scaling).
        if standard_type in ['log Ki', 'log Kd', 'log IC50', 'log EC50', 'logAC50'] and standard_units is None:
            standard_value = -standard_value
            # Bug fix: the old "split(' ')[1]" raised IndexError on
            # 'logAC50' (no space); stripping the 'log' prefix handles
            # both spellings.
            standard_type = 'p' + standard_type.replace('log', '', 1).strip()
            pass_filter = True
        # Adjust Ki and Kd by Kalliokoski's factor, specified in settings.py.
        if standard_type in ['pKi', 'pKd']:
            standard_value = standard_value - settings.GLOBAL_SETTINGS['ki_adjust']
            pass_filter = True
        # Filtering inactives (p-value below 3).
        if standard_value >= 3 and pass_filter:
            std_acts.append((molregno, standard_value, accession, act_id))
            try:
                lkp[molregno] += 1
            except KeyError:
                lkp[molregno] = 1
    return (std_acts, lkp)
def add_meta(top_acts):
    """Extend each activity tuple with assay/target/molecule metadata.

    Each tuple in *top_acts* carries its activity_id at index 3; the
    appended fields are (pref_name, assay_chembl_id, target_chembl_id,
    molecule_chembl_id).  An empty input is returned unchanged.
    """
    if not top_acts:
        return top_acts
    act_ids = [act[3] for act in top_acts]
    placeholder = ','.join(['%s'] * len(act_ids))
    query = """
    SELECT act.activity_id, td.pref_name, ass.chembl_id, td
    .chembl_id, md.chembl_id
    FROM activities act
    JOIN assays ass
    ON act.assay_id = ass.assay_id
    JOIN target_dictionary td
    ON ass.tid = td.tid
    JOIN molecule_dictionary md
    ON md.molregno = act.molregno
    WHERE activity_id IN(%s)""" % placeholder
    rows = custom_sql(query, act_ids)
    # Index the metadata by activity_id, then extend each activity tuple.
    lkp = {row[0]: (row[1], row[2], row[3], row[4]) for row in rows}
    return [act + lkp[act[3]] for act in top_acts]
def filter_acts(std_acts, lkp):
    """Keep activities for the ten most frequently measured molecules.

    *lkp* maps molregno -> activity count.  Metadata is attached via
    add_meta(), and both the molecule list and the activity list are
    returned JSON-encoded as ``(top_mols, top_acts)``.
    """
    ranked = sorted(lkp.items(), key=itemgetter(1), reverse=True)
    top_mols = [molregno for molregno, _count in ranked][:10]
    top_acts = add_meta([act for act in std_acts if act[0] in top_mols])
    return (json.dumps(top_mols), json.dumps(top_acts))
def process_arch(data):
    """Group activity ids by their (sorted) clashing-domain combination.

    *data* rows are ``(activity_id, domain_name)`` pairs; an activity
    seen with two domains maps to the combined key ``"A vs. B"``.
    Returns the inverted mapping {domain_key: [activity_ids]}.
    """
    combos = {}
    for clash in data:
        act_id = clash[0]
        dom = str(clash[1])
        if act_id in combos:
            combos[act_id] = ' vs. '.join(sorted([combos[act_id], dom]))
        else:
            combos[act_id] = dom
    return dictinvert(combos)
def arch_assays(data):
    """Group assay ids by their (sorted) clashing-domain combination.

    *data* rows are ``(domain_name, assay_id)`` pairs; an assay seen with
    two domains maps to the combined key ``"A vs. B"``.  Returns the
    inverted mapping {domain_key: [assay_ids]}.
    """
    combos = {}
    for clash in data:
        assay_id = clash[1]
        dom = clash[0]
        if assay_id in combos:
            combos[assay_id] = ' vs. '.join(sorted([combos[assay_id], dom]))
        else:
            combos[assay_id] = dom
    return dictinvert(combos)
def mapped_dom(data):
    """Return {assay_id: domain_name} for rows whose status flag is 0.

    *data* rows are ``(domain_name, assay_id, status)``; a later row with
    the same assay_id overwrites an earlier one, as before.
    """
    return {row[1]: row[0] for row in data if row[2] == 0}
def dictinvert(d):
    """Invert *d*: map each value to the list of keys that carried it.

    Keys appear in each list in the dictionary's iteration order.
    """
    inv = {}
    # Bug fix: dict.iteritems() only exists on Python 2; items() works on
    # both Python 2 and 3.
    for k, v in d.items():
        try:
            inv[v].append(k)
        except KeyError:
            inv[v] = [k]
    return inv
def group_acts(arch_assays, data):
    """Return {assay_id: {'description': ...}} for assays in *arch_assays*.

    *data* rows carry the assay id at index 1 and its description at
    index 2; only the first description seen per assay is kept.
    """
    assay_hier = {}
    for clash in data:
        assay_id = clash[1]
        # Idiom fix: 'x not in d' instead of 'not x in d.keys()'.
        if assay_id in arch_assays and assay_id not in assay_hier:
            assay_hier[assay_id] = {'description': clash[2]}
    return assay_hier
def count_votes(dom_l, arch_acts):
    """Count manual curation votes per (activity, domain) pair.

    Returns {activity_id: {domain_name: vote_count}} covering every
    activity in *arch_acts* and every domain in *dom_l*, counting
    pfam_maps rows with manual_flag = 1.
    """
    # Security/bug fix: the query used to be assembled by string
    # interpolation of the domain names (SQL injection risk), and the
    # vote grid below referenced an undefined name 'domL' (NameError).
    # Domain names are now passed as bound parameters.
    placeholders = ','.join(['%s'] * len(dom_l))
    counts = custom_sql("""
    SELECT activity_id, domain_name
    FROM pfam_maps
    WHERE manual_flag = 1 AND domain_name IN(%s)
    """ % placeholders, list(dom_l))
    doms = {}
    for act in arch_acts:
        doms[act] = {}
        for dom in dom_l:
            doms[act][dom] = 0
    for ent in counts:
        dom = ent[1]
        act = ent[0]
        try:
            doms[act][dom] += 1
        except KeyError:
            # Votes for activities outside arch_acts are ignored.
            pass
    return doms
# Currently not in use.
#def get_arch(data):
# trc = []
# #[(domain_name, start, end), ...]
# for ent in data:
#
# domain_name = ent[6]
# start = ent[4]
# end = ent[5]
# trc.append((domain_name, start, end))
# return trc
#def process_acts(arch_acts, data):
# arch_mols = {}
# for clash in data:
# act_id = clash[0]
# if act_id in arch_acts:
# molregno = clash[2]
# try:
# arch_mols[molregno][act_id]=0
# except KeyError:
# arch_mols[molregno] = {act_id:0}
# return arch_mols
| |
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
# Note: function here are deprecated. We don't call the new versions because
# the API slightly changes (namely partial_dependence does not have the grid
# parameter anymore.)
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from joblib import Parallel, delayed
from ..utils.extmath import cartesian
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..tree._tree import DTYPE
from ..utils import deprecated
from .gradient_boosting import BaseGradientBoosting
__all__ = [
'partial_dependence',
'plot_partial_dependence',
]
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column of ``X``;
    columns with fewer than ``grid_resolution`` distinct values use
    those values directly.

    Parameters
    ----------
    X : ndarray
        The data
    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.
    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.
    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')
    emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # Low-cardinality feature: its unique values are the axis.
            axis = uniques
        else:
            # Otherwise span the empirical percentile range evenly.
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)
    return cartesian(axes), axes
@deprecated("The function ensemble.partial_dependence has been deprecated "
            "in favour of inspection.partial_dependence in 0.21 "
            "and will be removed in 0.23.")
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.
    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.
    Read more in the :ref:`User Guide <partial_dependence>`.
    .. deprecated:: 0.21
       This function was deprecated in version 0.21 in favor of
       :func:`sklearn.inspection.partial_dependence` and will be
       removed in 0.23.
    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
        computed (size should be smaller than 3 for visual renderings).
    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
        must be specified).
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.
    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.
    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.
    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.
    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    # Validate the estimator and that exactly one of `grid`/`X` was given.
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    check_is_fitted(gbrt, 'estimators_')
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')
    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()
    # Every requested feature index must be valid for this model.
    if any([not (0 <= fx < gbrt.n_features_) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features_ - 1))
    if X is not None:
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]
    # estimators_ has shape (n_stages, K): accumulate one PD array per
    # column K by summing tree contributions over all boosting stages.
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            tree.compute_partial_dependence(grid, target_variables, pdp[k])
    # Trees store unscaled values; apply the shrinkage factor once here.
    pdp *= gbrt.learning_rate
    return pdp, axes
@deprecated("The function ensemble.plot_partial_dependence has been "
            "deprecated in favour of "
            "sklearn.inspection.plot_partial_dependence in "
            " 0.21 and will be removed in 0.23.")
def plot_partial_dependence(gbrt, X, features, feature_names=None,
                            label=None, n_cols=3, grid_resolution=100,
                            percentiles=(0.05, 0.95), n_jobs=None,
                            verbose=0, ax=None, line_kw=None,
                            contour_kw=None, **fig_kw):
    """Partial dependence plots for ``features``.
    The ``len(features)`` plots are arranged in a grid with ``n_cols``
    columns. Two-way partial dependence plots are plotted as contour
    plots.
    Read more in the :ref:`User Guide <partial_dependence>`.
    .. deprecated:: 0.21
       This function was deprecated in version 0.21 in favor of
       :func:`sklearn.inspection.plot_partial_dependence` and will be
       removed in 0.23.
    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.
    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained.
    features : seq of ints, strings, or tuples of ints or strings
        If seq[i] is an int or a tuple with one int value, a one-way
        PDP is created; if seq[i] is a tuple of two ints, a two-way
        PDP is created.
        If feature_names is specified and seq[i] is an int, seq[i]
        must be < len(feature_names).
        If seq[i] is a string, feature_names must be specified, and
        seq[i] must be in feature_names.
    feature_names : seq of str
        Name of each feature; feature_names[i] holds
        the name of the feature with index i.
    label : object
        The class label for which the PDPs should be computed.
        Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
    n_cols : int
        The number of columns in the grid plot (default: 3).
    grid_resolution : int, default=100
        The number of equally spaced points on the axes.
    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the PDP axes.
    n_jobs : int or None, optional (default=None)
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int
        Verbose output during PD computations. Defaults to 0.
    ax : Matplotlib axis object, default None
        An axis object onto which the plots will be drawn.
    line_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For one-way partial dependence plots.
    contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
        For two-way partial dependence plots.
    **fig_kw : dict
        Dict with keywords passed to the figure() call.
        Note that all keywords not recognized above will be automatically
        included here.
    Returns
    -------
    fig : figure
        The Matplotlib Figure object.
    axs : seq of Axis objects
        A seq of Axis objects, one for each subplot.
    Examples
    --------
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.ensemble import GradientBoostingRegressor
    >>> X, y = make_friedman1()
    >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
    >>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
    ...
    """
    # matplotlib is imported lazily so the module can load without it.
    import matplotlib.pyplot as plt
    from matplotlib import transforms
    from matplotlib.ticker import MaxNLocator
    from matplotlib.ticker import ScalarFormatter
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    check_is_fitted(gbrt, 'estimators_')
    # set label_idx for multi-class GBRT
    if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
        if label is None:
            raise ValueError('label is not given for multi-class PDP')
        label_idx = np.searchsorted(gbrt.classes_, label)
        if gbrt.classes_[label_idx] != label:
            raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
    else:
        # regression and binary classification
        label_idx = 0
    X = check_array(X, dtype=DTYPE, order='C')
    if gbrt.n_features_ != X.shape[1]:
        raise ValueError('X.shape[1] does not match gbrt.n_features_')
    if line_kw is None:
        line_kw = {'color': 'green'}
    if contour_kw is None:
        contour_kw = {}
    # convert feature_names to list
    if feature_names is None:
        # if not feature_names use fx indices as name
        feature_names = [str(i) for i in range(gbrt.n_features_)]
    elif isinstance(feature_names, np.ndarray):
        feature_names = feature_names.tolist()
    def convert_feature(fx):
        # Map a feature name to its index; ints pass through unchanged.
        if isinstance(fx, str):
            try:
                fx = feature_names.index(fx)
            except ValueError:
                raise ValueError('Feature %s not in feature_names' % fx)
        return fx
    # convert features into a seq of int tuples
    tmp_features = []
    for fxs in features:
        if isinstance(fxs, (numbers.Integral, str)):
            fxs = (fxs,)
        try:
            fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
        except TypeError:
            raise ValueError('features must be either int, str, or tuple '
                             'of int/str')
        if not (1 <= np.size(fxs) <= 2):
            raise ValueError('target features must be either one or two')
        tmp_features.append(fxs)
    features = tmp_features
    # Resolve display names for each (1- or 2-feature) plot.
    names = []
    try:
        for fxs in features:
            l = []
            # explicit loop so "i" is bound for exception below
            for i in fxs:
                l.append(feature_names[i])
            names.append(l)
    except IndexError:
        raise ValueError('All entries of features must be less than '
                         'len(feature_names) = {0}, got {1}.'
                         .format(len(feature_names), i))
    # compute PD functions
    pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(partial_dependence)(gbrt, fxs, X=X,
                                    grid_resolution=grid_resolution,
                                    percentiles=percentiles)
        for fxs in features)
    # get global min and max values of PD grouped by plot type
    pdp_lim = {}
    for pdp, axes in pd_result:
        min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
        n_fx = len(axes)
        old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
        min_pd = min(min_pd, old_min_pd)
        max_pd = max(max_pd, old_max_pd)
        pdp_lim[n_fx] = (min_pd, max_pd)
    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)
    if ax is None:
        fig = plt.figure(**fig_kw)
    else:
        # Reuse the caller's figure, discarding whatever it contained.
        fig = ax.get_figure()
        fig.clear()
    n_cols = min(n_cols, len(features))
    n_rows = int(np.ceil(len(features) / float(n_cols)))
    axs = []
    for i, fx, name, (pdp, axes) in zip(count(), features, names,
                                        pd_result):
        ax = fig.add_subplot(n_rows, n_cols, i + 1)
        if len(axes) == 1:
            ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
        else:
            # make contour plot
            assert len(axes) == 2
            XX, YY = np.meshgrid(axes[0], axes[1])
            Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
            CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
                            colors='k')
            ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
                        vmin=Z_level[0], alpha=0.75, **contour_kw)
            ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
        # plot data deciles + axes labels
        # (short tick marks at the decile positions of the training data)
        deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
        trans = transforms.blended_transform_factory(ax.transData,
                                                     ax.transAxes)
        ylim = ax.get_ylim()
        ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
        ax.set_xlabel(name[0])
        ax.set_ylim(ylim)
        # prevent x-axis ticks from overlapping
        ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
        tick_formatter = ScalarFormatter()
        tick_formatter.set_powerlimits((-3, 4))
        ax.xaxis.set_major_formatter(tick_formatter)
        if len(axes) > 1:
            # two-way PDP - y-axis deciles + labels
            deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
            trans = transforms.blended_transform_factory(ax.transAxes,
                                                         ax.transData)
            xlim = ax.get_xlim()
            ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
            ax.set_ylabel(name[1])
            # hline erases xlim
            ax.set_xlim(xlim)
        else:
            ax.set_ylabel('Partial dependence')
        if len(axes) == 1:
            ax.set_ylim(pdp_lim[1])
        axs.append(ax)
    fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
                        hspace=0.3)
    return fig, axs
| |
"""CherryPy tools. A "tool" is any helper, adapted to CP.
Tools are usually designed to be used in a variety of ways (although some
may only offer one if they choose):
Library calls
All tools are callables that can be used wherever needed.
The arguments are straightforward and should be detailed within the
docstring.
Function decorators
All tools, when called, may be used as decorators which configure
individual CherryPy page handlers (methods on the CherryPy tree).
That is, "@tools.anytool()" should "turn on" the tool via the
decorated function's _cp_config attribute.
CherryPy config
If a tool exposes a "_setup" callable, it will be called
once per Request (if the feature is "turned on" via config).
Tools may be implemented as any object with a namespace. The builtins
are generally either modules or instances of the tools.Tool class.
"""
import sys
import warnings
import cherrypy
def _getargs(func):
"""Return the names of all static arguments to the given function."""
# Use this instead of importing inspect for less mem overhead.
import types
if sys.version_info >= (3, 0):
if isinstance(func, types.MethodType):
func = func.__func__
co = func.__code__
else:
if isinstance(func, types.MethodType):
func = func.im_func
co = func.func_code
return co.co_varnames[:co.co_argcount]
# Message raised by Tool's 'on' property accessors: tools must be enabled
# via config or used as decorators, never by assigning to the 'on' attribute.
_attr_error = (
    "CherryPy Tools cannot be turned on directly. Instead, turn them "
    "on via config, or use them as decorators on your page handlers."
)
class Tool(object):
    """A registered function for use with CherryPy request-processing hooks.

    help(tool.callable) should give you more information about this Tool.
    """
    # Config namespace this tool is registered under (reassigned by
    # Toolbox.__setattr__ when the tool is added to a custom toolbox).
    namespace = "tools"
    def __init__(self, point, callable, name=None, priority=50):
        # point: hook-point name (e.g. 'before_handler') to attach to.
        # callable: the function to run when the hook point fires.
        # name: config key for this tool; when None, Toolbox.__setattr__
        #   fills it in from the attribute name used to register the tool.
        # priority: ordering among hooks at the same point (lower runs first).
        self._point = point
        self.callable = callable
        self._name = name
        self._priority = priority
        self.__doc__ = self.callable.__doc__
        self._setargs()
    def _get_on(self):
        raise AttributeError(_attr_error)
    def _set_on(self, value):
        raise AttributeError(_attr_error)
    # 'on' can be neither read nor assigned directly; both accessors raise
    # to force activation via config or decorator instead.
    on = property(_get_on, _set_on)
    def _setargs(self):
        """Copy func parameter names to obj attributes."""
        try:
            for arg in _getargs(self.callable):
                setattr(self, arg, None)
        except (TypeError, AttributeError):
            # Not a plain function/method: fall back to the __call__ method
            # of a callable object.
            if hasattr(self.callable, "__call__"):
                for arg in _getargs(self.callable.__call__):
                    setattr(self, arg, None)
        # IronPython 1.0 raises NotImplementedError because
        # inspect.getargspec tries to access Python bytecode
        # in co_code attribute.
        except NotImplementedError:
            pass
        # IronPython 1B1 may raise IndexError in some cases,
        # but if we trap it here it doesn't prevent CP from working.
        except IndexError:
            pass
    def _merged_args(self, d=None):
        """Return a dict of configuration entries for this Tool."""
        # Start from the caller-supplied defaults (if any)...
        if d:
            conf = d.copy()
        else:
            conf = {}
        # ...then overlay the per-request config entries for this tool.
        tm = cherrypy.serving.request.toolmaps[self.namespace]
        if self._name in tm:
            conf.update(tm[self._name])
        # 'on' is an activation flag, not an argument for the callable.
        if "on" in conf:
            del conf["on"]
        return conf
    def __call__(self, *args, **kwargs):
        """Compile-time decorator (turn on the tool in config).

        For example::

            @tools.proxy()
            def whats_my_base(self):
                return cherrypy.request.base
            whats_my_base.exposed = True
        """
        if args:
            raise TypeError("The %r Tool does not accept positional "
                            "arguments; you must use keyword arguments."
                            % self._name)
        def tool_decorator(f):
            # Record the activation (plus any kwargs) in the handler's
            # _cp_config so the request machinery turns the tool on.
            if not hasattr(f, "_cp_config"):
                f._cp_config = {}
            subspace = self.namespace + "." + self._name + "."
            f._cp_config[subspace + "on"] = True
            for k, v in kwargs.items():
                f._cp_config[subspace + k] = v
            return f
        return tool_decorator
    def _setup(self):
        """Hook this tool into cherrypy.request.

        The standard CherryPy request object will automatically call this
        method when the tool is "turned on" in config.
        """
        conf = self._merged_args()
        p = conf.pop("priority", None)
        if p is None:
            # Fall back to the callable's own priority, then the default.
            p = getattr(self.callable, "priority", self._priority)
        cherrypy.serving.request.hooks.attach(self._point, self.callable,
                                              priority=p, **conf)
class HandlerTool(Tool):
    """Tool which is called 'before main', that may skip normal handlers.

    If the tool successfully handles the request (by setting response.body),
    it should return True; CherryPy then skips any 'normal' page handler.
    If the tool did not handle the request, it should return False to tell
    CherryPy to continue on and call the normal page handler. When the tool
    is declared AS a page handler (see the 'handler' method), returning
    False raises NotFound instead.
    """

    def __init__(self, callable, name=None):
        Tool.__init__(self, 'before_handler', callable, name)

    def handler(self, *args, **kwargs):
        """Use this tool as a CherryPy page handler.

        For example::

            class Root:
                nav = tools.staticdir.handler(section="/nav", dir="nav",
                                              root=absDir)
        """
        def handle_func(*a, **kw):
            # A falsy result means the tool declined the request; since we
            # ARE the page handler here, that becomes a 404.
            if not self.callable(*args, **self._merged_args(kwargs)):
                raise cherrypy.NotFound()
            return cherrypy.serving.response.body
        handle_func.exposed = True
        return handle_func

    def _wrapper(self, **kwargs):
        # A truthy result cancels the normal page handler for this request.
        if self.callable(**kwargs):
            cherrypy.serving.request.handler = None

    def _setup(self):
        """Hook this tool into cherrypy.request.

        The standard CherryPy request object will automatically call this
        method when the tool is "turned on" in config. Unlike the base
        Tool, the hook runs self._wrapper so the tool's return value can
        skip the normal handler.
        """
        conf = self._merged_args()
        priority = conf.pop("priority", None)
        if priority is None:
            priority = getattr(self.callable, "priority", self._priority)
        cherrypy.serving.request.hooks.attach(
            self._point, self._wrapper, priority=priority, **conf)
class HandlerWrapperTool(Tool):
    """Tool which wraps request.handler in a provided wrapper function.

    The 'newhandler' arg must be a handler wrapper function that takes a
    'next_handler' argument, plus ``*args`` and ``**kwargs``. Like all page
    handler functions, it must return an iterable for use as
    cherrypy.response.body.

    For example, to allow your 'inner' page handlers to return dicts
    which then get interpolated into a template::

        def interpolator(next_handler, *args, **kwargs):
            filename = cherrypy.request.config.get('template')
            cherrypy.response.template = env.get_template(filename)
            response_dict = next_handler(*args, **kwargs)
            return cherrypy.response.template.render(**response_dict)
        cherrypy.tools.jinja = HandlerWrapperTool(interpolator)
    """

    def __init__(self, newhandler, point='before_handler', name=None,
                 priority=50):
        # Tool.__init__ is deliberately not invoked: 'callable' is supplied
        # as a bound method on this subclass rather than an attribute.
        self.newhandler = newhandler
        self._point = point
        self._name = name
        self._priority = priority

    def callable(self, debug=False):
        # Capture the current page handler and swap in a closure that
        # routes the call through self.newhandler.
        inner = cherrypy.serving.request.handler

        def wrap(*args, **kwargs):
            return self.newhandler(inner, *args, **kwargs)

        cherrypy.serving.request.handler = wrap
class ErrorTool(Tool):
    """Tool which is used to replace the default request.error_response."""

    def __init__(self, callable, name=None):
        # No hook point: this tool is installed as request.error_response
        # rather than attached to the hook pipeline.
        Tool.__init__(self, None, callable, name)

    def _wrapper(self):
        conf = self._merged_args()
        self.callable(**conf)

    def _setup(self):
        """Hook this tool into cherrypy.request.

        The standard CherryPy request object will automatically call this
        method when the tool is "turned on" in config; it installs
        _wrapper as the error-response callback.
        """
        cherrypy.serving.request.error_response = self._wrapper
# Builtin tools #
from cherrypy.lib import cptools, encoding, auth, static, jsontools
from cherrypy.lib import sessions as _sessions, xmlrpcutil as _xmlrpc
from cherrypy.lib import caching as _caching
from cherrypy.lib import auth_basic, auth_digest
class SessionTool(Tool):
    """Session Tool for CherryPy.

    sessions.locking
        When 'implicit' (the default), the session will be locked for you,
        just before running the page handler.

        When 'early', the session will be locked before reading the request
        body. This is off by default for safety reasons; for example,
        a large upload would block the session, denying an AJAX
        progress meter
        (`issue <https://bitbucket.org/cherrypy/cherrypy/issue/630>`_).

        When 'explicit' (or any other value), you need to call
        cherrypy.session.acquire_lock() yourself before using
        session data.
    """
    def __init__(self):
        # _sessions.init must be bound after headers are read
        Tool.__init__(self, 'before_request_body', _sessions.init)
    def _lock_session(self):
        # Acquire the per-session lock on the currently-serving session.
        cherrypy.serving.session.acquire_lock()
    def _setup(self):
        """Hook this tool into cherrypy.request.

        The standard CherryPy request object will automatically call this
        method when the tool is "turned on" in config.
        """
        hooks = cherrypy.serving.request.hooks
        conf = self._merged_args()
        p = conf.pop("priority", None)
        if p is None:
            p = getattr(self.callable, "priority", self._priority)
        # NOTE(review): 'locking' is still in conf at this point, so it is
        # forwarded to _sessions.init via **conf before being popped below
        # -- confirm _sessions.init accepts/ignores a 'locking' keyword.
        hooks.attach(self._point, self.callable, priority=p, **conf)
        locking = conf.pop('locking', 'implicit')
        if locking == 'implicit':
            # Lock just before the page handler runs.
            hooks.attach('before_handler', self._lock_session)
        elif locking == 'early':
            # Lock before the request body (but after _sessions.init runs!)
            hooks.attach('before_request_body', self._lock_session,
                         priority=60)
        else:
            # Don't lock; the handler must call acquire_lock() itself.
            pass
        # Always persist and release the session at the end of the request.
        hooks.attach('before_finalize', _sessions.save)
        hooks.attach('on_end_request', _sessions.close)
    def regenerate(self):
        """Drop the current session and make a new one (with a new id)."""
        sess = cherrypy.serving.session
        sess.regenerate()
        # Grab cookie-relevant tool args
        conf = dict([(k, v) for k, v in self._merged_args().items()
                     if k in ('path', 'path_header', 'name', 'timeout',
                              'domain', 'secure')])
        _sessions.set_response_cookie(**conf)
class XMLRPCController(object):
    """A Controller (page handler collection) for XML-RPC.

    To use it, have your controllers subclass this base class (it will
    turn on the tool for you).

    You can also supply the following optional config entries::

        tools.xmlrpc.encoding: 'utf-8'
        tools.xmlrpc.allow_none: 0

    XML-RPC is a rather discontinuous layer over HTTP; dispatching to the
    appropriate handler must first be performed according to the URL, and
    then a second dispatch step must take place according to the RPC method
    specified in the request body. It also allows a superfluous "/RPC2"
    prefix in the URL, supplies its own handler args in the body, and
    requires a 200 OK "Fault" response instead of 404 when the desired
    method is not found.

    Therefore, XML-RPC cannot be implemented for CherryPy via a Tool alone.
    This Controller acts as the dispatch target for the first half (based
    on the URL); it then reads the RPC method from the request body and
    does its own second dispatch step based on that method. It also reads
    body params, and returns a Fault on error.

    The XMLRPCDispatcher strips any /RPC2 prefix; if you aren't using /RPC2
    in your URL's, you can safely skip turning on the XMLRPCDispatcher.
    Otherwise, you must declare it in config::

        request.dispatch: cherrypy.dispatch.XMLRPCDispatcher()
    """
    # Note we're hard-coding this into the 'tools' namespace. We could do
    # a huge amount of work to make it relocatable, but the only reason why
    # would be if someone actually disabled the default_toolbox. Meh.
    _cp_config = {'tools.xmlrpc.on': True}
    def default(self, *vpath, **params):
        # Parse the XML-RPC body into positional params and a method name.
        rpcparams, rpcmethod = _xmlrpc.process_body()
        # Walk dotted method names ("a.b.c") down the attribute tree.
        subhandler = self
        for attr in str(rpcmethod).split('.'):
            subhandler = getattr(subhandler, attr, None)
        if subhandler and getattr(subhandler, "exposed", False):
            body = subhandler(*(vpath + rpcparams), **params)
        else:
            # https://bitbucket.org/cherrypy/cherrypy/issue/533
            # if a method is not found, an xmlrpclib.Fault should be returned
            # raising an exception here will do that; see
            # cherrypy.lib.xmlrpcutil.on_error
            # (note: 'attr' here is the last dotted segment examined, not
            # the full rpcmethod string)
            raise Exception('method "%s" is not supported' % attr)
        conf = cherrypy.serving.request.toolmaps['tools'].get("xmlrpc", {})
        _xmlrpc.respond(body,
                        conf.get('encoding', 'utf-8'),
                        conf.get('allow_none', 0))
        return cherrypy.serving.response.body
    default.exposed = True
class SessionAuthTool(HandlerTool):
    """HandlerTool wrapper for cptools.session_auth.

    Exposes every public attribute name of cptools.SessionAuth as a
    settable tool argument.
    """

    def _setargs(self):
        # Mirror SessionAuth's public attribute names onto this tool so
        # they can be supplied via config.
        for attr_name in dir(cptools.SessionAuth):
            if not attr_name.startswith("__"):
                setattr(self, attr_name, None)
class CachingTool(Tool):
    """Caching Tool for CherryPy."""
    def _wrapper(self, **kwargs):
        request = cherrypy.serving.request
        # A truthy result from _caching.get means a cached response was
        # served; cancel the normal page handler.
        if _caching.get(**kwargs):
            request.handler = None
        else:
            if request.cacheable:
                # Note the devious technique here of adding hooks on the fly
                request.hooks.attach('before_finalize', _caching.tee_output,
                                     priority=90)
    # Run early among 'before_handler' hooks so caching can short-circuit
    # the rest of the pipeline.
    _wrapper.priority = 20
    def _setup(self):
        """Hook caching into cherrypy.request."""
        conf = self._merged_args()
        p = conf.pop("priority", None)
        # NOTE(review): unlike Tool._setup, there is no fallback to
        # self._priority when p is None -- confirm hooks.attach handles
        # priority=None (presumably via _wrapper.priority above).
        cherrypy.serving.request.hooks.attach('before_handler', self._wrapper,
                                              priority=p, **conf)
class Toolbox(object):
    """A collection of Tools.

    This object also functions as a config namespace handler for itself
    (__enter__/__exit__ are invoked by the config machinery). Custom
    toolboxes should be added to each Application's toolboxes dict.
    """

    def __init__(self, namespace):
        self.namespace = namespace

    def __setattr__(self, name, value):
        # If the Tool._name is None, supply it from the attribute name.
        if isinstance(value, Tool):
            if value._name is None:
                value._name = name
            value.namespace = self.namespace
        object.__setattr__(self, name, value)

    def __enter__(self):
        """Populate request.toolmaps from tools specified in config."""
        # 'toolmap' (renamed from 'map' to avoid shadowing the builtin)
        # holds {toolname: {arg: value}} for this namespace.
        cherrypy.serving.request.toolmaps[self.namespace] = toolmap = {}

        def populate(k, v):
            toolname, arg = k.split(".", 1)
            bucket = toolmap.setdefault(toolname, {})
            bucket[arg] = v
        return populate

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Run tool._setup() for each tool in our toolmap."""
        toolmap = cherrypy.serving.request.toolmaps.get(self.namespace)
        if toolmap:
            for name, settings in toolmap.items():
                # Only tools explicitly turned on in config get hooked up.
                if settings.get("on", False):
                    tool = getattr(self, name)
                    tool._setup()
class DeprecatedTool(Tool):
    """Stub for a Tool that has been removed from CherryPy.

    Calling it or turning it on merely emits self.warnmsg as a warning.
    """

    _name = None
    warnmsg = "This Tool is deprecated."

    def __init__(self, point, warnmsg=None):
        self.point = point
        if warnmsg is not None:
            self.warnmsg = warnmsg

    def __call__(self, *args, **kwargs):
        warnings.warn(self.warnmsg)

        def tool_decorator(f):
            # No-op decorator: the underlying tool no longer exists.
            return f

        return tool_decorator

    def _setup(self):
        warnings.warn(self.warnmsg)
# The default toolbox, registered under the "tools" config namespace.
default_toolbox = _d = Toolbox("tools")
_d.session_auth = SessionAuthTool(cptools.session_auth)
_d.allow = Tool('on_start_resource', cptools.allow)
_d.proxy = Tool('before_request_body', cptools.proxy, priority=30)
_d.response_headers = Tool('on_start_resource', cptools.response_headers)
# Error/diagnostic logging hooks.
_d.log_tracebacks = Tool('before_error_response', cptools.log_traceback)
_d.log_headers = Tool('before_error_response', cptools.log_request_headers)
_d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100)
_d.err_redirect = ErrorTool(cptools.redirect)
_d.etags = Tool('before_finalize', cptools.validate_etags, priority=75)
_d.decode = Tool('before_request_body', encoding.decode)
# the order of encoding, gzip, caching is important
_d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70)
_d.gzip = Tool('before_finalize', encoding.gzip, priority=80)
# Static-content helpers may fully handle a request (HandlerTool semantics).
_d.staticdir = HandlerTool(static.staticdir)
_d.staticfile = HandlerTool(static.staticfile)
_d.sessions = SessionTool()
_d.xmlrpc = ErrorTool(_xmlrpc.on_error)
_d.caching = CachingTool('before_handler', _caching.get, 'caching')
_d.expires = Tool('before_finalize', _caching.expires)
# Removed tools kept as stubs that warn on use.
_d.tidy = DeprecatedTool(
    'before_finalize',
    "The tidy tool has been removed from the standard distribution of "
    "CherryPy. The most recent version can be found at "
    "http://tools.cherrypy.org/browser.")
_d.nsgmls = DeprecatedTool(
    'before_finalize',
    "The nsgmls tool has been removed from the standard distribution of "
    "CherryPy. The most recent version can be found at "
    "http://tools.cherrypy.org/browser.")
_d.ignore_headers = Tool('before_request_body', cptools.ignore_headers)
_d.referer = Tool('before_request_body', cptools.referer)
_d.basic_auth = Tool('on_start_resource', auth.basic_auth)
_d.digest_auth = Tool('on_start_resource', auth.digest_auth)
_d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60)
_d.flatten = Tool('before_finalize', cptools.flatten)
_d.accept = Tool('on_start_resource', cptools.accept)
_d.redirect = Tool('on_start_resource', cptools.redirect)
_d.autovary = Tool('on_start_resource', cptools.autovary, priority=0)
_d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
# auth_basic/auth_digest run very early (priority=1) among before_handler.
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
# Drop the temporary aliases to keep the module namespace clean.
del _d, cptools, encoding, auth, static
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
# Generic type var for the deserialized value handed to the 'cls' callback.
T = TypeVar('T')
# Signature of the optional 'cls' response callback: receives the pipeline
# response, the deserialized body, and a dict of extra data (always {} in
# this module).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualWansOperations(object):
"""VirtualWansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # client: pipeline client used to issue the HTTP requests.
        # config: service-client configuration (supplies subscription_id and
        #   polling_interval used by the operations below).
        # serializer / deserializer: object-model (de)serializers.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def get(
self,
resource_group_name, # type: str
virtual_wan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualWAN"
"""Retrieves the details of a VirtualWAN.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN being retrieved.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualWAN, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.VirtualWAN
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualWAN"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualWAN', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_wan_name, # type: str
wan_parameters, # type: "_models.VirtualWAN"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualWAN"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualWAN"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(wan_parameters, 'VirtualWAN')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualWAN', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualWAN', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        virtual_wan_name,  # type: str
        wan_parameters,  # type: "_models.VirtualWAN"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.VirtualWAN"]
        """Creates a VirtualWAN resource if it doesn't exist else updates the existing VirtualWAN.

        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN being created or updated.
        :type virtual_wan_name: str
        :param wan_parameters: Parameters supplied to create or update VirtualWAN.
        :type wan_parameters: ~azure.mgmt.network.v2020_11_01.models.VirtualWAN
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either VirtualWAN or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.VirtualWAN]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualWAN"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # First call: issue the initial PUT. The cls lambda returns the
            # raw pipeline response so the poller can read status/headers.
            # (raw_result is only defined -- and only used -- on this path.)
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                wan_parameters=wan_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; must not leak into the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the completed operation.
            deserialized = self._deserialize('VirtualWAN', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Forwarded to ARMPolling for formatting URL templates while polling.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        # Final state is taken via the 'azure-async-operation' mechanism.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-started operation from its saved token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
def update_tags(
self,
resource_group_name, # type: str
virtual_wan_name, # type: str
wan_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualWAN"
"""Updates a VirtualWAN tags.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWAN being updated.
:type virtual_wan_name: str
:param wan_parameters: Parameters supplied to Update VirtualWAN tags.
:type wan_parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualWAN, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.VirtualWAN
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualWAN"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(wan_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualWAN', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_wan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        virtual_wan_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a VirtualWAN.

        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :param virtual_wan_name: The name of the VirtualWAN being deleted.
        :type virtual_wan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # First call: issue the initial DELETE. The cls lambda returns
            # the raw pipeline response so the poller can read its status.
            # (raw_result is only defined -- and only used -- on this path.)
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_wan_name=virtual_wan_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial request; must not leak into the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; just invoke cls if given.
            if cls:
                return cls(pipeline_response, None, {})
        # Forwarded to ARMPolling for formatting URL templates while polling.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'VirtualWANName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
        }
        # Final state for delete is taken via the 'location' mechanism.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-started operation from its saved token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{VirtualWANName}'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVirtualWANsResult"]
        """Lists all the VirtualWANs in a resource group.

        :param resource_group_name: The resource group name of the VirtualWan.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualWANsResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVirtualWANsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualWANsResult"]
        # Map selected HTTP status codes to azure-core exception types; callers
        # may extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the HTTP request for either the first page (service URL
            # from metadata) or a follow-up page (opaque nextLink).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service-provided nextLink already embeds the query string,
                # so no extra parameters are attached here.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (link-to-next-page, page items).
            deserialized = self._deserialize('ListVirtualWANsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Allow the caller-supplied 'cls' hook to transform each page.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; remaining kwargs are threaded into the
            # pipeline run (e.g. per-request policies).
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans'}  # type: ignore
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ListVirtualWANsResult"]
        """Lists all the VirtualWANs in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ListVirtualWANsResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.ListVirtualWANsResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVirtualWANsResult"]
        # Map selected HTTP status codes to azure-core exception types; callers
        # may extend or override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the HTTP request for either the first page (service URL
            # from metadata) or a follow-up page (opaque nextLink).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service-provided nextLink already embeds the query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (link-to-next-page, page items).
            deserialized = self._deserialize('ListVirtualWANsResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Allow the caller-supplied 'cls' hook to transform each page.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch a single page; remaining kwargs are threaded into the
            # pipeline run (e.g. per-request policies).
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualWans'}  # type: ignore
| |
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a utility for converting JavaScript source code into C-style
# char arrays. It is used for embedded JavaScript code in the V8
# library.
import os, re, sys, string
import optparse
import jsmin
import bz2
import textwrap
class Error(Exception):
  """Raised for any problem encountered while processing natives sources."""

  def __init__(self, msg):
    super(Error, self).__init__(msg)
def ToCArray(byte_sequence):
  """Render a byte sequence as a comma-separated decimal C array literal.

  Args:
    byte_sequence: An iterable of single characters (a str in practice).

  Returns:
    The decimal byte values joined by ", ", wrapped to 80 columns.
  """
  # Fix: the original named the loop variable 'chr', shadowing the builtin.
  values = [str(ord(ch)) for ch in byte_sequence]
  return textwrap.fill(", ".join(values), 80)
def RemoveCommentsAndTrailingWhitespace(lines):
  """Strip //-comments, /* */ block comments and trailing whitespace."""
  without_line_comments = re.sub(r'//.*\n', '\n', lines)
  without_block_comments = re.sub(
      re.compile(r'/\*.*?\*/', re.DOTALL), '', without_line_comments)
  return re.sub(r'\s+\n+', '\n', without_block_comments)
def ReadFile(filename):
  """Return the entire contents of *filename* as text.

  Uses a context manager so the handle is closed even if read() raises,
  replacing the original manual try/finally/close sequence.
  """
  with open(filename, "rt") as f:
    return f.read()
EVAL_PATTERN = re.compile(r'\beval\s*\(')
WITH_PATTERN = re.compile(r'\bwith\s*\(')


def Validate(lines):
  """Reject natives sources that use eval() or with(); pass lines through.

  The simplified context setup used for the natives files cannot support
  either construct, so their presence is a hard error.
  """
  checks = (
      (EVAL_PATTERN, "Eval disallowed in natives."),
      (WITH_PATTERN, "With statements disallowed in natives."),
  )
  for pattern, message in checks:
    if pattern.search(lines):
      raise Error(message)
  # Pass lines through unchanged.
  return lines
def ExpandConstants(lines, constants):
  """Substitute every (compiled-pattern, value) constant pair into the text."""
  result = lines
  for pattern, value in constants:
    result = pattern.sub(str(value), result)
  return result
def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
  """Expand every call of one macro in *lines*, starting at *pos*.

  Args:
    lines: Source text to rewrite.
    pos: Index at which to begin searching for call sites.
    name_pattern: Compiled pattern matching 'NAME(' at a call site.
    macro: TextMacro or PythonMacro providing .args and .expand().
    expander: Function applied to each argument's text before substitution
        (used by callers to expand macros recursively inside arguments).

  Returns:
    The rewritten text.
  """
  pattern_match = name_pattern.search(lines, pos)
  while pattern_match is not None:
    # Scan over the arguments, tracking bracket nesting depth in 'height'.
    height = 1
    start = pattern_match.start()
    end = pattern_match.end()
    assert lines[end - 1] == '('
    last_match = end
    arg_index = [0]  # Wrap state into array, to work around Python "scoping"
    mapping = { }
    def add_arg(str):
      # Remember to expand recursively in the arguments
      replacement = expander(str.strip())
      mapping[macro.args[arg_index[0]]] = replacement
      arg_index[0] += 1
    while end < len(lines) and height > 0:
      # We don't count commas at higher nesting levels.
      if lines[end] == ',' and height == 1:
        add_arg(lines[last_match:end])
        last_match = end + 1
      elif lines[end] in ['(', '{', '[']:
        height = height + 1
      elif lines[end] in [')', '}', ']']:
        height = height - 1
      end = end + 1
    # Remember to add the last match.
    add_arg(lines[last_match:end-1])
    result = macro.expand(mapping)
    # Replace the occurrence of the macro with the expansion.
    lines = lines[:start] + result + lines[end:]
    # Resume searching just after the inserted expansion text.
    pattern_match = name_pattern.search(lines, start + len(result))
  return lines
def ExpandMacros(lines, macros):
  """Expand each macro in *macros* throughout *lines*.

  Macros may depend on previously declared macros (hence the reversed
  iteration order), but self-dependencies and recursion are not allowed.
  """
  expanded = lines
  for name_pattern, macro in reversed(macros):
    def recursive_expander(s):
      return ExpandMacros(s, macros)
    expanded = ExpandMacroDefinition(
        expanded, 0, name_pattern, macro, recursive_expander)
  return expanded
class TextMacro:
  """A macro whose body is plain text with argument-name substitution."""

  def __init__(self, args, body):
    self.args = args
    self.body = body

  def expand(self, mapping):
    """Return the body with every argument name replaced via *mapping*."""
    expanded = self.body
    for name, replacement in mapping.items():
      expanded = expanded.replace(name, replacement)
    return expanded
class PythonMacro:
  """A macro implemented by a Python callable."""

  def __init__(self, args, fun):
    self.args = args
    self.fun = fun

  def expand(self, mapping):
    """Call the underlying function with arguments looked up from *mapping*."""
    actuals = [mapping[formal] for formal in self.args]
    return str(self.fun(*actuals))
CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')


def ReadMacros(lines):
  """Parse const, macro and python-macro declarations from *lines*.

  '#' starts a comment; blank lines are ignored.

  Returns:
    (constants, macros): constants is a list of (compiled-name-pattern, value)
    pairs; macros is a list of (compiled-call-pattern, macro-object) pairs.

  Raises:
    Error: on any non-empty line that matches none of the three forms.
  """
  constants = []
  macros = []
  for line in lines.split('\n'):
    hash_pos = line.find('#')
    if hash_pos != -1:
      line = line[:hash_pos]
    line = line.strip()
    # Bug fix: the original tested 'len(line) is 0', which compares ints by
    # identity and only works via CPython's small-int cache. Use truthiness.
    if not line:
      continue
    const_match = CONST_PATTERN.match(line)
    if const_match:
      name = const_match.group(1)
      value = const_match.group(2).strip()
      constants.append((re.compile("\\b%s\\b" % name), value))
      continue
    macro_match = MACRO_PATTERN.match(line)
    if macro_match:
      name = macro_match.group(1)
      args = [arg.strip() for arg in macro_match.group(2).split(',')]
      body = macro_match.group(3).strip()
      macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
      continue
    python_match = PYTHON_MACRO_PATTERN.match(line)
    if python_match:
      name = python_match.group(1)
      args = [arg.strip() for arg in python_match.group(2).split(',')]
      body = python_match.group(3).strip()
      # SECURITY NOTE: eval() of the macro body is kept as-is; the input is
      # trusted build-time source shipped with the tree, not external data.
      fun = eval("lambda " + ",".join(args) + ': ' + body)
      macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun)))
      continue
    raise Error("Illegal line: " + line)
  return (constants, macros)
INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')


def ExpandInlineMacros(lines):
  """Expand 'macro NAME(args) ... endmacro' definitions declared inline.

  Each definition is cut out of the text, and every later call site is
  replaced with the macro body (arguments are substituted verbatim, without
  recursive expansion).
  """
  pos = 0
  while True:
    macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
    if macro_match is None:
      # no more macros
      return lines
    name = macro_match.group(1)
    args = [match.strip() for match in macro_match.group(2).split(',')]
    end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
    if end_macro_match is None:
      raise Error("Macro %s unclosed" % name)
    body = lines[macro_match.end():end_macro_match.start()]
    # remove macro definition
    lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
    name_pattern = re.compile("\\b%s\\(" % name)
    macro = TextMacro(args, body)
    # advance position to where the macro definition was
    pos = macro_match.start()
    def non_expander(s):
      # Arguments of inline macros are substituted without recursion.
      return s
    lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
INLINE_CONSTANT_PATTERN = re.compile(r'const\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+)[;\n]')


def ExpandInlineConstants(lines):
  """Strip inline 'const NAME = value' declarations, substituting the value
  for every subsequent use of NAME."""
  pos = 0
  while True:
    const_match = INLINE_CONSTANT_PATTERN.search(lines, pos)
    if const_match is None:
      # No declarations remain.
      return lines
    name_pattern = re.compile("\\b%s\\b" % const_match.group(1))
    replacement = const_match.group(2)
    # Drop the declaration itself and rewrite only the text following it.
    head = lines[:const_match.start()]
    tail = re.sub(name_pattern, replacement, lines[const_match.end():])
    lines = head + tail
    # Continue scanning from where the declaration used to be.
    pos = const_match.start()
HEADER_TEMPLATE = """\
// Copyright 2011 Google Inc. All Rights Reserved.
// This file was generated from .js source files by GYP. If you
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
#include "src/v8.h"
#include "src/natives.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
%(sources_declaration)s\
template <>
int NativesCollection<%(type)s>::GetBuiltinsCount() {
return %(builtin_count)i;
}
template <>
int NativesCollection<%(type)s>::GetDebuggerCount() {
return %(debugger_count)i;
}
template <>
int NativesCollection<%(type)s>::GetIndex(const char* name) {
%(get_index_cases)s\
return -1;
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptSource(int index) {
%(get_script_source_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptName(int index) {
%(get_script_name_cases)s\
return Vector<const char>("", 0);
}
template <>
Vector<const char> NativesCollection<%(type)s>::GetScriptsSource() {
return Vector<const char>(sources, %(total_length)i);
}
} // internal
} // v8
"""
SOURCES_DECLARATION = """\
static const char sources[] = { %s };
"""
GET_INDEX_CASE = """\
if (strcmp(name, "%(id)s") == 0) return %(i)i;
"""
GET_SCRIPT_SOURCE_CASE = """\
if (index == %(i)i) return Vector<const char>(sources + %(offset)i, %(source_length)i);
"""
GET_SCRIPT_NAME_CASE = """\
if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
"""
def BuildFilterChain(macro_filename):
  """Build the chain of filter functions to be applied to the sources.

  Args:
    macro_filename: Name of the macro file, if any.

  Returns:
    A function (string -> string) that reads a source file and processes it.
  """
  # Fix: 'reduce' is no longer a builtin on Python 3. The explicit import
  # works on both Python 2.6+ and Python 3.
  from functools import reduce

  filter_chain = [ReadFile]

  if macro_filename:
    (consts, macros) = ReadMacros(ReadFile(macro_filename))
    filter_chain.append(lambda l: ExpandConstants(l, consts))
    filter_chain.append(lambda l: ExpandMacros(l, macros))

  filter_chain.extend([
    RemoveCommentsAndTrailingWhitespace,
    ExpandInlineMacros,
    ExpandInlineConstants,
    Validate,
    jsmin.JavaScriptMinifier().JSMinify
  ])

  def chain(f1, f2):
    # Compose two filters left-to-right.
    return lambda x: f2(f1(x))

  return reduce(chain, filter_chain)
class Sources:
  """Accumulates prepared sources as parallel lists: module names, module
  bodies, and whether each module is a debugger script."""

  def __init__(self):
    self.names = []
    self.modules = []
    self.is_debugger_id = []
def IsDebuggerFile(filename):
  """True for debugger natives, which are named '*-debugger.js'."""
  suffix = "-debugger.js"
  return filename.endswith(suffix)
def IsMacroFile(filename):
  """True when *filename* is the macro definitions file."""
  suffix = "macros.py"
  return filename.endswith(suffix)
def PrepareSources(source_files):
  """Read, prepare and assemble the list of source files.

  Args:
    source_files: List of JavaScript-ish source files. A file named macros.py
        will be treated as a list of macros.

  Returns:
    An instance of Sources.

  Raises:
    Error: if processing any individual source file fails.
  """
  macro_file = None
  # Fix: wrap filter() in list() so len()/indexing also work on Python 3,
  # where filter() returns a lazy iterator.
  macro_files = list(filter(IsMacroFile, source_files))
  assert len(macro_files) in [0, 1]
  if macro_files:
    source_files.remove(macro_files[0])
    macro_file = macro_files[0]

  filters = BuildFilterChain(macro_file)

  # Sort 'debugger' sources first. Fix: the original passed a Python 2-only
  # cmp function positionally; a key function (False sorts before True) is
  # equivalent, stable, and works on both major versions.
  source_files = sorted(source_files, key=lambda f: not IsDebuggerFile(f))

  result = Sources()
  for source in source_files:
    try:
      lines = filters(source)
    except Error as e:
      # Prefix the failing file name for a useful build error.
      raise Error("In file %s:\n%s" % (source, str(e)))

    result.modules.append(lines)
    is_debugger = IsDebuggerFile(source)
    result.is_debugger_id.append(is_debugger)
    # Strip '.js'; debugger files additionally drop the '-debugger' suffix.
    name = os.path.basename(source)[:-3]
    result.names.append(name if not is_debugger else name[:-9])
  return result
def BuildMetadata(sources, source_bytes, native_type):
  """Build the meta data required to generate a libraries file.

  Args:
    sources: A Sources instance with the prepared sources.
    source_bytes: A list of source bytes.
        (The concatenation of all sources; might be compressed.)
    native_type: The parameter for the NativesCollection template.

  Returns:
    A dictionary for use with HEADER_TEMPLATE.
  """
  total_length = len(source_bytes)
  raw_sources = "".join(sources.modules)

  # The sources are expected to be ASCII-only. Fix: the original used
  # 'assert not filter(...)', which is always false on Python 3 because a
  # filter object is truthy regardless of content.
  assert all(ord(value) < 128 for value in raw_sources)

  # Loop over modules and build up indices into the source blob:
  get_index_cases = []
  get_script_name_cases = []
  get_script_source_cases = []
  offset = 0
  for i in range(len(sources.modules)):  # range() works on Python 2 and 3
    native_name = "native %s.js" % sources.names[i]
    d = {
        "i": i,
        "id": sources.names[i],
        "name": native_name,
        "length": len(native_name),
        "offset": offset,
        "source_length": len(sources.modules[i]),
    }
    get_index_cases.append(GET_INDEX_CASE % d)
    get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d)
    get_script_source_cases.append(GET_SCRIPT_SOURCE_CASE % d)
    offset += len(sources.modules[i])
  assert offset == len(raw_sources)

  metadata = {
      "builtin_count": len(sources.modules),
      "debugger_count": sum(sources.is_debugger_id),
      "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
      "total_length": total_length,
      "get_index_cases": "".join(get_index_cases),
      "get_script_source_cases": "".join(get_script_source_cases),
      "get_script_name_cases": "".join(get_script_name_cases),
      "type": native_type,
  }
  return metadata
def PutInt(blob_file, value):
  """Write *value* (0 <= value < 2**28) as a little-endian variable-length int.

  The low two bits of the first byte encode the total byte count minus one;
  the remaining bits carry the value itself.

  Args:
    blob_file: Binary file-like object to write to.
    value: Non-negative integer below 2**28.
  """
  assert value >= 0 and value < (1 << 28)
  if value < 1 << 6:
    size = 1
  elif value < 1 << 14:
    size = 2
  elif value < 1 << 22:
    size = 3
  else:
    size = 4
  value_with_length = (value << 2) | (size - 1)

  byte_sequence = bytearray()
  for _ in range(size):  # Fix: range() instead of Python 2-only xrange()
    byte_sequence.append(value_with_length & 255)
    value_with_length >>= 8
  blob_file.write(byte_sequence)
def PutStr(blob_file, value):
  """Write *value* to the blob file, prefixed with its encoded length."""
  PutInt(blob_file, len(value))
  blob_file.write(value)
def WriteStartupBlob(sources, startup_blob):
  """Write a startup blob, as expected by V8 Initialize ...
  TODO(vogelheim): Add proper method name.

  Args:
    sources: A Sources instance with the prepared sources.
    startup_blob: Name of the file to write the blob to.
  """
  debug_sources = sum(sources.is_debugger_id)
  # Fix: 'with' guarantees the handle is closed even if a write fails, and
  # range() replaces the Python 2-only xrange().
  with open(startup_blob, "wb") as output:
    # Debugger sources come first, each as a (name, module) pair.
    PutInt(output, debug_sources)
    for i in range(debug_sources):
      PutStr(output, sources.names[i])
      PutStr(output, sources.modules[i])

    # Then the remaining (non-debugger) sources.
    PutInt(output, len(sources.names) - debug_sources)
    for i in range(debug_sources, len(sources.names)):
      PutStr(output, sources.names[i])
      PutStr(output, sources.modules[i])
def JS2C(source, target, native_type, raw_file, startup_blob):
  """Convert JS sources into a C++ natives file plus optional side outputs.

  Args:
    source: List of input JS files (and at most one macros.py).
    target: Path of the C++ file to generate.
    native_type: Template parameter for NativesCollection.
    raw_file: Optional path for the raw concatenated sources.
    startup_blob: Optional path for a startup blob.
  """
  sources = PrepareSources(source)
  sources_bytes = "".join(sources.modules)
  metadata = BuildMetadata(sources, sources_bytes, native_type)

  # Optionally emit raw file. Fix: use 'with' so handles are closed even if
  # a write raises.
  if raw_file:
    with open(raw_file, "w") as output:
      output.write(sources_bytes)

  if startup_blob:
    WriteStartupBlob(sources, startup_blob)

  # Emit resulting source file.
  with open(target, "w") as output:
    output.write(HEADER_TEMPLATE % metadata)
def main():
  """Command-line entry point: parse options and run the conversion."""
  parser = optparse.OptionParser()
  parser.add_option("--raw", action="store",
                    help="file to write the processed sources array to.")
  parser.add_option("--startup_blob", action="store",
                    help="file to write the startup blob to.")
  parser.set_usage("""js2c out.cc type sources.js ...
out.cc: C code to be generated.
type: type parameter for NativesCollection template.
sources.js: JS internal sources or macros.py.""")
  (options, args) = parser.parse_args()

  # args[0] = output file, args[1] = natives type, args[2:] = inputs.
  JS2C(args[2:], args[0], args[1], options.raw, options.startup_blob)


if __name__ == "__main__":
  main()
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import six
import pytest
from sentry.grouping.enhancer import Enhancements, InvalidEnhancerConfig
def dump_obj(obj):
    """Recursively convert an object graph into plain values for snapshotting.

    Objects without a real instance ``__dict__`` (ints, strings, etc.) are
    returned unchanged; list and dict attribute values are converted
    element-wise, everything else is kept as-is.
    """
    if not isinstance(getattr(obj, "__dict__", None), dict):
        return obj
    rv = {}
    # Plain .items() behaves identically on Python 2 and 3 here, so the six
    # compatibility shim is unnecessary inside this function.
    for (key, value) in obj.__dict__.items():
        if isinstance(value, list):
            rv[key] = [dump_obj(x) for x in value]
        elif isinstance(value, dict):
            rv[key] = {k: dump_obj(v) for k, v in value.items()}
        else:
            rv[key] = value
    return rv
def test_basic_parsing(insta_snapshot):
    """Parse a representative config, snapshot it, and verify round-tripping."""
    config_text = """
# This is a config
path:*/code/game/whatever/* +app
function:panic_handler ^-group -group
function:ThreadStartWin32 v-group
function:ThreadStartLinux v-group
function:ThreadStartMac v-group
family:native module:std::* -app
module:core::* -app
family:javascript path:*/test.js -app
family:javascript app:1 path:*/test.js -app
family:native max-frames=3
"""
    parsed = Enhancements.from_config_string(config_text, bases=["common:v1"])
    serialized = parsed.dumps()

    insta_snapshot(dump_obj(parsed))

    round_tripped = Enhancements.loads(serialized)
    assert round_tripped.dumps() == serialized
    assert round_tripped._to_config_structure() == parsed._to_config_structure()
    assert isinstance(serialized, six.string_types)
def test_parsing_errors():
    """An unknown matcher key must be rejected by the config parser."""
    try:
        Enhancements.from_config_string("invalid.message:foo -> bar")
    except InvalidEnhancerConfig:
        pass
    else:
        raise AssertionError("expected InvalidEnhancerConfig")
def test_basic_path_matching():
    """The path matcher accepts abs_path or filename, case-insensitively."""
    enhancements = Enhancements.from_config_string(
        """
path:**/test.js +app
"""
    )
    rule = enhancements.rules[0]

    def fires(frame):
        return bool(rule.get_matching_frame_actions(frame, "javascript"))

    # Matches when either abs_path or filename satisfies the glob.
    assert fires(
        {"abs_path": "http://example.com/foo/test.js", "filename": "/foo/test.js"}
    )
    assert not fires(
        {"abs_path": "http://example.com/foo/bar.js", "filename": "/foo/bar.js"}
    )
    assert fires({"abs_path": "http://example.com/foo/test.js"})
    assert not fires({"filename": "/foo/bar.js"})

    # Path matching is case-insensitive.
    assert fires({"abs_path": "http://example.com/foo/TEST.js"})
    assert not fires({"abs_path": "http://example.com/foo/bar.js"})
def test_family_matching():
    """Rules only fire for frames whose platform matches the rule's family."""
    enhancements = Enhancements.from_config_string(
        """
family:javascript path:**/test.js +app
family:native function:std::* -app
"""
    )
    js_rule, native_rule = enhancements.rules

    def fires(rule, frame, platform):
        return bool(rule.get_matching_frame_actions(frame, platform))

    js_frame = {"abs_path": "http://example.com/foo/TEST.js"}
    assert fires(js_rule, js_frame, "javascript")
    assert not fires(js_rule, {"abs_path": "http://example.com/foo/TEST.js"}, "native")

    assert not fires(
        native_rule,
        {"abs_path": "http://example.com/foo/TEST.js", "function": "std::whatever"},
        "javascript",
    )
    assert fires(native_rule, {"function": "std::whatever"}, "native")
def test_app_matching():
    """The app:yes/app:no matcher gates rules on the frame's in_app flag."""
    enhancements = Enhancements.from_config_string(
        """
family:javascript path:**/test.js app:yes +app
family:native path:**/test.c app:no -group
"""
    )
    app_yes_rule, app_no_rule = enhancements.rules

    def fires(rule, frame, platform):
        return bool(rule.get_matching_frame_actions(frame, platform))

    assert fires(
        app_yes_rule,
        {"abs_path": "http://example.com/foo/TEST.js", "in_app": True},
        "javascript",
    )
    assert not fires(
        app_yes_rule,
        {"abs_path": "http://example.com/foo/TEST.js", "in_app": False},
        "javascript",
    )
    assert fires(app_no_rule, {"abs_path": "/test.c", "in_app": False}, "native")
    assert not fires(app_no_rule, {"abs_path": "/test.c", "in_app": True}, "native")
def test_package_matching():
    # This tests a bunch of different rules from the default in-app logic that
    # was ported from the former native plugin.
    enhancements = Enhancements.from_config_string(
        """
family:native package:/var/**/Frameworks/** -app
family:native package:**/*.app/Contents/** +app
family:native package:linux-gate.so -app
family:native package:?:/Windows/** -app
"""
    )
    bundled_rule, macos_rule, linux_rule, windows_rule = enhancements.rules

    def fires(rule, package):
        return bool(rule.get_matching_frame_actions({"package": package}, "native"))

    assert fires(bundled_rule, "/var/containers/MyApp/Frameworks/libsomething")
    assert fires(macos_rule, "/Applications/MyStuff.app/Contents/MacOS/MyStuff")
    assert fires(linux_rule, "linux-gate.so")

    # Windows drive letters and paths match case-insensitively.
    assert fires(windows_rule, "D:\\Windows\\System32\\kernel32.dll")
    assert fires(windows_rule, "d:\\windows\\System32\\kernel32.dll")

    assert not fires(bundled_rule, "/var2/containers/MyApp/Frameworks/libsomething")
    assert not fires(bundled_rule, "/var/containers/MyApp/MacOs/MyApp")
    assert not fires(bundled_rule, "/usr/lib/linux-gate.so")
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbmonitor_service_binding(base_resource) :
""" Binding class showing the service that can be bound to lbmonitor.
"""
def __init__(self) :
self._monitorname = ""
self._servicename = ""
self._dup_state = ""
self._dup_weight = 0
self._servicegroupname = ""
self._state = ""
self._weight = 0
@property
def servicegroupname(self) :
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def dup_state(self) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
return self._dup_state
except Exception as e:
raise e
@dup_state.setter
def dup_state(self, dup_state) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
self._dup_state = dup_state
except Exception as e:
raise e
@property
def servicename(self) :
try :
return self._servicename
except Exception as e:
raise e
@servicename.setter
def servicename(self, servicename) :
try :
self._servicename = servicename
except Exception as e:
raise e
@property
def state(self) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
ur"""State of the monitor. The state setting for a monitor of a given type affects all monitors of that type. For example, if an HTTP monitor is enabled, all HTTP monitors on the appliance are (or remain) enabled. If an HTTP monitor is disabled, all HTTP monitors on the appliance are disabled.
"""
try :
self._state = state
except Exception as e:
raise e
@property
def dup_weight(self) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
return self._dup_weight
except Exception as e:
raise e
@dup_weight.setter
def dup_weight(self, dup_weight) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
self._dup_weight = dup_weight
except Exception as e:
raise e
@property
def monitorname(self) :
ur"""Name of the monitor.<br/>Minimum length = 1.
"""
try :
return self._monitorname
except Exception as e:
raise e
@monitorname.setter
def monitorname(self, monitorname) :
ur"""Name of the monitor.<br/>Minimum length = 1
"""
try :
self._monitorname = monitorname
except Exception as e:
raise e
@property
def weight(self) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
return self._weight
except Exception as e:
raise e
@weight.setter
def weight(self, weight) :
ur"""Weight to assign to the binding between the monitor and service.
"""
try :
self._weight = weight
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbmonitor_service_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbmonitor_service_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.monitorname is not None :
return str(self.monitorname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = lbmonitor_service_binding()
updateresource.monitorname = resource.monitorname
updateresource.servicename = resource.servicename
updateresource.dup_state = resource.dup_state
updateresource.dup_weight = resource.dup_weight
updateresource.servicegroupname = resource.servicegroupname
updateresource.state = resource.state
updateresource.weight = resource.weight
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbmonitor_service_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].monitorname = resource[i].monitorname
updateresources[i].servicename = resource[i].servicename
updateresources[i].dup_state = resource[i].dup_state
updateresources[i].dup_weight = resource[i].dup_weight
updateresources[i].servicegroupname = resource[i].servicegroupname
updateresources[i].state = resource[i].state
updateresources[i].weight = resource[i].weight
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = lbmonitor_service_binding()
deleteresource.monitorname = resource.monitorname
deleteresource.servicename = resource.servicename
deleteresource.servicegroupname = resource.servicegroupname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbmonitor_service_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].monitorname = resource[i].monitorname
deleteresources[i].servicename = resource[i].servicename
deleteresources[i].servicegroupname = resource[i].servicegroupname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
	class Dup_state:
		# Allowed values for the dup_state binding attribute.
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
	class State:
		# Allowed values for the state binding attribute.
		ENABLED = "ENABLED"
		DISABLED = "DISABLED"
class lbmonitor_service_binding_response(base_response) :
	"""Response wrapper carrying a list of lbmonitor_service_binding objects
	plus the standard nitro status fields."""
	def __init__(self, length=1) :
		# Standard nitro response metadata, filled in by the API layer.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate the payload list. (The original assigned this
		# attribute twice; the redundant first assignment was removed.)
		self.lbmonitor_service_binding = [lbmonitor_service_binding() for _ in range(length)]
| |
# Copyright (c) 2015 Thales Services SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
import functools
import os
import random
import re
import select
import shlex
import signal
import subprocess
import fixtures
import netaddr
from oslo_utils import uuidutils
import six
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.tests import base as tests_base
from neutron.tests.common import base as common_base
from neutron.tests import tools
# Name prefixes used for disposable test resources.
NS_PREFIX = 'func-'
BR_PREFIX = 'test-br'
PORT_PREFIX = 'test-port'
VETH0_PREFIX = 'test-veth0'
VETH1_PREFIX = 'test-veth1'
# Matches one `ss` output line and captures the local (source) port number.
SS_SOURCE_PORT_PATTERN = re.compile(
    r'^.*\s+\d+\s+.*:(?P<port>\d+)\s+[0-9:].*')
# os.environ.get() returns *strings*: coerce explicitly so these are always
# numeric whether they come from the environment or the defaults. (Previously
# a set environment variable produced a str and broke numeric use downstream.)
READ_TIMEOUT = float(os.environ.get('OS_TEST_READ_TIMEOUT', 5))
CHILD_PROCESS_TIMEOUT = float(os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20))
CHILD_PROCESS_SLEEP = float(os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5))
TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP)
def get_rand_port_name():
    """Return a random port name bounded by the device-name length limit."""
    return tests_base.get_rand_name(prefix=PORT_PREFIX,
                                    max_length=n_const.DEVICE_NAME_MAX_LEN)
def increment_ip_cidr(ip_cidr, offset=1):
    """Increment ip_cidr offset times.

    example: increment_ip_cidr("1.2.3.4/24", 2) ==> "1.2.3.6/24"
    """
    base = netaddr.IPNetwork(ip_cidr)
    shifted = netaddr.IPNetwork(ip_cidr)
    shifted.value += offset
    # The shifted address must stay strictly inside the original subnet.
    if not base.network < shifted.ip < base.broadcast:
        tools.fail(
            'Incorrect ip_cidr,offset tuple (%s,%s): "incremented" ip_cidr is '
            'outside ip_cidr' % (ip_cidr, offset))
    return str(shifted)
def set_namespace_gateway(port_dev, gateway_ip):
    """Set gateway for the namespace associated to the port.

    Refuses to act on a device with no namespace (that would alter the
    test machine's own routing table).
    """
    if not port_dev.namespace:
        tools.fail('tests should not change test machine gateway')
    port_dev.route.add_gateway(gateway_ip)
def assert_ping(src_namespace, dst_ip, timeout=1, count=1):
    """Ping dst_ip from inside src_namespace; raises if no reply arrives."""
    # Pick the right binary for the address family.
    command = 'ping' if netaddr.IPAddress(dst_ip).version == 4 else 'ping6'
    wrapper = ip_lib.IPWrapper(src_namespace)
    wrapper.netns.execute(
        [command, '-c', count, '-W', timeout, dst_ip])
def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1):
    """Assert that dst_ip does NOT answer pings from src_namespace."""
    try:
        assert_ping(src_namespace, dst_ip, timeout, count)
    except RuntimeError:
        # Expected: the ping command failed, i.e. no connectivity.
        return
    tools.fail("destination ip %(destination)s is replying to ping from "
               "namespace %(ns)s, but it shouldn't" %
               {'ns': src_namespace, 'destination': dst_ip})
def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
    """Send ARP requests with the arping executable; raises on failure.

    NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery
    Protocol instead.
    """
    wrapper = ip_lib.IPWrapper(src_namespace)
    command = ['arping', '-c', count, '-w', timeout]
    if source:
        command += ['-s', source]
    command.append(dst_ip)
    wrapper.netns.execute(command)
def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1):
    """Assert that dst_ip does NOT answer ARP requests from src_namespace."""
    try:
        assert_arping(src_namespace, dst_ip, source, timeout, count)
    except RuntimeError:
        # Expected: arping failed, i.e. no ARP reply.
        return
    tools.fail("destination ip %(destination)s is replying to arp from "
               "namespace %(ns)s, but it shouldn't" %
               {'ns': src_namespace, 'destination': dst_ip})
def _get_source_ports_from_ss_output(output):
    """Collect the set of source port strings from `ss` command output."""
    matches = (SS_SOURCE_PORT_PATTERN.match(line)
               for line in output.splitlines())
    return {match.group('port') for match in matches if match}
def get_unused_port(used, start=1024, end=65535):
    """Pick a random port in [start, end] that is not in the `used` set."""
    free_ports = set(range(start, end + 1)) - used
    return random.choice(list(free_ports))
def get_free_namespace_port(protocol, namespace=None):
    """Return an unused port from given namespace

    WARNING: This function returns a port that is free at the execution time of
             this function. If this port is used later for binding then there
             is a potential danger that port will be no longer free. It's up to
             the programmer to handle error if port is already in use.

    :param protocol: Return free port for given protocol. Supported protocols
                     are 'tcp' and 'udp'.
    :param namespace: optional namespace to inspect (root namespace if None).
    :raises ValueError: if protocol is neither 'tcp' nor 'udp'.
    """
    if protocol == n_const.PROTO_NAME_TCP:
        param = '-tna'
    elif protocol == n_const.PROTO_NAME_UDP:
        param = '-una'
    else:
        # Bug fix: error message previously misspelled "protocol" as
        # "procotol".
        raise ValueError("Unsupported protocol %s" % protocol)
    ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
    output = ip_wrapper.netns.execute(['ss', param])
    used_ports = _get_source_ports_from_ss_output(output)
    return get_unused_port(used_ports)
class RootHelperProcess(subprocess.Popen):
    """Popen subclass that runs its command through the agent root helper.

    The command is optionally wrapped in `ip netns exec <namespace>` and is
    always prefixed with the configured root helper (e.g. sudo); child_pid
    tracks the real command's pid underneath the helper wrapper.
    """
    def __init__(self, cmd, *args, **kwargs):
        # Pipe all stdio by default so read_stdout()/writeline() work.
        for arg in ('stdin', 'stdout', 'stderr'):
            kwargs.setdefault(arg, subprocess.PIPE)
        self.namespace = kwargs.pop('namespace', None)
        self.cmd = cmd
        if self.namespace is not None:
            cmd = ['ip', 'netns', 'exec', self.namespace] + cmd
        root_helper = config.get_root_helper(utils.cfg.CONF)
        cmd = shlex.split(root_helper) + cmd
        self.child_pid = None
        super(RootHelperProcess, self).__init__(cmd, *args, **kwargs)
        # Block until the wrapped command is actually running.
        self._wait_for_child_process()
    def kill(self, sig=signal.SIGKILL):
        # Signal the wrapped command, not the root helper wrapper itself.
        pid = self.child_pid or str(self.pid)
        utils.execute(['kill', '-%d' % sig, pid], run_as_root=True)
    def read_stdout(self, timeout=None):
        return self._read_stream(self.stdout, timeout)
    @staticmethod
    def _read_stream(stream, timeout):
        # With a timeout, poll until readable before the blocking readline.
        if timeout:
            poller = select.poll()
            poller.register(stream.fileno())
            poll_predicate = functools.partial(poller.poll, 1)
            utils.wait_until_true(poll_predicate, timeout, 0.1,
                                  RuntimeError(
                                      'No output in %.2f seconds' % timeout))
        return stream.readline()
    def writeline(self, data):
        self.stdin.write(data + os.linesep)
        self.stdin.flush()
    def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT,
                                sleep=CHILD_PROCESS_SLEEP):
        # NOTE(review): the `sleep` parameter appears unused below — confirm.
        def child_is_running():
            child_pid = utils.get_root_helper_child_pid(
                self.pid, run_as_root=True)
            if utils.pid_invoked_with_cmdline(child_pid, self.cmd):
                return True
        utils.wait_until_true(
            child_is_running,
            timeout,
            exception=RuntimeError("Process %s hasn't been spawned "
                                   "in %d seconds" % (self.cmd, timeout)))
        self.child_pid = utils.get_root_helper_child_pid(
            self.pid, run_as_root=True)
class NetcatTester(object):
    """Transport-layer connectivity checker built around the `nc` executable.

    Lazily spawns a listening netcat in the server namespace and a
    connecting netcat in the client namespace, then bounces a test string
    between them.
    """
    TESTING_STRING = 'foo'
    TCP = n_const.PROTO_NAME_TCP
    UDP = n_const.PROTO_NAME_UDP
    def __init__(self, client_namespace, server_namespace, address,
                 dst_port, protocol, server_address='0.0.0.0', src_port=None):
        """
        Tool for testing connectivity on transport layer using netcat
        executable.

        The processes are spawned lazily.

        :param client_namespace: Namespace in which netcat process that
                                 connects to other netcat will be spawned
        :param server_namespace: Namespace in which listening netcat process
                                 will be spawned
        :param address: Server address from client point of view
        :param dst_port: Port on which netcat listens
        :param protocol: Transport protocol, either 'tcp' or 'udp'
        :param server_address: Address in server namespace on which netcat
                               should listen
        :param src_port: Source port of netcat process spawned in client
                         namespace - packet will have src_port in TCP/UDP
                         header with this value
        :raises ValueError: if protocol is neither 'tcp' nor 'udp'
        """
        self.client_namespace = client_namespace
        self.server_namespace = server_namespace
        self._client_process = None
        self._server_process = None
        self.address = address
        self.server_address = server_address
        self.dst_port = str(dst_port)
        self.src_port = str(src_port) if src_port else None
        if protocol not in TRANSPORT_PROTOCOLS:
            raise ValueError("Unsupported protocol %s" % protocol)
        self.protocol = protocol
    @property
    def client_process(self):
        # Lazily establishes the connection (and server) on first access.
        if not self._client_process:
            self.establish_connection()
        return self._client_process
    @property
    def server_process(self):
        if not self._server_process:
            self._spawn_server_process()
        return self._server_process
    def _spawn_server_process(self):
        self._server_process = self._spawn_nc_in_namespace(
            self.server_namespace,
            address=self.server_address,
            listen=True)
    def establish_connection(self):
        """Start the client netcat (and the server first if needed)."""
        if self._client_process:
            # Bug fix: the message previously used '$(ip_addr)s', which is
            # not a %-format placeholder, so the address never appeared.
            raise RuntimeError('%(proto)s connection to %(ip_addr)s is already'
                               ' established' %
                               {'proto': self.protocol,
                                'ip_addr': self.address})
        if not self._server_process:
            self._spawn_server_process()
        self._client_process = self._spawn_nc_in_namespace(
            self.client_namespace,
            address=self.address)
        if self.protocol == self.UDP:
            # Create an entry in conntrack table for UDP packets
            self.client_process.writeline(self.TESTING_STRING)
    def test_connectivity(self, respawn=False):
        """Round-trip TESTING_STRING client->server->client; True on echo."""
        stop_required = (respawn and self._client_process and
                         self._client_process.poll() is not None)
        if stop_required:
            self.stop_processes()
        self.client_process.writeline(self.TESTING_STRING)
        message = self.server_process.read_stdout(READ_TIMEOUT).strip()
        self.server_process.writeline(message)
        message = self.client_process.read_stdout(READ_TIMEOUT).strip()
        return message == self.TESTING_STRING
    def _spawn_nc_in_namespace(self, namespace, address, listen=False):
        cmd = ['nc', address, self.dst_port]
        if self.protocol == self.UDP:
            cmd.append('-u')
        if listen:
            cmd.append('-l')
            if self.protocol == self.TCP:
                # Keep listening after the first connection closes.
                cmd.append('-k')
        else:
            cmd.extend(['-w', '20'])
        if self.src_port:
            cmd.extend(['-p', self.src_port])
        proc = RootHelperProcess(cmd, namespace=namespace)
        return proc
    def stop_processes(self):
        """Kill and reap both netcat processes, clearing the cached refs."""
        for proc_attr in ('_client_process', '_server_process'):
            proc = getattr(self, proc_attr)
            if proc:
                if proc.poll() is None:
                    proc.kill()
                proc.wait()
                setattr(self, proc_attr, None)
class NamespaceFixture(fixtures.Fixture):
    """Fixture that creates (and cleans up) a network namespace.

    :ivar ip_wrapper: created namespace
    :type ip_wrapper: IPWrapper
    :ivar name: created namespace name
    :type name: str
    """
    def __init__(self, prefix=NS_PREFIX):
        super(NamespaceFixture, self).__init__()
        self.prefix = prefix
    def _setUp(self):
        wrapper = ip_lib.IPWrapper()
        # Unique namespace name so parallel tests never collide.
        self.name = self.prefix + uuidutils.generate_uuid()
        self.addCleanup(self.destroy)
        self.ip_wrapper = wrapper.ensure_namespace(self.name)
    def destroy(self):
        if self.ip_wrapper.netns.exists(self.name):
            self.ip_wrapper.netns.delete(self.name)
class VethFixture(fixtures.Fixture):
    """Create a veth.

    :ivar ports: created veth ports
    :type ports: IPDevice 2-uplet
    """
    def _setUp(self):
        ip_wrapper = ip_lib.IPWrapper()
        self.ports = common_base.create_resource(
            VETH0_PREFIX,
            lambda name: ip_wrapper.add_veth(name, self.get_peer_name(name)))
        self.addCleanup(self.destroy)
    def destroy(self):
        # Deleting one endpoint removes the whole veth pair, hence the
        # `break` after the first successful deletion.
        for port in self.ports:
            ip_wrapper = ip_lib.IPWrapper(port.namespace)
            try:
                ip_wrapper.del_veth(port.name)
                break
            except RuntimeError:
                # NOTE(cbrandily): It seems a veth is automagically deleted
                # when a namespace owning a veth endpoint is deleted.
                pass
    @staticmethod
    def get_peer_name(name):
        # Map a veth0-prefixed name to its veth1 peer name and vice versa;
        # anything else is not a name this fixture could have created.
        if name.startswith(VETH0_PREFIX):
            return name.replace(VETH0_PREFIX, VETH1_PREFIX)
        elif name.startswith(VETH1_PREFIX):
            return name.replace(VETH1_PREFIX, VETH0_PREFIX)
        else:
            tools.fail('%s is not a valid VethFixture veth endpoint' % name)
@six.add_metaclass(abc.ABCMeta)
class PortFixture(fixtures.Fixture):
    """Create a port.

    Abstract base: concrete subclasses provide the bridge-specific port
    creation and a default bridge fixture.

    :ivar port: created port
    :type port: IPDevice
    :ivar bridge: port bridge
    """
    def __init__(self, bridge=None, namespace=None):
        super(PortFixture, self).__init__()
        self.bridge = bridge
        self.namespace = namespace
    @abc.abstractmethod
    def _create_bridge_fixture(self):
        pass
    @abc.abstractmethod
    def _setUp(self):
        # Subclasses must call super()._setUp() to get a default bridge
        # when none was passed to the constructor.
        super(PortFixture, self)._setUp()
        if not self.bridge:
            self.bridge = self.useFixture(self._create_bridge_fixture()).bridge
    @classmethod
    def get(cls, bridge, namespace=None):
        """Deduce PortFixture class from bridge type and instantiate it."""
        if isinstance(bridge, ovs_lib.OVSBridge):
            return OVSPortFixture(bridge, namespace)
        if isinstance(bridge, bridge_lib.BridgeDevice):
            return LinuxBridgePortFixture(bridge, namespace)
        if isinstance(bridge, VethBridge):
            return VethPortFixture(bridge, namespace)
        tools.fail('Unexpected bridge type: %s' % type(bridge))
class OVSBridgeFixture(fixtures.Fixture):
    """Fixture that creates (and destroys) an OVS bridge.

    :ivar prefix: bridge name prefix
    :type prefix: str
    :ivar bridge: created bridge
    :type bridge: OVSBridge
    """
    def __init__(self, prefix=BR_PREFIX):
        super(OVSBridgeFixture, self).__init__()
        self.prefix = prefix
    def _setUp(self):
        base_ovs = ovs_lib.BaseOVS()
        self.bridge = common_base.create_resource(
            self.prefix, base_ovs.add_bridge)
        self.addCleanup(self.bridge.destroy)
class OVSPortFixture(PortFixture):
    """Create an OVS internal port plugged into an OVS bridge."""
    def __init__(self, bridge=None, namespace=None, attrs=None):
        super(OVSPortFixture, self).__init__(bridge, namespace)
        self.attrs = [] if attrs is None else attrs
    def _create_bridge_fixture(self):
        return OVSBridgeFixture()
    def _setUp(self):
        super(OVSPortFixture, self)._setUp()
        port_name = common_base.create_resource(PORT_PREFIX, self.create_port)
        self.addCleanup(self.bridge.delete_port, port_name)
        self.port = ip_lib.IPDevice(port_name)
        wrapper = ip_lib.IPWrapper(self.namespace)
        wrapper.add_device_to_namespace(self.port)
        self.port.link.set_up()
    def create_port(self, name):
        # An "internal" OVS port gives us a netdevice we can move into
        # the target namespace.
        self.attrs.insert(0, ('type', 'internal'))
        self.bridge.add_port(name, *self.attrs)
        return name
class LinuxBridgeFixture(fixtures.Fixture):
    """Fixture creating a linux bridge inside its own namespace.

    :ivar bridge: created bridge
    :type bridge: BridgeDevice
    :ivar namespace: created bridge namespace
    :type namespace: str
    """
    def _setUp(self):
        self.namespace = self.useFixture(NamespaceFixture()).name
        self.bridge = common_base.create_resource(
            BR_PREFIX, bridge_lib.BridgeDevice.addbr,
            namespace=self.namespace)
        self.addCleanup(self.bridge.delbr)
        self.bridge.link.set_up()
        self.addCleanup(self.bridge.link.set_down)
class LinuxBridgePortFixture(PortFixture):
    """Create a veth port attached to a linux bridge.

    :ivar port: created port
    :type port: IPDevice
    :ivar br_port: bridge side veth peer port
    :type br_port: IPDevice
    """
    def _create_bridge_fixture(self):
        return LinuxBridgeFixture()
    def _setUp(self):
        super(LinuxBridgePortFixture, self)._setUp()
        self.port, self.br_port = self.useFixture(VethFixture()).ports
        # Plug the bridge-side peer into the bridge's namespace first.
        bridge_wrapper = ip_lib.IPWrapper(self.bridge.namespace)
        bridge_wrapper.add_device_to_namespace(self.br_port)
        self.bridge.addif(self.br_port)
        self.br_port.link.set_up()
        # Then move the test-side endpoint into the requested namespace.
        port_wrapper = ip_lib.IPWrapper(self.namespace)
        port_wrapper.add_device_to_namespace(self.port)
        self.port.link.set_up()
class VethBridge(object):
    """Fake two-port bridge backed by a veth pair."""
    def __init__(self, ports):
        self.ports = ports
        # Track which endpoints have not been handed out yet.
        self.unallocated_ports = set(ports)
    def allocate_port(self):
        """Hand out a not-yet-used veth endpoint, failing when exhausted."""
        try:
            return self.unallocated_ports.pop()
        except KeyError:
            tools.fail('All FakeBridge ports (%s) are already allocated.' %
                       len(self.ports))
class VethBridgeFixture(fixtures.Fixture):
    """Simulate a bridge with a veth.

    :ivar bridge: created bridge
    :type bridge: FakeBridge
    """
    def _setUp(self):
        veth_ports = self.useFixture(VethFixture()).ports
        self.bridge = VethBridge(veth_ports)
class VethPortFixture(PortFixture):
    """Create a veth bridge port.

    :ivar port: created port
    :type port: IPDevice
    """
    def _create_bridge_fixture(self):
        return VethBridgeFixture()
    def _setUp(self):
        super(VethPortFixture, self)._setUp()
        self.port = self.bridge.allocate_port()
        wrapper = ip_lib.IPWrapper(self.namespace)
        wrapper.add_device_to_namespace(self.port)
        self.port.link.set_up()
| |
"""
RTFM:
* http://docs.python.org/2/library/cookielib.html#cookie-objects
Some code got from
https://github.com/kennethreitz/requests/blob/master/requests/cookies.py
"""
from __future__ import absolute_import

import json
import logging

from six.moves.http_cookiejar import CookieJar, Cookie
# urlunparse added: it is used by MockRequest.get_full_url but was missing.
from six.moves.urllib.parse import urlparse, urlunparse

from grab.error import GrabMisuseError
logger = logging.getLogger('grab.cookie')
COOKIE_ATTRS = ('name', 'value', 'version', 'port', 'domain',
'path', 'secure', 'expires', 'discard', 'comment',
'comment_url', 'rfc2109')
# Source: https://github.com/kennethreitz/requests/blob/master/requests/cookies.py
class MockRequest(object):
    """Wraps a `requests.Request` to mimic a `urllib2.Request`.

    The code in `cookielib.CookieJar` expects this interface in order to correctly
    manage cookie policies, i.e., determine whether a cookie can be set, given the
    domains of the request and the cookie.

    The original request object is read-only. The client is responsible for collecting
    the new headers via `get_new_headers()` and interpreting them appropriately. You
    probably want `get_cookie_header`, defined below.
    """
    def __init__(self, request):
        self._r = request
        # Headers cookielib asks us to add go here, not onto the request.
        self._new_headers = {}
        self.type = urlparse(self._r.url).scheme
    def get_type(self):
        return self.type
    def get_host(self):
        return urlparse(self._r.url).netloc
    def get_origin_req_host(self):
        return self.get_host()
    def get_full_url(self):
        # Only return the response's URL if the user hadn't set the Host
        # header
        if not self._r.headers.get('Host'):
            return self._r.url
        # If they did set it, retrieve it and reconstruct the expected domain
        host = self._r.headers['Host']
        parsed = urlparse(self._r.url)
        # Reconstruct the URL as we expect it
        # NOTE(review): urlunparse is used here but only urlparse is imported
        # at the top of this module — this branch would raise NameError;
        # confirm and add the import.
        return urlunparse([
            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
            parsed.fragment
        ])
    def is_unverifiable(self):
        # Always report the request as unverifiable (urllib2 terminology).
        return True
    def has_header(self, name):
        return name in self._r.headers or name in self._new_headers
    def get_header(self, name, default=None):
        return self._r.headers.get(name, self._new_headers.get(name, default))
    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value
    def get_new_headers(self):
        return self._new_headers
    @property
    def unverifiable(self):
        return self.is_unverifiable()
    @property
    def origin_req_host(self):
        return self.get_origin_req_host()
    @property
    def host(self):
        return self.get_host()
# https://github.com/kennethreitz/requests/blob/master/requests/cookies.py
class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """
    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers
    def info(self):
        return self._headers
    def getheaders(self, name):
        # Bug fix: the result was computed but never returned.
        return self._headers.getheaders(name)
def create_cookie(name, value, domain, httponly=None, **kwargs):
    """Build a `cookielib.Cookie` instance with sensible defaults.

    Unknown keyword arguments raise GrabMisuseError; 'localhost' domains
    are normalized to the empty string.
    """
    if domain == 'localhost':
        domain = ''
    config = {
        'name': name,
        'value': value,
        'version': 0,
        'port': None,
        'domain': domain,
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rfc2109': False,
        'rest': {'HttpOnly': httponly},
    }
    for key in kwargs:
        if key not in config:
            raise GrabMisuseError('Function `create_cookie` does not accept '
                                  '`%s` argument' % key)
    config.update(**kwargs)
    # Re-apply httponly in case kwargs replaced the whole `rest` dict.
    config['rest']['HttpOnly'] = httponly
    # Derived "specified" flags required by the Cookie constructor.
    config['port_specified'] = bool(config['port'])
    config['domain_specified'] = bool(config['domain'])
    config['domain_initial_dot'] = (config['domain'] or '').startswith('.')
    config['path_specified'] = bool(config['path'])
    return Cookie(**config)
class CookieManager(object):
    """
    Each Grab instance has `cookies` attribute that is instance of
    `CookieManager` class.

    That class contains helpful methods to create, load, save cookies from/to
    different places.
    """
    # Only one attribute: the wrapped cookielib CookieJar.
    __slots__ = ('cookiejar',)
    def __init__(self, cookiejar=None):
        # Wrap an existing jar if given, otherwise start with an empty one.
        if cookiejar is not None:
            self.cookiejar = cookiejar
        else:
            self.cookiejar = CookieJar()
        # self.disable_cookiejar_lock(self.cookiejar)
    # def disable_cookiejar_lock(self, cj):
    #     cj._cookies_lock = dummy_threading.RLock()
    def set(self, name, value, domain, **kwargs):
        """Add new cookie or replace existing cookie with same parameters.

        :param name: name of cookie
        :param value: value of cookie
        :param domain: cookie domain ('localhost' is normalized to '')
        :param kwargs: extra attributes of cookie
        """
        if domain == 'localhost':
            domain = ''
        self.cookiejar.set_cookie(create_cookie(name, value, domain, **kwargs))
    def update(self, cookies):
        """Merge cookies from another CookieJar or CookieManager."""
        if isinstance(cookies, CookieJar):
            for cookie in cookies:
                self.cookiejar.set_cookie(cookie)
        elif isinstance(cookies, CookieManager):
            for cookie in cookies.cookiejar:
                self.cookiejar.set_cookie(cookie)
        else:
            raise GrabMisuseError('Unknown type of cookies argument: %s'
                                  % type(cookies))
    @classmethod
    def from_cookie_list(cls, clist):
        """Alternate constructor: build a manager from a list of cookies."""
        cj = CookieJar()
        for cookie in clist:
            cj.set_cookie(cookie)
        return cls(cj)
    def clear(self):
        # Drop all cookies by replacing the jar wholesale.
        self.cookiejar = CookieJar()
    def __getstate__(self):
        # CookieJar itself is replaced by a plain list of its cookies so the
        # manager can be pickled; __setstate__ rebuilds the jar.
        state = {}
        for cls in type(self).mro():
            cls_slots = getattr(cls, '__slots__', ())
            for slot in cls_slots:
                if slot != '__weakref__':
                    if hasattr(self, slot):
                        state[slot] = getattr(self, slot)
        state['_cookiejar_cookies'] = list(self.cookiejar)
        del state['cookiejar']
        return state
    def __setstate__(self, state):
        # Rebuild the CookieJar from the serialized cookie list.
        state['cookiejar'] = CookieJar()
        for cookie in state['_cookiejar_cookies']:
            state['cookiejar'].set_cookie(cookie)
        del state['_cookiejar_cookies']
        for slot, value in state.items():
            setattr(self, slot, value)
    def __getitem__(self, key):
        # First cookie with a matching name wins, regardless of domain/path.
        for cookie in self.cookiejar:
            if cookie.name == key:
                return cookie.value
        raise KeyError
    def items(self):
        """Return all cookies as a list of (name, value) pairs."""
        res = []
        for cookie in self.cookiejar:
            res.append((cookie.name, cookie.value))
        return res
    def load_from_file(self, path):
        """
        Load cookies from the file.

        Content of file should be a JSON-serialized list of dicts.
        """
        with open(path) as inf:
            data = inf.read()
            if data:
                items = json.loads(data)
            else:
                items = {}
        for item in items:
            # Everything beyond the three positional fields is passed through
            # as extra cookie attributes.
            extra = dict((x, y) for x, y in item.items()
                         if x not in ['name', 'value', 'domain'])
            self.set(item['name'], item['value'], item['domain'], **extra)
    def get_dict(self):
        """Return cookies as a list of plain attribute dicts."""
        res = []
        for cookie in self.cookiejar:
            res.append(dict((x, getattr(cookie, x)) for x in COOKIE_ATTRS))
        return res
    def save_to_file(self, path):
        """
        Dump all cookies to file.

        Cookies are dumped as JSON-serialized dict of keys and values.
        """
        with open(path, 'w') as out:
            out.write(json.dumps(self.get_dict()))
    def get_cookie_header(self, req):
        """
        :param req: object with httplib.Request interface
            Actually, it have to have `url` and `headers` attributes
        """
        mocked_req = MockRequest(req)
        self.cookiejar.add_cookie_header(mocked_req)
        return mocked_req.get_new_headers().get('Cookie')
| |
# device.py
# Author: Meghan Clark
# This file contains a Device object that exposes a high-level API for interacting
# with a LIFX device, and which caches some of the more persistent state attributes
# so that you don't always need to spam the light with packets.
#
# The Device object also provides the low-level workflow functions for sending
# LIFX unicast packets to the specific device. LIFX unicast packets are sent
# via UDP broadcast, but by including the device's MAC other LIFX devices will
# ignore the packet.
#
# Import note: Every time you call a `get` method you are sending packets to the
# real device. If you want to access the last known (cached) value of an attribute
# just access the attribute directly, e.g., mydevice.label instead of mydevice.get_label()
#
# Currently service and port are set during initialization and never updated.
# This may need to change in the future to support multiple (service, port) pairs
# per device, and also to capture in real time when a service is down (port = 0).
from socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, SO_BROADCAST, timeout, error
from .msgtypes import *
from .unpack import unpack_lifx_message
from time import time, sleep
from datetime import datetime
DEFAULT_TIMEOUT = 0.5  # socket timeout (seconds) while awaiting a reply
DEFAULT_ATTEMPTS = 5  # retries / repeats per request
# NOTE(review): bytes literal for the host — confirm sendto accepts bytes
# hosts on the target Python version.
UDP_BROADCAST_IP = b'255.255.255.255'
UDP_BROADCAST_PORT = 56700
VERBOSE = False
class Device(object):
# mac_addr is a string, with the ':' and everything.
# service is an integer that maps to a service type. See SERVICE_IDS in msgtypes.py
# source_id is a number unique to this client, will appear in responses to this client
    def __init__(self, mac_addr, service, port, source_id, ip_addr, verbose=False):
        """Cache addressing information; no network traffic happens here.

        :param mac_addr: device MAC as a string, with the ':' and everything.
        :param service: integer service type (see SERVICE_IDS in msgtypes.py).
        :param port: UDP port the device's service listens on.
        :param source_id: number unique to this client, echoed in responses.
        :param ip_addr: last known IP address of the device.
        :param verbose: when True, print every packet sent/received.
        """
        self.verbose = verbose
        self.mac_addr = mac_addr
        self.port = port
        self.service = service
        self.source_id = source_id
        self.ip_addr = ip_addr  # IP addresses can change, though...

        # The following attributes can be set by calling refresh(), but that
        # takes time so it is not done by default during initialization.
        # However, refresh() will be called each time __str__ is called.
        # Printing the device is therefore accurate but expensive.
        self.label = None
        self.power_level = None
        self.host_firmware_build_timestamp = None
        self.host_firmware_version = None
        self.wifi_firmware_build_timestamp = None
        self.wifi_firmware_version = None
        self.vendor = None
        self.product = None
        self.version = None

        # For completeness, the following are state attributes of the device
        # that become stale too fast to bother caching in the device object,
        # though they can be accessed directly from the real device using the
        # methods below:
        #     wifi signal mw
        #     wifi tx bytes
        #     wifi rx bytes
        #     time
        #     uptime
        #     downtime
############################################################################
# #
# Device API Methods #
# #
############################################################################
# update the device's (relatively) persistent attributes
    def refresh(self):
        """Re-query the device and update all cached persistent attributes."""
        self.label = self.get_label()
        self.power_level = self.get_power()
        self.host_firmware_build_timestamp, self.host_firmware_version = self.get_host_firmware_tuple()
        self.wifi_firmware_build_timestamp, self.wifi_firmware_version = self.get_wifi_firmware_tuple()
        self.vendor, self.product, self.version = self.get_version_tuple()
    def get_mac_addr(self):
        # Set at construction; never re-queried from the device.
        return self.mac_addr
    def get_service(self):
        # Integer service type captured at construction (see SERVICE_IDS).
        return self.service
    def get_port(self):
        # UDP port captured at construction; not refreshed at runtime.
        return self.port
    def get_ip_addr(self):
        # Last known IP address; may go stale if the device moves.
        return self.ip_addr
    def get_label(self):
        """Query the device label; on any error keep the cached value."""
        try:
            response = self.req_with_resp(GetLabel, StateLabel)
            # Strip NUL padding from the fixed-width label field.
            self.label = response.label.replace(b'\x00', b'')
        except:
            # Best effort: fall through to the last known label.
            pass
        return self.label
def set_label(self, label):
if len(label) > 32:
label = label[:32]
self.req_with_ack(SetLabel, {'label': label})
    def get_power(self):
        """Query the device power level; on any error keep the cached value."""
        try:
            response = self.req_with_resp(GetPower, StatePower)
            self.power_level = response.power_level
        except:
            # Best effort: fall through to the last known power level.
            pass
        return self.power_level
def set_power(self, power, rapid=False):
on = [True, 1, 'on']
off = [False, 0, 'off']
if power in on and not rapid:
success = self.req_with_ack(SetPower, {'power_level': 65535})
elif power in off and not rapid:
success = self.req_with_ack(SetPower, {'power_level': 0})
elif power in on and rapid:
success = self.fire_and_forget(SetPower, {'power_level': 65535})
elif power in off and rapid:
success = self.fire_and_forget(SetPower, {'power_level': 0})
    def get_host_firmware_tuple(self):
        """Query host firmware; return (build, version), None-filled on error."""
        build = None
        version = None
        try:
            response = self.req_with_resp(GetHostFirmware, StateHostFirmware)
            build = response.build
            version = response.version
        except:
            # Best effort: leave any unfetched field as None.
            pass
        return build, version
    def get_host_firmware_build_timestamp(self):
        # Fresh network query; also refreshes both cached host-firmware fields.
        self.host_firmware_build_timestamp, self.host_firmware_version = self.get_host_firmware_tuple()
        return self.host_firmware_build_timestamp
    def get_host_firmware_version(self):
        # Fresh network query; also refreshes both cached host-firmware fields.
        self.host_firmware_build_timestamp, self.host_firmware_version = self.get_host_firmware_tuple()
        return self.host_firmware_version
    def get_wifi_info_tuple(self):
        """Query wifi stats; return (signal, tx, rx), None-filled on error.

        These values go stale quickly, so they are never cached on the object.
        """
        signal = None
        tx = None
        rx = None
        try:
            response = self.req_with_resp(GetWifiInfo, StateWifiInfo)
            signal = response.signal
            tx = response.tx
            rx = response.rx
        except:
            # Best effort: leave any unfetched field as None.
            pass
        return signal, tx, rx
    def get_wifi_signal_mw(self):
        # Fresh network query each call; values are too volatile to cache.
        signal, tx, rx = self.get_wifi_info_tuple()
        return signal
    def get_wifi_tx_bytes(self):
        # Fresh network query each call.
        signal, tx, rx = self.get_wifi_info_tuple()
        return tx
    def get_wifi_rx_bytes(self):
        # Fresh network query each call.
        signal, tx, rx = self.get_wifi_info_tuple()
        return rx
    def get_wifi_firmware_tuple(self):
        """Query wifi firmware; return (build, version), None-filled on error."""
        build = None
        version = None
        try:
            response = self.req_with_resp(GetWifiFirmware, StateWifiFirmware)
            build = response.build
            version = response.version
        except:
            # Best effort: leave any unfetched field as None.
            pass
        return build, version
def get_wifi_firmware_build_timestamp(self):
self.wifi_firmware_build_timestamp, self.wifi_firmware_version = self._get_wifi_firmware_tuple()
return self.wifi_firmware_build_timestamp
def get_wifi_firmware_version(self):
self.wifi_firmware_build_timestamp, self.wifi_firmware_version = self._get_wifi_firmware_tuple()
return self.wifi_firmware_version
    def get_version_tuple(self):
        """Query hardware info; return (vendor, product, version), None-filled on error."""
        vendor = None
        product = None
        version = None
        try:
            response = self.req_with_resp(GetVersion, StateVersion)
            vendor = response.vendor
            product = response.product
            version = response.version
        except:
            # Best effort: leave any unfetched field as None.
            pass
        return vendor, product, version
    def get_vendor(self):
        # Fresh network query; also refreshes all three cached version fields.
        self.vendor, self.product, self.version = self.get_version_tuple()
        return self.vendor
    def get_product(self):
        # Fresh network query; also refreshes all three cached version fields.
        self.vendor, self.product, self.version = self.get_version_tuple()
        return self.product
    def get_version(self):
        # Fresh network query; also refreshes all three cached version fields.
        self.vendor, self.product, self.version = self.get_version_tuple()
        return self.version
    def get_info_tuple(self):
        """Query runtime info; return (time, uptime, downtime), None-filled on error.

        These values go stale immediately, so they are never cached.
        """
        time = None
        uptime = None
        downtime = None
        try:
            response = self.req_with_resp(GetInfo, StateInfo)
            time = response.time
            uptime = response.uptime
            downtime = response.downtime
        except:
            # Best effort: leave any unfetched field as None.
            pass
        return time, uptime, downtime
    def get_time(self):
        # Fresh network query each call; too volatile to cache.
        time, uptime, downtime = self.get_info_tuple()
        return time
    def get_uptime(self):
        # Fresh network query each call.
        time, uptime, downtime = self.get_info_tuple()
        return uptime
    def get_downtime(self):
        # Fresh network query each call.
        time, uptime, downtime = self.get_info_tuple()
        return downtime
############################################################################
# #
# String Formatting #
# #
############################################################################
    def device_characteristics_str(self, indent):
        """Format cached identity/power attributes as a multi-line string.

        NOTE(review): assumes refresh() already populated label (bytes) and
        power_level — label=None would make .decode() raise; confirm callers
        refresh first.
        """
        s = '{}\n'.format(self.label.decode('utf8'))
        s += indent + 'MAC Address: {}\n'.format(self.mac_addr.decode('utf8'))
        s += indent + 'IP Address: {}\n'.format(self.ip_addr)
        s += indent + 'Port: {}\n'.format(self.port)
        s += indent + 'Service: {}\n'.format(SERVICE_IDS[self.service])
        s += indent + 'Power: {}\n'.format(str_map(self.power_level))
        return s
def device_firmware_str(self, indent):
host_build_ns = self.host_firmware_build_timestamp
host_build_s = host_build_ns/1000000000
wifi_build_ns = self.wifi_firmware_build_timestamp
wifi_build_s = wifi_build_ns/1000000000
s = 'Host Firmware Build Timestamp: {} ({} UTC)\n'.format(host_build_ns, datetime.utcfromtimestamp(host_build_s))
s += indent + 'Host Firmware Build Version: {}\n'.format(self.host_firmware_version)
s += indent + 'Wifi Firmware Build Timestamp: {} ({} UTC)\n'.format(wifi_build_ns, datetime.utcfromtimestamp(wifi_build_s))
s += indent + 'Wifi Firmware Build Version: {}\n'.format(self.wifi_firmware_version)
return s
def device_product_str(self, indent):
s = 'Vendor: {}\n'.format(self.vendor)
s += indent + 'Product: {}\n'.format(self.product)
s += indent + 'Version: {}\n'.format(self.version)
return s
    def device_time_str(self, indent):
        """Query current device time stats and format them (network call)."""
        time, uptime, downtime = self.get_info_tuple()
        # Values are nanoseconds; convert for human-readable display.
        s = 'Current Time: {} ({} UTC)\n'.format(time, datetime.utcfromtimestamp(time/1000000000))
        s += indent + 'Uptime (ns): {} ({} hours)\n'.format(uptime, round(nanosec_to_hours(uptime), 2))
        s += indent + 'Last Downtime Duration +/-5s (ns): {} ({} hours)\n'.format(downtime, round(nanosec_to_hours(downtime), 2))
        return s
def device_radio_str(self, indent):
signal, tx, rx = self.get_wifi_info_tuple()
s = 'Wifi Signal Strength (mW): {}\n'.format(signal)
s += indent + 'Wifi TX (bytes): {}\n'.format(tx)
s += indent + 'Wifi RX (bytes): {}\n'.format(rx)
return s
def __str__(self):
self.refresh()
indent = ' '
s = self.device_characteristics_str(indent)
s += indent + self.device_firmware_str(indent)
s += indent + self.device_product_str(indent)
s += indent + self.device_time_str(indent)
s += indent + self.device_radio_str(indent)
return s
############################################################################
# #
# Workflow Methods #
# #
############################################################################
# Don't wait for Acks or Responses, just send the same message repeatedly as fast as possible
def fire_and_forget(self, msg_type, payload={}, timeout_secs=DEFAULT_TIMEOUT, num_repeats=DEFAULT_ATTEMPTS):
self.initialize_socket(timeout_secs)
msg = msg_type(self.mac_addr, self.source_id, seq_num=0, payload=payload, ack_requested=False, response_requested=False)
sent_msg_count = 0
sleep_interval = 0.05 if num_repeats > 20 else 0
while(sent_msg_count < num_repeats):
self.sock.sendto(msg.packed_message, (UDP_BROADCAST_IP, self.port))
if self.verbose:
print('SEND: ' + str(msg))
sent_msg_count += 1
sleep(sleep_interval) # Max num of messages device can handle is 20 per second.
self.close_socket()
# Usually used for Set messages
def req_with_ack(self, msg_type, payload, timeout_secs=DEFAULT_TIMEOUT, max_attempts=DEFAULT_ATTEMPTS):
self.req_with_resp(msg_type, Acknowledgement, payload, timeout_secs, max_attempts)
# Usually used for Get messages, or for state confirmation after Set (hence the optional payload)
def req_with_resp(self, msg_type, response_type, payload={}, timeout_secs=DEFAULT_TIMEOUT, max_attempts=DEFAULT_ATTEMPTS):
success = False
device_response = None
self.initialize_socket(timeout_secs)
if response_type == Acknowledgement:
msg = msg_type(self.mac_addr, self.source_id, seq_num=0, payload=payload, ack_requested=True, response_requested=False)
else:
msg = msg_type(self.mac_addr, self.source_id, seq_num=0, payload=payload, ack_requested=False, response_requested=True)
response_seen = False
attempts = 0
while not response_seen and attempts < max_attempts:
sent = False
start_time = time()
timedout = False
while not response_seen and not timedout:
if not sent:
self.sock.sendto(msg.packed_message, (UDP_BROADCAST_IP, self.port))
sent = True
if self.verbose:
print('SEND: ' + str(msg))
try:
data, (ip_addr, port) = self.sock.recvfrom(1024)
response = unpack_lifx_message(data)
if self.verbose:
print('RECV: ' + str(response))
if type(response) == response_type:
if response.origin == 1 and response.source_id == self.source_id and response.target_addr == self.mac_addr:
response_seen = True
device_response = response
self.ip_addr = ip_addr
success = True
except timeout:
pass
elapsed_time = time() - start_time
timedout = True if elapsed_time > timeout_secs else False
attempts += 1
if not success:
raise WorkflowException('WorkflowException: Did not receive {} in response to {}'.format(str(response_type), str(msg_type)))
self.close_socket()
return device_response
    # Not currently implemented, although the LIFX LAN protocol supports this kind of workflow natively
    def req_with_ack_resp(self, msg_type, response_type, payload, timeout_secs=DEFAULT_TIMEOUT, max_attempts=DEFAULT_ATTEMPTS):
        """Placeholder for a request expecting both an ack and a response.

        Not implemented; see req_with_ack() and req_with_resp() instead.
        """
        pass
############################################################################
# #
# Socket Methods #
# #
############################################################################
def initialize_socket(self, timeout):
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.sock.settimeout(timeout)
port = UDP_BROADCAST_PORT
success = False
while not success:
try:
self.sock.bind((b'', port))
success = True
except: # address (port) already in use, maybe another client on the same computer...
port += 1
    def close_socket(self):
        """Close the UDP socket opened by initialize_socket()."""
        self.sock.close()
################################################################################
# #
# Custom Exceptions #
# #
################################################################################
class WorkflowException(Exception):
    """Raised when a request/response workflow fails (no matching reply seen)."""
    pass
################################################################################
# #
# Formatting Functions #
# #
################################################################################
def nanosec_to_hours(ns):
    """Convert a duration in nanoseconds to (fractional) hours."""
    ns_per_hour = 1000000000.0 * 60 * 60
    return ns / ns_per_hour
| |
import pytest
@pytest.fixture
def base_url():
    """Base URL of the ENCODE portal queried by the tests."""
    return 'https://www.encodeproject.org/'
@pytest.fixture
def assembly():
    """Genome assembly label used throughout the tests."""
    return 'GRCh38'
@pytest.fixture
def keypair():
    """Dummy (access_key_id, secret) credential tuple."""
    return ('ABC', '123')
@pytest.fixture
def histone_qc():
    """One HistoneChipSeqQualityMetric object as serialized by the portal."""
    return {
        "@type": [
            "HistoneChipSeqQualityMetric",
            "QualityMetric",
            "Item"
        ],
        "quality_metric_of": [
            "/files/ENCFF660DGD/"
        ],
        "uuid": "be6c1386-eb16-46db-a6fa-57164b5abd11",
        "Ft": 0.681145282904541,
        "@id": "/histone-chipseq-quality-metrics/be6c1386-eb16-46db-a6fa-57164b5abd11/",
        "npeak_overlap": 94053,
        "step_run": "/analysis-step-runs/bbd6e0ed-9081-4874-8f37-3323c6311ec7/",
        "assay_term_name": "ChIP-seq",
        "date_created": "2018-03-27T23:07:30.718507+00:00",
        "award": "/awards/U41HG006992/",
        "lab": "/labs/encode-processing-pipeline/",
        "nreads": 30399406,
        "nreads_in_peaks": 20706412,
        "submitted_by": "/users/6800d05f-7213-48b1-9ad8-254c73c5b83f/",
        "status": "released",
        "assay_term_id": "OBI:0000716"
    }
@pytest.fixture
def file_query():
    """Portal search result for two released GRCh38 files, embedding each
    file's quality_metrics, step_run, and analysis_step_version."""
    return {
        "@graph": [
            {
                "@id": "/files/ENCFF660DGD/",
                "@type": [
                    "File",
                    "Item"
                ],
                "notes": '{"dx-createdBy": {"job": "job-FBZp3z80369GYzY2Jz7FX8Qq", "executable": "applet-F9YvJzj0369JPFjp7zFkkvXg", "user": "user-mkajco"}, "qc": {"npeaks_in": 131917, "npeaks_rejected": 38043, "npeaks_out": 93874}, "dx-parentAnalysis": "analysis-FBZp3z80369GYzY2Jz7FX8Qg", "dx-id": "file-FBZy98j0bY1G7j3xFJ0pGJ2F"}',
                "quality_metrics": [
                    {
                        "@type": [
                            "HistoneChipSeqQualityMetric",
                            "QualityMetric",
                            "Item"
                        ],
                        "quality_metric_of": [
                            "/files/ENCFF660DGD/"
                        ],
                        "uuid": "be6c1386-eb16-46db-a6fa-57164b5abd11",
                        "Ft": 0.681145282904541,
                        "@id": "/histone-chipseq-quality-metrics/be6c1386-eb16-46db-a6fa-57164b5abd11/",
                        "npeak_overlap": 94053,
                        "step_run": "/analysis-step-runs/bbd6e0ed-9081-4874-8f37-3323c6311ec7/",
                        "assay_term_name": "ChIP-seq",
                        "date_created": "2018-03-27T23:07:30.718507+00:00",
                        "award": "/awards/U41HG006992/",
                        "lab": "/labs/encode-processing-pipeline/",
                        "nreads": 30399406,
                        "nreads_in_peaks": 20706412,
                        "submitted_by": "/users/6800d05f-7213-48b1-9ad8-254c73c5b83f/",
                        "status": "released",
                        "assay_term_id": "OBI:0000716"
                    }
                ],
                "assembly": "GRCh38",
                "status": "released",
                "dataset": "/experiments/ENCSR656SIB/",
                "accession": "ENCFF660DGD",
                "step_run": {
                    "schema_version": "4",
                    "@id": "/analysis-step-runs/8936804d-d3b8-4207-a40b-c207bed96ae4/",
                    "dx_applet_details": [{'dx_job_id': 'dnanexus:job-123'}],
                    "date_created": "2018-03-15T03:29:17.649046+00:00",
                    "status": "released",
                    "@type": [],
                    "analysis_step_version": "/analysis-step-versions/histone-overlap-peaks-step-v-1-1/",
                    "submitted_by": "/users/85978cd9-131e-48e2-a389-f752ab05b0a6/",
                    "uuid": "8936804d-d3b8-4207-a40b-c207bed96ae4",
                    "aliases": [
                        "dnanexus:job-FBZp3z80369GYzY2Jz7FX8Qq"
                    ]
                },
                "analysis_step_version": {
                    "name": "histone-overlap-peaks-step-v-1-1"
                }
            },
            {
                "@id": "/files/ENCFF111DMJ/",
                "@type": [
                    "File",
                    "Item"
                ],
                "notes": '{"dx-createdBy": {"job": "job-FBB6BKQ03699JBZqGQFVb3JZ", "executable": "applet-F9YvJzj0369JPFjp7zFkkvXg", "user": "user-keenangraham"}, "qc": {"npeaks_in": 185535, "npeaks_rejected": 36732, "npeaks_out": 148803}, "dx-parentAnalysis": "analysis-FBB6BKQ03699JBZqGQFVb3JQ", "dx-id": "file-FBB8GK001838fKvY2qK8K5q6"}',
                "quality_metrics": [ ],
                "assembly": "GRCh38",
                "status": "released",
                "dataset": "/experiments/ENCSR486GER/",
                "accession": "ENCFF111DMJ",
                "step_run": {
                    "schema_version": "4",
                    "@id": "/analysis-step-runs/8a136540-234d-4a87-b481-8c238bd559a5/",
                    "dx_applet_details": [{'dx_job_id': 'dnanexus:job-321'}],
                    "aliases": [],
                    "analysis_step_version": "/analysis-step-versions/histone-unreplicated-partition-concordance-step-v-1-0/",
                    "status": "released",
                    "@type": [],
                    "date_created": "2018-03-01T20:03:52.764975+00:00",
                    "uuid": "8a136540-234d-4a87-b481-8c238bd559a5",
                    "submitted_by": "/users/7e95dcd6-9c35-4082-9c53-09d14c5752be/"
                },
                "analysis_step_version": {
                    "name": "histone-unreplicated-partition-concordance-step-v-1-0"
                }
            }
        ]
    }
@pytest.fixture
def experiment_query():
    """Portal search result for the two experiments that own the files in
    file_query (one isogenic ChIP, one unreplicated tissue sample)."""
    return {
        "@graph": [
            {
                "biosample_term_name": "HepG2",
                "accession": "ENCSR656SIB",
                "replication_type": "isogenic",
                "@id": "/experiments/ENCSR656SIB/",
                "award": {
                    "rfa": "ENCODE3"
                },
                "status": "released",
                "target": {
                    "name": "FLAG-ZBED5-human"
                },
                "@type": [
                    "Experiment",
                    "Dataset",
                    "Item"
                ],
                "lab": {
                    "name": "richard-myers"
                },
                "biosample_type": "cell line"
            },
            {
                "biosample_term_name": "Parathyroid adenoma",
                "accession": "ENCSR486GER",
                "replication_type": "unreplicated",
                "@id": "/experiments/ENCSR486GER/",
                "award": {
                    "rfa": "ENCODE3"
                },
                "status": "released",
                "target": {
                    "name": "H3K4me1-human"
                },
                "@type": [
                    "Experiment",
                    "Dataset",
                    "Item"
                ],
                "lab": {
                    "name": "bradley-bernstein"
                },
                "biosample_type": "tissue"
            }
        ]
    }
@pytest.fixture
def references_query():
    """Portal search result containing a single reference (spike-in) object."""
    return {
        "@graph": [
            {
                "description": "profile C1_1 ERCC spike-in concentrations used for C1 fluidigm",
                "@id": "/references/ENCSR535LMC/"
            }
        ]
    }
@pytest.fixture
def spikeins_used():
    """Experiment snippet whose replicate library points at the reference
    returned by references_query."""
    return {
        "@id": "/experiments/ENCSR974RYS/",
        "replicates": [
            {
                "library": {
                    "spikeins_used": [
                        "/references/ENCSR535LMC/"
                    ]
                }
            }
        ]
    }
@pytest.fixture
def dx_describe():
    """Minimal DNAnexus describe() payload with an analysis, project, and QC output."""
    return {
        'analysis': 'analysis-123456',
        'project': 'project-123',
        'output': {'frip': 123}
    }
@pytest.fixture
def test_args():
    """argparse.Namespace mimicking the reporter's parsed command-line options."""
    import argparse
    args = argparse.Namespace()
    args.__dict__.update({
        'assembly': 'GRCh38',
        'report_type': 'histone_qc',
        'output_type': 'google_sheets',
        'skip_dnanexus': True
    })
    return args
@pytest.fixture
def test_df():
    """Minimal two-row DataFrame for generic DataFrame-handling tests."""
    import pandas as pd
    values = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
    return pd.DataFrame(values)
@pytest.fixture
def test_rna_mapping_df():
    """Two-row DataFrame carrying STAR mapping-count columns."""
    import pandas as pd
    values = [
        {
            'star_uniquely_mapped_reads_number': 1,
            'star_number_of_reads_mapped_to_multiple_loci': 2
        },
        {
            'star_uniquely_mapped_reads_number': 3,
            'star_number_of_reads_mapped_to_multiple_loci': 4
        }
    ]
    return pd.DataFrame(values)
| |
from random import randint
def simple_patch():
    """Return a one-oscillator patch."""
    patch = ESQ1Patch()
    osc = patch.oscillators[0]
    osc.dca_enable.set_maximum()
    osc.dca_level.set_maximum()
    misc = patch.miscellaneous
    misc.frequency.set_maximum()
    misc.dca4_modulation_amount.set_maximum()
    env = patch.envelopes[3]
    env.levels[0].set_maximum()
    env.levels[2].set_maximum()
    return patch
class Parameter(object):
    """A patch parameter.

    Prevents the user from specifying an out-of-range value and allows the
    parameter to be set to its minimum, maximum, default, or random value.

    Attributes:
    minimum -- the minimum value. Must be <= maximum.
    maximum -- the maximum value. Must be >= minimum.
    default -- the default value (if None, set to minimum). Must be >= minimum
      and <= maximum.
    value -- the current value.
    """
    def __init__(self, minimum, maximum, default=None):
        if minimum > maximum:
            # Message fixed: equality is permitted, so "less than or equal".
            raise ValueError('Minimum (%d) must be less than or equal to '
                             'maximum (%d).' % (minimum, maximum))
        self.minimum = minimum
        self.maximum = maximum
        if default is not None:
            self.default = default
        else:
            self.default = self.minimum
        # reset() routes through the value setter, which range-checks default.
        self.reset()
    def __eq__(self, other):
        # Fixed: return NotImplemented for non-Parameter operands instead of
        # raising AttributeError (the original crashed on e.g. ``p == 5``).
        if not isinstance(other, Parameter):
            return NotImplemented
        return self.value == other.value
    def __ne__(self, other):
        if not isinstance(other, Parameter):
            return NotImplemented
        return self.value != other.value
    def __repr__(self):
        return str(self.value)
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, value):
        """Set the value property.

        value -- the new value. Must be >= minimum and <= maximum.
        Raises ValueError when the value is out of range.
        """
        if value < self.minimum:
            raise ValueError('Value (%d) is less than minimum (%d).' %
                             (value, self.minimum))
        elif value > self.maximum:
            raise ValueError('Value (%d) is more than maximum (%d).' %
                             (value, self.maximum))
        else:
            self._value = value
    def randomize(self):
        """Randomize the value property.

        Sets the value to a random number >= minimum and <= maximum.
        """
        self.value = randint(self.minimum, self.maximum)
    def reset(self):
        """Reset the value property to its default value."""
        self.value = self.default
    def set_minimum(self):
        """Set the value property to its minimum."""
        self.value = self.minimum
    def set_maximum(self):
        """Set the value property to its maximum."""
        self.value = self.maximum
class ModulationSource(Parameter):
    """A parameter representing a modulation source.

    The value must be one of the class properties specified below.
    Defaults to OFF.
    """
    # Valid values span LFO_1 (0) through OFF (15); the Parameter range
    # below is built from those two endpoints.
    LFO_1 = 0
    LFO_2 = 1
    LFO_3 = 2
    ENV_1 = 3
    ENV_2 = 4
    ENV_3 = 5
    ENV_4 = 6
    VEL = 7  # linear keyboard velocity.
    VEL_2 = 8  # non-linear keyboard velocity.
    KYBD = 9  # position of note on keyboard (partial range, 0 to 127).
    KYBD_2 = 10  # position of note on keyboard, -63 to 63.
    WHEEL = 11
    PEDAL = 12
    XCTRL = 13
    PRESS = 14  # aftertouch (not sent by ESQ-1!).
    OFF = 15
    def __init__(self):
        # minimum=LFO_1, maximum=OFF, default=OFF.
        super(ModulationSource, self).__init__(self.LFO_1, self.OFF, self.OFF)
class ModulationAmount(Parameter):
    """A parameter representing a modulation amount (display range -63..63, default 0)."""
    def __init__(self):
        super(ModulationAmount, self).__init__(-63, 63, 0)
class Boolean(Parameter):
    """A parameter representing a boolean.

    minimum=False, maximum=True, default False; note randomize() assigns
    the ints 0/1 (randint), which compare equal to False/True.
    """
    def __init__(self):
        super(Boolean, self).__init__(False, True)
def display_to_pcb(value):
    """Convert the value as shown on the ESQ-1's display (-63 to 63) to the
    value stored in the bytearray (65 to 127, 0 to 63).

    Raises ValueError when value is outside -63..63.
    """
    if value < -63:
        raise ValueError('Display value (%d) must be >= -63.' % value)
    elif value < 0:
        # Negative display values wrap into the top half of the byte range.
        return 128 + value
    elif value <= 63:
        return value
    else:
        # Fixed: the original message incorrectly read "must be <= -63".
        raise ValueError('Display value (%d) must be <= 63.' % value)
def pcb_to_display(value):
    """Convert the value as stored in the PCB (65 to 127, 0 to 63) to the
    value shown on the ESQ-1's display (-63 to 63).

    Raises ValueError for values < 0, == 64, or > 127.
    """
    # Flattened into guard clauses; each branch returns or raises.
    if value < 0:
        raise ValueError('PCB value (%d) must be >= 0.' % value)
    if value < 64:
        return value
    if value == 64:
        raise ValueError('PCB value must not be 64.')
    if value <= 127:
        # Top half of the byte range maps back to negative display values.
        return -128 + value
    raise ValueError('PCB value (%d) must be <= 127.' % value)
class ParameterCollection(object):
    """A collection of parameters, grouped for easy randomization
    and comparison.

    A comparison between two instances compares each instance's
    attributes pairwise when determining equality.
    """
    def randomize(self):
        """Randomize every attribute (and every element of list attributes)."""
        for name in self.__dict__:
            member = getattr(self, name)
            randomizer = getattr(member, "randomize", None)
            if randomizer:
                randomizer()
            elif type(member) is list:
                for element in member:
                    element.randomize()
    def __eq__(self, other):
        """Comparison operator for one == two."""
        return all(getattr(self, name) == getattr(other, name)
                   for name in self.__dict__)
    def __ne__(self, other):
        """Comparison operator for one != two."""
        return any(getattr(self, name) != getattr(other, name)
                   for name in self.__dict__)
class Envelope(ParameterCollection):
    """The parameters for one envelope. There are four envelopes in each ESQ-1
    patch.
    Attributes:
    levels -- a list of each level in the envelope. Three in total.
    times -- a list of each time in the envelope. Four in total.
    velocity_level -- the amount by which the envelope's levels will be lowered
    by the velocity of a note.
    velocity_attack_control -- the amount by which the envelope's time[0] is
    decreased by the velocity of a note.
    keyboard_decay_scaling -- the amount by which the envelope's time[1] and
    time[2] are decreased by the height of a note.
    """
    def __init__(self):
        self.levels = [ModulationAmount() for i in range(3)]
        self.times = [Parameter(0, 63) for i in range(4)]
        self.velocity_level = Parameter(0, 63)
        self.velocity_attack_control = Parameter(0, 63)
        self.keyboard_decay_scaling = Parameter(0, 63)
    def serialize(self):
        """Serialize the class's attributes into a bytearray."""
        bytes = bytearray()
        for level in self.levels:
            # Levels are signed display values, stored shifted left one bit.
            bytes.append(display_to_pcb(level.value) << 1)
        for time in self.times:
            bytes.append(time.value)
        # velocity_level occupies the upper bits of its byte (shifted by 2).
        bytes.append(self.velocity_level.value << 2)
        bytes.append(self.velocity_attack_control.value)
        bytes.append(self.keyboard_decay_scaling.value)
        return bytes
    def deserialize(self, bytes):
        """Deserialize the bytearray into the class's attributes.

        bytes -- an iterator of ints (e.g. iter(bytearray(...)) or the
        SYSEX nibble unpacker); values are consumed with next().
        """
        for level in self.levels:
            level.value = pcb_to_display(next(bytes) >> 1)
        for time in self.times:
            time.value = next(bytes)
        self.velocity_level.value = next(bytes) >> 2
        self.velocity_attack_control.value = next(bytes)
        self.keyboard_decay_scaling.value = next(bytes)
class LFO(ParameterCollection):
    """The parameters for one LFO. There are three LFOs in each ESQ-1 patch.
    Attributes:
    levels -- a list of each level in the LFO. Two in total.
    levels[0] is the level when the key is struck.
    levels[1] is the level reached at the end of the ramp defined by the
    delay (see below).
    frequency -- the frequency of the LFO.
    reset -- enable/disable whether the LFO returns to the beginning of its
    cycle each time a new key is struck.
    human -- enable/disable random element being introduced to the frequency.
    waveform -- either TRIANGLE, SAW, SQR, or NOISE.
    delay -- determines the rate at which the LFO's amplitude will go from
    level[0] to level[1]. This is a rate of change, not a fixed time.
    A value of 0 causes the LFO to remain at level[0].
    modulation_source -- modulation source for LFO depth.
    """
    TRIANGLE = 0
    SAW = 1
    SQR = 2
    NOISE = 3
    def __init__(self):
        self.levels = [Parameter(0, 63), Parameter(0, 63)]
        self.frequency = Parameter(0, 63)
        self.reset = Boolean()
        self.humanize = Boolean()
        self.waveform = Parameter(self.TRIANGLE, self.NOISE)
        self.delay = Parameter(0, 63)
        self.modulation_source = ModulationSource()
    def serialize(self):
        """Serialize the class's attributes into a bytearray.

        The 4-bit modulation source is split across two bytes: its high
        bit-pair rides in the levels[0] byte, its low bit-pair in the
        levels[1] byte.
        """
        bytes = bytearray()
        waveform = (self.waveform.value & 0b00000011) << 6
        modulation_source_0 = (self.modulation_source.value & 0b00000011) << 6
        modulation_source_1 = (self.modulation_source.value & 0b00001100) << 4
        reset = (self.reset.value & 0b00000001) << 7
        humanize = (self.humanize.value & 0b00000001) << 6
        bytes.append(waveform + self.frequency.value)
        bytes.append(modulation_source_1 + self.levels[0].value)
        bytes.append(modulation_source_0 + self.levels[1].value)
        bytes.append(reset + humanize + self.delay.value)
        return bytes
    def deserialize(self, bytes):
        """Deserialize the bytearray into the class's attributes."""
        byte = next(bytes)
        self.waveform.value = (byte & 0b11000000) >> 6
        self.frequency.value = byte & 0b00111111
        byte = next(bytes)
        # NOTE(review): the _0/_1 local names below are swapped relative to
        # serialize()'s locals, but the recombination matches serialize()'s
        # bit layout, so the round-trip is consistent.
        modulation_source_0 = (byte & 0b11000000) >> 6
        self.levels[0].value = byte & 0b00111111
        byte = next(bytes)
        modulation_source_1 = (byte & 0b11000000) >> 6
        self.levels[1].value = byte & 0b00111111
        self.modulation_source.value = modulation_source_1 +\
            (modulation_source_0 << 2)
        byte = next(bytes)
        self.reset.value = (byte & 0b10000000) >> 7
        self.humanize.value = (byte & 0b01000000) >> 6
        self.delay.value = byte & 0b00111111
class Oscillator(ParameterCollection):
    """The parameters for one oscillator. There are three oscillators in each
    ESQ-1 patch.
    Attributes:
    semitone -- the semitone. Can also be modified using set_octave().
    fine_tune -- the finetune/detune.
    frequency_modulation_sources -- a list containing the two frequency
    modulation sources.
    frequency_modulation_amounts -- a list containing the two frequency
    modulation amounts.
    waveform -- the waveform of the oscillator. SAW, BELL, etc. See class
    properties below.
    dca_enable -- enable/disable this oscillator.
    dca_level -- the amplitude level of this oscillator.
    dca_modulation_sources -- a list containing the two amplitude modulation
    sources.
    dca_modulation_amounts -- a list containing the two amplitude modulation
    amounts.
    """
    # Waveform identifiers as stored in the patch byte.
    SAW = 0
    BELL = 1
    SINE = 2
    SQUARE = 3
    PULSE = 4
    NOISE_1 = 5
    NOISE_2 = 6
    NOISE_3 = 7
    BASS = 8
    PIANO = 9
    EL_PNO = 10
    VOICE_1 = 11
    VOICE_2 = 12
    VOICE_3 = 13
    KICK = 14
    REED = 15
    ORGAN = 16
    SYNTH_1 = 17
    SYNTH_2 = 18
    SYNTH_3 = 19
    FORMT_1 = 20
    FORMT_2 = 21
    FORMT_3 = 22
    FORMT_4 = 23
    FORMT_5 = 24
    PULSE2 = 25
    SQR_2 = 26
    FOUR_OCTS = 27
    PRIME = 28
    BASS_2 = 29
    E_PNO2 = 30
    OCTAVE = 31
    OCT_5 = 32
    def __init__(self):
        # Default semitone 36 corresponds to octave 0 (see set_octave).
        self.semitone = Parameter(0, 96, 36)
        self.fine_tune = Parameter(0, 31)
        self.frequency_modulation_sources = [ModulationSource(),
                                             ModulationSource()]
        self.frequency_modulation_amounts = [ModulationAmount(),
                                             ModulationAmount()]
        self.waveform = Parameter(self.SAW, self.OCT_5)
        self.dca_enable = Boolean()
        self.dca_level = Parameter(0, 63)
        self.dca_modulation_sources = [ModulationSource(), ModulationSource()]
        self.dca_modulation_amounts = [ModulationAmount(), ModulationAmount()]
    def set_octave(self, value):
        """Set the semitone value using an octave value (-3 to 5)."""
        self.semitone.value = (value + 3) * 12
    def serialize(self):
        """Serialize the class's attributes into a bytearray."""
        bytes = bytearray()
        # fine_tune occupies the high five bits of its byte.
        fine_tune = self.fine_tune.value << 3
        # The two 4-bit modulation sources share one byte (high/low nibble).
        frequency_modulation_sources =\
            (self.frequency_modulation_sources[1].value << 4) +\
            self.frequency_modulation_sources[0].value
        # Signed amounts are converted to PCB form and shifted left one bit.
        frequency_modulation_amounts = [
            display_to_pcb(self.frequency_modulation_amounts[0].value) << 1,
            display_to_pcb(self.frequency_modulation_amounts[1].value) << 1
        ]
        # dca_enable is the top bit of the dca_level byte.
        dca_enable = (self.dca_enable.value & 0b00000001) << 7
        dca_level = self.dca_level.value << 1
        dca_modulation_sources =\
            (self.dca_modulation_sources[1].value << 4) +\
            self.dca_modulation_sources[0].value
        dca_modulation_amounts = [
            display_to_pcb(self.dca_modulation_amounts[0].value) << 1,
            display_to_pcb(self.dca_modulation_amounts[1].value) << 1
        ]
        bytes.append(self.semitone.value)
        bytes.append(fine_tune)
        bytes.append(frequency_modulation_sources)
        bytes.append(frequency_modulation_amounts[0])
        bytes.append(frequency_modulation_amounts[1])
        bytes.append(self.waveform.value)
        bytes.append(dca_enable + dca_level)
        bytes.append(dca_modulation_sources)
        bytes.append(dca_modulation_amounts[0])
        bytes.append(dca_modulation_amounts[1])
        return bytes
    def deserialize(self, bytes):
        """Deserialize the bytearray into the class's attributes.

        bytes -- an iterator of ints; values are consumed with next().
        """
        self.semitone.value = next(bytes)
        self.fine_tune.value = next(bytes) >> 3
        byte = next(bytes)
        self.frequency_modulation_sources[1].value = byte >> 4
        self.frequency_modulation_sources[0].value = byte & 0b00001111
        self.frequency_modulation_amounts[0].value = pcb_to_display(
            next(bytes) >> 1)
        self.frequency_modulation_amounts[1].value = pcb_to_display(
            next(bytes) >> 1)
        self.waveform.value = next(bytes)
        byte = next(bytes)
        self.dca_enable.value = (byte & 0b10000000) >> 7
        self.dca_level.value = (byte & 0b01111111) >> 1
        byte = next(bytes)
        self.dca_modulation_sources[1].value = byte >> 4
        self.dca_modulation_sources[0].value = byte & 0b00001111
        self.dca_modulation_amounts[0].value = pcb_to_display(
            next(bytes) >> 1)
        self.dca_modulation_amounts[1].value = pcb_to_display(
            next(bytes) >> 1)
class Miscellaneous(ParameterCollection):
    """The miscellaneous section of the ESQ-1 patch.
    Attributes:
    sync -- enable/disable syncing of phase between oscillator 2 and
    oscillator 1; when oscillator 1 finishes one complete cycle of its
    waveform and begins another, oscillator 2 will reset too.
    am -- enable/disable amplitude of oscillator 1 modulating amplitude of
    oscillator 2.
    mono -- enable/disable monophonic mode.
    glide -- set glide/portamento.
    reset_voice -- enable/disable 'voice stealing' when same note is played
    twice.
    reset_envelope -- when True, all envelopes will restart when a key is
    re-struck. When False, each envelope will start its cycle at the present
    level.
    reset_oscillator -- enable/disable the oscillators restarting when a key
    is struck.
    cycle -- when True, all envelopes are run through their full cycles,
    ignoring the key being released.
    pan -- pan.
    pan_modulation_source -- pan modulation source.
    pan_modulation_amount -- pan modulation amount.
    dca4_modulation_amount -- the amount dca 4 is modulated by envelope 4.
    frequency -- filter cut-off frequency.
    resonance -- filter resonance.
    filter_modulation_sources -- a list containing the two
    filter modulation sources.
    filter_modulation_amount -- a list containing the two
    filter modulation amounts.
    filter_keyboard_tracking -- the amount the location a note on the keyboard
    modulates the filter cut-off frequency.
    The following attributes deal with splitting/layering programs:
    split_direction
    split_point
    layer_flag
    layer_program
    split_flag
    split_program
    split_layer_flag
    split_layer_program
    """
    def __init__(self):
        self.sync = Boolean()
        self.am = Boolean()
        self.mono = Boolean()
        self.glide = Parameter(0, 63)
        self.reset_voice = Boolean()
        self.reset_envelope = Boolean()
        self.reset_oscillator = Boolean()
        self.cycle = Boolean()
        self.pan = Parameter(0, 15, 8)
        self.pan_modulation_source = ModulationSource()
        self.pan_modulation_amount = ModulationAmount()
        self.dca4_modulation_amount = Parameter(0, 63)
        self.frequency = Parameter(0, 127)
        self.resonance = Parameter(0, 31)
        self.filter_modulation_sources = [ModulationSource(),
                                          ModulationSource()]
        self.filter_modulation_amount = [ModulationAmount(),
                                         ModulationAmount()]
        self.filter_keyboard_tracking = Parameter(0, 63)
        self.split_direction = Boolean()
        self.split_point = Parameter(0, 108)
        self.layer_flag = Boolean()
        self.layer_program = Parameter(0, 39)
        self.split_flag = Boolean()
        self.split_program = Parameter(0, 39)
        self.split_layer_flag = Boolean()
        self.split_layer_program = Parameter(0, 39)
    def serialize(self):
        """Serialize the class's attributes into a bytearray.

        Most boolean flags ride in the top bit of a byte whose lower bits
        carry another parameter's value.
        """
        bytes = bytearray()
        am = (self.am.value & 0b00000001) << 7
        dca4_modulation_amount = self.dca4_modulation_amount.value << 1
        sync = (self.sync.value & 0b00000001) << 7
        # The two 4-bit filter sources share one byte (low/high nibble).
        filter_modulation_sources = [
            self.filter_modulation_sources[0].value,
            self.filter_modulation_sources[1].value << 4
        ]
        reset_voice = (self.reset_voice.value & 0b00000001) << 7
        mono = (self.mono.value & 0x01) << 7
        reset_envelope = (self.reset_envelope.value & 0b00000001) << 7
        filter_keyboard_tracking = self.filter_keyboard_tracking.value << 1
        reset_oscillator = (self.reset_oscillator.value & 0b00000001) << 7
        split_direction = (self.split_direction.value & 0b00000001) << 7
        layer_flag = (self.layer_flag.value & 0b00000001) << 7
        split_flag = (self.split_flag.value & 0b00000001) << 7
        split_layer_flag = (self.split_layer_flag.value & 0b00000001) << 7
        pan = self.pan.value << 4
        cycle = (self.cycle.value & 0b00000001) << 7
        bytes.append(am + dca4_modulation_amount)
        bytes.append(sync + self.frequency.value)
        bytes.append(self.resonance.value)
        bytes.append(filter_modulation_sources[0] +
                     filter_modulation_sources[1])
        bytes.append(reset_voice + display_to_pcb(
            self.filter_modulation_amount[0].value))
        bytes.append(mono + display_to_pcb(
            self.filter_modulation_amount[1].value))
        bytes.append(reset_envelope + filter_keyboard_tracking)
        bytes.append(reset_oscillator + self.glide.value)
        bytes.append(split_direction + self.split_point.value)
        bytes.append(layer_flag + self.layer_program.value)
        bytes.append(split_flag + self.split_program.value)
        bytes.append(split_layer_flag + self.split_layer_program.value)
        bytes.append(pan + self.pan_modulation_source.value)
        bytes.append(cycle +
                     display_to_pcb(self.pan_modulation_amount.value))
        return bytes
    def deserialize(self, bytes):
        """Deserialize the bytearray into the class's attributes.

        bytes -- an iterator of ints; values are consumed with next().
        """
        byte = next(bytes)
        self.am.value = (byte & 0b10000000) >> 7
        self.dca4_modulation_amount.value = (byte & 0b01111111) >> 1
        byte = next(bytes)
        self.sync.value = (byte & 0b10000000) >> 7
        self.frequency.value = byte & 0b01111111
        self.resonance.value = next(bytes)
        byte = next(bytes)
        self.filter_modulation_sources[0].value = byte & 0b00001111
        self.filter_modulation_sources[1].value = byte >> 4
        byte = next(bytes)
        self.reset_voice.value = (byte & 0b10000000) >> 7
        self.filter_modulation_amount[0].value = pcb_to_display(
            byte & 0b01111111)
        byte = next(bytes)
        self.mono.value = (byte & 0b10000000) >> 7
        self.filter_modulation_amount[1].value = pcb_to_display(
            byte & 0b01111111)
        byte = next(bytes)
        self.reset_envelope.value = (byte & 0b10000000) >> 7
        self.filter_keyboard_tracking.value = (byte & 0b01111111) >> 1
        byte = next(bytes)
        self.reset_oscillator.value = (byte & 0b10000000) >> 7
        self.glide.value = byte & 0b01111111
        byte = next(bytes)
        self.split_direction.value = (byte & 0b10000000) >> 7
        self.split_point.value = byte & 0b01111111
        byte = next(bytes)
        self.layer_flag.value = (byte & 0b10000000) >> 7
        self.layer_program.value = byte & 0b01111111
        byte = next(bytes)
        self.split_flag.value = (byte & 0b10000000) >> 7
        self.split_program.value = byte & 0b01111111
        byte = next(bytes)
        self.split_layer_flag.value = (byte & 0b10000000) >> 7
        self.split_layer_program.value = byte & 0b01111111
        byte = next(bytes)
        self.pan.value = (byte & 0b11110000) >> 4
        self.pan_modulation_source.value = byte & 0b00001111
        byte = next(bytes)
        self.cycle.value = (byte & 0b10000000) >> 7
        self.pan_modulation_amount.value = pcb_to_display(byte & 0b01111111)
class ESQ1Patch(ParameterCollection):
    """The entire ESQ-1 patch.
    Attributes:
    name -- the name of the patch.
    envelopes -- a list of four Envelope instances.
    lfos -- a list of three LFO instances.
    oscillators -- a list of three Oscillator instances.
    miscellaneous -- a Miscellaneous instance.
    """
    NAME_LENGTH = 6  # name must be 6 characters long.
    def __init__(self):
        self.name = '      '
        self.envelopes = [Envelope() for i in range(4)]
        self.lfos = [LFO() for i in range(3)]
        self.oscillators = [Oscillator() for i in range(3)]
        self.miscellaneous = Miscellaneous()
    def clean_name(self):
        """Ensure the patch name is six characters long and uppercase.

        Truncates long names, pads short ones with spaces.
        Return a bytearray.
        """
        if len(self.name) >= self.NAME_LENGTH:
            name_cleaned = self.name[:self.NAME_LENGTH]
        else:
            name_cleaned = self.name.ljust(self.NAME_LENGTH)
        return bytearray([ord(c) for c in name_cleaned.upper()])
    def serialize(self):
        """Serialize the class's attributes into a bytearray.

        Order: name, envelopes, LFOs, oscillators, miscellaneous — this must
        match deserialize() exactly.
        """
        bytes = self.clean_name()
        for envelope in self.envelopes:
            bytes += envelope.serialize()
        for lfo in self.lfos:
            bytes += lfo.serialize()
        for oscillator in self.oscillators:
            bytes += oscillator.serialize()
        bytes += self.miscellaneous.serialize()
        return bytes
    def deserialize(self, bytes):
        """Deserialize the bytearray into the class's attributes.

        bytes -- an iterator of ints; values are consumed with next() in the
        same order serialize() emits them.
        """
        name = [chr(next(bytes)) for i in range(self.NAME_LENGTH)]
        self.name = "".join(name)
        for envelope in self.envelopes:
            envelope.deserialize(bytes)
        for lfo in self.lfos:
            lfo.deserialize(bytes)
        for oscillator in self.oscillators:
            oscillator.deserialize(bytes)
        self.miscellaneous.deserialize(bytes)
def sysex_to_esq1_patches(filename):
    """Read a SYSEX file and return a list of patches.

    If the SYSEX file is in the 'single program dump' format, the list will
    contain one patch. If the SYSEX file is in the 'all program dump' format,
    the list will contain 40 patches.
    """
    with open(filename, 'rb') as sysex_file:
        sysex = iter(bytearray(sysex_file.read()))
    # SYSEX, Ensoniq ID, ESQ-1 ID.
    # NOTE(review): header validation via assert is stripped when Python
    # runs with -O; consider raising ValueError instead.
    assert next(sysex) == 0xF0
    assert next(sysex) == 0x0F
    assert next(sysex) == 0x02
    # the channel is not used.
    channel = next(sysex)
    dump_type = next(sysex)
    if dump_type == 0x01:
        # single program format.
        patch_count = 1
    elif dump_type == 0x02:
        # all program dump.
        patch_count = 40
    else:
        raise ValueError('Invalid dump type - %s' % dump_type)
    patches = []
    # use a generator to combine the two bytes into one.
    # SYSEX payload bytes are 7-bit, so each patch byte is split into two:
    # low nibble first, then high nibble (the reverse of
    # esq1_patches_to_sysex's packing).
    def _unpacker(sysex):
        while True:
            low = next(sysex)
            high = next(sysex)
            yield low + (high << 4)
    unpacker = _unpacker(sysex)
    # create a patch and unpack the bytes into it.
    for i in range(patch_count):
        patch = ESQ1Patch()
        patches.append(patch)
        patch.deserialize(unpacker)
    # ensure the end of the SYSEX file has been reached.
    assert next(sysex) == 0xF7
    return patches
def esq1_patches_to_sysex(patches, filename, channel=0):
    """Write a list of patches to the specified filename as a SYSEX file.

    If list contains one patch, the SYSEX file will be in the 'single program
    dump' format. Otherwise it will be in the 'all program dump' format. If
    the list contains fewer than 40 patches, it will be padded with blank
    patches. If the list contains more than 40 patches, only the first 40
    will be saved.
    """
    # SYSEX header: SYSEX marker, Ensoniq ID, ESQ-1 ID, channel.
    result = bytearray([0xF0, 0x0F, 0x02, channel])
    # Work on a local copy so the caller's list is never mutated.
    patches = list(patches)
    if not patches:
        raise ValueError('Must supply at least one patch.')
    if len(patches) == 1:
        # Single program dump.
        result.append(0x01)
        number_of_patches = 1
    else:
        # All program dump.
        result.append(0x02)
        number_of_patches = 40
    # Pad with blank patches when fewer than the dump size were supplied.
    patches.extend(ESQ1Patch() for _ in range(number_of_patches - len(patches)))
    # Anything beyond the first 40 patches is ignored.
    for patch in patches[:40]:
        for raw_byte in patch.serialize():
            # Each byte is emitted as two nibbles: low four bits first,
            # then the high four bits.
            result.append(raw_byte & 0b00001111)
            result.append(raw_byte >> 4)
    # End of SYSEX.
    result.append(0xF7)
    with open(filename, 'wb') as output_file:
        output_file.write(result)
| |
from copy import copy
__author__ = 'Anthony'
import numpy as np
import cv2
import cv
from scipy.cluster.hierarchy import fclusterdata
from scipy.spatial.distance import pdist, squareform
from hungarian import linear_assignment
# Debug/display toggles: which intermediate views to show on screen.
show_sub_img = False
show_raw_img = False
show_cluster_img = True
show_kalman_img = True
# Window name for the background-subtracted (frame-difference) view.
sub_window = "No background"
# Input video plus three annotated output videos (1920x1080 @ 30 fps).
cap = cv2.VideoCapture("overpass.mp4")
fourcc = cv2.cv.CV_FOURCC('P', 'I', 'M', '1')
diff_out = cv2.VideoWriter("overpass_diff.avi", fourcc, 30, (1920, 1080), isColor=False)
cluster_out = cv2.VideoWriter("overpass_cluster.avi", fourcc, 30, (1920, 1080))
kalman_out = cv2.VideoWriter("overpass_kalman.avi", fourcc, 30, (1920, 1080))
if show_cluster_img:
    cluster_window = "Clusters"
def frame_diff(old, new):
    """Return the absolute grayscale difference between two BGR frames.

    Also shows the difference image when show_sub_img is set, and always
    appends it to the diff_out video writer.
    """
    gray_old = cv2.cvtColor(old, cv2.COLOR_BGR2GRAY)
    gray_new = cv2.cvtColor(new, cv2.COLOR_BGR2GRAY)
    delta = cv2.absdiff(gray_old, gray_new)
    if show_sub_img:
        cv2.imshow(sub_window, delta)
    diff_out.write(delta)
    return delta
def make_2d_kalman(x, y):
    """Create a 4-state (x, y, vx, vy) constant-velocity Kalman filter.

    Args:
        x, y: initial position used to seed the predicted state.

    Returns:
        Tuple (kalman, kalman_measurement, kalman_state,
        kalman_process_noise); callers index [0] for the filter object and
        [1] for the 2x1 measurement matrix.
    """
    kalman = cv.CreateKalman(4, 2, 0)
    kalman_state = cv.CreateMat(4, 1, cv.CV_32FC1)
    kalman_process_noise = cv.CreateMat(4, 1, cv.CV_32FC1)
    kalman_measurement = cv.CreateMat(2, 1, cv.CV_32FC1)
    # Seed the prediction with the detected position and zero velocity.
    kalman.state_pre[0, 0] = x
    kalman.state_pre[1, 0] = y
    kalman.state_pre[2, 0] = 0
    kalman.state_pre[3, 0] = 0
    # Constant-velocity transition model with dt = 0.5:
    #   x' = x + dt*vx,  y' = y + dt*vy,  vx' = vx,  vy' = vy
    kalman.transition_matrix[0, 0] = 1
    kalman.transition_matrix[0, 1] = 0
    kalman.transition_matrix[0, 2] = .5
    kalman.transition_matrix[0, 3] = 0
    kalman.transition_matrix[1, 0] = 0
    kalman.transition_matrix[1, 1] = 1
    kalman.transition_matrix[1, 2] = 0
    kalman.transition_matrix[1, 3] = .5
    kalman.transition_matrix[2, 0] = 0
    kalman.transition_matrix[2, 1] = 0
    # BUG FIX: this row previously read [0, 0, 0, 1], which made the
    # predicted x-velocity track the y-velocity. vx must carry over to
    # itself for a constant-velocity model.
    kalman.transition_matrix[2, 2] = 1
    kalman.transition_matrix[2, 3] = 0
    kalman.transition_matrix[3, 0] = 0
    kalman.transition_matrix[3, 1] = 0
    kalman.transition_matrix[3, 2] = 0
    kalman.transition_matrix[3, 3] = 1
    # Measurement observes (x, y) directly; small noise covariances.
    cv.SetIdentity(kalman.measurement_matrix, cv.RealScalar(1))
    cv.SetIdentity(kalman.process_noise_cov, cv.RealScalar(.01))
    cv.SetIdentity(kalman.measurement_noise_cov, cv.RealScalar(.01))
    cv.SetIdentity(kalman.error_cov_post, cv.RealScalar(1))
    return kalman, kalman_measurement, kalman_state, kalman_process_noise
#
# params for ShiTomasi corner detection
feature_params = dict(maxCorners=500,
                      qualityLevel=.5,
                      minDistance=10,)
                      #blockSize=7)
# params for subpix corner refinement.
subpix_params = dict(zeroZone=(-1,-1),winSize=(10,10),
                     criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,20,0.03))
# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0, 255, (100, 3))
#
# Tracking state shared by the main loop below.
count = 0
tracks = []
features = []
kalmans = []
# Prime the loop: read two frames so the first difference image exists.
ret, raw_frame = cap.read()
ret, raw_frame2 = cap.read()
frame = frame_diff(raw_frame, raw_frame2)
# Main tracking loop: difference consecutive frames, detect corners on the
# difference image, cluster nearby corners into candidate vehicles, and
# track cluster centroids with one Kalman filter each (Hungarian matching).
while True:
    cv2.imshow("raw_video", raw_frame)
    ret, raw_frame2 = cap.read()
    old_frame = frame
    frame = frame_diff(raw_frame, raw_frame2)
    raw_frame = raw_frame2
    if True:#features is None or len(features) <= 2:
        # Corners are re-detected every frame (the re-use guard is disabled).
        features = cv2.goodFeaturesToTrack(frame, **feature_params)
        if features is None:
            continue
        if features is not None and len(features) > 3:
            cv2.cornerSubPix(frame, features, **subpix_params)
        tracks = [[p] for p in features.reshape((-1,2))] # reshape features into pairs.
    #else:
    tmp = np.float32(features).reshape(-1, 1, 2)
    # calculate optical flow
    new_features, lk_status, lk_error = cv2.calcOpticalFlowPyrLK(old_frame,
                                                                 frame,
                                                                 tmp,
                                                                 None,
                                                                 **lk_params)
    # remove points that are "lost"
    features = [point[0] for (status, point) in zip(lk_status, new_features) if status]
    new_features = np.array(new_features).reshape((-1, 2))
    if not len(features) > 2:
        continue
    # Hierarchical clustering: corners within 50px merge into one cluster.
    cluster_assignments = fclusterdata(features, 50, criterion='distance')
    if show_cluster_img:
        cluster_frame = copy(raw_frame2)
        for assignment, feature in zip(cluster_assignments, features):
            if assignment < len(color):
                cv2.circle(cluster_frame, (int(feature[0]),
                   int(feature[1])), 5, color[assignment], 10)
        #cv2.imshow('Clusters', cluster_frame)
        cluster_out.write(cluster_frame)
    # Bucket features by cluster id (fclusterdata ids are 1-based).
    clusters = []
    for i in range(max(cluster_assignments)):
        clusters.append([])
    for assignment, data in zip(cluster_assignments, features):
        clusters[assignment-1].append(data)
    # Clusters with a single corner are treated as noise.
    large_clusters = [cluster for cluster in clusters if len(cluster) > 1]
    cluster_means = []
    for cluster in large_clusters:
        mean = np.mean(cluster, axis=0)
        cluster_means.append(mean)
    if not kalmans: # if we aren't tracking any cars, see if there are any cars to track
        kalmans = [make_2d_kalman(point[0], point[1]) for point in cluster_means]
        lost = [0] * len(kalmans)
    # kalman predict
    predictions = [cv.KalmanPredict(kalman[0]) for kalman in kalmans]
    estimates = [(prediction[0, 0], prediction[1, 0]) for prediction in predictions]
    # perform linear assignment
    if estimates:
        # Distance matrix between detections and predictions; the top-right
        # detection-vs-prediction sub-block feeds the Hungarian solver.
        dist = pdist(cluster_means + estimates)
        points_found = len(cluster_means)
        #dist = pdist([[1,1], [1.2,1.2], [3,3], [25,25], [24,26],[1.25,1.25], [1.3,1.3]])
        square_dist = squareform(dist)
        chopped = square_dist[:points_found, points_found:] #
        assignments = linear_assignment(chopped) # we now have a list of pairs for each point.
        #print assignments
        # 'new' collects detection indices not matched to an existing tracker.
        new = range(points_found)
        successfully_tracked = []
        for assignment in assignments:
            new.remove(assignment[0])
            # Matches farther than 50px count as a miss for that tracker.
            if square_dist[assignment[0], assignment[1]] < 50:
                successfully_tracked.append(assignment)
            else:
                lost[assignment[1]] += 1
    else:
        # NOTE(review): np.ndarray([]) is a 0-d array with size 1, so the
        # 'assignments.size == 0' branch below never fires — confirm intent.
        assignments = np.ndarray([])
    if assignments.size == 0:
        lost = [l+1 for l in lost]
        #next loops estimates
    # kalman measurement updates
    # NOTE(review): 'successfully_tracked' (and 'new') are only bound inside
    # the 'if estimates:' branch above; this loop raises NameError on a frame
    # where no estimates exist yet — verify against intended startup path.
    states = []
    for assignment in successfully_tracked: # measurement update
        x = cluster_means[assignment[0]][0]
        y = cluster_means[assignment[0]][1]
        assigned_kalman = kalmans[assignment[1]]
        assigned_kalman[1][0, 0] = x
        assigned_kalman[1][1, 0] = y
        corrected = cv.KalmanCorrect(assigned_kalman[0], assigned_kalman[1])
        states.append((corrected[0, 0], corrected[1, 0]))
        lost[assignment[1]] = 0
    if estimates:
        # Spawn a fresh tracker for every unmatched detection.
        for new_point in new:
            new_filter = make_2d_kalman(*cluster_means[new_point])
            # NOTE(review): 'kalman' here is the leaked loop variable from
            # the predictions comprehension above (Python 2 scoping), i.e.
            # the LAST existing filter — this almost certainly should be
            # cv.KalmanPredict(new_filter[0]); confirm and fix.
            prediction = cv.KalmanPredict(kalman[0])
            estimates.append((prediction[0, 0], prediction[1, 0]))
            kalmans.append(new_filter)
            lost.append(0)
    # Retire trackers that have gone unmatched for more than 6 frames.
    remove_idxs = []
    #print lost
    for idx, lost_count in enumerate(lost):
        if lost_count > 6:
            remove_idxs.append(idx)
    for idx in remove_idxs[::-1]:
        lost.pop(idx)
        kalmans.pop(idx)
    kal_idx = 0  # NOTE(review): unused; left over from earlier code.
    #print estimates
    if show_kalman_img:
        kalman_img = copy(raw_frame2)
        for point in estimates:
            point = int(point[0]), int(point[1])
            cv2.circle(kalman_img, point, 6, (255, 0, 0),3)
        #cv2.imshow("Kalman Centers", kalman_img)
        kalman_out.write(kalman_img)
    # ESC exits the loop.
    k = cv2.waitKey(30)
    if k == 27:
        break
# Flush the three output videos.
diff_out.release()
cluster_out.release()
kalman_out.release()
print "goodbye"
| |
# encoding: utf-8
"""
Step implementations for document-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.shape import InlineShapes
from docx.shared import Inches
from docx.section import Sections
from docx.styles.styles import Styles
from docx.table import Table
from docx.text.paragraph import Paragraph
from helpers import test_docx, test_file
# given ===================================================
# Each 'given' step loads a fixture .docx (or a default document) into
# context.document for later steps to operate on.
@given('a blank document')
def given_a_blank_document(context):
    context.document = Document(test_docx('doc-word-default-blank'))
@given('a document having built-in styles')
def given_a_document_having_builtin_styles(context):
    context.document = Document()
@given('a document having inline shapes')
def given_a_document_having_inline_shapes(context):
    context.document = Document(test_docx('shp-inline-shape-access'))
@given('a document having sections')
def given_a_document_having_sections(context):
    context.document = Document(test_docx('doc-access-sections'))
@given('a document having styles')
def given_a_document_having_styles(context):
    context.document = Document(test_docx('sty-having-styles-part'))
@given('a document having three tables')
def given_a_document_having_three_tables(context):
    context.document = Document(test_docx('tbl-having-tables'))
@given('a single-section document having portrait layout')
def given_a_single_section_document_having_portrait_layout(context):
    context.document = Document(test_docx('doc-add-section'))
    section = context.document.sections[-1]
    # Remember (width, height) so later steps can verify the flip to landscape.
    context.original_dimensions = (section.page_width, section.page_height)
@given("a single-section Document object with headers and footers as document")
def given_a_single_section_Document_object_with_headers_and_footers(context):
    context.document = Document(test_docx("doc-add-section"))
# when ====================================================
# 'when' steps exercising table, heading and page-break additions.
@when('I add a 2 x 2 table specifying only row and column count')
def when_add_2x2_table_specifying_only_row_and_col_count(context):
    document = context.document
    document.add_table(rows=2, cols=2)
@when('I add a 2 x 2 table specifying style \'{style_name}\'')
def when_add_2x2_table_specifying_style_name(context, style_name):
    document = context.document
    document.add_table(rows=2, cols=2, style=style_name)
@when('I add a heading specifying level={level}')
def when_add_heading_specifying_level(context, level):
    # behave passes {level} as a string; convert for the int API.
    context.document.add_heading(level=int(level))
@when('I add a heading specifying only its text')
def when_add_heading_specifying_only_its_text(context):
    document = context.document
    context.heading_text = text = 'Spam vs. Eggs'
    document.add_heading(text)
@when('I add a page break to the document')
def when_add_page_break_to_document(context):
    document = context.document
    document.add_page_break()
@when('I add a paragraph specifying its style as a {kind}')
def when_I_add_a_paragraph_specifying_its_style_as_a(context, kind):
    """Add a paragraph passing the style either as an object or by name."""
    document = context.document
    style = context.style = document.styles['Heading 1']
    # Look up the flavor of style spec this scenario asks for; an unknown
    # kind raises KeyError, same as before.
    spec_for_kind = {
        'style object': style,
        'style name': 'Heading 1',
    }
    document.add_paragraph(style=spec_for_kind[kind])
# 'when' steps exercising paragraph, picture and section additions.
@when('I add a paragraph specifying its text')
def when_add_paragraph_specifying_text(context):
    document = context.document
    context.paragraph_text = 'foobar'
    document.add_paragraph(context.paragraph_text)
@when('I add a paragraph without specifying text or style')
def when_add_paragraph_without_specifying_text_or_style(context):
    document = context.document
    document.add_paragraph()
@when('I add a picture specifying 1.75" width and 2.5" height')
def when_add_picture_specifying_width_and_height(context):
    document = context.document
    context.picture = document.add_picture(
        test_file('monty-truth.png'),
        width=Inches(1.75), height=Inches(2.5)
    )
@when('I add a picture specifying a height of 1.5 inches')
def when_add_picture_specifying_height(context):
    document = context.document
    context.picture = document.add_picture(
        test_file('monty-truth.png'), height=Inches(1.5)
    )
@when('I add a picture specifying a width of 1.5 inches')
def when_add_picture_specifying_width(context):
    document = context.document
    context.picture = document.add_picture(
        test_file('monty-truth.png'), width=Inches(1.5)
    )
@when('I add a picture specifying only the image file')
def when_add_picture_specifying_only_image_file(context):
    document = context.document
    context.picture = document.add_picture(test_file('monty-truth.png'))
@when('I add an even-page section to the document')
def when_I_add_an_even_page_section_to_the_document(context):
    context.section = context.document.add_section(WD_SECTION.EVEN_PAGE)
@when('I change the new section layout to landscape')
def when_I_change_the_new_section_layout_to_landscape(context):
    # Swap the stored portrait (width, height) to flip the page to landscape.
    new_height, new_width = context.original_dimensions
    section = context.section
    section.orientation = WD_ORIENT.LANDSCAPE
    section.page_width = new_width
    section.page_height = new_height
@when("I execute section = document.add_section()")
def when_I_execute_section_eq_document_add_section(context):
    context.section = context.document.add_section()
# then ====================================================
# 'then' steps asserting on the state produced by the given/when steps.
@then('document.inline_shapes is an InlineShapes object')
def then_document_inline_shapes_is_an_InlineShapes_object(context):
    document = context.document
    inline_shapes = document.inline_shapes
    assert isinstance(inline_shapes, InlineShapes)
@then('document.paragraphs is a list containing three paragraphs')
def then_document_paragraphs_is_a_list_containing_three_paragraphs(context):
    document = context.document
    paragraphs = document.paragraphs
    assert isinstance(paragraphs, list)
    assert len(paragraphs) == 3
    for paragraph in paragraphs:
        assert isinstance(paragraph, Paragraph)
@then('document.sections is a Sections object')
def then_document_sections_is_a_Sections_object(context):
    sections = context.document.sections
    msg = 'document.sections not instance of Sections'
    assert isinstance(sections, Sections), msg
@then('document.styles is a Styles object')
def then_document_styles_is_a_Styles_object(context):
    styles = context.document.styles
    assert isinstance(styles, Styles)
@then('document.tables is a list containing three tables')
def then_document_tables_is_a_list_containing_three_tables(context):
    document = context.document
    tables = document.tables
    assert isinstance(tables, list)
    assert len(tables) == 3
    for table in tables:
        assert isinstance(table, Table)
@then('the document contains a 2 x 2 table')
def then_the_document_contains_a_2x2_table(context):
    table = context.document.tables[-1]
    assert isinstance(table, Table)
    assert len(table.rows) == 2
    assert len(table.columns) == 2
    # Stash for follow-on steps that inspect the added table.
    context.table_ = table
@then('the document has two sections')
def then_the_document_has_two_sections(context):
    assert len(context.document.sections) == 2
@then('the first section is portrait')
def then_the_first_section_is_portrait(context):
    first_section = context.document.sections[0]
    expected_width, expected_height = context.original_dimensions
    assert first_section.orientation == WD_ORIENT.PORTRAIT
    assert first_section.page_width == expected_width
    assert first_section.page_height == expected_height
@then('the last paragraph contains only a page break')
def then_last_paragraph_contains_only_a_page_break(context):
    document = context.document
    paragraph = document.paragraphs[-1]
    assert len(paragraph.runs) == 1
    assert len(paragraph.runs[0]._r) == 1
    assert paragraph.runs[0]._r[0].type == 'page'
@then('the last paragraph contains the heading text')
def then_last_p_contains_heading_text(context):
    document = context.document
    text = context.heading_text
    paragraph = document.paragraphs[-1]
    assert paragraph.text == text
@then('the second section is landscape')
def then_the_second_section_is_landscape(context):
    new_section = context.document.sections[-1]
    # Dimensions were swapped by the 'when' step; width/height trade places.
    expected_height, expected_width = context.original_dimensions
    assert new_section.orientation == WD_ORIENT.LANDSCAPE
    assert new_section.page_width == expected_width
    assert new_section.page_height == expected_height
@then('the style of the last paragraph is \'{style_name}\'')
def then_the_style_of_the_last_paragraph_is_style(context, style_name):
    document = context.document
    paragraph = document.paragraphs[-1]
    assert paragraph.style.name == style_name, (
        'got %s' % paragraph.style.name
    )
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cfg module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class CountingVisitor(cfg.GraphVisitor):
  """Graph visitor that tallies how many times each AST node is visited."""
  def __init__(self, graph):
    super(CountingVisitor, self).__init__(graph)
    self.counts = {}
  def init_state(self, _):
    # No per-node state is tracked.
    return None
  def visit_node(self, node):
    """Record one visit for the node's AST node; never request a revisit."""
    key = node.ast_node
    self.counts[key] = 1 + self.counts.get(key, 0)
    return False  # visit only once
class GraphVisitorTest(test.TestCase):
  """Tests that GraphVisitor traversals cover exactly the reachable nodes."""
  def _build_cfg(self, fn):
    """Parse fn and return (dict of per-function CFGs, AST module node)."""
    node, _ = parser.parse_entity(fn)
    cfgs = cfg.build(node)
    return cfgs, node
  def test_basic_coverage_forward(self):
    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2
    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_forward()
    fn_node = node.body[0]
    self.assertEqual(visitor.counts[fn_node.args], 1)
    self.assertEqual(visitor.counts[fn_node.body[0].test], 1)
    self.assertEqual(visitor.counts[fn_node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[fn_node.body[0].body[1]], 1)
    # The dead return after 'break' is unreachable walking forward.
    self.assertTrue(fn_node.body[0].body[2] not in visitor.counts)
    self.assertEqual(visitor.counts[fn_node.body[1]], 1)
  def test_basic_coverage_reverse(self):
    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2
    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_reverse()
    fn_node = node.body[0]
    self.assertEqual(visitor.counts[fn_node.args], 1)
    self.assertEqual(visitor.counts[fn_node.body[0].test], 1)
    self.assertEqual(visitor.counts[fn_node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[fn_node.body[0].body[1]], 1)
    # Walking backwards the return node is an exit, so it IS visited once.
    # BUG FIX: this previously used assertTrue(value, 1), where 1 was
    # silently treated as the failure message, so the check always passed.
    self.assertEqual(visitor.counts[fn_node.body[0].body[2]], 1)
    self.assertEqual(visitor.counts[fn_node.body[1]], 1)
class AstToCfgTest(test.TestCase):
def _build_cfg(self, fn):
node, _ = parser.parse_entity(fn)
cfgs = cfg.build(node)
return cfgs
def _repr_set(self, node_set):
return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
return frozenset()
elif isinstance(elements, str):
return frozenset((elements,))
else:
return frozenset(elements)
  def assertGraphMatches(self, graph, edges):
    """Tests whether the CFG contains the specified edges.

    Each edge is a (prev, node_repr, next_) triple; prev/next_ may be None,
    a single repr string, or a tuple of reprs, and must equal the matching
    node's predecessor/successor repr sets exactly.
    """
    for prev, node_repr, next_ in edges:
      matched = False
      # Find a node whose repr matches AND whose neighbor sets agree.
      for cfg_node in graph.index.values():
        if repr(cfg_node) == node_repr:
          if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
              self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
            matched = True
            break
      if not matched:
        self.fail(
            'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
  def assertStatementEdges(self, graph, edges):
    """Tests whether the CFG contains the specified statement edges.

    Statement nodes are keyed as 'ClassName:lineno' (e.g. 'While:2'); the
    expected prev/next reprs must equal graph.stmt_prev/stmt_next exactly.
    """
    for prev_node_reprs, node_repr, next_node_reprs in edges:
      matched = False
      partial_matches = []
      # Both statement maps must index the same set of AST nodes.
      self.assertSetEqual(
          frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
      for stmt_ast_node in graph.stmt_next:
        ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
                              stmt_ast_node.lineno)
        if ast_repr == node_repr:
          actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
          actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
          # Keep near-misses so the failure message shows what WAS found.
          partial_matches.append((actual_prev, node_repr, actual_next))
          if (self._as_set(prev_node_reprs) == actual_prev and
              self._as_set(next_node_reprs) == actual_next):
            matched = True
            break
      if not matched:
        self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
  # --- Linear code: edge triples are (prev nodes, node, next nodes). ---
  def test_straightline(self):
    def test_fn(a):
      a += 1
      a = 2
      a = 3
      return
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'a += 1'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', 'return'),
            ('a = 3', 'return', None),
        ),
    )
  def test_straightline_no_return(self):
    def test_fn(a, b):
      a = b + 1
      a += max(a)
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a, b', 'a = b + 1'),
            ('a = b + 1', 'a += max(a)', None),
        ),
    )
  def test_unreachable_code(self):
    def test_fn(a):
      return
      a += 1  # pylint:disable=unreachable
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'return'),
            ('a', 'return', None),
            (None, 'a += 1', None),
        ),
    )
  # --- Conditionals: branch fan-out, nesting, and early return. ---
  def test_if_straightline(self):
    def test_fn(a):
      if a > 0:
        a = 1
      else:
        a += -1
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('(a > 0)', 'a = 1', None),
            ('(a > 0)', 'a += -1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_branch_nested(self):
    def test_fn(a):
      if a > 0:
        if a > 1:
          a = 1
        else:
          a = 2
      else:
        if a > 2:
          a = 3
        else:
          a = 4
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
            ('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', None),
            ('(a > 1)', 'a = 2', None),
            ('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
            ('(a > 2)', 'a = 3', None),
            ('(a > 2)', 'a = 4', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'If:2', None),
            ('(a > 0)', 'If:3', None),
            ('(a > 0)', 'If:8', None),
        ),
    )
  def test_branch_straightline_semi(self):
    def test_fn(a):
      if a > 0:
        a = 1
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', '(a > 0)'),
            ('a', '(a > 0)', 'a = 1'),
            ('(a > 0)', 'a = 1', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  def test_branch_return(self):
    def test_fn(a):
      if a > 0:
        return
      else:
        a = 1
      a = 2
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', ('return', 'a = 1')),
            ('(a > 0)', 'a = 1', 'a = 2'),
            ('(a > 0)', 'return', None),
            ('a = 1', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', 'a = 2'),),
    )
  def test_branch_return_minimal(self):
    def test_fn(a):
      if a > 0:
        return
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            ('a', '(a > 0)', 'return'),
            ('(a > 0)', 'return', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'If:2', None),),
    )
  # --- While loops, incl. else/break/continue/return and nesting. ---
  def test_while_straightline(self):
    def test_fn(a):
      while a > 0:
        a = 1
      a = 2
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 2'),),
    )
  def test_while_else_straightline(self):
    def test_fn(a):
      while a > 0:
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
            ('(a > 0)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'While:2', 'a = 3'),),
    )
  def test_while_else_continue(self):
    def test_fn(a):
      while a > 0:
        if a > 1:
          continue
        else:
          a = 0
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', '(a > 0)'),
            ('a = 0', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
        ),
    )
  def test_while_else_break(self):
    def test_fn(a):
      while a > 0:
        if a > 1:
          break
        a = 1
      else:
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )
  def test_while_else_return(self):
    def test_fn(a):
      while a > 0:
        if a > 1:
          return
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
            ('(a > 0)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', '(a > 0)'),
            ('(a > 0)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'If:3', 'a = 1'),
        ),
    )
  def test_while_nested_straightline(self):
    def test_fn(a):
      while a > 0:
        while a > 1:
          a = 1
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
            ('(a > 1)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
        ),
    )
  def test_while_nested_continue(self):
    def test_fn(a):
      while a > 0:
        while a > 1:
          if a > 3:
            continue
          a = 1
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
            (('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
            ('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
            ('(a > 3)', 'continue', '(a > 1)'),
            ('(a > 3)', 'a = 1', '(a > 1)'),
            ('(a > 1)', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
        ),
    )
  def test_while_nested_break(self):
    def test_fn(a):
      while a > 0:
        while a > 1:
          if a > 2:
            break
          a = 1
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(graph, (
        (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
        (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
        ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
        ('(a > 2)', 'break', 'a = 2'),
        ('(a > 2)', 'a = 1', '(a > 1)'),
        (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
        ('(a > 0)', 'a = 3', None),
    ))
    self.assertStatementEdges(
        graph,
        (
            ('a', 'While:2', 'a = 3'),
            ('(a > 0)', 'While:3', 'a = 2'),
            ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
        ),
    )
  # --- For loops: the iterator expr is the loop's test node in the CFG. ---
  def test_for_straightline(self):
    def test_fn(a):
      for a in range(0, a):
        a = 1
      a = 2
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 2'),),
    )
  def test_for_else_straightline(self):
    def test_fn(a):
      for a in range(0, a):
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
            ('range(0, a)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (('a', 'For:2', 'a = 3'),),
    )
  def test_for_else_continue(self):
    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          continue
        else:
          a = 0
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
            ('(a > 1)', 'continue', 'range(0, a)'),
            ('(a > 1)', 'a = 0', 'a = 1'),
            ('a = 0', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
        ),
    )
  def test_for_else_break(self):
    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          break
        a = 1
      else:
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('break', 'a = 1')),
            ('(a > 1)', 'break', 'a = 3'),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            (('break', 'a = 2'), 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
        ),
    )
  def test_for_else_return(self):
    def test_fn(a):
      for a in range(0, a):
        if a > 1:
          return
        a = 1
      else:  # pylint:disable=useless-else-on-loop
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
            ('range(0, a)', '(a > 1)', ('return', 'a = 1')),
            ('(a > 1)', 'return', None),
            ('(a > 1)', 'a = 1', 'range(0, a)'),
            ('range(0, a)', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'If:3', 'a = 1'),
        ),
    )
  def test_for_nested_straightline(self):
    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          b += 1
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
            ('range(1, a)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
        ),
    )
  def test_for_nested_continue(self):
    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          if a > 3:
            continue
          b += 1
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
             ('(a > 3)', 'a = 2')),
            ('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
            ('(a > 3)', 'continue', 'range(1, a)'),
            ('(a > 3)', 'b += 1', 'range(1, a)'),
            ('range(1, a)', 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
        ),
    )
  def test_for_nested_break(self):
    def test_fn(a):
      for a in range(0, a):
        for b in range(1, a):
          if a > 2:
            break
          b += 1
        a = 2
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
            (('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
            ('range(1, a)', '(a > 2)', ('break', 'b += 1')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', 'b += 1', 'range(1, a)'),
            (('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
            ('range(0, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('a', 'For:2', 'a = 3'),
            ('range(0, a)', 'For:3', 'a = 2'),
            ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
        ),
    )
  # --- Kitchen-sink test: nested loops with break/continue/return mixed. ---
  def test_complex(self):
    def test_fn(a):
      b = 0
      while a > 0:
        for b in range(0, a):
          if a > 2:
            break
          if a > 3:
            if a > 4:
              continue
            else:
              max(a)
              break
          b += 1
        else:  # for b in range(0, a):
          return a
        a = 2
      for a in range(1, a):
        return b
      a = 3
    graph, = self._build_cfg(test_fn).values()
    self.assertGraphMatches(
        graph,
        (
            (('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
            (
                ('(a > 0)', 'continue', 'b += 1'),
                'range(0, a)',
                ('(a > 2)', 'return a'),
            ),
            ('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
            ('(a > 2)', 'break', 'a = 2'),
            ('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
            ('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
            ('(a > 4)', 'max(a)', 'break'),
            ('max(a)', 'break', 'a = 2'),
            ('(a > 4)', 'continue', 'range(0, a)'),
            ('(a > 3)', 'b += 1', 'range(0, a)'),
            ('range(0, a)', 'return a', None),
            ('break', 'a = 2', '(a > 0)'),
            ('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
            ('range(1, a)', 'return b', None),
            ('range(1, a)', 'a = 3', None),
        ),
    )
    self.assertStatementEdges(
        graph,
        (
            ('b = 0', 'While:3', 'range(1, a)'),
            ('(a > 0)', 'For:4', 'a = 2'),
            ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
            ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
            ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
            ('(a > 0)', 'For:17', 'a = 3'),
        ),
    )
def test_finally_straightline(self):
  """CFG for try/finally with no exceptional exits: pure straight line."""
  def test_fn(a):
    try:
      a += 1
    finally:
      a = 2
    a = 3
  graph, = self._build_cfg(test_fn).values()
  # The finally body simply links the try body to the following statement.
  self.assertGraphMatches(
      graph,
      (
          ('a', 'a += 1', 'a = 2'),
          ('a += 1', 'a = 2', 'a = 3'),
          ('a = 2', 'a = 3', None),
      ),
  )
def test_return_finally(self):
  """CFG when a `return` inside try still routes through the finally body."""
  def test_fn(a):
    try:
      return a
    finally:
      a = 1
    a = 2
  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'return a', 'a = 1'),
          ('return a', 'a = 1', None),
          # 'a = 2' is unreachable (the try always returns), hence no edges.
          (None, 'a = 2', None),
      ),
  )
def test_break_finally(self):
  """CFG when `break` inside try routes through the finally body before exiting."""
  def test_fn(a):
    while a > 0:
      try:
        break
      finally:
        a = 1
  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', '(a > 0)', 'break'),
          ('(a > 0)', 'break', 'a = 1'),
          # The finally body is the function's exit; no back-edge to the loop.
          ('break', 'a = 1', None),
      ),
  )
def test_continue_finally(self):
  """CFG when `continue` inside try routes through finally back to the test."""
  def test_fn(a):
    while a > 0:
      try:
        continue
      finally:
        a = 1
  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          # The loop test is re-entered both from function entry and from
          # the finally body executed on `continue`.
          (('a', 'a = 1'), '(a > 0)', 'continue'),
          ('(a > 0)', 'continue', 'a = 1'),
          ('continue', 'a = 1', '(a > 0)'),
      ),
  )
def test_with_straightline(self):
  """CFG for a `with` block: context expression then body, straight line."""
  def test_fn(a):
    with max(a) as b:
      a = 0
      return b
  graph, = self._build_cfg(test_fn).values()
  self.assertGraphMatches(
      graph,
      (
          ('a', 'max(a)', 'a = 0'),
          ('max(a)', 'a = 0', 'return b'),
          ('a = 0', 'return b', None),
      ),
  )
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModelLoader class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from absl.testing import parameterized
from tensorflow.python.client import session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import utils
from tensorflow.python.training import saver as tf_saver
def _get_export_dir(label):
  """Return a SavedModel export path named `label` under the test temp dir."""
  temp_root = test.get_temp_dir()
  return os.path.join(temp_root, label)
def _tensor_name(name):
  """Map a variable name to the tensor name to fetch for its value.

  Resource variables expose their value through a read op rather than the
  variable endpoint itself.
  """
  if not variable_scope.resource_variables_enabled():
    return name + ":0"
  return name + "/Read/ReadVariableOp:0"
# Export directories (under the test temp dir) shared by the test cases below.
SIMPLE_ADD_SAVED_MODEL = _get_export_dir("simple_add_saved_model")
SAVED_MODEL_WITH_MAIN_OP = _get_export_dir("saved_model_with_main_op")
def build_graph_helper():
  """Build the shared test graph computing z = x + y.

  Returns:
    A tuple of (graph, signature_def_map, y_variable) where the map holds
    the "foo" and "bar" signatures over the x/y/z tensors.
  """
  graph = ops.Graph()
  with graph.as_default():
    x = variables.VariableV1(5, name="x")
    y = variables.VariableV1(11, name="y")
    z = x + y

    foo_sig_def = signature_def_utils.build_signature_def(
        {"foo_input": utils.build_tensor_info(x)},
        {"foo_output": utils.build_tensor_info(z)})
    bar_sig_def = signature_def_utils.build_signature_def(
        {"bar_x": utils.build_tensor_info(x),
         "bar_y": utils.build_tensor_info(y)},
        {"bar_z": utils.build_tensor_info(z)})
  signature_map = {"foo": foo_sig_def, "bar": bar_sig_def}
  return graph, signature_map, y
@parameterized.parameters((saved_model_builder.SavedModelBuilder,),
                          (saved_model_builder._SavedModelBuilder,))
class SavedModelLoaderTest(test.TestCase, parameterized.TestCase):
  """Tests for SavedModelLoader, parameterized over both builder classes."""

  def export_simple_graph(self, builder_cls):
    """Export the x + y graph (no init/main op) to SIMPLE_ADD_SAVED_MODEL."""
    g, sig_def_map, _ = build_graph_helper()
    with session.Session(graph=g) as sess:
      self.evaluate(variables.global_variables_initializer())
      builder = builder_cls(SIMPLE_ADD_SAVED_MODEL)
      builder.add_meta_graph_and_variables(sess, ["foo_graph"], sig_def_map)
      builder.save()

  def export_graph_with_main_op(self, builder_cls):
    """Export the graph plus an op assigning y = 7, run at load time."""
    g, sig_def_map, y = build_graph_helper()
    with session.Session(graph=g) as sess:
      self.evaluate(variables.global_variables_initializer())
      assign_op = control_flow_ops.group(state_ops.assign(y, 7))
      builder = builder_cls(SAVED_MODEL_WITH_MAIN_OP)
      # The two builder classes expose the load-time op under different
      # keyword names (init_op vs. main_op).
      if builder_cls == saved_model_builder._SavedModelBuilder:
        builder.add_meta_graph_and_variables(
            sess, ["foo_graph"], sig_def_map, init_op=assign_op)
      else:
        builder.add_meta_graph_and_variables(
            sess, ["foo_graph"], sig_def_map, main_op=assign_op)
      builder.save()

  def tearDown(self):
    # Remove exported models so each test starts from a clean temp dir.
    super(SavedModelLoaderTest, self).tearDown()
    shutil.rmtree(test.get_temp_dir(), ignore_errors=True)

  def test_load_function(self, builder_cls):
    """load() restores variables and runs the main op when present."""
    # Force test to run in graph mode.
    # The SaveModelLoader.load method is a v1-only API that requires a session
    # to work.
    with ops.Graph().as_default():
      self.export_simple_graph(builder_cls)
      loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo_graph"])
        self.assertEqual(5, sess.run(_tensor_name("x")))
        self.assertEqual(11, sess.run(_tensor_name("y")))

      self.export_graph_with_main_op(builder_cls)
      loader2 = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
      with self.session(graph=ops.Graph()) as sess:
        loader2.load(sess, ["foo_graph"])
        self.assertEqual(5, sess.run(_tensor_name("x")))
        # y was reassigned by the exported main/init op.
        self.assertEqual(7, sess.run(_tensor_name("y")))

  def test_load_graph(self, builder_cls):
    """load_graph() imports the graph structure without initializing it."""
    self.export_simple_graph(builder_cls)
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    graph = ops.Graph()
    loader.load_graph(graph, ["foo_graph"])

    x = graph.get_tensor_by_name(_tensor_name("x"))
    y = graph.get_tensor_by_name(_tensor_name("y"))
    # z was never given an explicit name, so it must not be addressable.
    with self.assertRaises(KeyError):
      graph.get_tensor_by_name(_tensor_name("z"))

    with graph.as_default(), self.session():
      # Check that x and y are not initialized
      with self.assertRaises(errors.FailedPreconditionError):
        self.evaluate(x)
      with self.assertRaises(errors.FailedPreconditionError):
        self.evaluate(y)

  def test_load_with_import_scope(self, builder_cls):
    """Loading under an import scope prefixes all graph element names."""
    # Force test to run in graph mode.
    # The SaveModelLoader.restore_variables ahd SaveModelLoader.run_init_ops
    # methods are v1-only APIs that require a session to work.
    with ops.Graph().as_default():
      self.export_graph_with_main_op(builder_cls)
      loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
      with self.session(graph=ops.Graph()) as sess:
        saver, _ = loader.load_graph(
            sess.graph, ["foo_graph"], import_scope="baz")

        # The default saver should not work when the import scope is set.
        with self.assertRaises(errors.NotFoundError):
          loader.restore_variables(sess, tf_saver.Saver())

        loader.restore_variables(sess, saver)

        if builder_cls == saved_model_builder._SavedModelBuilder:
          with self.assertRaises(errors.NotFoundError):
            loader.run_init_ops(sess, ["foo_graph"])
          loader.run_init_ops(sess, ["foo_graph"], import_scope="baz")
        else:
          loader.run_init_ops(sess, ["foo_graph"])

        self.assertEqual(5, sess.run(_tensor_name("baz/x")))
        self.assertEqual(7, sess.run(_tensor_name("baz/y")))

      # Test combined load function.
      loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo_graph"], import_scope="baa")
        self.assertEqual(5, sess.run(_tensor_name("baa/x")))
        self.assertEqual(7, sess.run(_tensor_name("baa/y")))

  def test_restore_variables(self, builder_cls):
    """restore_variables() requires a saver and overwrites existing values."""
    # Force test to run in graph mode.
    # The SaveModelLoader.restore_variables method is a v1-only API requiring a
    # session to work.
    with ops.Graph().as_default():
      self.export_graph_with_main_op(builder_cls)
      loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
      with self.session() as sess:
        x = variables.VariableV1(0, name="x")
        y = variables.VariableV1(0, name="y")
        z = x * y

        self.evaluate(variables.global_variables_initializer())

        # There are variables to restore, so a saver must be created.
        with self.assertRaises(ValueError):
          loader.restore_variables(sess, None)

        loader.restore_variables(sess, tf_saver.Saver())
        # Restored values (5 * 11), not the locally initialized zeros.
        self.assertEqual(55, self.evaluate(z))

  def test_run_init_op(self, builder_cls):
    """run_init_ops() applies the exported main/init op (sets y = 7)."""
    # Force test to run in graph mode.
    # The SaveModelLoader.restore_variables ahd SaveModelLoader.run_init_ops
    # methods are v1-only APIs that require a session to work.
    with ops.Graph().as_default():
      self.export_graph_with_main_op(builder_cls)
      loader = loader_impl.SavedModelLoader(SAVED_MODEL_WITH_MAIN_OP)
      graph = ops.Graph()
      saver, _ = loader.load_graph(graph, ["foo_graph"])
      with self.session(graph=graph) as sess:
        loader.restore_variables(sess, saver)
        self.assertEqual(5, sess.run(_tensor_name("x")))
        self.assertEqual(11, sess.run(_tensor_name("y")))

        loader.run_init_ops(sess, ["foo_graph"])
        self.assertEqual(5, sess.run(_tensor_name("x")))
        self.assertEqual(7, sess.run(_tensor_name("y")))

  def test_parse_saved_model(self, builder_cls):
    """The exported MetaGraphDef can be fetched by tags and has both sigs."""
    self.export_simple_graph(builder_cls)
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    meta_graph = loader.get_meta_graph_def_from_tags(["foo_graph"])
    self.assertIsNotNone(meta_graph)
    self.assertIn("foo", meta_graph.signature_def)
    self.assertIn("bar", meta_graph.signature_def)

  def test_load_invalid_meta_graph(self, builder_cls):
    """Unknown or empty tag sets raise rather than loading anything."""
    self.export_simple_graph(builder_cls)
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    with self.assertRaises(RuntimeError):
      loader.get_meta_graph_def_from_tags([])
    with self.assertRaises(RuntimeError):
      loader.get_meta_graph_def_from_tags([""])
    with self.assertRaises(RuntimeError):
      loader.get_meta_graph_def_from_tags(["not_a_graph"])

  def test_load_saved_model_with_no_variables(self, builder_cls):
    """Test that SavedModel runs saver when there appear to be no variables.

    When no variables are detected, this may mean that the variables were saved
    to different collections, or the collections weren't saved to the
    SavedModel. If the SavedModel MetaGraphDef contains a saver, it should still
    run in either of these cases.

    Args:
      builder_cls: SavedModelBuilder or _SavedModelBuilder class
    """
    # Force test to run in graph mode.
    # The SaveModelBuilder.add_meta_graph_and_variables and SaveModelLoader.load
    # methods are v1-only APIs that require a session to work.
    with ops.Graph().as_default():
      path = _get_export_dir("no_variable_saved_model")
      with session.Session(graph=ops.Graph()) as sess:
        # Variables kept out of GLOBAL_VARIABLES so the loader sees none.
        x = variables.VariableV1(
            5, name="x", collections=["not_global_variable"])
        y = variables.VariableV1(
            11, name="y", collections=["not_global_variable"])
        self.assertFalse(variables._all_saveable_objects())
        z = x + y
        self.evaluate(variables.variables_initializer([x, y]))

        foo_sig_def = signature_def_utils.build_signature_def(
            {"foo_input": utils.build_tensor_info(x)},
            {"foo_output": utils.build_tensor_info(z)})

        builder = saved_model_builder.SavedModelBuilder(path)
        builder.add_meta_graph_and_variables(
            sess, ["foo_graph"], {"foo": foo_sig_def},
            saver=tf_saver.Saver([x, y]))
        builder.save()

      loader = loader_impl.SavedModelLoader(path)
      with self.session(graph=ops.Graph()) as sess:
        saver, _ = loader.load_graph(sess.graph, ["foo_graph"])
        self.assertFalse(variables._all_saveable_objects())
        # A saver was exported, so one must still be reconstructed.
        self.assertIsNotNone(saver)

      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo_graph"])
        self.assertEqual(5, sess.run(_tensor_name("x")))
        self.assertEqual(11, sess.run(_tensor_name("y")))

  def test_load_saved_model_graph_with_return_elements(self, builder_cls):
    """Ensure that the correct elements are returned."""
    self.export_simple_graph(builder_cls)
    loader = loader_impl.SavedModelLoader(SIMPLE_ADD_SAVED_MODEL)
    graph = ops.Graph()
    _, ret = loader.load_graph(graph, ["foo_graph"],
                               return_elements=["y:0", "x:0"])

    # Elements come back in the order they were requested.
    self.assertEqual(graph.get_tensor_by_name("y:0"), ret[0])
    self.assertEqual(graph.get_tensor_by_name("x:0"), ret[1])

    with self.assertRaisesRegex(ValueError, "not found in graph"):
      loader.load_graph(graph, ["foo_graph"], return_elements=["z:0"])
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| |
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import re
from mic import msger
from mic.utils import misc, fs_related, runner
from mic.utils.errors import CreatorError, MountError
from mic.utils.partitionedfs import PartitionedMount
from mic.imager.livecd import LiveCDImageCreator
class LiveUSBImageCreator(LiveCDImageCreator):
    """Image creator producing a bootable live USB disk image.

    Reuses the live CD creator's rootfs/ISO staging, then repacks the
    staged content into a partitioned, syslinux-bootable `.usbimg` file.
    (Python 2 code — note the `L` long literals below.)
    """

    def __init__(self, *args):
        LiveCDImageCreator.__init__(self, *args)

        # Partitioning/loop-device tools needed to build the USB image.
        self._dep_checks.extend(["kpartx", "parted"])

        # remove dependency of genisoimage in parent class
        if "genisoimage" in self._dep_checks:
            self._dep_checks.remove("genisoimage")

    def _create_usbimg(self, isodir):
        """Build the partitioned USB image from the staged ISO tree `isodir`.

        Creates a sparse loopback disk, copies the LiveOS payload and
        syslinux boot files into it, rewrites the boot config, optionally
        creates overlay/swap/home files, installs the boot loader, and
        finally writes the MBR. Raises CreatorError on any tool failure.
        """
        # Hard-coded defaults; only overlaysizemb differs from zero here.
        overlaysizemb = 64 #default
        #skipcompress = self.skip_compression?
        fstype = "vfat"
        homesizemb=0
        swapsizemb=0
        homefile="home.img"
        plussize=128
        kernelargs=None

        # VFAT cannot hold files of 2 GB or larger, so cap each overlay.
        if fstype == 'vfat':
            if overlaysizemb > 2047:
                raise CreatorError("Can't have an overlay of 2048MB or "
                                   "greater on VFAT")
            if homesizemb > 2047:
                raise CreatorError("Can't have an home overlay of 2048MB or "
                                   "greater on VFAT")
            if swapsizemb > 2047:
                raise CreatorError("Can't have an swap overlay of 2048MB or "
                                   "greater on VFAT")

        livesize = misc.get_file_size(isodir + "/LiveOS")

        # Total image size: payload plus all optional files plus headroom,
        # in bytes (sizes above are in MB).
        usbimgsize = (overlaysizemb + \
                      homesizemb + \
                      swapsizemb + \
                      livesize + \
                      plussize) * 1024L * 1024L
        disk = fs_related.SparseLoopbackDisk("%s/%s.usbimg" \
                                             % (self._outdir, self.name),
                                             usbimgsize)
        usbmnt = self._mkdtemp("usb-mnt")
        usbloop = PartitionedMount(usbmnt)
        # '/dev/sdb' is only a label for the mount helper, not the real device.
        usbloop.add_disk('/dev/sdb', disk)
        usbloop.add_partition(usbimgsize/1024/1024,
                              "/dev/sdb",
                              "/",
                              fstype,
                              boot=True)
        usbloop.mount()

        try:
            fs_related.makedirs(usbmnt + "/LiveOS")

            # Reuse the squashfs from the ISO staging dir if present,
            # otherwise compress the rootfs directly into the image.
            if os.path.exists(isodir + "/LiveOS/squashfs.img"):
                shutil.copyfile(isodir + "/LiveOS/squashfs.img",
                                usbmnt + "/LiveOS/squashfs.img")
            else:
                fs_related.mksquashfs(os.path.dirname(self._image),
                                      usbmnt + "/LiveOS/squashfs.img")

            if os.path.exists(isodir + "/LiveOS/osmin.img"):
                shutil.copyfile(isodir + "/LiveOS/osmin.img",
                                usbmnt + "/LiveOS/osmin.img")

            # Build the root= device label and the overlay file suffix.
            # VFAT filesystems only carry a short 8-hex-digit UUID.
            if fstype == "vfat" or fstype == "msdos":
                uuid = usbloop.partitions[0]['mount'].uuid
                label = usbloop.partitions[0]['mount'].fslabel
                usblabel = "UUID=%s-%s" % (uuid[0:4], uuid[4:8])
                overlaysuffix = "-%s-%s-%s" % (label, uuid[0:4], uuid[4:8])
            else:
                diskmount = usbloop.partitions[0]['mount']
                usblabel = "UUID=%s" % diskmount.uuid
                overlaysuffix = "-%s-%s" % (diskmount.fslabel, diskmount.uuid)

            # Syslinux replaces isolinux on USB media: copy the config tree
            # and the loader binaries from the host's syslinux install.
            args = ['cp', "-Rf", isodir + "/isolinux", usbmnt + "/syslinux"]
            rc = runner.show(args)
            if rc:
                raise CreatorError("Can't copy isolinux directory %s" \
                                   % (isodir + "/isolinux/*"))

            if os.path.isfile("/usr/share/syslinux/isolinux.bin"):
                syslinux_path = "/usr/share/syslinux"
            elif os.path.isfile("/usr/lib/syslinux/isolinux.bin"):
                syslinux_path = "/usr/lib/syslinux"
            else:
                raise CreatorError("syslinux not installed : "
                                   "cannot find syslinux installation path")

            for f in ("isolinux.bin", "vesamenu.c32"):
                path = os.path.join(syslinux_path, f)
                if os.path.isfile(path):
                    args = ['cp', path, usbmnt + "/syslinux/"]
                    rc = runner.show(args)
                    if rc:
                        raise CreatorError("Can't copy syslinux file " + path)
                else:
                    raise CreatorError("syslinux not installed: "
                                       "syslinux file %s not found" % path)

            # Rewrite the boot config for USB: point root at the new
            # filesystem UUID and the actual fstype.
            fd = open(isodir + "/isolinux/isolinux.cfg", "r")
            text = fd.read()
            fd.close()
            pattern = re.compile('CDLABEL=[^ ]*')
            text = pattern.sub(usblabel, text)
            pattern = re.compile('rootfstype=[^ ]*')
            text = pattern.sub("rootfstype=" + fstype, text)
            if kernelargs:
                text = text.replace("rd.live.image", "rd.live.image " + kernelargs)

            if overlaysizemb > 0:
                msger.info("Initializing persistent overlay file")
                overfile = "overlay" + overlaysuffix
                # On VFAT the overlay must be fully allocated; elsewhere a
                # sparse file (seek past the end) is enough.
                if fstype == "vfat":
                    args = ['dd',
                            "if=/dev/zero",
                            "of=" + usbmnt + "/LiveOS/" + overfile,
                            "count=%d" % overlaysizemb,
                            "bs=1M"]
                else:
                    args = ['dd',
                            "if=/dev/null",
                            "of=" + usbmnt + "/LiveOS/" + overfile,
                            "count=1",
                            "bs=1M",
                            "seek=%d" % overlaysizemb]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't create overlay file")
                text = text.replace("rd.live.image", "rd.live.image rd.live.overlay=" + usblabel)
                text = text.replace(" ro ", " rw ")

            if swapsizemb > 0:
                msger.info("Initializing swap file")
                swapfile = usbmnt + "/LiveOS/" + "swap.img"
                args = ['dd',
                        "if=/dev/zero",
                        "of=" + swapfile,
                        "count=%d" % swapsizemb,
                        "bs=1M"]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't create swap file")
                args = ["mkswap", "-f", swapfile]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't mkswap on swap file")

            if homesizemb > 0:
                msger.info("Initializing persistent /home")
                # NOTE: rebinds `homefile` from a file name to a full path.
                homefile = usbmnt + "/LiveOS/" + homefile
                if fstype == "vfat":
                    args = ['dd',
                            "if=/dev/zero",
                            "of=" + homefile,
                            "count=%d" % homesizemb,
                            "bs=1M"]
                else:
                    args = ['dd',
                            "if=/dev/null",
                            "of=" + homefile,
                            "count=1",
                            "bs=1M",
                            "seek=%d" % homesizemb]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't create home file")

                mkfscmd = fs_related.find_binary_path("/sbin/mkfs." + fstype)
                if fstype == "ext2" or fstype == "ext3":
                    args = [mkfscmd, "-F", "-j", homefile]
                else:
                    args = [mkfscmd, homefile]
                rc = runner.show(args)
                if rc:
                    raise CreatorError("Can't mke2fs home file")
                if fstype == "ext2" or fstype == "ext3":
                    # Disable fsck intervals and enable xattr/acl for /home.
                    tune2fs = fs_related.find_binary_path("tune2fs")
                    args = [tune2fs,
                            "-c0",
                            "-i0",
                            "-ouser_xattr,acl",
                            homefile]
                    rc = runner.show(args)
                    if rc:
                        raise CreatorError("Can't tune2fs home file")

            # Install the boot loader appropriate for the filesystem.
            if fstype == "vfat" or fstype == "msdos":
                syslinuxcmd = fs_related.find_binary_path("syslinux")
                syslinuxcfg = usbmnt + "/syslinux/syslinux.cfg"
                args = [syslinuxcmd,
                        "-d",
                        "syslinux",
                        usbloop.partitions[0]["device"]]
            elif fstype == "ext2" or fstype == "ext3":
                extlinuxcmd = fs_related.find_binary_path("extlinux")
                syslinuxcfg = usbmnt + "/syslinux/extlinux.conf"
                args = [extlinuxcmd,
                        "-i",
                        usbmnt + "/syslinux"]
            else:
                raise CreatorError("Invalid file system type: %s" % (fstype))

            # Replace the isolinux config with the rewritten syslinux one.
            os.unlink(usbmnt + "/syslinux/isolinux.cfg")
            fd = open(syslinuxcfg, "w")
            fd.write(text)
            fd.close()
            rc = runner.show(args)
            if rc:
                raise CreatorError("Can't install boot loader.")

        finally:
            usbloop.unmount()
            usbloop.cleanup()

        # Need to do this after image is unmounted and device mapper is closed
        msger.info("set MBR")
        mbrfile = "/usr/lib/syslinux/mbr.bin"
        if not os.path.exists(mbrfile):
            mbrfile = "/usr/share/syslinux/mbr.bin"
            if not os.path.exists(mbrfile):
                raise CreatorError("mbr.bin file didn't exist.")
        mbrsize = os.path.getsize(mbrfile)
        outimg = "%s/%s.usbimg" % (self._outdir, self.name)

        # Write the MBR boot code into the first bytes of the image.
        args = ['dd',
                "if=" + mbrfile,
                "of=" + outimg,
                "seek=0",
                "conv=notrunc",
                "bs=1",
                "count=%d" % (mbrsize)]
        rc = runner.show(args)
        if rc:
            raise CreatorError("Can't set MBR.")

    def _stage_final_image(self):
        """Assemble the final .usbimg (optionally packed) from the rootfs.

        Stages LiveOS content into the ISO dir, builds the USB image, and
        always cleans up the staging directory.
        """
        try:
            isodir = self._get_isodir()
            fs_related.makedirs(isodir + "/LiveOS")

            minimal_size = self._resparse()

            if not self.skip_minimize:
                fs_related.create_image_minimizer(isodir + "/LiveOS/osmin.img",
                                                  self._image,
                                                  minimal_size)

            if self.skip_compression:
                shutil.move(self._image,
                            isodir + "/LiveOS/ext3fs.img")
            else:
                # Squash the rootfs; mksquashfs expects a LiveOS/ layout.
                fs_related.makedirs(os.path.join(
                                        os.path.dirname(self._image),
                                        "LiveOS"))
                shutil.move(self._image,
                            os.path.join(os.path.dirname(self._image),
                                         "LiveOS", "ext3fs.img"))
                fs_related.mksquashfs(os.path.dirname(self._image),
                                      isodir + "/LiveOS/squashfs.img")

            self._create_usbimg(isodir)

            if self.pack_to:
                usbimg = os.path.join(self._outdir, self.name + ".usbimg")
                packimg = os.path.join(self._outdir, self.pack_to)
                misc.packing(packimg, usbimg)
                os.unlink(usbimg)

        finally:
            shutil.rmtree(isodir, ignore_errors = True)
            self._set_isodir(None)
| |
import argparse
import time
from tornado import ioloop, httpclient
from elasticsearch import Elasticsearch
from datetime import datetime, timedelta
import subprocess
import logging
import json
import copy
import uuid
# Unique per-run marker appended to request URLs so this run's documents can
# be told apart in Elasticsearch.
p_uid = uuid.uuid4().hex
num_reqs = 0  # global counter of requests still in flight

# Stand-alone stream logger; propagation disabled so messages are not
# duplicated through the root logger.
logger = logging.getLogger("logger")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '[%(asctime)s] [%(process)s] [%(levelname)s] [%(funcName)s - %(lineno)d] %(message)s')
handler.setFormatter(formatter)
logger.propagate = False
logger.addHandler(handler)

# Client for the local Elasticsearch instance the apm-server writes into.
es = Elasticsearch(['localhost:9200'])
def handle(r):
    """Tornado response callback: count down outstanding requests.

    Decrements the global `num_reqs` on each HTTP 200 and stops the I/O
    loop when it reaches zero. On any non-200 response, zeroes the counter
    (so no later callback tries to stop the loop again), stops the loop and
    logs the failure.
    """
    global num_reqs
    try:
        assert r.code == 200
        num_reqs -= 1
        if num_reqs == 0:
            logger.info("Stopping tornado I/O loop")
            ioloop.IOLoop.instance().stop()
    except AssertionError:
        # BUG FIX: this line previously read `num_reqs == 0` — a no-op
        # comparison instead of the intended assignment resetting the counter.
        num_reqs = 0
        ioloop.IOLoop.instance().stop()
        logger.error(
            "Bad response, aborting: {} - {} ({})".format(r.code, r.error, r.request_time))
def run_process(lang):
    """Start the app under test for `lang` and return its Popen handle.

    Args:
        lang: 'node' (express app) or 'python' (flask app under gunicorn).

    Returns:
        The subprocess.Popen object for the started server.

    Raises:
        ValueError: for an unsupported language. (Previously an unknown
        `lang` fell through to an unbound `cmd` and raised NameError.)
    """
    if lang == 'node':
        cmd = ['node', '../../nodejs/express/app.js']
    elif lang == 'python':
        cmd = ['gunicorn', '-w', '4', '-b', '0.0.0.0:5000', 'app:app']
    else:
        raise ValueError("unsupported language: {}".format(lang))
    p = subprocess.Popen(cmd)
    return p
def send_batch(nreqs, port=None):
    """Fire `nreqs` async GET requests and block until `handle` stops the loop.

    With `port` set (a string), alternates the /foo and /bar endpoints on
    that port, tagging each URL with the run marker `p_uid`. With
    port=None, runs in mixed mode: every iteration requests both the
    python (5000) and node (8081) apps. The global `num_reqs` is
    incremented once per request issued so `handle` can detect completion.
    """
    global num_reqs
    global p_uid
    mix_lang = port is None
    http_client = httpclient.AsyncHTTPClient(max_clients=4)
    for _ in range(nreqs):
        num_reqs += 1
        if mix_lang:
            # Two requests per iteration — one to each app — hence the
            # second counter increment.
            num_reqs += 1
            url_python = "http://localhost:5000/foo"
            url_node = "http://localhost:8081/bar"
            for url in [url_node, url_python]:
                http_client.fetch(url, handle, method='GET',
                                  connect_timeout=90, request_timeout=120)
        else:
            # Alternate endpoints based on the (post-increment) counter.
            endpoint = "foo" if num_reqs % 2 == 0 else "bar"
            url = "http://localhost:" + port + "/" + endpoint + '?' + p_uid
            http_client.fetch(url, handle, method='GET',
                              connect_timeout=90, request_timeout=120)
    logger.info("Starting tornado I/O loop")
    # Blocks until `handle` stops the loop (all done, or a bad response).
    ioloop.IOLoop.instance().start()
class Worker:
    """Context manager running the app-under-test process for one language.

    Entry starts the server process and waits briefly for it to boot; exit
    lets the agent queues flush, terminates the process and waits for the
    apm-server to drain.
    """

    def __init__(self, lang):
        self.l = lang

    def __enter__(self):
        self.p = run_process(self.l)
        time.sleep(3)
        logger.info("Process ready %s", self.p.pid)

    def __exit__(self, *exc_info):
        logger.info("Flushing queues")
        time.sleep(2)
        self.p.terminate()
        time.sleep(10)
        logger.info("Process terminated")
def load_test(lang, nreqs):
    """Run a single-language load test: boot the app, fire `nreqs` requests."""
    port = str(PORTS[lang])
    with Worker(lang):
        send_batch(nreqs, port)
def load_test_mixed_lang(nreqs):
    """Run both apps simultaneously and interleave requests between them."""
    with Worker('python'), Worker('node'):
        send_batch(nreqs)
# Port each app listens on.
PORTS = {'node': 8081, 'python': 5000}

# Per-agent expected values asserted against the indexed documents:
# the URL query part recorded, agent/framework names, and which key
# ('runtime' vs 'language') the context uses to report the language.
EXPECTATIONS = {
    'node': {
        'url_search': '?',
        'agent_name': 'nodejs',
        'framework': 'express',
        'lang_key': 'runtime'
    },
    'python': {
        'url_search': '',
        'agent_name': 'elasticapm-python',
        'framework': 'flask',
        'lang_key': 'language'
    }
}
def check_counts(index_name, size, it):
    """Verify document counts in ES after `it` iterations of `size` events.

    Expects `size * it` transactions and traces in total, split evenly
    between the foo and bar endpoints.
    """
    expected_total = size * it
    es.indices.refresh(index_name)
    err = "queried for {}, expected {}, got {}"

    def _expect(field, value, expected):
        # One term-count query per field/value pair.
        rs = es.count(index=index_name, body=es_query(field, value))
        assert rs['count'] == expected, err.format(value, expected, rs)

    for doc_type in ['transaction', 'trace']:
        _expect("processor.event", doc_type, expected_total)
    for trace_name in ['app.foo', 'app.bar']:
        _expect("trace.name", trace_name, expected_total / 2)
    for transaction_name in ['GET /foo', 'GET /bar']:
        _expect("transaction.name.keyword", transaction_name, expected_total / 2)
def check_contents(lang, it):
    """Spot-check indexed transaction and trace documents for one language.

    `it` is the current iteration number; it bounds how old a document's
    timestamp may be.
    """
    # Durations are in microseconds; over 0.1 s (or under 1 us) for these
    # trivial endpoints indicates something went wrong.
    def anomaly(x): return x > 100000 or x < 1  # 100000 = 0.1 sec

    transactions_query = es_query("processor.event", "transaction")
    transaction_dict = {}
    for hit in lookup(es.search(index, body=transactions_query), 'hits', 'hits'):
        transaction = lookup(hit, '_source', 'transaction')
        duration = lookup(transaction, 'duration', 'us')
        # Remember name/duration so traces below can be matched to their
        # owning transaction.
        transaction_dict[transaction['id']] = (transaction['name'], duration)
        assert not anomaly(duration), duration

        timestamp = datetime.strptime(lookup(hit, '_source', '@timestamp'),
                                      '%Y-%m-%dT%H:%M:%S.%fZ')
        assert datetime.utcnow() - timedelta(minutes=it) < timestamp < datetime.utcnow(), \
            "{} is too far of {} ".format(timestamp, datetime.utcnow())

        assert transaction['result'] == '200', transaction['result']
        assert transaction['type'] == 'request'

        context = lookup(hit, '_source', 'context')
        # The single-language batch tags URLs with p_uid; verify it round-
        # tripped into the recorded request context.
        actual_search = context['request']['url']['search']
        assert actual_search == EXPECTATIONS[lang]['url_search'] + p_uid, \
            "{} not in context {}".format(p_uid, context)

        assert context['request']['method'] == "GET", context['request']['method']
        assert context['request']['url']['pathname'] in ("/foo", "/bar"), \
            context['request']['url']['pathname']
        assert context['request']['url']['hostname'] == 'localhost'
        if lang == 'node':
            # Only the node agent records the response status code here.
            assert context['response']['status_code'] == 200, context['response']['status_code']
            assert context['user'] == {}, context
            assert context['custom'] == {}, context

        actual_lang = lookup(
            context, 'app', EXPECTATIONS[lang]['lang_key'], 'name')
        assert actual_lang == lang, context

        assert lookup(context, 'app', 'name') == 'test-app', context

        actual_agent = lookup(context, 'app', 'agent', 'name')
        assert actual_agent == EXPECTATIONS[lang]['agent_name'], context

        actual_framework = lookup(context, 'app', 'framework', 'name')
        assert actual_framework == EXPECTATIONS[lang]['framework'], context

        assert context['tags'] == {}, context

        assert hit['_source']['processor'] == {'name': 'transaction',
                                               'event': 'transaction'}

    traces_query = es_query("processor.event", "trace")
    for hit in lookup(es.search(index, body=traces_query), 'hits', 'hits'):
        context = lookup(hit, '_source', 'context')
        assert lookup(context, 'app', 'name') == 'test-app', context

        trace = lookup(hit, '_source', 'trace')
        start = lookup(trace, 'start', 'us')
        assert not anomaly(start), start

        duration = lookup(trace, 'duration', 'us')
        assert not anomaly(duration), duration

        # A trace must be of the same order of magnitude as its transaction.
        transaction_name, transaction_duration = transaction_dict[trace['transaction_id']]
        assert duration < transaction_duration * 10, \
            "trace duration {} is more than 10X bigger than transaction duration{}".format(
                duration, transaction_duration)

        stacktrace = trace['stacktrace']
        assert 15 < len(stacktrace) < 30, \
            "number of frames not expected, got {}, but this assertion might be too strict".format(
                len(stacktrace))

        fns = [frame['function'] for frame in stacktrace]
        assert all(fns), fns
        for attr in ['abs_path', 'line', 'filename']:
            assert all(
                frame.get(attr) for frame in stacktrace), stacktrace[0].keys()

        # Each trace must belong to the matching endpoint's transaction and
        # (for python) be the root trace containing the route function.
        if trace['name'] == 'app.bar':
            assert transaction_name == 'GET /bar', transaction_name
            if lang == 'python':
                assert trace['id'] == 0, trace['id']
                assert 'bar_route' in fns
        elif trace['name'] == 'app.foo':
            assert transaction_name == 'GET /foo', transaction_name
            if lang == 'python':
                assert trace['id'] == 0, trace['id']
                assert 'foo_route' in fns
        else:
            assert False, "trace name not expected {}".format(trace['name'])
def check_contents_not_mixed():
    """Verify mixed-language runs kept each document on its own agent.

    /foo documents must come from the python app (CPython runtime,
    python agent) and /bar documents from node — never crossed.
    """
    transaction_dict = {}
    tx_hits = lookup(es.search(index, body=es_query("processor.event", "transaction")),
                     'hits', 'hits')
    for doc in tx_hits:
        transaction = lookup(doc, '_source', 'transaction')
        runtime = lookup(doc, '_source', 'context', 'app', 'runtime', 'name')
        if transaction['name'] == 'GET /foo':
            assert runtime == 'CPython', runtime
        elif transaction['name'] == 'GET /bar':
            assert runtime == 'node', runtime
        else:
            assert False, transaction['name']
        # Remember each transaction's runtime to cross-check its traces.
        transaction_dict[transaction['id']] = runtime

    trace_hits = lookup(es.search(index, body=es_query("processor.event", "trace")),
                        'hits', 'hits')
    for doc in trace_hits:
        agent = lookup(doc, '_source', 'context', 'app', 'agent', 'name')
        trace = lookup(doc, '_source', 'trace')
        runtime = transaction_dict[trace['transaction_id']]
        if trace['name'] == 'app.bar':
            assert runtime == 'node', runtime
            assert agent == EXPECTATIONS['node']['agent_name']
        elif trace['name'] == 'app.foo':
            assert runtime == 'CPython', runtime
            assert agent == EXPECTATIONS['python']['agent_name']
        else:
            assert False, "trace name not expected {}".format(trace['name'])
def lookup(d, *keys):
    """Drill into nested mapping `d` along `keys`.

    Returns a deep copy of the value found, so callers can never mutate
    the original document.
    """
    if not keys:
        return copy.deepcopy(d)
    head, rest = keys[0], keys[1:]
    return lookup(d[head], *rest)
def es_query(field, val):
    """Build a minimal Elasticsearch term query for one field/value pair."""
    term = {field: val}
    return {"query": {"term": term}}
def reset():
    """Delete today's APM index and return its name.

    The index version is read from the apm-server ES template so the name
    matches what the server will write to.
    """
    template_path = '../../../../apm-server.template-es.json'
    with open(template_path) as meta:
        template = json.load(meta)
    version = template['mappings']['doc']['_meta']['version']
    index_name = "apm-{}-{}".format(version, time.strftime('%Y.%m.%d'))
    logger.info("Deleting index of the day {}".format(index_name))
    # Ignore 404 (index absent) and 400 so reset is idempotent.
    es.indices.delete(index=index_name, ignore=[400, 404])
    return index_name
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Tests!')
    parser.add_argument(
        '-l', '--language', help='Either "node" or "python", defaults to both', default=None)
    parser.add_argument(
        '-s', '--size', help='Number of events to send on each iteration', default=1000)
    parser.add_argument(
        '-i', '--iterations', help='Number of iterations to do each test', default=1)
    args = parser.parse_args()
    langs = [args.language] if args.language else ['python', 'node']
    iters = int(args.iterations)
    size = int(args.size)

    # Phase 1: exercise each agent on its own, verifying counts and
    # document contents after every batch.
    for lang in langs:
        logger.info("Testing {} agent".format(lang))
        index = reset()
        for it in range(1, iters + 1):
            logger.info("Sending batch {} / {}".format(it, iters))
            load_test(lang, size)
            es.indices.refresh(index)
            check_counts(index, size, it)
            check_contents(lang, it)
            logger.info("So far so good...")

    # Phase 2 (only when both languages are in play): run both apps at
    # once and verify documents were not cross-attributed. Each mixed
    # batch sends `size` requests to each app, hence size * 2 below.
    if len(langs) > 1:
        logger.info("Testing all agents together")
        index = reset()
        for it in range(1, iters + 1):
            logger.info("Sending batch {} / {}".format(it, iters))
            load_test_mixed_lang(size)
            es.indices.refresh(index)
            check_counts(index, size * 2, it)
            check_contents_not_mixed()
            logger.info("So far so good...")

    logger.info("ALL DONE")
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
# Optimizers selectable by string name in optimize_loss().
OPTIMIZER_CLS_NAMES = {
    "Adagrad": train.AdagradOptimizer,
    "Adam": train.AdamOptimizer,
    "Ftrl": train.FtrlOptimizer,
    "Momentum": train.MomentumOptimizer,
    "RMSProp": train.RMSPropOptimizer,
    "SGD": train.GradientDescentOptimizer,
}

# Summary names that may be requested via optimize_loss()'s `summaries` arg.
OPTIMIZER_SUMMARIES = [
    "learning_rate",
    "loss",
    "gradients",
    "gradient_norm",
    "global_gradient_norm",
]
def optimize_loss(loss,
                  global_step,
                  learning_rate,
                  optimizer,
                  gradient_noise_scale=None,
                  gradient_multipliers=None,
                  clip_gradients=None,
                  learning_rate_decay_fn=None,
                  update_ops=None,
                  variables=None,
                  name=None,
                  summaries=None,
                  colocate_gradients_with_ops=False,
                  increment_global_step=True):
  """Given loss and parameters for optimizer, returns a training op.
  Various ways of passing optimizers include:
  - by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES
      for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
  - by function taking learning rate `Tensor` as argument and returning an
      `Optimizer` instance. E.g. `optimize_loss(...,
      optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
    Alternatively, if `learning_rate` is `None`, the function takes no
    arguments. E.g. `optimize_loss(..., learning_rate=None,
      optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
  - by a subclass of `Optimizer` having a single-argument constructor
      (the argument is the learning rate), such as AdamOptimizer or
      AdagradOptimizer. E.g. `optimize_loss(...,
      optimizer=tf.train.AdagradOptimizer)`.
  - by an instance of a subclass of `Optimizer`.
      E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
  Args:
    loss: Scalar `Tensor`.
    global_step: Scalar int `Tensor`, step counter to update on each step
                 unless `increment_global_step` is `False`. If not supplied,
                 it will be fetched from the default graph (see
                 `tf.train.get_global_step` for details). If it has
                 not been created, no step will be incremented with each weight
                 update. `learning_rate_decay_fn` requires `global_step`.
    learning_rate: float or `Tensor`, magnitude of update per each training
                   step. Can be `None`.
    optimizer: string, class or optimizer instance, used as trainer.
               string should be name of optimizer, like 'SGD',
                 'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
               class should be sub-class of `tf.Optimizer` that implements
                 `compute_gradients` and `apply_gradients` functions.
               optimizer instance should be instantiation of `tf.Optimizer`
                 sub-class and have `compute_gradients` and `apply_gradients`
                 functions.
    gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
                          value.
    gradient_multipliers: dict of variables or variable names to floats.
                          If present, gradients for specified
                          variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If float, is provided, a global
      clipping is applied to prevent the norm of the gradient to exceed this
      value. Alternatively, a callable can be provided e.g.: adaptive_clipping.
      This callable takes a `list` of `(gradients, variables)` `tuple`s and
      returns the same thing with the gradients modified.
    learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
                            `Tensor`s, returns `Tensor`.
                            Can be used to implement any learning rate decay
                            functions.
                            For example: `tf.train.exponential_decay`.
                            Ignored if `learning_rate` is not supplied.
    update_ops: list of update `Operation`s to execute at each step. If `None`,
                uses elements of UPDATE_OPS collection. The order of execution
                between `update_ops` and `loss` is non-deterministic.
    variables: list of variables to optimize or
               `None` to use all trainable variables.
    name: The name for this operation is used to scope operations and summaries.
    summaries: List of internal quantities to visualize on tensorboard. If not
               set, the loss, the learning rate, and the global norm of the
               gradients will be reported. The complete list of possible values
               is in OPTIMIZER_SUMMARIES.
    colocate_gradients_with_ops: If True, try colocating gradients with the
                                 corresponding op.
    increment_global_step: Whether to increment `global_step`. If your model
      calls `optimize_loss` multiple times per training step (e.g. to optimize
      different parts of the model), use this arg to avoid incrementing
      `global_step` more times than necessary.
  Returns:
    Training op.
  Raises:
    ValueError: if:
        * `loss` is an invalid type or shape.
        * `global_step` is an invalid type or shape.
        * `learning_rate` is an invalid type or value.
        * `optimizer` has the wrong type.
        * `clip_gradients` is neither float nor callable.
        * `learning_rate` and `learning_rate_decay_fn` are supplied, but no
          `global_step` is available.
        * `gradients` is empty.
  """
  loss = ops.convert_to_tensor(loss)
  contrib_framework.assert_scalar(loss)
  if global_step is None:
    global_step = train.get_global_step()
  else:
    train.assert_global_step(global_step)
  with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
    # Update ops take UPDATE_OPS collection if not provided.
    if update_ops is None:
      update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are ran before computing loss.
    if update_ops:
      loss = control_flow_ops.with_dependencies(list(update_ops), loss)
    # Learning rate variable, with possible decay.
    lr = None
    if learning_rate is not None:
      if (isinstance(learning_rate, ops.Tensor) and
          learning_rate.get_shape().ndims == 0):
        lr = learning_rate
      elif isinstance(learning_rate, float):
        if learning_rate < 0.0:
          # BUG FIX: the message was passed as a second positional argument
          # (comma instead of %), so ValueError carried an unformatted
          # tuple rather than the rendered message.
          raise ValueError("Invalid learning_rate %s." % learning_rate)
        lr = vs.get_variable(
            "learning_rate", [],
            trainable=False,
            initializer=init_ops.constant_initializer(learning_rate))
      else:
        raise ValueError("Learning rate should be 0d Tensor or float. "
                         "Got %s of type %s" % (str(learning_rate),
                                                str(type(learning_rate))))
    if summaries is None:
      summaries = ["loss", "learning_rate", "global_gradient_norm"]
    else:
      # Validate user-supplied summary names early, before building the graph.
      for summ in summaries:
        if summ not in OPTIMIZER_SUMMARIES:
          raise ValueError("Summaries should be one of [%s], you provided %s." %
                           (", ".join(OPTIMIZER_SUMMARIES), summ))
    if learning_rate is not None and learning_rate_decay_fn is not None:
      if global_step is None:
        raise ValueError("global_step is required for learning_rate_decay_fn.")
      lr = learning_rate_decay_fn(lr, global_step)
      if "learning_rate" in summaries:
        summary.scalar("learning_rate", lr)
    # Create optimizer, given specified parameters.
    if isinstance(optimizer, six.string_types):
      if lr is None:
        raise ValueError("Learning rate is None, but should be specified if "
                         "optimizer is string (%s)." % optimizer)
      if optimizer not in OPTIMIZER_CLS_NAMES:
        raise ValueError(
            "Optimizer name should be one of [%s], you provided %s." %
            (", ".join(OPTIMIZER_CLS_NAMES), optimizer))
      opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
    elif (isinstance(optimizer, type) and
          issubclass(optimizer, optimizer_.Optimizer)):
      if lr is None:
        raise ValueError("Learning rate is None, but should be specified if "
                         "optimizer is class (%s)." % optimizer)
      opt = optimizer(learning_rate=lr)
    elif isinstance(optimizer, optimizer_.Optimizer):
      opt = optimizer
    elif callable(optimizer):
      if learning_rate is not None:
        opt = optimizer(lr)
      else:
        opt = optimizer()
      if not isinstance(opt, optimizer_.Optimizer):
        raise ValueError("Unrecognized optimizer: function should return "
                         "subclass of Optimizer. Got %s." % str(opt))
    else:
      raise ValueError("Unrecognized optimizer: should be string, "
                       "subclass of Optimizer, instance of "
                       "subclass of Optimizer or function with one argument. "
                       "Got %s." % str(optimizer))
    # All trainable variables, if specific variables are not specified.
    if variables is None:
      variables = vars_.trainable_variables()
    # Compute gradients.
    gradients = opt.compute_gradients(
        loss,
        variables,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    # Optionally add gradient noise.
    if gradient_noise_scale is not None:
      gradients = _add_scaled_noise_to_gradients(gradients,
                                                 gradient_noise_scale)
    # Multiply some gradients.
    if gradient_multipliers is not None:
      gradients = _multiply_gradients(gradients, gradient_multipliers)
      if not gradients:
        raise ValueError(
            "Empty list of (gradient, var) pairs encountered. This is most "
            "likely to be caused by an improper value of gradient_multipliers.")
    if "global_gradient_norm" in summaries or "gradient_norm" in summaries:
      # Norm of the unclipped gradients, for comparison with the clipped norm.
      summary.scalar("global_norm/gradient_norm",
                     clip_ops.global_norm(list(zip(*gradients))[0]))
    # Optionally clip gradients by global norm.
    if isinstance(clip_gradients, float):
      gradients = _clip_gradients_by_norm(gradients, clip_gradients)
    elif callable(clip_gradients):
      gradients = clip_gradients(gradients)
    elif clip_gradients is not None:
      raise ValueError(
          "Unknown type %s for clip_gradients" % type(clip_gradients))
    # Add scalar summary for loss.
    if "loss" in summaries:
      summary.scalar("loss", loss)
    # Add histograms for variables, gradients and gradient norms.
    for gradient, variable in gradients:
      if isinstance(gradient, ops.IndexedSlices):
        grad_values = gradient.values
      else:
        grad_values = gradient
      if grad_values is not None:
        var_name = variable.name.replace(":", "_")
        if "gradients" in summaries:
          summary.histogram("gradients/%s" % var_name, grad_values)
        if "gradient_norm" in summaries:
          summary.scalar("gradient_norm/%s" % var_name,
                         clip_ops.global_norm([grad_values]))
    if clip_gradients is not None and ("global_gradient_norm" in summaries or
                                       "gradient_norm" in summaries):
      summary.scalar("global_norm/clipped_gradient_norm",
                     clip_ops.global_norm(list(zip(*gradients))[0]))
    # Create gradient updates.
    grad_updates = opt.apply_gradients(
        gradients,
        global_step=global_step if increment_global_step else None,
        name="train")
    # Ensure the train_tensor computes grad_updates.
    train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
    return train_tensor
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Rescales all gradients so their global norm does not exceed a threshold.

  Args:
    grads_and_vars: List of (gradient, variable) pairs.
    clip_gradients: Float, the maximum allowed global norm.

  Returns:
    List of (clipped_gradient, variable) pairs.
  """
  grads, variables = zip(*grads_and_vars)
  clipped, _ = clip_ops.clip_by_global_norm(grads, clip_gradients)
  return list(zip(clipped, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average.

  Tracks exponential moving averages of log(norm + epsilon) and of its
  square, and returns `exp(mean + std_factor*std)` along with the mean
  (still in the log domain).
  """
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    # Statistics are kept in the log domain for numerical stability.
    log_norm = math_ops.log(norm + epsilon)
    def moving_average(name, value, decay):
      # Exponential moving average held in a non-trainable variable;
      # zero_debias=False keeps the raw (biased-at-start) average.
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)
    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.to_float(global_step)
      decay = math_ops.minimum(decay, n / (n + 1.))
    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
    variance = sq_mean - math_ops.square(mean)
    # Clamp variance to epsilon to avoid sqrt of a negative rounding error.
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
                         decay=0.95,
                         static_max_norm=None,
                         global_step=None,
                         report_summary=False,
                         epsilon=1e-8,
                         name=None):
  """Adapt the clipping value using statistics on the norms.
  Implement adaptive gradient as presented in section 3.2.1 of
  https://arxiv.org/abs/1412.1602.
  Keeps a moving average of the mean and std of the log(norm) of the gradient.
  If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be
  rescaled such that the global norm becomes `exp(mean)`.
  Args:
    std_factor: Python scaler (or tensor).
      `max_norm = exp(mean + std_factor*std)`
    decay: The smoothing factor of the moving averages.
    static_max_norm: If provided, will threshold the norm to this value as an
      extra safety.
    global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
      This provides a quicker adaptation of the mean for the first steps.
    report_summary: If `True`, will add histogram summaries of the `max_norm`.
    epsilon: Small value chosen to avoid zero variance.
    name: The name for this operation is used to scope operations and summaries.
  Returns:
    A function for applying gradient clipping.
  """
  def gradient_clipping(grads_and_vars):
    """Internal function for adaptive clipping."""
    grads, variables = zip(*grads_and_vars)
    norm = clip_ops.global_norm(grads)
    max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
                                            global_step, epsilon, name)
    # reports the max gradient norm for debugging
    if report_summary:
      summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
    # factor will be 1. if norm is smaller than max_norm
    factor = array_ops.where(norm < max_norm,
                             array_ops.ones_like(norm),
                             math_ops.exp(log_mean) / norm)
    if static_max_norm is not None:
      # Extra safety net: never allow a factor larger than static_max_norm/norm.
      factor = math_ops.minimum(static_max_norm / norm, factor)
    # apply factor
    clipped_grads = []
    for grad in grads:
      if grad is None:
        # Preserve None placeholders so the (grad, var) pairing survives.
        clipped_grads.append(None)
      elif isinstance(grad, ops.IndexedSlices):
        # Sparse gradients: scale only the values; indices stay untouched.
        clipped_grads.append(
            ops.IndexedSlices(grad.values * factor, grad.indices,
                              grad.dense_shape))
      else:
        clipped_grads.append(grad * factor)
    return list(zip(clipped_grads, variables))
  return gradient_clipping
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
  """Adds scaled noise from a 0-mean normal distribution to gradients."""
  gradients, variables = zip(*grads_and_vars)
  def _noisy(gradient):
    # None gradients pass through untouched.
    if gradient is None:
      return None
    # IndexedSlices carry their dense shape explicitly; dense tensors don't.
    if isinstance(gradient, ops.IndexedSlices):
      gradient_shape = gradient.dense_shape
    else:
      gradient_shape = gradient.get_shape()
    noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
    return gradient + noise
  return [(_noisy(gradient), variable)
          for gradient, variable in zip(gradients, variables)]
def _multiply_gradients(grads_and_vars, gradient_multipliers):
  """Multiply specified gradients.

  Pairs whose variable (or variable name) appears in gradient_multipliers
  have their gradient scaled by the given constant; all other pairs are
  passed through unchanged.
  """
  multiplied = []
  for grad, var in grads_and_vars:
    # The multiplier dict may be keyed either by variable or by name.
    key = None
    if grad is not None:
      if var in gradient_multipliers:
        key = var
      elif var.name in gradient_multipliers:
        key = var.name
    if key is not None:
      scale = constant_op.constant(
          gradient_multipliers[key], dtype=dtypes.float32)
      if isinstance(grad, ops.IndexedSlices):
        # Scale only the values of a sparse gradient.
        grad = ops.IndexedSlices(grad.values * scale, grad.indices,
                                 grad.dense_shape)
      else:
        grad = grad * scale
    multiplied.append((grad, var))
  return multiplied
| |
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Module for some slight database-independence for simple databases.
"""
import csv
import math
import os

import supybot.cdb as cdb
import supybot.utils as utils
from supybot.utils.iter import ilen
class Error(Exception):
    """General error for this module."""
class NoRecordError(KeyError):
    """Raised when a requested record id does not exist in a mapping."""
    pass
class InvalidDBError(Exception):
    """Raised when a database file is malformed or otherwise unusable."""
    pass
class MappingInterface(object):
    """This is a class to represent the underlying representation of a map
    from integer keys to strings."""
    def __init__(self, filename, **kwargs):
        """Feel free to ignore the filename."""
        raise NotImplementedError
    def get(self, id):
        """Gets the record matching id. Raises NoRecordError otherwise."""
        # BUG FIX: 'self' was missing from the signature, so any call
        # through an instance passed the instance as 'id'.
        raise NotImplementedError
    def set(self, id, s):
        """Sets the record matching id to s."""
        # BUG FIX: 'self' was missing from this signature as well.
        raise NotImplementedError
    def add(self, s):
        """Adds a new record, returning a new id for it."""
        raise NotImplementedError
    def remove(self, id):
        "Returns and removes the record with the given id from the database."
        raise NotImplementedError
    def __iter__(self):
        "Return an iterator over (id, s) pairs. Not required to be ordered."
        raise NotImplementedError
    def flush(self):
        """Flushes current state to disk."""
        raise NotImplementedError
    def close(self):
        """Flushes current state to disk and invalidates the Mapping."""
        raise NotImplementedError
    def vacuum(self):
        "Cleans up in the database, if possible. Not required to do anything."
        pass
class DirMapping(MappingInterface):
    """Mapping that stores each record as its own file in a directory.

    The next id to allocate is kept in a file named 'max' inside the
    directory.
    """
    def __init__(self, filename, **kwargs):
        self.dirname = filename
        if not os.path.exists(self.dirname):
            os.mkdir(self.dirname)
        if not os.path.exists(os.path.join(self.dirname, 'max')):
            self._setMax(1)
    def _setMax(self, id):
        # Persist the next id to allocate.
        fd = open(os.path.join(self.dirname, 'max'), 'w')
        try:
            fd.write(str(id))
        finally:
            fd.close()
    def _getMax(self):
        fd = open(os.path.join(self.dirname, 'max'))
        try:
            return int(fd.read())
        finally:
            fd.close()
    def _makeFilename(self, id):
        return os.path.join(self.dirname, str(id))
    def get(self, id):
        # BUG FIX: 'self' was missing from the signature even though the
        # body uses it; calling this method could not have worked.  Also
        # close the file on the success path instead of leaking it.
        try:
            fd = open(self._makeFilename(id))
            try:
                return fd.read()
            finally:
                fd.close()
        except EnvironmentError as e:
            exn = NoRecordError(id)
            # Keep the original error around for debugging.
            exn.realException = e
            raise exn
    def set(self, id, s):
        # BUG FIX: 'self' was missing from this signature as well.
        fd = open(self._makeFilename(id), 'w')
        try:
            fd.write(s)
        finally:
            fd.close()
    def add(self, s):
        id = self._getMax()
        # NOTE(review): the 'max' counter is never advanced here, so
        # successive add() calls reuse the same id -- TODO confirm intended.
        fd = open(self._makeFilename(id), 'w')
        try:
            fd.write(s)
            return id
        finally:
            fd.close()
    def remove(self, id):
        try:
            os.remove(self._makeFilename(id))
        except EnvironmentError:
            raise NoRecordError(id)
class FlatfileMapping(MappingInterface):
    """Mapping that stores all records in one flat text file.

    The first line of the file holds the next id to allocate, zero-filled
    to a fixed width; each following line is '<id>:<value>'.  Removed
    records have their id overwritten with dashes instead of being
    physically deleted; vacuum() rewrites the file without them.
    """
    def __init__(self, filename, maxSize=10**6):
        self.filename = filename
        try:
            fd = file(self.filename)
            strId = fd.readline().rstrip()
            # The width of the stored id doubles as the zero-fill width
            # used for every id written later.
            self.maxSize = len(strId)
            try:
                self.currentId = int(strId)
            except ValueError:
                raise Error, 'Invalid file for FlatfileMapping: %s' % filename
        except EnvironmentError, e:
            # File couldn't be opened.
            self.maxSize = int(math.log10(maxSize))
            self.currentId = 0
            self._incrementCurrentId()
    def _canonicalId(self, id):
        # None maps to the all-dashes "removed" marker of the same width.
        if id is not None:
            return str(id).zfill(self.maxSize)
        else:
            return '-'*self.maxSize
    def _incrementCurrentId(self, fd=None):
        """Bump the in-memory id counter and rewrite the file's first line."""
        fdWasNone = fd is None
        if fdWasNone:
            # NOTE(review): in 'a' (append) mode some platforms ignore
            # seek() and always write at the end -- TODO confirm the id
            # line is really rewritten in place here.
            fd = file(self.filename, 'a')
        fd.seek(0)
        self.currentId += 1
        fd.write(self._canonicalId(self.currentId))
        fd.write('\n')
        if fdWasNone:
            fd.close()
    def _splitLine(self, line):
        # A line is '<id>:<value>'; the value may itself contain colons.
        line = line.rstrip('\r\n')
        (id, s) = line.split(':', 1)
        return (id, s)
    def _joinLine(self, id, s):
        return '%s:%s\n' % (self._canonicalId(id), s)
    def add(self, s):
        """Append a record under the current id and advance the counter."""
        line = self._joinLine(self.currentId, s)
        fd = file(self.filename, 'r+')
        try:
            fd.seek(0, 2) # End.
            fd.write(line)
            return self.currentId
        finally:
            # Runs after the return value is computed, so the returned id
            # is the one just written.
            self._incrementCurrentId(fd)
            fd.close()
    def get(self, id):
        """Linear-scan the file for the record with the given id."""
        strId = self._canonicalId(id)
        try:
            fd = file(self.filename)
            fd.readline() # First line, nextId.
            for line in fd:
                (lineId, s) = self._splitLine(line)
                if lineId == strId:
                    return s
            raise NoRecordError, id
        finally:
            fd.close()
    # XXX This assumes it's not been given out.  We should make sure that our
    # maximum id remains accurate if this is some value we've never given
    # out -- i.e., self.maxid = max(self.maxid, id) or something.
    def set(self, id, s):
        """Replace the record with the given id (remove + append)."""
        strLine = self._joinLine(id, s)
        try:
            fd = file(self.filename, 'r+')
            self.remove(id, fd)
            fd.seek(0, 2) # End.
            fd.write(strLine)
        finally:
            fd.close()
    def remove(self, id, fd=None):
        """Tombstone the record by overwriting its id with dashes in place."""
        fdWasNone = fd is None
        strId = self._canonicalId(id)
        try:
            if fdWasNone:
                fd = file(self.filename, 'r+')
            fd.seek(0)
            fd.readline() # First line, nextId
            pos = fd.tell()
            line = fd.readline()
            while line:
                (lineId, _) = self._splitLine(line)
                if lineId == strId:
                    # Overwrite just the id field; the value keeps its spot.
                    fd.seek(pos)
                    fd.write(self._canonicalId(None))
                fd.seek(pos)
                fd.readline() # Same line we just rewrote the id for.
                pos = fd.tell()
                line = fd.readline()
            # We should be at the end.
        finally:
            if fdWasNone:
                fd.close()
    def __iter__(self):
        """Yield (id, value) pairs, skipping tombstoned (dashed) records."""
        fd = file(self.filename)
        fd.readline() # First line, nextId.
        for line in fd:
            (id, s) = self._splitLine(line)
            if not id.startswith('-'):
                yield (int(id), s)
        fd.close()
    def vacuum(self):
        """Rewrite the file without tombstoned records, atomically."""
        infd = file(self.filename)
        outfd = utils.file.AtomicFile(self.filename,makeBackupIfSmaller=False)
        outfd.write(infd.readline()) # First line, nextId.
        for line in infd:
            if not line.startswith('-'):
                outfd.write(line)
        infd.close()
        outfd.close()
    def flush(self):
        pass # No-op, we maintain no open files.
    def close(self):
        self.vacuum() # Should we do this?  It should be fine.
class CdbMapping(MappingInterface):
    """Mapping backed by a cdb (constant database) file.

    The next id to allocate is stored inside the database itself under the
    reserved key 'nextId'.
    """
    def __init__(self, filename, **kwargs):
        self.filename = filename
        self._openCdb() # So it can be overridden later.
        if 'nextId' not in self.db:
            self.db['nextId'] = '1'
    def _openCdb(self, *args, **kwargs):
        # 'c' opens the database for reading and writing, creating it if needed.
        self.db = cdb.open(self.filename, 'c', **kwargs)
    def _getNextId(self):
        # Reserve and return the next free id, advancing the stored counter.
        i = int(self.db['nextId'])
        self.db['nextId'] = str(i+1)
        return i
    def get(self, id):
        try:
            return self.db[str(id)]
        except KeyError:
            raise NoRecordError, id
    # XXX Same caveat as FlatfileMapping.set: assumes the id was already
    # given out; the nextId counter is not updated for never-issued ids.
    def set(self, id, s):
        self.db[str(id)] = s
    def add(self, s):
        id = self._getNextId()
        self.set(id, s)
        return id
    def remove(self, id):
        del self.db[str(id)]
    def __iter__(self):
        # Skip the bookkeeping 'nextId' key; every other key is a record id.
        for (id, s) in self.db.iteritems():
            if id != 'nextId':
                yield (int(id), s)
    def flush(self):
        self.db.flush()
    def close(self):
        self.db.close()
class DB(object):
    """Ties a Record class to a Mapping backend, (de)serializing in between.

    `Mapping` may be given either as a mapping class or as one of the
    string names registered in the module-level `Mappings` dict.
    """
    Mapping = 'flat' # This is a good, sane default.
    Record = None
    def __init__(self, filename, Mapping=None, Record=None):
        if Record is not None:
            self.Record = Record
        if Mapping is not None:
            self.Mapping = Mapping
        if isinstance(self.Mapping, basestring):
            # Resolve a backend name like 'flat' to its class.
            self.Mapping = Mappings[self.Mapping]
        self.map = self.Mapping(filename)
    def _newRecord(self, id, s):
        # Deserialize s into a fresh Record carrying the given id.
        record = self.Record(id=id)
        record.deserialize(s)
        return record
    def get(self, id):
        return self._newRecord(id, self.map.get(id))
    def set(self, id, record):
        self.map.set(id, record.serialize())
    def add(self, record):
        newId = self.map.add(record.serialize())
        record.id = newId
        return newId
    def remove(self, id):
        self.map.remove(id)
    def __iter__(self):
        # The id is embedded in each record, so only records are yielded.
        for (id, s) in self.map:
            yield self._newRecord(id, s)
    def select(self, p):
        # Lazily yield only the records satisfying predicate p.
        for record in self:
            if p(record):
                yield record
    def random(self):
        try:
            return self._newRecord(*utils.iter.choice(self.map))
        except IndexError:
            # choice() raises IndexError when the mapping is empty.
            return None
    def size(self):
        return ilen(self.map)
    def flush(self):
        self.map.flush()
    def vacuum(self):
        self.map.vacuum()
    def close(self):
        self.map.close()
# Registry of available mapping backends, keyed by the string names
# accepted by DB.Mapping.
Mappings = {
    'cdb': CdbMapping,
    'flat': FlatfileMapping,
    }
class Record(object):
    """Base class for database records.

    Subclasses define __fields__, a sequence whose elements are either a
    field name or a (name, spec) pair, where spec is a converter function
    or a (converter, default) pair.  Converters turn the serialized string
    form back into a value; a callable default is invoked to produce the
    initial value.
    """
    def __init__(self, id=None, **kwargs):
        if id is not None:
            assert isinstance(id, int), 'id must be an integer.'
        self.id = id
        self.fields = []
        self.defaults = {}
        self.converters = {}
        # Normalize __fields__ into parallel fields/defaults/converters.
        for name in self.__fields__:
            if isinstance(name, tuple):
                (name, spec) = name
            else:
                spec = utils.safeEval
            # 'id' is reserved for the record id itself.
            assert name != 'id'
            self.fields.append(name)
            if isinstance(spec, tuple):
                (converter, default) = spec
            else:
                converter = spec
                default = None
            self.defaults[name] = default
            self.converters[name] = converter
        seen = set()
        for (name, value) in kwargs.iteritems():
            assert name in self.fields, 'name must be a record value.'
            seen.add(name)
            setattr(self, name, value)
        # Fields not supplied as keywords fall back to their defaults;
        # callable defaults are invoked to produce the value.
        for name in self.fields:
            if name not in seen:
                default = self.defaults[name]
                if callable(default):
                    default = default()
                setattr(self, name, default)
    def serialize(self):
        # NOTE(review): the stdlib csv module has no join(); presumably a
        # csv helper providing join/split (e.g. supybot's utils) is meant
        # here -- TODO confirm which csv module is imported.
        return csv.join([repr(getattr(self, name)) for name in self.fields])
    def deserialize(self, s):
        # Fields missing from the serialized form fall back to defaults.
        # NOTE(review): unlike __init__, callable defaults are NOT invoked
        # here -- TODO confirm whether that asymmetry is intended.
        unseenRecords = set(self.fields)
        for (name, strValue) in zip(self.fields, csv.split(s)):
            setattr(self, name, self.converters[name](strValue))
            unseenRecords.remove(name)
        for name in unseenRecords:
            setattr(self, name, self.defaults[name])
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| |
from __future__ import absolute_import, print_function
import time, rfc822
from xml.etree.cElementTree import Element, SubElement, tostring
from xml.etree.ElementTree import _namespace_map
from pony.templating import Html, StrHtml
ns = 'http://www.w3.org/2005/Atom'
_namespace_map.setdefault(ns, 'atom')
# nsmap_keyargs = {}
# if getattr(etree, 'LXML_VERSION', (0, 0, 0, 0)) >= (1, 3, 7, 0):
#     nsmap_keyargs = {'nsmap': {'atom':ns}}
# Cache of fully-qualified '{namespace}tag' names, filled lazily by atomtag().
atomtags_cache = {}
def atomtag(tag):
    """Return the Clark-notation name '{atom-namespace}tag' for an Atom tag."""
    # BUG FIX: the cache was referenced as 'nstags_cache', a name that does
    # not exist anywhere in this module (NameError on every call); the cache
    # defined above is 'atomtags_cache'.
    return atomtags_cache.get(tag) or atomtags_cache.setdefault(tag, '{%s}%s' % (ns, tag))
def utc(dt):
    """Return *dt* as a naive datetime in UTC; naive inputs pass through."""
    if getattr(dt, 'tzinfo', None) is not None:
        # Aware: drop the tzinfo and shift by the offset to land on UTC.
        return dt.replace(tzinfo=None) - dt.utcoffset()
    return dt
def atom_date(date):
    # Format as an RFC 3339 / Atom timestamp, e.g. '2003-12-13T18:30:02Z'.
    return utc(date).strftime('%Y-%m-%dT%H:%M:%SZ')
def rss2_date(date):
    # Format as an RFC 822 date as required by RSS 2.0.
    # (rfc822 is Python 2 only; Python 3 moved this to email.utils.)
    return rfc822.formatdate(time.mktime(utc(date).timetuple()))
def atom_author(author):
    """Build an Atom <author> element from a string or a (name, uri, email)
    sequence; missing trailing components may be omitted."""
    # A bare string is taken to be the author's name.
    if isinstance(author, basestring): author = [ author, None, None ]
    else:
        try: iter(author)
        except TypeError: raise TypeError('Inappropriate author value: %r' % author)
        # Pad/trim to exactly three slots: (name, uri, email).
        author = (list(author) + [ None, None ])[:3]
    name, uri, email = author
    result = Element(atomtag('author'))
    if name: SubElement(result, atomtag('name')).text = name
    if uri: SubElement(result, atomtag('uri')).text = uri
    if email: SubElement(result, atomtag('email')).text = email
    return result
def rss2_author(author):
    """Format an author for RSS 2.0, where the convention is 'email (name)'."""
    if isinstance(author, basestring): return author
    # NOTE(review): author[1] is the *uri* slot in atom_author's
    # (name, uri, email) ordering -- presumably (name, email) pairs are
    # expected here instead; TODO confirm against callers.
    if len(author) >= 2: return '%s (%s)' % (author[1], author[0])
    return author[0]
def set_atom_text(element, text):
    """Attach *text* to an Atom element, setting the matching 'type' attribute
    ('html', 'text' or 'xhtml') per the Atom text-construct rules."""
    if isinstance(text, (Html, StrHtml)):
        element.set('type', 'html')
        # Slicing strips the Html/StrHtml subclass, leaving a plain string.
        element.text = text[:]
    elif isinstance(text, basestring):
        element.set('type', 'text')
        element.text = text
    elif hasattr(text, 'makeelement') and getattr(text, 'tag') == '{http://www.w3.org/1999/xhtml}div':
        # An XHTML <div> element is embedded directly as a child.
        element.set('type', 'xhtml')
        element.append(text)
    else: raise TypeError('Inappropriate text value: %r' % text)
def set_rss2_text(element, text):
    """Attach *text* to an RSS 2.0 element; XML elements are serialized
    to markup, strings are used as-is.  Other types are silently ignored."""
    if hasattr(text, 'makeelement'): element.text = tostring(text)
    elif isinstance(text, basestring): element.text = text[:]
class Feed(object):
    """An Atom / RSS 2.0 feed: channel metadata plus a list of Entry objects.

    Following this module's convention, the first method argument is named
    'feed' instead of 'self'.
    """
    def __init__(feed, title, link, updated,
                 id=None, subtitle=None, feed_link=None, author=None, rights=None,
                 base=None, language=None, icon=None, logo=None):
        feed.link = link
        feed.title = title
        feed.updated = updated
        # The feed link doubles as the id when no explicit id is given.
        feed.id = id or feed.link
        feed.subtitle = subtitle
        feed.feed_link = feed_link
        feed.author = author
        feed.rights = rights
        feed.base = base
        feed.language = language
        feed.icon = icon
        feed.logo = logo
        feed.entries = []
    def add(feed, entry):
        """Append an Entry to the feed."""
        feed.entries.append(entry)
    def __str__(feed):
        return tostring(feed.atom())
    def atom(feed, pretty_print=True):
        """Build and return the Atom <feed> element tree."""
        indent = '\n  ' if pretty_print else ''
        xml = Element(atomtag('feed')) #, **nsmap_keyargs) # lxml
        xml.text = indent
        if feed.base: xml.set('{http://www.w3.org/XML/1998/namespace}base', feed.base)
        if feed.language: xml.set('{http://www.w3.org/XML/1998/namespace}lang', feed.language)
        title = SubElement(xml, atomtag('title'))
        set_atom_text(title, feed.title)
        if feed.subtitle:
            # BUG FIX: the feed-level element is <subtitle> per RFC 4287;
            # <summary> is only valid inside entries.
            subtitle = SubElement(xml, atomtag('subtitle'))
            set_atom_text(subtitle, feed.subtitle)
        link = SubElement(xml, atomtag('link'), href=feed.link)
        if feed.feed_link:
            feed_link = SubElement(xml, atomtag('link'), rel='self', href=feed.feed_link)
        updated = SubElement(xml, atomtag('updated'))
        updated.text = atom_date(feed.updated)
        id = SubElement(xml, atomtag('id'))
        id.text = feed.id
        if feed.author:
            author = atom_author(feed.author)
            xml.append(author)
        if feed.rights:
            rights = SubElement(xml, atomtag('rights'))
            set_atom_text(rights, feed.rights)
        if feed.icon:
            icon = SubElement(xml, atomtag('icon'))
            icon.text = feed.icon
        if feed.logo:
            logo = SubElement(xml, atomtag('logo'))
            logo.text = feed.logo
        for entry in feed.entries:
            entry_xml = entry.atom(pretty_print)
            entry_xml[-1].tail = indent
            xml.append(entry_xml)
        # Re-indent all direct children for pretty printing.
        for child in xml: child.tail = indent
        xml[-1].tail = '\n' if pretty_print else ''
        return xml
    def rss2(feed, pretty_print=True):
        """Build and return the RSS 2.0 <rss> element tree."""
        indent = '\n    ' if pretty_print else ''
        indent2 = '\n      ' if pretty_print else ''
        rss = Element('rss', version='2.0')
        rss.text = '\n  ' if pretty_print else ''
        channel = SubElement(rss, 'channel')
        channel.text = indent
        channel.tail = '\n' if pretty_print else ''
        set_rss2_text(SubElement(channel, 'title'), feed.title)
        set_rss2_text(SubElement(channel, 'description'), feed.subtitle or '')
        SubElement(channel, 'link').text = feed.link
        SubElement(channel, 'lastBuildDate').text = rss2_date(feed.updated)
        if feed.language: SubElement(channel, 'language').text = feed.language
        if feed.rights: SubElement(channel, 'copyright').text = feed.rights
        if feed.logo:
            image = SubElement(channel, 'image')
            image.text = indent2
            SubElement(image, 'url').text = feed.logo
            SubElement(image, 'title').text = ''
            SubElement(image, 'link').text = feed.link
            for child in image: child.tail = indent2
            image[-1].tail = '\n    ' if pretty_print else ''
        for entry in feed.entries:
            item = entry.rss2(pretty_print)
            item[-1].tail = indent
            channel.append(item)
        for child in channel: child.tail = indent
        channel[-1].tail = '\n  ' if pretty_print else ''
        return rss
class Entry(object):
    """A single feed entry, renderable as an Atom <entry> or RSS 2.0 <item>.

    Following this module's convention, the first method argument is named
    'entry' instead of 'self'.
    """
    def __init__(entry, title, link, updated,
                 id=None, summary=None, content=None, published=None,
                 enclosure=None, author=None, rights=None, base=None, language=None):
        entry.link = link
        entry.title = title
        entry.updated = updated
        # The entry link doubles as the id when no explicit id is given.
        entry.id = id or entry.link
        entry.summary = summary
        entry.content = content
        entry.published = published
        entry.enclosure = enclosure
        entry.author = author
        entry.rights = rights
        entry.base = base
        entry.language = language
    def __str__(entry):
        return tostring(entry.atom())
    def atom(entry, pretty_print=True):
        """Build and return the Atom <entry> element."""
        indent = '\n    ' if pretty_print else ''
        xml = Element(atomtag('entry')) #, **nsmap_keyargs) # lxml
        xml.text = indent
        if entry.base: xml.set('{http://www.w3.org/XML/1998/namespace}base', entry.base)
        if entry.language: xml.set('{http://www.w3.org/XML/1998/namespace}lang', entry.language)
        link = SubElement(xml, atomtag('link'), href=entry.link)
        title = SubElement(xml, atomtag('title'))
        set_atom_text(title, entry.title)
        updated = SubElement(xml, atomtag('updated'))
        updated.text = atom_date(entry.updated)
        id = SubElement(xml, atomtag('id'))
        id.text = entry.id
        if entry.summary:
            summary = SubElement(xml, atomtag('summary'))
            set_atom_text(summary, entry.summary)
        if entry.content:
            content = SubElement(xml, atomtag('content'))
            set_atom_text(content, entry.content)
        if entry.enclosure:
            # NOTE(review): ElementTree attribute values must be strings;
            # presumably 'length' is already a str here -- TODO confirm.
            href, media_type, length = entry.enclosure
            enclosure = SubElement(xml, atomtag('link'),
                rel='enclosure', href=href, type=media_type, length=length)
        if entry.author:
            author = atom_author(entry.author)
            xml.append(author)
        if entry.rights:
            rights = SubElement(xml, atomtag('rights'))
            set_atom_text(rights, entry.rights)
        if entry.published:
            published = SubElement(xml, atomtag('published'))
            published.text = atom_date(entry.published)
        for child in xml: child.tail = indent
        xml[-1].tail = '\n' if pretty_print else ''
        return xml
    def rss2(entry, pretty_print=True):
        """Build and return the RSS 2.0 <item> element."""
        indent = '\n      ' if pretty_print else ''
        item = Element('item')
        item.text = indent
        set_rss2_text(SubElement(item, 'title'), entry.title)
        set_rss2_text(SubElement(item, 'description'), entry.summary or entry.content)
        SubElement(item, 'link').text = entry.link
        # BUG FIX: the conditional was misparenthesized as
        # (entry.id == 'true' if entry.link else 'false'), which compares the
        # id to the literal string 'true' and can yield a bool -- an invalid
        # attribute value for ElementTree.  isPermaLink is 'true' exactly
        # when the guid is the entry's own permalink.
        SubElement(item, 'guid', isPermaLink=('true' if entry.id == entry.link else 'false')).text = entry.id
        if entry.enclosure:
            href, media_type, length = entry.enclosure
            SubElement(item, 'enclosure', url=href, type=media_type, length=length)
        if entry.author: SubElement(item, 'author').text = rss2_author(entry.author)
        if entry.published: SubElement(item, 'pubDate').text = rss2_date(entry.published)
        for child in item: child.tail = indent
        item[-1].tail = '\n' if pretty_print else ''
        return item
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SecurityRulesOperations(object):
    """SecurityRulesOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2017-06-01".
    """
    # Expose the generated models module on the operations class.
    # NOTE: this class is AutoRest-generated; manual edits are lost on regeneration.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned API version for every request issued by this operations group.
        self.api_version = "2017-06-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, network_security_group_name, security_rule_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request of the long-running delete operation.

        Returns None (or ClientRawResponse when raw=True); the poller in
        delete() tracks completion.  Raises CloudError on unexpected status.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlates client requests with service-side logs.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202: accepted for async deletion; 204: rule already gone.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, network_security_group_name, security_rule_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified network security rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Fire the initial request with raw=True so the poller gets the
        # untouched response (needed for the polling headers).
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            network_security_group_name=network_security_group_name,
            security_rule_name=security_rule_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Delete has no body to deserialize; only wrap when raw was requested.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}
    def get(
            self, resource_group_name, network_security_group_name, security_rule_name, custom_headers=None, raw=False, **operation_config):
        """Get the specified network security rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SecurityRule or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_06_01.models.SecurityRule or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}
    def _create_or_update_initial(
            self, resource_group_name, network_security_group_name, security_rule_name, security_rule_parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT request of the long-running create-or-update.

        Returns the deserialized SecurityRule from the 200/201 response (or
        ClientRawResponse when raw=True).  Raises CloudError otherwise.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200: updated existing rule; 201: created a new one.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SecurityRule', response)
        if response.status_code == 201:
            deserialized = self._deserialize('SecurityRule', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, network_security_group_name, security_rule_name, security_rule_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a security rule in the specified network security
        group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param security_rule_name: The name of the security rule.
        :type security_rule_name: str
        :param security_rule_parameters: Parameters supplied to the create or
         update network security rule operation.
        :type security_rule_parameters:
         ~azure.mgmt.network.v2017_06_01.models.SecurityRule
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns SecurityRule or
         ClientRawResponse<SecurityRule> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.SecurityRule]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_06_01.models.SecurityRule]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            network_security_group_name=network_security_group_name,
            security_rule_name=security_rule_name,
            security_rule_parameters=security_rule_parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Final polled response carries the resulting SecurityRule body.
            deserialized = self._deserialize('SecurityRule', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'}
    def list(
            self, resource_group_name, network_security_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all security rules in a network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security
         group.
        :type network_security_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SecurityRule
        :rtype:
         ~azure.mgmt.network.v2017_06_01.models.SecurityRulePaged[~azure.mgmt.network.v2017_06_01.models.SecurityRule]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            """Fetch one page: the base URL on the first call, next_link after."""
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # next_link already embeds its query string.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.SecurityRulePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.SecurityRulePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'}
| |
#!/usr/bin/env python
"""beanstalkc - A beanstalkd Client Library for Python"""
import logging
import socket
import sys
__license__ = '''
Copyright (C) 2008-2016 Andreas Bolka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__version__ = '0.4.0'
# Standard beanstalkd endpoint defaults.
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 11300
# Mid-range priority (beanstalkd priorities are 32-bit; smaller = more urgent).
DEFAULT_PRIORITY = 2 ** 31
# Default time-to-run, in seconds, a worker gets for a reserved job.
DEFAULT_TTR = 120
DEFAULT_TUBE_NAME = 'default'
class BeanstalkcException(Exception):
    """Root of the beanstalkc exception hierarchy."""


class UnexpectedResponse(BeanstalkcException):
    """Server replied with a status the command did not anticipate."""


class CommandFailed(BeanstalkcException):
    """Server replied with a known, expected error status."""


class DeadlineSoon(BeanstalkcException):
    """A reserved job's TTR is about to expire (DEADLINE_SOON)."""
class SocketError(BeanstalkcException):
    """Library-specific wrapper raised in place of low-level socket.error."""

    @staticmethod
    def wrap(wrapped_function, *args, **kwargs):
        """Invoke wrapped_function, translating socket.error to SocketError."""
        try:
            result = wrapped_function(*args, **kwargs)
        except socket.error:
            err = sys.exc_info()[1]
            raise SocketError(err)
        return result
class Connection(object):
    """Client connection to a beanstalkd server.

    Opens a TCP socket on construction and speaks the line-oriented
    beanstalk protocol.  NOTE(review): written for Python 2 str/bytes
    semantics (sendall is called with str literals) — confirm before
    running under Python 3.
    """
    def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, parse_yaml=True,
                 connect_timeout=socket.getdefaulttimeout()):
        # NOTE: the connect_timeout default is captured once at import time.
        if parse_yaml is True:
            # Lazy-resolve PyYAML; fall back to returning raw strings.
            try:
                parse_yaml = __import__('yaml').load
            except ImportError:
                logging.error('Failed to load PyYAML, will not parse YAML')
                parse_yaml = False
        self._connect_timeout = connect_timeout
        # Either a YAML parser or the identity function.
        self._parse_yaml = parse_yaml or (lambda x: x)
        self.host = host
        self.port = port
        self.connect()
    def __enter__(self):
        # Context-manager support: `with Connection(...) as conn:`.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def connect(self):
        """Connect to beanstalkd server."""
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Timeout applies to connect() only; reset to blocking afterwards.
        self._socket.settimeout(self._connect_timeout)
        SocketError.wrap(self._socket.connect, (self.host, self.port))
        self._socket.settimeout(None)
        # Buffered file wrapper used for all response reads.
        self._socket_file = self._socket.makefile('rb')
    def close(self):
        """Close connection to server."""
        # Best-effort polite quit; ignore network errors on teardown.
        try:
            self._socket.sendall('quit\r\n')
        except socket.error:
            pass
        try:
            self._socket.close()
        except socket.error:
            pass
    def reconnect(self):
        """Re-connect to server."""
        self.close()
        self.connect()
    def _interact(self, command, expected_ok, expected_err=[]):
        """Send a command and classify the reply.

        Returns the reply's trailing words on an expected_ok status, raises
        CommandFailed on an expected_err status, UnexpectedResponse otherwise.
        NOTE(review): mutable default is never mutated here, so it is benign.
        """
        SocketError.wrap(self._socket.sendall, command)
        status, results = self._read_response()
        if status in expected_ok:
            return results
        elif status in expected_err:
            raise CommandFailed(command.split()[0], status, results)
        else:
            raise UnexpectedResponse(command.split()[0], status, results)
    def _read_response(self):
        """Read one reply line; return (status_word, remaining_words)."""
        line = SocketError.wrap(self._socket_file.readline)
        if not line:
            # Empty read means the server closed the connection.
            raise SocketError()
        response = line.split()
        return response[0], response[1:]
    def _read_body(self, size):
        """Read a size-byte job body followed by its CRLF terminator."""
        body = SocketError.wrap(self._socket_file.read, size)
        SocketError.wrap(self._socket_file.read, 2) # trailing crlf
        if size > 0 and not body:
            raise SocketError()
        return body
    def _interact_value(self, command, expected_ok, expected_err=[]):
        """Like _interact, but return only the first word of the reply."""
        return self._interact(command, expected_ok, expected_err)[0]
    def _interact_job(self, command, expected_ok, expected_err, reserved=True):
        """Run a command whose reply is `<jid> <size>` + body; return a Job."""
        jid, size = self._interact(command, expected_ok, expected_err)
        body = self._read_body(int(size))
        return Job(self, int(jid), body, reserved)
    def _interact_yaml(self, command, expected_ok, expected_err=[]):
        """Run a command whose reply is `<size>` + a YAML body; parse it."""
        size, = self._interact(command, expected_ok, expected_err)
        body = self._read_body(int(size))
        return self._parse_yaml(body)
    def _interact_peek(self, command):
        """Run a peek-style command; return a non-reserved Job or None."""
        try:
            return self._interact_job(command, ['FOUND'], ['NOT_FOUND'], False)
        except CommandFailed:
            return None
    # -- public interface --
    def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
        """Put a job into the current tube. Returns job id."""
        assert isinstance(body, str), 'Job body must be a str instance'
        jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
            priority, delay, ttr, len(body), body),
                                   ['INSERTED'],
                                   ['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
        return int(jid)
    def reserve(self, timeout=None):
        """Reserve a job from one of the watched tubes, with optional timeout
        in seconds. Returns a Job object, or None if the request times out."""
        if timeout is not None:
            command = 'reserve-with-timeout %d\r\n' % timeout
        else:
            command = 'reserve\r\n'
        try:
            return self._interact_job(command,
                                      ['RESERVED'],
                                      ['DEADLINE_SOON', 'TIMED_OUT'])
        except CommandFailed:
            # Distinguish a plain timeout (None) from an imminent TTR
            # deadline, which is surfaced as DeadlineSoon.
            exc = sys.exc_info()[1]
            _, status, results = exc.args
            if status == 'TIMED_OUT':
                return None
            elif status == 'DEADLINE_SOON':
                raise DeadlineSoon(results)
    def kick(self, bound=1):
        """Kick at most bound jobs into the ready queue."""
        return int(self._interact_value('kick %d\r\n' % bound, ['KICKED']))
    def kick_job(self, jid):
        """Kick a specific job into the ready queue."""
        self._interact('kick-job %d\r\n' % jid, ['KICKED'], ['NOT_FOUND'])
    def peek(self, jid):
        """Peek at a job. Returns a Job, or None."""
        return self._interact_peek('peek %d\r\n' % jid)
    def peek_ready(self):
        """Peek at next ready job. Returns a Job, or None."""
        return self._interact_peek('peek-ready\r\n')
    def peek_delayed(self):
        """Peek at next delayed job. Returns a Job, or None."""
        return self._interact_peek('peek-delayed\r\n')
    def peek_buried(self):
        """Peek at next buried job. Returns a Job, or None."""
        return self._interact_peek('peek-buried\r\n')
    def tubes(self):
        """Return a list of all existing tubes."""
        return self._interact_yaml('list-tubes\r\n', ['OK'])
    def using(self):
        """Return the tube currently being used."""
        return self._interact_value('list-tube-used\r\n', ['USING'])
    def use(self, name):
        """Use a given tube."""
        return self._interact_value('use %s\r\n' % name, ['USING'])
    def watching(self):
        """Return a list of all tubes being watched."""
        return self._interact_yaml('list-tubes-watched\r\n', ['OK'])
    def watch(self, name):
        """Watch a given tube."""
        return int(self._interact_value('watch %s\r\n' % name, ['WATCHING']))
    def ignore(self, name):
        """Stop watching a given tube."""
        try:
            return int(self._interact_value('ignore %s\r\n' % name,
                                            ['WATCHING'],
                                            ['NOT_IGNORED']))
        except CommandFailed:
            # Tried to ignore the only tube in the watchlist, which failed.
            return 0
    def stats(self):
        """Return a dict of beanstalkd statistics."""
        return self._interact_yaml('stats\r\n', ['OK'])
    def stats_tube(self, name):
        """Return a dict of stats about a given tube."""
        return self._interact_yaml('stats-tube %s\r\n' % name,
                                   ['OK'],
                                   ['NOT_FOUND'])
    def pause_tube(self, name, delay):
        """Pause a tube for a given delay time, in seconds."""
        self._interact('pause-tube %s %d\r\n' % (name, delay),
                       ['PAUSED'],
                       ['NOT_FOUND'])
    # -- job interactors --
    def delete(self, jid):
        """Delete a job, by job id."""
        self._interact('delete %d\r\n' % jid, ['DELETED'], ['NOT_FOUND'])
    def release(self, jid, priority=DEFAULT_PRIORITY, delay=0):
        """Release a reserved job back into the ready queue."""
        self._interact('release %d %d %d\r\n' % (jid, priority, delay),
                       ['RELEASED', 'BURIED'],
                       ['NOT_FOUND'])
    def bury(self, jid, priority=DEFAULT_PRIORITY):
        """Bury a job, by job id."""
        self._interact('bury %d %d\r\n' % (jid, priority),
                       ['BURIED'],
                       ['NOT_FOUND'])
    def touch(self, jid):
        """Touch a job, by job id, requesting more time to work on a reserved
        job before it expires."""
        self._interact('touch %d\r\n' % jid, ['TOUCHED'], ['NOT_FOUND'])
    def stats_job(self, jid):
        """Return a dict of stats about a job, by job id."""
        return self._interact_yaml('stats-job %d\r\n' % jid,
                                   ['OK'],
                                   ['NOT_FOUND'])
class Job(object):
    """Handle for a single beanstalkd job, bound to its originating Connection."""

    def __init__(self, conn, jid, body, reserved=True):
        self.conn = conn
        self.jid = jid
        self.body = body
        self.reserved = reserved

    def _priority(self):
        """Look up this job's current priority; fall back to the default."""
        stats = self.stats()
        return stats['pri'] if isinstance(stats, dict) else DEFAULT_PRIORITY

    # -- public interface --

    def delete(self):
        """Delete this job."""
        self.conn.delete(self.jid)
        self.reserved = False

    def release(self, priority=None, delay=0):
        """Release this job back into the ready queue."""
        if not self.reserved:
            return
        self.conn.release(self.jid, priority or self._priority(), delay)
        self.reserved = False

    def bury(self, priority=None):
        """Bury this job."""
        if not self.reserved:
            return
        self.conn.bury(self.jid, priority or self._priority())
        self.reserved = False

    def kick(self):
        """Kick this job alive."""
        self.conn.kick_job(self.jid)

    def touch(self):
        """Touch this reserved job, requesting more time to work on it before
        it expires."""
        if self.reserved:
            self.conn.touch(self.jid)

    def stats(self):
        """Return a dict of stats about this job."""
        return self.conn.stats_job(self.jid)
# Run the test suite when executed directly (requires nose and a .nose.cfg).
if __name__ == '__main__':
    import nose
    nose.main(argv=['nosetests', '-c', '.nose.cfg'])
| |
import json, pickle
from itertools import groupby
from fronter import *
class Survey(Tool):
    class Reply(object):
        """One student reply to the survey (a row in the reply list)."""
        # delete: form fields needed to delete this reply;
        # show: form fields needed to display it.
        Data = namedtuple('Data', ('delete', 'show'))
        def __init__(self, firstname, lastname, time, status, score, data):
            self.firstname = firstname
            self.lastname = lastname
            self.title = '%s %s' % (firstname, lastname)
            # NOTE(review): '_code' flags zero-score replies — exact consumer
            # not visible here; confirm against the rest of the tool.
            self._code = '0' if score else '1'
            # Missing timestamp/status render as a highlighted 'NA'.
            self.date = col(time.strftime('%m-%d %H:%M'), c.HL, True) if time else col('NA', c.ERR, True)
            self.status = col(status, c.HEAD) if status else col('NA', c.ERR)
            self.score = score if score else 0
            self.data = data
        def str(self):
            # Fixed-width columns: date, student name, percentage score, status.
            return '%-21s %-40s %5.1f%% %s' % (self.date, self.title[:39], self.score, self.status)
Answer = namedtuple('Answer', ('text', 'value', 'correct', 'min', 'max'))
Answer.__new__.__defaults__ = (False, 0, 0)
    class Question():
        """One survey question: its text, alternatives, and the user's answer."""
        # Terminal checkbox glyphs marking answered/unanswered questions.
        CHECKED = col('[', c.ERR) + col('*', c.HL) + col(']', c.ERR)
        UNCHECKED = col('[', c.ERR) + ' ' + col(']', c.ERR)
        # Human-readable hint per question type.
        Hints = {'radio' : 'one answer',
                 'checkbox' : 'multiple choice',
                 'input' : 'written answer',
                 'textarea' : 'written answer'}
        # Input prompt per question type (textarea uses an external editor).
        Prompts = {'radio' : 'answer <index> : ',
                   'checkbox' : 'answer <index index ... > : ',
                   'input' : 'answer : '}
        # NOTE(review): mutable default arguments below are shared across calls
        # if ever mutated — harmless only while callers always pass fresh lists.
        def __init__(self, text, idx, images = [], answers = [], qtype = None):
            self.text = text
            self.idx = idx
            self.images = images
            self.answers = answers
            self.qtype = qtype
            # First line of the question text serves as its listing title.
            self.title = text.split('\n')[0]
            self.hint = Survey.Question.Hints.get(qtype, '')
            self.prompt = Survey.Question.Prompts.get(qtype, '')
            # Expected number of answers: radio takes exactly one.
            self._len = 1 if self.qtype == 'radio' else len(self.answers)
            self._given_answer = ''
            self._submit = []
            # Questions with no alternatives are shown as already checked.
            self.checkbox = Survey.Question.UNCHECKED if self._len else Survey.Question.CHECKED
        def str(self, show_answer=False):
            # One-line listing; optionally append the given answer instead of
            # the checkbox (textarea answers are quoted in a triple-quote block).
            return '%-60s ... ' % self.title[:59] + (self.checkbox if not show_answer else
                   (col(self._given_answer, c.HEAD) if self.qtype != 'textarea' else \
                    col('\n"""\n%s\n"""' % self._given_answer, c.HEAD)))
        def ask(self):
            """Interactively prompt for (or edit) this question's answer.

            Loops until a valid answer is given or the user enters nothing.
            Populates self._submit / self._given_answer and checks the box.
            """
            print('\n' + col('Q #%i ' % self.idx, c.HL) + col(self.text, c.DIR))
            if self.images:
                print(col('\nAttached images:', c.HL))
                for fname in self.images:
                    print('file://' + fname)
                print('')
            # Purely informational questions have no alternatives to answer.
            if not self.answers:
                return
            if self.qtype not in ('textarea', 'input'):
                # List the alternatives with 1-based indices.
                for i, ans in enumerate(self.answers):
                    print(col('[%-3i] ' % (i + 1), c.HL) + ans.text)
            print('\n' + col(self.hint, c.ERR))
            while 1:
                try:
                    if self.qtype == 'textarea':
                        # Free-text answers are written in an external editor
                        # (txt helper); re-editing reuses the same temp file.
                        if self._given_answer:
                            print(col('Your answer:', c.HL))
                            print('"""\n' + self._given_answer + '\n"""\n')
                        if not Tool._ask('open editor?'):
                            return
                        if self._given_answer:
                            if txt.edit(self._textf):
                                with open(self._textf, 'rb') as f:
                                    self._given_answer = f.read().strip()
                        else:
                            fd, fname = txt.new()
                            with os.fdopen(fd, 'rb') as f:
                                self._given_answer = f.read().strip()
                            self._textf = fname
                        self.answers = self._submit = [Survey.Answer('', self._given_answer)]
                        self.checkbox = Survey.Question.CHECKED
                        return
                    # radio/checkbox/input
                    if self._given_answer:
                        print(col('Your answer: %s' % self._given_answer, c.HL))
                    reply = input('> %s' % self.prompt).strip()
                    # Empty reply keeps the current answer and exits the loop.
                    if not reply:
                        break
                    if self.qtype == 'input':
                        # NOTE(review): .decode('utf8') implies Python 2 byte
                        # strings from input() — breaks on Python 3; confirm.
                        self._given_answer = reply.decode('utf8')
                        self.answers = self._submit = [Survey.Answer('', reply)]
                    else:
                        # Parse space-separated 1-based indices; bad counts or
                        # out-of-range values are caught by the handlers below.
                        ix = list(map(int, reply.split())) # ValueError
                        assert(len(ix) and len(ix) <= self._len)
                        if not all(i > 0 for i in ix):
                            raise IndexError
                        self._submit = [self.answers[i-1] for i in ix] # IndexError
                        self._given_answer = ' '.join(map(str, ix))
                    self.checkbox = Survey.Question.CHECKED
                    break
                except ValueError:
                    print(col(' !! (space separated) integer(s) required', c.ERR))
                except IndexError:
                    print(col(' !! answer out of range', c.ERR))
                except AssertionError:
                    print(col(' !! wrong number of answers given', c.ERR))
globals()[Question.__name__] = Question
globals()[Answer.__name__] = Answer
    def __init__(self, client, title, url, treeid):
        """Bind this tool to one survey node in the fronter tree.

        :param client: authenticated fronter client used for requests.
        :param title: survey display title.
        :param url: survey URL.
        :param treeid: fronter tree node id; also keys the local save file.
        """
        super(Survey, self).__init__()
        self.client = client
        # Endpoint all survey actions are posted to (TARGET comes from Tool).
        self.PATH = self.TARGET + 'questiontest/index.phtml'
        self.title = title
        self.url = url
        self.treeid = treeid
        # Local path for persisting survey state between sessions.
        self._save = os.path.join('fronter', 'save_survey_%i' % treeid)
        self._dirty = False
    def str(self):
        """Return the survey title, highlighted for terminal listings."""
        return col(self.title, c.HL)
def print_replies(self):
for idx, reply in enumerate(self.replies):
print(col('[%-3i] ' % (idx + 1), c.HL) + reply.str())
    def read_replies(self, resp=None):
        """Refresh self.replies from the server (or a just-received response).

        :param resp: optional HTTP response to parse instead of fetching the
            reply list anew.
        """
        xml = None
        if resp:
            xml = fromstring(resp.read())
        else:
            # NOTE(review): self.surveyid is set elsewhere (not visible in
            # this chunk) — must be populated before calling this.
            xml = self.get_xml(self.PATH + '?action=show_reply_list&surveyid=%i' % self.surveyid)
        self.replies = Survey._parse_replies(xml)
    def get_reply(self, idx):
        """Print a student's reply page by page, then the final evaluation.

        :param idx: 1-based reply index (resolved via self._get_reply).
        """
        reply = self._get_reply(idx)
        if not reply:
            return
        payload = dict((k,v) for k,v in reply.data.show.items())
        payload['pageno'] = 1
        xml = self.post(self.PATH, payload, xml=True)
        text = xml.xpath('//span[@class="label"]')
        try:
            # %&!#/%!
            # Scrape the fixed positions fronter's markup puts these in:
            # text[5]=overall comment, text[6]=evaluation/grade, text[8]=score.
            # NOTE(review): brittle — depends on fronter's exact page layout.
            overall_hl = text[5].text.strip()
            overall_comment = text[5].getparent().text_content().strip()[len(overall_hl):]
            eval_grade = text[6].getparent().text_content().strip()
            score = text[8].text.strip()
        except IndexError:
            print(' !! this reply has been deleted')
            return
        # Walk the per-question pages until a page with too few labels
        # signals the end (AssertionError breaks the loop).
        while 1:
            try:
                assert(len(text) > 2)
                print('\n' + col(text[0].text.strip(), c.DIR))
                print(text[1].text.strip())
                comment_hl = text[2].text.strip()
                print(col(comment_hl, c.HL))
                print(text[2].getparent().text_content().strip()[len(comment_hl):])
                payload['pageno'] += 1
                xml = self.post(self.PATH, payload, xml=True)
                text = xml.xpath('//span[@class="label"]')
            except AssertionError:
                break
        print(col('\nFinal score and comments', c.HEAD))
        print(score)
        print(col(overall_hl, c.HL))
        print(overall_comment)
        print(col(eval_grade, c.HL))
    def get_reply_admin(self, idx):
        """Grade a student's reply question by question (teacher view).

        Walks every page of the survey, shows the student's answers next to
        the correct ones, prompts for per-question scores/comments, posts
        them back, and finally hands off to evaluate() for the total.

        :param idx: 1-based reply index (resolved via self._get_reply).
        """
        reply = self._get_reply(idx)
        if not reply:
            return
        payload = dict((k,v) for k,v in reply.data.show.items())
        # self.pages maps surveyid -> {question id -> Question}.
        # NOTE(review): populated elsewhere (not visible in this chunk).
        for surveyid, questions in self.pages.items():
            # Skip pages that contain no answerable questions.
            if not any(q.answers for q in questions.values()):
                continue
            teacher = {}
            payload['surveyid'] = surveyid
            # Omg, fronter html ...
            xml = self.post(self.PATH, payload, xml=True, replace=('<br>', '\n'))
            for i, item in enumerate(questions.items()):
                qid, q = item
                if not q.answers:
                    continue
                # Anchor <a name="questionN"> precedes each question's markup.
                _q = xml.xpath('//a[@name="question%i"]' % qid)[0].getnext()
                print('\n' + col('Q #%i ' % (i+1), c.HL) + col(q.text, c.DIR))
                if q.qtype == 'textarea':
                    # Free-text question: show solution hint, the student's
                    # text, and prompt for a manual score within [min, max].
                    a = q.answers[0]
                    if a.text:
                        print('\n' + col('Solution/hint:', c.HEAD))
                        print(a.text)
                    print('\n' + col('Student\'s answer(s):', c.HL))
                    answer = _q.xpath('..//span')[0].text_content().strip()
                    print('"""\n' + answer + '\n"""')
                    sid = 'q_score_%i' % qid
                    # NOTE(review): '//input' searches the whole document, not
                    # this question's subtree — relies on unique field names.
                    score = _q.xpath('//input[@name="%s"]' % sid)[0].value
                    print(col('Score: %s' % score, c.HL))
                    while 1:
                        try:
                            score = input('> score (min=%g, max=%g) : ' % (a.min, a.max)).strip()
                            if not score:
                                break
                            score = float(score)
                            assert(score >= a.min and score <= a.max)
                            teacher['q_score_%i' % qid] = score
                            break
                        except ValueError:
                            print(col(' !! number required', c.ERR))
                        except AssertionError:
                            print(col(' !! answer out of range', c.ERR))
                else:
                    # Choice question: auto-score from checked boxes.
                    print('\n' + col('Student\'s answer(s):', c.HL))
                    checked = { int(c.value) for c in _q.xpath('.//input[@checked]') }
                    correct = { aid for aid, a in q.answers.items() if a.correct }
                    if not checked:
                        print(col('<blank>', c.ERR))
                        continue
                    score = 0
                    # Correct answers by student
                    for aid in correct & checked:
                        print(col('* ', c.HL) + q.answers[aid].text)
                        score += q.answers[aid].max
                    if checked == correct:
                        continue
                    # Wrong answers by students
                    for aid in checked - (correct & checked):
                        print(col('* ' + q.answers[aid].text, c.ERR))
                        score += q.answers[aid].max # Negative or zero
                    maxscore = 0
                    print('\n' + col('Correct answer(s):', c.HEAD))
                    for aid in correct:
                        print(col('* ', c.HL) + q.answers[aid].text)
                        maxscore += q.answers[aid].max
                    print(col('\nScore: ', c.HEAD) + '%g/%g' % (score, maxscore))
                comment = _q.xpath('//textarea[@name="teachercomment"]') # Hmm, occasionally not found
                if comment:
                    comment = comment[0].text_content()
                    edit_comment = True
                    if comment:
                        print(col('\nComment:', c.HL))
                        print('"""\n' + wrap(comment) + '\n"""')
                        edit_comment = Tool._ask('delete and make new comment?')
                    if edit_comment:
                        comment = input('> comment : ').strip()
                        if comment:
                            teacher['teachercomment'] = comment
            # Post this page's scores/comments back if anything was graded.
            if teacher:
                teacher.update(payload)
                teacher['do_action'] = 'save_comment'
                self.post(self.PATH, teacher)
        print(col('\n ******', c.HEAD))
        self.evaluate(idx)
def evaluate(self, idx):
    """Show a reply's total score and optionally save an evaluation/grade and comment.

    idx is the 1-based reply index as typed by the user. Posts the
    'total_score' action to fetch the grading page, prints totals and any
    existing evaluation, then — when nothing is set yet, or on user
    confirmation — prompts for new values and posts 'save_total_score'.
    """
    reply = self._get_reply(idx)
    if not reply:
        return
    # Start from the payload used to show this reply, then switch the action.
    payload = dict((k,v) for k,v in reply.data.show.items())
    payload['action'] = 'total_score'
    xml = self.post(self.PATH, payload, xml=True)
    # Score and max-score cells are siblings following the total_score input.
    eval_grade = xml.xpath('//input[@name="total_score"]')[0]
    score = eval_grade.getparent().getnext()
    max_score = score.getnext()
    score = re.findall('\d+\.\d+', score.text)[0]
    max_score = re.findall('\d+\.\d+', max_score.text)[0]
    print(col('\nTotal score: ', c.HL) + '%s/%s' % (score, max_score))
    eval_grade = eval_grade.value.strip()
    if eval_grade:
        print(col('Evaluation/grade: ', c.HL) + eval_grade)
    comment = xml.xpath('//textarea[@name="total_comment"]')[0].text_content().strip()
    if comment:
        print(col('Comment:', c.HL))
        print('"""\n' + wrap(comment) + '\n"""')
    # Prompt automatically when no evaluation exists yet, otherwise ask first.
    if (not eval_grade and not comment) or Tool._ask('edit evaluation/grade and comment?'):
        eval_grade = input('> evaluation/grade : ').strip()
        if eval_grade:
            payload['total_score'] = eval_grade
        comment = input('> final comment : ').strip()
        if comment:
            payload['total_comment'] = comment
        if eval_grade or comment:
            payload['do_action'] = 'save_total_score'
            self.post(self.PATH, payload)
            self.read_replies()
def delete_idx(self, idx):
    """Delete the replies selected by a whitespace-separated string of 1-based indices.

    Non-positive indices are ignored (they would wrap around the list),
    as are replies that have no data (nothing delivered yet).
    Raises ValueError on non-numeric tokens, IndexError on out-of-range ones.
    """
    # BUG fix: the original compared the *string* token with 0 ('if i > 0'),
    # a TypeError on Python 3, and passed a lazy filter object to delete(),
    # which iterates its argument twice (print pass + payload pass).
    indices = [int(i) for i in idx.strip().split()]
    to_delete = [self.replies[i - 1] for i in indices
                 if i > 0 and self.replies[i - 1].data]
    self.delete(to_delete)
def delete(self, to_delete):
    """List the given replies, ask for confirmation, then delete them server-side.

    to_delete may be any iterable of Reply objects; it is materialized
    because it is iterated twice (once to print, once to build the payload)
    and because a lazy iterator is always truthy, breaking the empty check.
    """
    to_delete = list(to_delete)
    for r in to_delete:
        print(col(' * ', c.ERR) + r.str())
    if not to_delete or not Tool._ask('delete?'):
        return
    payload = [('do_action' , 'delete_replies' ),
               ('action'    , 'show_reply_list'),
               ('surveyid'  , self.surveyid    )]
    # Each reply carries its own (name, value) pairs identifying it for deletion.
    for r in to_delete:
        payload += r.data.delete
    response = self.post(self.PATH, payload)
    self.read_replies(response)
def clean(self):
    """Delete every reply except the best-scoring one per student."""
    doomed = []
    # self.replies is already sorted by title, which is what groupby needs.
    for _student, grouped in groupby(self.replies, key=lambda r: r.title):
        ranked = sorted(grouped, key=lambda r: r.score)
        # Everything but the last (highest score) entry is redundant;
        # a single-entry group contributes nothing here.
        doomed.extend(ranked[:-1])
    self.delete(doomed)
def print_questions(self):
    """List every question with its 1-based display index."""
    for number, question in enumerate(self.questions.values(), start=1):
        print(col('[%-3i] ' % number, c.HL) + question.str())
def goto_question(self, idx):
    """Jump to a single question (1-based index) and prompt for its answer.

    Raises IndexError for non-positive or out-of-range indices;
    ValueError for non-numeric input.
    """
    idx = int(idx) - 1
    if idx < 0:
        # Negative indices would silently wrap to the end of the list.
        raise IndexError
    self._dirty = True
    # BUG fix: dict views are not subscriptable in Python 3 —
    # self.questions.values()[idx] raised TypeError. Materialize first.
    list(self.questions.values())[idx].ask()
def take_survey(self):
    """Walk through every question in order, prompting for an answer to each."""
    # Any answer given marks the survey as having unsaved changes.
    self._dirty = True
    for question in self.questions.values():
        question.ask()
def submit(self):
    """Review all answers, confirm with the user, and POST them to the survey.

    On success the score percentage is printed and the local save file is
    removed; on failure the survey stays marked dirty so the answers can
    still be saved on exit.
    """
    for idx, q in enumerate(self.questions.values()):
        print(col('[%-3i] ' % (idx + 1), c.HL) + q.str(show_answer=True))
    if not Tool._ask('submit?'):
        return
    # One (name, value) pair per selected answer, plus the stored form payload.
    payload = [(qid, a.value) for qid, q in self.questions.items() for a in q._submit]
    payload += [(k,v) for k,v in self._payload.items()]
    xml = self.post(self.PATH, payload, xml=True, encoding='utf-8')
    self._dirty = False
    # The last "label" span on the result page holds the score percentage;
    # the for-else re-marks the survey dirty when it is missing.
    for span in xml.xpath('//span[@class="label"]')[::-1]:
        percent = re.search('\d+%', span.text)
        if percent:
            print(col('Score: ' + percent.group(), c.HEAD))
            break
    else:
        print(col(' !! something went wrong', c.ERR))
        self._dirty = True
    if not self._dirty:
        try:
            # Best effort: the local save file may not exist.
            os.unlink(self._save)
        except:
            pass
    self.read_replies()
def parse(self):
    """Load the survey page, register shell commands and build the question list.

    Admin users ('viewall' != '1') get grading/cleanup commands; regular
    users get answering commands. Previously saved answers are restored
    from the pickle at self._save when present. Returns early for
    inactive surveys (no page counter on the page).
    """
    xml, surveyid_jumps = self.get_xml(self.url, find='test_page_jump\([\d,\s]+\)')
    payload = self.get_form(xml)
    self._payload = payload
    self.surveyid = int(payload['surveyid'])
    print(col(' ## loading questions ...', c.ERR))
    self.read_replies()
    if payload['viewall'] != '1':  # You are admin
        self.commands['ls'] = Tool.Command('ls', self.print_replies, '',
                                           'list replies and scores')
        self.commands['get'] = Tool.Command('get', self.get_reply_admin, '<index>',
                                            'read reply, comment on errors and evaluate')
        self.commands['eval'] = Tool.Command('eval', self.evaluate, '<index>', 'evaluate reply')
        self.commands['del'] = Tool.Command('del', self.delete_idx, '<index>', 'delete replies')
        self.commands['clean'] = Tool.Command('clean', self.clean, '',
                                              'delete all but the best reply for each student')
        self.commands['up'] = Tool.Command('up', self.read_replies, '', 'refresh replies')
        self._read_questions_and_solutions()
    else:
        self.commands['lr'] = Tool.Command('lr', self.print_replies, '', 'list replies and scores')
        self.commands['get'] = Tool.Command('get', self.get_reply, '<index>',
                                            'read comments to a reply')
    try:
        self.npages = int(re.search('Side: [0-9]+/([0-9]+)', xml.text_content()).groups()[0])
    except (AttributeError, ValueError):
        # No "Side: x/y" counter on the page: closed/inactive survey.
        # (Narrowed from a bare except: re.search() returning None raises
        # AttributeError on .groups(); int() raises ValueError.)
        print(col(' ## inactive survey', c.ERR))
        return
    self.commands['ls'] = Tool.Command('ls', self.print_questions, '', 'list questions')
    self.commands['go'] = Tool.Command('go', self.take_survey, '', 'take survey')
    self.commands['goto'] = Tool.Command('goto', self.goto_question,
                                         '<index>', 'go to specific question')
    self.commands['post'] = Tool.Command('post', self.submit, '', 'review and submit answers')
    # Try restoring a previously saved (pickled) set of questions/answers.
    loaded = False
    if os.path.exists(self._save):
        try:
            with open(self._save, 'rb') as f:
                self.questions = pickle.load(f)
            loaded = True
        except (IOError, OSError, KeyError):
            # BUG fix: the original referenced an undefined name 'fname'
            # here (NameError); report the actual save path instead.
            print(col(' !! failed to load saved survey %s' % self._save, c.ERR))
    if loaded:  # Load last page to get submithash
        payload['surveyid'] = self.questions._last_surveyid
        payload['pageno'] = self.npages - 1
        xml = self.post(self.PATH, payload, xml=True)
    else:
        items = xml.xpath('//table/tr/td/ul')
        idx, self.questions = self._parse_page(items, 0)
        pageno = 1
        while pageno < self.npages:
            # Each page jump carries the next page's surveyid in its JS call.
            payload['surveyid'] = int(surveyid_jumps[-1].split('(')[-1].split(',')[1])
            payload['pageno'] = pageno
            xml, surveyid_jumps = self.post(self.PATH, payload, xml=True,
                                            find='test_page_jump\([\d,\s]+\)')
            items = xml.xpath('//table/tr/td/ul')
            idx, questions = self._parse_page(items, idx)
            self.questions.update(questions)
            pageno += 1
        self.questions._last_surveyid = payload['surveyid']
    # The submithash is set by an inline script on the last page.
    for script in xml.xpath('//script[@type="text/javascript"]')[::-1]:
        submithash = re.search('submithash\.value\s?=\s?"(\w+)";', script.text_content())
        if submithash:
            payload['submithash'] = submithash.groups()[0]
            break
    else:
        print(col(' !! failed to get submithash (submit might not work)', c.ERR))
    # Prepare for submit
    payload['test_section'] = self.surveyid
    payload['check_surveyid'] = payload['surveyid']
    payload['do_action'] = 'send_answer'
    payload['action'] = ''
    # FIXME: extra POST apparently needed to sync server-side state before submit.
    self.post(self.PATH, payload)
def _parse_page(self, xmls, idx):
    """Parse one survey page into an OrderedDict of Survey.Question objects.

    xmls: the question <ul> elements of the page.
    idx:  running question counter; incremented per question.
    Returns (idx, questions) where questions maps the form input name
    (or 'info_<n>' for plain text items) to a Survey.Question.
    """
    questions = OrderedDict()
    for xml in xmls:  # (was enumerate(); the index was never used)
        idx += 1
        # fronter HTML is a mess: question text is scattered across
        # siblings both inside the <ul> and after it, up to the next
        # input area / question.
        to_parse = []
        more_text = xml[0].getnext()
        while more_text is not None and more_text.tag not in ('textarea', 'fieldset'):
            to_parse.append(more_text)
            more_text = more_text.getnext()
        more_text = xml.getnext()
        while more_text is not None and more_text.tag != 'ul':
            to_parse.append(more_text)
            more_text = more_text.getnext()
        text = (wrap(xml[0].text_content().strip()) + '\n' + html.to_text(to_parse)).strip()
        # Download inline images to temp files so they can be displayed.
        images = []
        for img in xml.xpath('.//img'):  # (was enumerate() shadowing the outer loop var)
            response = self.get(self.ROOT + img.get('src'))
            ext = response.headers['content-disposition'].split('=')[-1].strip('"').split('.')[-1]
            fd, fname = mkstemp(prefix='fronter_', suffix='.'+ext)
            with os.fdopen(fd, 'wb') as f:
                copyfileobj(response, f)
            images.append(fname)
        # Classify the question by the kind of input controls it contains.
        radio = xml.xpath('.//input[@type="radio"]')
        checkbox = xml.xpath('.//input[@type="checkbox"]')
        text_input = xml.xpath('.//input[@class="question-reply"]')
        textarea = xml.xpath('.//textarea[@class="question-textarea"]')
    	 
        if radio:
            answers = [Survey.Answer(wrap(a.label.text), a.get('value')) for a in radio]
            questions[radio[0].name] = Survey.Question(text, idx, images, answers, 'radio')
        elif checkbox:
            answers = [Survey.Answer(wrap(a.label.text), a.get('value')) for a in checkbox]
            questions[checkbox[0].name] = Survey.Question(text, idx, images, answers, 'checkbox')
        elif text_input:
            answers = [Survey.Answer('', '')]
            questions[text_input[0].name] = Survey.Question(text, idx, images, answers, 'input')
        elif textarea:
            answers = [Survey.Answer('', '')]
            questions[textarea[0].name] = Survey.Question(text, idx, images, answers, 'textarea')
        else:
            # No input controls at all: plain informational text.
            questions['info_%i' % idx] = Survey.Question(text, idx, images)
    return idx, questions
@staticmethod
def _parse_replies(xml):
    """Extract Survey.Reply objects from the reply-list table.

    Walks all odd/even table rows; rows that are not reply rows raise
    IndexError and are skipped. Rows without a valid timestamp/score
    (e.g. nothing delivered yet) get time/score/status of None.
    Returns the replies sorted by status code and student name.
    (Removed: an unused compiled regex 're_data'.)
    """
    onclick = re.compile("actionform\.([a-z]+)\.value\s?=\s?'?(\w+)'?")
    tr_odd = xml.xpath('//tr[@class="tablelist-odd"]')
    tr_even = xml.xpath('//tr[@class="tablelist-even"]')
    replies = []
    for tr in tr_odd + tr_even:
        try:
            # IndexError (not a test reply row)
            delete = tr.xpath('td[1]/input')
            name = tr.xpath('td[2]/label')[0]
            time = tr.xpath('td[3]/label')[0]
            score = tr.xpath('td[4]/label/img')
            status = tr.xpath('td[5]/label')
            last, first = name.text_content().strip().split(', ')
            data = None
            try:
                time = datetime.strptime(time.text.strip(),'%Y-%m-%d %H:%M:%S') # ValueError
                delete_payload = [(item.name, item.get('value')) for item in delete]
                show_payload = dict(onclick.findall(name.xpath('./a')[0].get('onclick')))
                data = Survey.Reply.Data(delete=delete_payload, show=show_payload)
                score = float(score[0].get('src').split('percent=')[-1].split('&')[0])
                status = status[0].text
            except ValueError:
                # NOTE(review): when the ValueError comes from float() above,
                # 'data' is already set — behavior preserved as-is.
                time = score = status = None
            replies.append(Survey.Reply(first, last, time, status, score, data))
        except IndexError:
            continue
    return sorted(replies, key=lambda x: x._code + x.title)
def _read_questions_and_solutions(self):
    """Fetch questions, solutions and score ranges via the teststudio JSON API.

    Populates self.pages as {surveyid: OrderedDict(question_id -> Survey.Question)}.
    Pages are fetched until the server answers with an HTTP error, which
    marks the end of the page list.
    """
    base_url = self.TARGET + 'app/teststudio/author/tests/%i' % self.treeid
    try:
        self.pages = OrderedDict()
        # Ask for JSON instead of the regular HTML views.
        self.opener.addheaders = [('accept', 'application/json')]
        survey = json.loads(self.get(base_url).read().decode('utf-8'))
        for surveyid in survey['pageIdList']:
            url = base_url + '/pages/%i' % surveyid
            response = self.get(url)
            page = json.loads(response.read().decode('utf-8').replace('\n',''))
            _questions = page['questionIdList']
            questions = OrderedDict()
            for q in _questions:
                # NOTE: 'q' starts out as a question id and is then rebound
                # to the question's JSON payload.
                url = base_url + '/questions/%i' % q
                response = self.get(url)
                q = json.loads(response.read().decode('utf-8').replace('\n',''))
                _answers = q.get('answers', [])
                answers = OrderedDict()
                qtype = q.get('metaType', None)
                if qtype == 'Text':
                    # Free-text question: a single reference answer with a score range.
                    min_score, max_score = float(q['minScore']), float(q['maxScore'])
                    atext = q['correctAnswer']
                    answers[0] = Survey.Answer(atext, 0, True, min_score, max_score)
                    qtype = 'textarea'
                elif _answers:
                    for a in _answers:
                        score = float(a['answerScore'])
                        atext = a['answerText']
                        aid = a['answerId']
                        # Any positively scored answer counts as correct.
                        correct = a['answerCorrect'] or score > 0
                        answers[aid] = Survey.Answer(wrap(atext), aid, correct, 0, score)
                else:
                    # Informational item with no answers — skip.
                    continue
                qtext = wrap(q['questionText'])
                qid = q['id']
                body = q.get('body', '')
                if body:
                    qtext += '\n' + html.to_text(fromstring(q['body']))
                questions[qid] = Survey.Question(qtext, 0, [], answers, qtype)
            if questions:
                self.pages[surveyid] = questions
    except HTTPError: # end of pages
        self.opener.addheaders = []
def _get_reply(self, idx):
    """Return the reply at the 1-based index idx, or None when the student
    has not delivered a reply yet (a warning is printed in that case).

    Raises IndexError for non-positive or out-of-range indices,
    ValueError for non-numeric input.
    """
    position = int(idx) - 1
    if position < 0:
        # A negative position would silently wrap around the reply list.
        raise IndexError
    reply = self.replies[position]
    if reply.data:
        return reply
    print(col(' !! %s has not replied to the survey yet' % reply.title, c.ERR))
    return None
def clean_exit(self):
    """Persist unsaved answers to self._save before exiting."""
    if not self._dirty:
        return
    # Serialize into a fresh temp file first, then copy it into place.
    fd, tmp_path = mkstemp()
    with os.fdopen(fd, 'wb') as handle:
        handle.write(pickle.dumps(self.questions))
    copy(tmp_path, self._save)
    self._dirty = False
| |
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import mle
import network_layer
import thread_cert
# Node ids used in the simulated topology below.
DUT_LEADER = 1   # Device under test: the Thread leader.
ROUTER_1 = 2     # Router attached to both the leader and ROUTER_32.
ROUTER_31 = 32   # 31st router, attached directly to the leader.
ROUTER_32 = 33   # 32nd router: two hops from the leader, via ROUTER_1.
class Cert_5_2_3_LeaderReject2Hops(thread_cert.TestCase):
    """Thread certification 5.2.3: leader rejects the 33rd router, two hops away.

    The network is filled with 32 routers attached directly to the leader;
    a 33rd router (ROUTER_32) then tries to join through ROUTER_1 and the
    leader must answer its address solicit with NO_ADDRESS_AVAILABLE.
    """

    # The original spelled out 29 byte-identical config dicts for nodes
    # 3..31 and ROUTER_31; they are generated here instead. The resulting
    # TOPOLOGY value (including insertion order) is unchanged.
    TOPOLOGY = {
        DUT_LEADER: {
            'mode': 'rsdn',
            'panid': 0xface,
            'router_downgrade_threshold': 33,
            # Leader accepts router upgrades only up to 32 routers.
            'router_upgrade_threshold': 32,
            # Direct links to ROUTER_1 (2) through ROUTER_31 (32).
            'whitelist': list(range(ROUTER_1, ROUTER_31 + 1)),
        },
        ROUTER_1: {
            'mode': 'rsdn',
            'panid': 0xface,
            'router_downgrade_threshold': 33,
            'router_selection_jitter': 1,
            'router_upgrade_threshold': 33,
            # ROUTER_1 is the relay between the leader and ROUTER_32.
            'whitelist': [DUT_LEADER, ROUTER_32],
        },
        # Plain routers 3..31 plus ROUTER_31 (node 32) share one config:
        # attached directly to the leader, never downgrading.
        **{
            node: {
                'mode': 'rsdn',
                'panid': 0xface,
                'router_downgrade_threshold': 33,
                'router_selection_jitter': 1,
                'router_upgrade_threshold': 33,
                'whitelist': [DUT_LEADER],
            }
            for node in range(3, ROUTER_31 + 1)
        },
        ROUTER_32: {
            'mode': 'rsdn',
            'panid': 0xface,
            'router_downgrade_threshold': 33,
            'router_selection_jitter': 1,
            'router_upgrade_threshold': 33,
            # Only reachable through ROUTER_1 — two hops from the leader.
            'whitelist': [ROUTER_1],
        },
    }

    def test(self):
        # 1: form the network — leader first, then routers 2..31.
        self.nodes[DUT_LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[DUT_LEADER].get_state(), 'leader')

        for i in range(2, 32):
            self.nodes[i].start()
            self.simulator.go(5)
            self.assertEqual(self.nodes[i].get_state(), 'router')
        leader_messages = self.simulator.get_messages_sent_by(DUT_LEADER)

        # 2: the 32nd router (node 32) still gets an address.
        self.nodes[ROUTER_31].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER_31].get_state(), 'router')

        # 3 - DUT_LEADER: the address solicit response must report SUCCESS.
        # This method flushes the message queue so calling this method again
        # will return only the newly logged messages.
        leader_messages = self.simulator.get_messages_sent_by(DUT_LEADER)
        msg = leader_messages.next_coap_message('2.04')
        msg.assertCoapMessageContainsTlv(network_layer.Status)
        msg.assertCoapMessageContainsTlv(network_layer.RouterMask)
        msg.assertCoapMessageContainsTlv(network_layer.Rloc16)

        status_tlv = msg.get_coap_message_tlv(network_layer.Status)
        self.assertEqual(network_layer.StatusValues.SUCCESS, status_tlv.status)

        # 4 - DUT_LEADER: advertisement shows all 32 router ids assigned.
        msg = leader_messages.last_mle_message(mle.CommandType.ADVERTISEMENT)
        msg.assertAssignedRouterQuantity(32)

        # 5 - Router_32: the 33rd router attempts to join via ROUTER_1.
        self.nodes[ROUTER_32].start()
        self.simulator.go(5)

        # 6 - DUT_LEADER: the solicit must be rejected — no router ids left.
        leader_messages = self.simulator.get_messages_sent_by(DUT_LEADER)
        msg = leader_messages.next_coap_message('2.04')
        msg.assertCoapMessageContainsTlv(network_layer.Status)

        status_tlv = msg.get_coap_message_tlv(network_layer.Status)
        self.assertEqual(network_layer.StatusValues.NO_ADDRESS_AVAILABLE,
                         status_tlv.status)
# Allow running this certification test directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
import os
from robot.libraries.BuiltIn import BuiltIn
OPENBMC_BASE_URI = '/xyz/openbmc_project/'
OPENBMC_BASE_DBUS = 'xyz.openbmc_project.'
# org open power base URI.
OPENPOWER_BASE_URI = '/org/open_power/'
OPENPOWER_CONTROL = OPENPOWER_BASE_URI + 'control/'
OPENPOWER_SENSORS = OPENPOWER_BASE_URI + 'sensors/'
# REST URI base endpoint paths.
CONTROL_URI = OPENBMC_BASE_URI + 'control/'
# Continue to keep to support legacy code.
SETTINGS_URI = '/org/openbmc/settings/'
WATCHDOG_URI = OPENBMC_BASE_URI + 'watchdog/'
TIME_MANAGER_URI = OPENBMC_BASE_URI + 'time/'
NETWORK_MANAGER = OPENBMC_BASE_URI + 'network/'
NETWORK_RESOURCE = 'xyz.openbmc_project.Network.IP.Protocol.IPv4'
# SNMP
SNMP_MANAGER_URI = NETWORK_MANAGER + 'snmp/manager/'
# Sensors base variables.
SENSORS_URI = OPENBMC_BASE_URI + 'sensors/'
# Thermal Control base variables
THERMAL_CONTROL_URI = CONTROL_URI + 'thermal/0'
# State Manager base variables
BMC_REBOOT_TRANS = 'xyz.openbmc_project.State.BMC.Transition.Reboot'
HOST_POWEROFF_TRANS = 'xyz.openbmc_project.State.Host.Transition.Off'
HOST_POWERON_TRANS = 'xyz.openbmc_project.State.Host.Transition.On'
HOST_REBOOT_TRANS = 'xyz.openbmc_project.State.Host.Transition.Reboot'
HOST_POWEROFF_STATE = 'xyz.openbmc_project.State.Host.HostState.Off'
HOST_POWERON_STATE = 'xyz.openbmc_project.State.Host.HostState.Running'
CHASSIS_POWEROFF_TRANS = 'xyz.openbmc_project.State.Chassis.Transition.Off'
CHASSIS_POWERON_TRANS = 'xyz.openbmc_project.State.Chassis.Transition.On'
CHASSIS_POWEROFF_STATE = 'xyz.openbmc_project.State.Chassis.PowerState.Off'
CHASSIS_POWERON_STATE = 'xyz.openbmc_project.State.Chassis.PowerState.On'
# State Manager URI variables.
SYSTEM_STATE_URI = OPENBMC_BASE_URI + 'state/'
BMC_STATE_URI = OPENBMC_BASE_URI + 'state/bmc0/'
HOST_STATE_URI = OPENBMC_BASE_URI + 'state/host0/'
CHASSIS_STATE_URI = OPENBMC_BASE_URI + 'state/chassis0/'
HOST_WATCHDOG_URI = OPENBMC_BASE_URI + 'watchdog/host0/'
# Logging URI variables
BMC_LOGGING_URI = OPENBMC_BASE_URI + 'logging/'
BMC_LOGGING_ENTRY = BMC_LOGGING_URI + 'entry/'
REDFISH_BMC_LOGGING_ENTRY = '/redfish/v1/Systems/system/LogServices/EventLog/Entries/'
# Software manager version
SOFTWARE_VERSION_URI = OPENBMC_BASE_URI + 'software/'
ACTIVE = 'xyz.openbmc_project.Software.Activation.Activations.Active'
READY = 'xyz.openbmc_project.Software.Activation.Activations.Ready'
INVALID = 'xyz.openbmc_project.Software.Activation.Activations.Invalid'
ACTIVATING = 'xyz.openbmc_project.Software.Activation.Activations.Activating'
NOTREADY = 'xyz.openbmc_project.Software.Activation.Activations.NotReady'
FAILED = 'xyz.openbmc_project.Software.Activation.Activations.Failed'
SOFTWARE_ACTIVATION = 'xyz.openbmc_project.Software.Activation'
REQUESTED_ACTIVATION = SOFTWARE_ACTIVATION + '.RequestedActivations'
REQUESTED_ACTIVE = REQUESTED_ACTIVATION + '.Active'
REQUESTED_NONE = REQUESTED_ACTIVATION + '.None'
SOFTWARE_PURPOSE = 'xyz.openbmc_project.Software.Version.VersionPurpose'
VERSION_PURPOSE_HOST = SOFTWARE_PURPOSE + '.Host'
VERSION_PURPOSE_BMC = SOFTWARE_PURPOSE + '.BMC'
VERSION_PURPOSE_SYSTEM = SOFTWARE_PURPOSE + '.System'
# Image Upload Directory Path
IMAGE_UPLOAD_DIR_PATH = '/tmp/images/'
# Inventory URI variables
HOST_INVENTORY_URI = OPENBMC_BASE_URI + 'inventory/'
CHASSIS_INVENTORY_URI = HOST_INVENTORY_URI + 'system/chassis/'
MOTHERBOARD_INVENTORY_URI = CHASSIS_INVENTORY_URI + 'motherboard/'
# Led URI variable
LED_GROUPS_URI = OPENBMC_BASE_URI + 'led/groups/'
LED_PHYSICAL_URI = OPENBMC_BASE_URI + 'led/physical/'
LED_LAMP_TEST_ASSERTED_URI = LED_GROUPS_URI + 'lamp_test/'
LED_PHYSICAL_PS0_URI = LED_PHYSICAL_URI + 'cffps1_69/'
LED_PHYSICAL_PS1_URI = LED_PHYSICAL_URI + 'cffps1_68/'
LED_PHYSICAL_FAN0_URI = LED_PHYSICAL_URI + 'fan0/'
LED_PHYSICAL_FAN2_URI = LED_PHYSICAL_URI + 'fan2/'
LED_PHYSICAL_FAN3_URI = LED_PHYSICAL_URI + 'fan3/'
# Host control URI variables.
CONTROL_HOST_URI = OPENBMC_BASE_URI + 'control/host0/'
# Power restore variables.
POWER_RESTORE_URI = CONTROL_HOST_URI + 'power_restore_policy'
CONTROL_DBUS_BASE = 'xyz.openbmc_project.Control.'
RESTORE_LAST_STATE = CONTROL_DBUS_BASE + 'Power.RestorePolicy.Policy.Restore'
ALWAYS_POWER_ON = CONTROL_DBUS_BASE + 'Power.RestorePolicy.Policy.AlwaysOn'
ALWAYS_POWER_OFF = CONTROL_DBUS_BASE + 'Power.RestorePolicy.Policy.AlwaysOff'
# Dump URI variables.
REST_DUMP_URI = OPENBMC_BASE_URI + 'dump/bmc/'
DUMP_ENTRY_URI = REST_DUMP_URI + 'entry/'
DUMP_DOWNLOAD_URI = "/download/dump/"
# The path on the BMC where dumps are stored.
DUMP_DIR_PATH = "/var/lib/phosphor-debug-collector/dumps/"
DUMP_HB_DIR_PATH = "/var/lib/phosphor-debug-collector/hostbootdump/"
# Boot progress variables.
STATE_DBUS_BASE = 'xyz.openbmc_project.State.'
OS_BOOT_START = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.OSStart'
OS_BOOT_OFF = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.Unspecified'
OS_BOOT_PCI = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.PCIInit'
OS_BOOT_SECPCI = STATE_DBUS_BASE + \
'Boot.Progress.ProgressStages.SecondaryProcInit'
OS_BOOT_MEM = STATE_DBUS_BASE + 'Boot.Progress.ProgressStages.MemoryInit'
OS_BOOT_MOTHERBOARD = STATE_DBUS_BASE + \
'Boot.Progress.ProgressStages.MotherboardInit'
# OperatingSystem status variables.
OS_BOOT_COMPLETE = STATE_DBUS_BASE + \
'OperatingSystem.Status.OSStatus.BootComplete'
OS_BOOT_CDROM = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.CDROMBoot'
OS_BOOT_ROM = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.ROMBoot'
OS_BOOT_PXE = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.PXEBoot'
OS_BOOT_CBoot = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.CBoot'
OS_BOOT_DiagBoot = STATE_DBUS_BASE + 'OperatingSystem.Status.OSStatus.DiagBoot'
# Boot variables.
BOOT_SOURCE_DEFAULT = 'xyz.openbmc_project.Control.Boot.Source.Sources.Default'
BOOT_SOURCE_NETWORK = 'xyz.openbmc_project.Control.Boot.Source.Sources.Network'
BOOT_SOURCE_DISK = 'xyz.openbmc_project.Control.Boot.Source.Sources.Disk'
BOOT_SOURCE_CDROM = 'xyz.openbmc_project.Control.Boot.Source.Sources.ExternalMedia'
BOOT_MODE_SAFE = 'xyz.openbmc_project.Control.Boot.Mode.Modes.Safe'
BOOT_MODE_SETUP = 'xyz.openbmc_project.Control.Boot.Mode.Modes.Setup'
BOOT_MODE_REGULAR = 'xyz.openbmc_project.Control.Boot.Mode.Modes.Regular'
BOOT_TYPE_LEGACY = 'xyz.openbmc_project.Control.Boot.Type.Types.Legacy'
BOOT_TYPE_EFI = 'xyz.openbmc_project.Control.Boot.Type.Types.EFI'
# Time variables.
TIME_DBUS_BASE = 'xyz.openbmc_project.Time.'
BMC_OWNER = TIME_DBUS_BASE + 'Owner.Owners.BMC'
HOST_OWNER = TIME_DBUS_BASE + 'Owner.Owners.Host'
SPLIT_OWNER = TIME_DBUS_BASE + 'Owner.Owners.Split'
BOTH_OWNER = TIME_DBUS_BASE + 'Owner.Owners.Both'
NTP_MODE = TIME_DBUS_BASE + 'Synchronization.Method.NTP'
MANUAL_MODE = TIME_DBUS_BASE + 'Synchronization.Method.Manual'
# User manager variable.
BMC_USER_URI = OPENBMC_BASE_URI + 'user/'
# LDAP User manager variable.
BMC_LDAP_URI = BMC_USER_URI + 'ldap'
# The path on the BMC where signed keys are stored.
ACTIVATION_DIR_PATH = "/etc/activationdata/"
# Redfish variables.
REDFISH_BASE_URI = '/redfish/v1/'
REDFISH_SESSION = REDFISH_BASE_URI + 'SessionService/Sessions'
REDFISH_SESSION_URI = 'SessionService/Sessions/'
REDFISH_NW_ETH0 = 'Managers/bmc/EthernetInterfaces/eth0/'
REDFISH_NW_ETH0_URI = REDFISH_BASE_URI + REDFISH_NW_ETH0
REDFISH_NW_ETH_IFACE = REDFISH_BASE_URI + 'Managers/bmc/EthernetInterfaces/'
REDFISH_NW_PROTOCOL = 'Managers/bmc/NetworkProtocol'
REDFISH_NW_PROTOCOL_URI = REDFISH_BASE_URI + REDFISH_NW_PROTOCOL
REDFISH_ACCOUNTS_SERVICE = 'AccountService/'
REDFISH_ACCOUNTS_SERVICE_URI = REDFISH_BASE_URI + REDFISH_ACCOUNTS_SERVICE
REDFISH_ACCOUNTS = 'AccountService/Accounts/'
REDFISH_ACCOUNTS_URI = REDFISH_BASE_URI + REDFISH_ACCOUNTS
REDFISH_HTTPS_CERTIFICATE = 'Managers/bmc/NetworkProtocol/HTTPS/Certificates'
REDFISH_HTTPS_CERTIFICATE_URI = REDFISH_BASE_URI + REDFISH_HTTPS_CERTIFICATE
REDFISH_LDAP_CERTIFICATE = 'AccountService/LDAP/Certificates'
REDFISH_LDAP_CERTIFICATE_URI = REDFISH_BASE_URI + REDFISH_LDAP_CERTIFICATE
REDFISH_CA_CERTIFICATE = 'Managers/bmc/Truststore/Certificates'
REDFISH_CA_CERTIFICATE_URI = REDFISH_BASE_URI + REDFISH_CA_CERTIFICATE
REDFISH_CHASSIS_URI = REDFISH_BASE_URI + 'Chassis/'
REDFISH_CHASSIS_THERMAL = 'chassis/Thermal/'
REDFISH_CHASSIS_THERMAL_URI = REDFISH_CHASSIS_URI + REDFISH_CHASSIS_THERMAL
REDFISH_CHASSIS_POWER = 'chassis/Power/'
REDFISH_CHASSIS_POWER_URI = REDFISH_CHASSIS_URI + REDFISH_CHASSIS_POWER
REDFISH_CHASSIS_SENSORS = 'chassis/Sensors'
REDFISH_CHASSIS_SENSORS_URI = REDFISH_CHASSIS_URI + REDFISH_CHASSIS_SENSORS
# Boot options and URI variables.
POWER_ON = 'On'
POWER_GRACEFUL_OFF = "GracefulShutdown"
POWER_GRACEFUL_RESTART = "GracefulRestart"
POWER_FORCE_OFF = 'ForceOff'
REDFISH_POWER = 'Systems/system/Actions/ComputerSystem.Reset'
REDFISH_POWER_URI = REDFISH_BASE_URI + REDFISH_POWER
# rsyslog variables.
REMOTE_LOGGING_URI = OPENBMC_BASE_URI + 'logging/config/remote/'
# Certificate variables.
SERVER_CERTIFICATE_URI = OPENBMC_BASE_URI + 'certs/server/https'
CLIENT_CERTIFICATE_URI = OPENBMC_BASE_URI + 'certs/client/ldap'
CA_CERTIFICATE_URI = OPENBMC_BASE_URI + 'certs/authority/ldap'
# EventLog variables.
SYSTEM_BASE_URI = REDFISH_BASE_URI + 'Systems/system/'
EVENT_LOG_URI = SYSTEM_BASE_URI + 'LogServices/EventLog/'
DUMP_URI = SYSTEM_BASE_URI + 'LogServices/Dump/'
BIOS_ATTR_URI = SYSTEM_BASE_URI + 'Bios'
BIOS_ATTR_SETTINGS_URI = BIOS_ATTR_URI + '/Settings'
'''
QEMU HTTPS variable:
By default lib/resource.robot AUTH URI construct is as
${AUTH_URI} https://${OPENBMC_HOST}${AUTH_SUFFIX}
${AUTH_SUFFIX} is populated here by default EMPTY else
the port from the OS environment
'''
AUTH_SUFFIX = ":" + BuiltIn().get_variable_value("${HTTPS_PORT}", os.getenv('HTTPS_PORT', '443'))
# Valid inventory properties per fru_type after a boot.
# The original spelled out near-identical field lists for every FRU type;
# they are built here from shared pieces instead. Each entry is a fresh
# list object, and both the members and their order are unchanged.

# VPD "Custom Field 1" .. "Custom Field 8".
_CUSTOM_FIELDS = ["Custom Field %d" % i for i in range(1, 9)]
# Lowercase state/identity fields common to every FRU type.
_STATE_FIELDS = ["fault", "fru_type", "is_fru", "present", "version"]
# Full VPD set for card-style FRUs (CPU, memory buffer, daughter card).
_VPD_CARD_FIELDS = (_CUSTOM_FIELDS
                    + ["FRU File ID", "Manufacturer", "Name",
                       "Part Number", "Serial Number"]
                    + _STATE_FIELDS)
# Full VPD set for model/version-style FRUs (DIMM, SYSTEM).
_VPD_MODEL_FIELDS = (_CUSTOM_FIELDS
                     + ["FRU File ID", "Manufacturer", "Model Number",
                        "Name", "Serial Number", "Version"]
                     + _STATE_FIELDS)

INVENTORY_ITEMS = {
    "CPU": list(_VPD_CARD_FIELDS),
    "DIMM": ["Asset Tag"] + _VPD_MODEL_FIELDS,
    "MEMORY_BUFFER": list(_VPD_CARD_FIELDS),
    "FAN": list(_STATE_FIELDS),
    "DAUGHTER_CARD": list(_VPD_CARD_FIELDS),
    "BMC": ["fault", "fru_type", "is_fru", "manufacturer", "present",
            "version"],
    "MAIN_PLANAR": (_CUSTOM_FIELDS
                    + ["Part Number", "Serial Number", "Type"]
                    + _STATE_FIELDS),
    "SYSTEM": list(_VPD_MODEL_FIELDS),
    "CORE": list(_STATE_FIELDS),
}
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: create the index on NGReport.report_date."""
    # Adding index on 'NGReport', fields ['report_date']
    db.create_index('reports_ngreport', ['report_date'])
def backwards(self, orm):
# Removing index on 'NGReport', fields ['report_date']
db.delete_index('reports_ngreport', ['report_date'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dashboard.actionitem': {
'Meta': {'ordering': "['-due_date', '-updated_on', '-created_on']", 'object_name': 'ActionItem'},
'completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'priority': ('django.db.models.fields.IntegerField', [], {}),
'resolved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'action_items_assigned'", 'to': "orm['auth.User']"})
},
'events.attendance': {
'Meta': {'object_name': 'Attendance'},
'date_subscribed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'events.event': {
'Meta': {'ordering': "['start']", 'object_name': 'Event'},
'actual_attendance': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'events_attended'", 'symmetrical': 'False', 'through': "orm['events.Attendance']", 'to': "orm['auth.User']"}),
'budget_bug': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_budget_requests'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['remozilla.Bug']"}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events'", 'null': 'True', 'to': "orm['reports.Campaign']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'events_categories'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'converted_visitors': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'estimated_attendance': ('django.db.models.fields.PositiveIntegerField', [], {}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'extra_content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'goals': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'events_goals'", 'blank': 'True', 'to': "orm['events.EventGoal']"}),
'has_new_metrics': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'hashtag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'metrics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['events.EventMetric']", 'through': "orm['events.EventMetricOutcome']", 'symmetrical': 'False'}),
'mozilla_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'events_created'", 'to': "orm['auth.User']"}),
'planning_pad_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'swag_bug': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_swag_requests'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['remozilla.Bug']"}),
'times_edited': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'venue': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'events.eventgoal': {
'Meta': {'ordering': "['name']", 'object_name': 'EventGoal'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '127'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '127', 'blank': 'True'})
},
'events.eventmetric': {
'Meta': {'ordering': "['name']", 'object_name': 'EventMetric'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.eventmetricoutcome': {
'Meta': {'object_name': 'EventMetricOutcome'},
'details': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']"}),
'expected_outcome': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metric': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.EventMetric']"}),
'outcome': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
'remozilla.bug': {
'Meta': {'ordering': "['-bug_last_change_time']", 'object_name': 'Bug'},
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bugs_assigned'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'budget_needinfo': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'bug_creation_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'bug_id': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'bug_last_change_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'cc': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'bugs_cced'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'component': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'council_member_assigned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'council_vote_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bugs_created'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'first_comment': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pending_mentor_validation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resolution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
'status': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
'summary': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'whiteboard': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'})
},
'reports.activity': {
'Meta': {'ordering': "['name']", 'object_name': 'Activity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reports.campaign': {
'Meta': {'ordering': "['name']", 'object_name': 'Campaign'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reports.ngreport': {
'Meta': {'ordering': "['-report_date', '-created_on']", 'object_name': 'NGReport'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ng_reports'", 'to': "orm['reports.Activity']"}),
'activity_description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ng_reports'", 'null': 'True', 'to': "orm['reports.Campaign']"}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Event']", 'null': 'True', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'ng_reports'", 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_passive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'link_description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ng_reports_mentored'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'report_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ng_reports'", 'to': "orm['auth.User']"}),
'verified_activity': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'reports.ngreportcomment': {
'Meta': {'ordering': "['id']", 'object_name': 'NGReportComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reports.NGReport']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['reports']
| |
import CatalogItem
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toon import ToonDNA
import random
from direct.showbase import PythonUtil
from direct.gui.DirectGui import *
from panda3d.core import *
from panda3d.direct import *
# Indices into the tuples stored as values of ClothingTypes below.
CTArticle = 0  # article category (one of the A* constants below)
CTString = 1  # ToonDNA style-string key for the article
CTBasePrice = 2  # base price of the article
CTEmblemPrices = 3  # optional emblem price pair -- presumably (gold, silver); TODO confirm
# Article categories.  Ordering matters: isShirt() tests article < ABoysShorts,
# so all shirt categories must come before all bottoms categories.
ABoysShirt = 0
AGirlsShirt = 1
AShirt = 2  # unisex shirt
ABoysShorts = 3
AGirlsShorts = 4
AGirlsSkirt = 5
AShorts = 6  # unisex shorts
# Maps clothing-type id -> (article, ToonDNA style string, base price[, emblem prices]).
# Indexed with the CT* constants; id ranges group related releases
# (e.g. 1xx boys' shirts, 2xx girls' shirts, 1000+ holiday/special items).
ClothingTypes = {
    101: (ABoysShirt, 'bss1', 40),
    102: (ABoysShirt, 'bss2', 40),
    103: (ABoysShirt, 'bss3', 40),
    105: (ABoysShirt, 'bss4', 40),
    104: (ABoysShirt, 'bss5', 40),
    106: (ABoysShirt, 'bss6', 40),
    107: (ABoysShirt, 'bss7', 40),
    108: (ABoysShirt, 'bss8', 40),
    109: (ABoysShirt, 'bss9', 40),
    111: (ABoysShirt, 'bss11', 40),
    115: (ABoysShirt, 'bss15', 40),
    116: (ABoysShirt, 'c_ss1', 80),
    117: (ABoysShirt, 'c_ss2', 80),
    118: (ABoysShirt, 'c_bss1', 80),
    119: (ABoysShirt, 'c_bss2', 80),
    120: (ABoysShirt, 'c_ss3', 80),
    121: (ABoysShirt, 'c_bss3', 80),
    122: (ABoysShirt, 'c_bss4', 80),
    123: (ABoysShirt, 'c_ss4', 120),
    124: (ABoysShirt, 'c_ss5', 120),
    125: (AShirt, 'c_ss6', 120),
    126: (AShirt, 'c_ss7', 120),
    127: (AShirt, 'c_ss8', 120),
    128: (AShirt, 'c_ss9', 120),
    129: (AShirt, 'c_ss10', 120),
    130: (AShirt, 'c_ss11', 120),
    131: (ABoysShirt, 'c_ss12', 160),
    201: (AGirlsShirt, 'gss1', 40),
    202: (AGirlsShirt, 'gss2', 40),
    203: (AGirlsShirt, 'gss3', 40),
    205: (AGirlsShirt, 'gss4', 40),
    204: (AGirlsShirt, 'gss5', 40),
    206: (AGirlsShirt, 'gss6', 40),
    207: (AGirlsShirt, 'gss7', 40),
    208: (AGirlsShirt, 'gss8', 40),
    209: (AGirlsShirt, 'gss9', 40),
    211: (AGirlsShirt, 'gss11', 40),
    215: (AGirlsShirt, 'gss15', 40),
    216: (AGirlsShirt, 'c_ss1', 80),
    217: (AGirlsShirt, 'c_ss2', 80),
    218: (AGirlsShirt, 'c_gss1', 80),
    219: (AGirlsShirt, 'c_gss2', 80),
    220: (AGirlsShirt, 'c_ss3', 80),
    221: (AGirlsShirt, 'c_gss3', 80),
    222: (AGirlsShirt, 'c_gss4', 80),
    223: (AGirlsShirt, 'c_gss5', 80),
    224: (AGirlsShirt, 'c_ss4', 120),
    225: (AGirlsShirt, 'c_ss13', 160),
    301: (ABoysShorts, 'bbs1', 50),
    302: (ABoysShorts, 'bbs2', 50),
    303: (ABoysShorts, 'bbs3', 50),
    304: (ABoysShorts, 'bbs4', 50),
    305: (ABoysShorts, 'bbs5', 50),
    308: (ABoysShorts, 'bbs8', 50),
    310: (ABoysShorts, 'c_bs1', 120),
    311: (ABoysShorts, 'c_bs2', 120),
    312: (ABoysShorts, 'c_bs3', 120),
    313: (ABoysShorts, 'c_bs4', 120),
    314: (ABoysShorts, 'c_bs5', 160),
    401: (AGirlsSkirt, 'gsk1', 50),
    403: (AGirlsSkirt, 'gsk3', 50),
    404: (AGirlsSkirt, 'gsk4', 50),
    405: (AGirlsSkirt, 'gsk5', 50),
    407: (AGirlsSkirt, 'gsk7', 50),
    408: (AGirlsSkirt, 'c_gsk1', 100),
    409: (AGirlsSkirt, 'c_gsk2', 100),
    410: (AGirlsSkirt, 'c_gsk3', 100),
    411: (AGirlsSkirt, 'c_gsk4', 120),
    412: (AGirlsSkirt, 'c_gsk5', 120),
    413: (AGirlsSkirt, 'c_gsk6', 120),
    414: (AGirlsSkirt, 'c_gsk7', 160),
    451: (AGirlsShorts, 'gsh1', 50),
    452: (AGirlsShorts, 'gsh2', 50),
    453: (AGirlsShorts, 'gsh3', 50),
    1001: (AShirt, 'hw_ss1', 200),
    1002: (AShirt, 'hw_ss2', 200),
    1100: (AShirt, 'wh_ss1', 200),
    1101: (AShirt, 'wh_ss2', 200),
    1102: (AShirt, 'wh_ss3', 200),
    1103: (AShirt, 'wh_ss4', 200),
    1104: (ABoysShorts, 'wh_bs1', 200),
    1105: (ABoysShorts, 'wh_bs2', 200),
    1106: (ABoysShorts, 'wh_bs3', 200),
    1107: (ABoysShorts, 'wh_bs4', 200),
    1108: (AGirlsSkirt, 'wh_gsk1', 200),
    1109: (AGirlsSkirt, 'wh_gsk2', 200),
    1110: (AGirlsSkirt, 'wh_gsk3', 200),
    1111: (AGirlsSkirt, 'wh_gsk4', 200),
    1112: (AShirt, 'hw_ss5', 200),
    1113: (AShirt, 'hw_ss6', 300),
    1114: (AShirt, 'hw_ss7', 200),
    1115: (AShirt, 'hw_ss8', 200),
    1116: (AShirt, 'hw_ss9', 300),
    1117: (ABoysShorts, 'hw_bs1', 200),
    1118: (ABoysShorts, 'hw_bs2', 300),
    1119: (ABoysShorts, 'hw_bs5', 200),
    1120: (ABoysShorts, 'hw_bs6', 200),
    1121: (ABoysShorts, 'hw_bs7', 300),
    1122: (AGirlsShorts, 'hw_gs1', 200),
    1123: (AGirlsShorts, 'hw_gs2', 300),
    1124: (AGirlsShorts, 'hw_gs5', 200),
    1125: (AGirlsShorts, 'hw_gs6', 200),
    1126: (AGirlsShorts, 'hw_gs7', 300),
    1127: (AGirlsSkirt, 'hw_gsk1', 300),
    1200: (AGirlsShirt, 'vd_ss1', 200),
    1201: (AShirt, 'vd_ss2', 200),
    1202: (ABoysShirt, 'vd_ss3', 200),
    1203: (AGirlsShirt, 'vd_ss4', 200),
    1204: (AGirlsSkirt, 'vd_gs1', 200),
    1205: (ABoysShorts, 'vd_bs1', 200),
    1206: (AShirt, 'vd_ss5', 200),
    1207: (AShirt, 'vd_ss6', 200),
    1208: (ABoysShorts, 'vd_bs2', 200),
    1209: (ABoysShorts, 'vd_bs3', 200),
    1210: (AGirlsSkirt, 'vd_gs2', 200),
    1211: (AGirlsSkirt, 'vd_gs3', 200),
    1212: (AShirt, 'vd_ss7', 200),
    1300: (AShirt, 'sd_ss1', 200),
    1301: (AShirt, 'sd_ss2', 225),
    1302: (AGirlsShorts, 'sd_gs1', 200),
    1303: (ABoysShorts, 'sd_bs1', 200),
    1304: (AShirt, 'sd_ss3', 25),
    1305: (ABoysShorts, 'sd_bs2', 25),
    1306: (AGirlsSkirt, 'sd_gs2', 25),
    1400: (AShirt, 'tc_ss1', 200),
    1401: (AShirt, 'tc_ss2', 200),
    1402: (AShirt, 'tc_ss3', 200),
    1403: (AShirt, 'tc_ss4', 200),
    1404: (AShirt, 'tc_ss5', 200),
    1405: (AShirt, 'tc_ss6', 200),
    1406: (AShirt, 'tc_ss7', 200),
    1500: (AShirt, 'j4_ss1', 200),
    1501: (AShirt, 'j4_ss2', 200),
    1502: (ABoysShorts, 'j4_bs1', 200),
    1503: (AGirlsSkirt, 'j4_gs1', 200),
    1600: (AShirt, 'pj_ss1', 500),
    1601: (AShirt, 'pj_ss2', 500),
    1602: (AShirt, 'pj_ss3', 500),
    1603: (ABoysShorts, 'pj_bs1', 500),
    1604: (ABoysShorts, 'pj_bs2', 500),
    1605: (ABoysShorts, 'pj_bs3', 500),
    1606: (AGirlsShorts, 'pj_gs1', 500),
    1607: (AGirlsShorts, 'pj_gs2', 500),
    1608: (AGirlsShorts, 'pj_gs3', 500),
    1700: (AShirt, 'sa_ss1', 200),
    1701: (AShirt, 'sa_ss2', 200),
    1702: (AShirt, 'sa_ss3', 200),
    1703: (AShirt, 'sa_ss4', 200),
    1704: (AShirt, 'sa_ss5', 200),
    1705: (AShirt, 'sa_ss6', 200),
    1706: (AShirt, 'sa_ss7', 200),
    1707: (AShirt, 'sa_ss8', 200),
    1708: (AShirt, 'sa_ss9', 200),
    1709: (AShirt, 'sa_ss10', 200),
    1710: (AShirt, 'sa_ss11', 200),
    1711: (ABoysShorts, 'sa_bs1', 200),
    1712: (ABoysShorts, 'sa_bs2', 200),
    1713: (ABoysShorts, 'sa_bs3', 200),
    1714: (ABoysShorts, 'sa_bs4', 200),
    1715: (ABoysShorts, 'sa_bs5', 200),
    1716: (AGirlsSkirt, 'sa_gs1', 200),
    1717: (AGirlsSkirt, 'sa_gs2', 200),
    1718: (AGirlsSkirt, 'sa_gs3', 200),
    1719: (AGirlsSkirt, 'sa_gs4', 200),
    1720: (AGirlsSkirt, 'sa_gs5', 200),
    1721: (AShirt, 'sa_ss12', 200),
    1722: (AShirt, 'sa_ss13', 200),
    1723: (AShirt, 'sa_ss14', 250),
    1724: (AShirt, 'sa_ss15', 250),
    1725: (AShirt, 'sa_ss16', 200),
    1726: (AShirt, 'sa_ss17', 200),
    1727: (AShirt, 'sa_ss18', 200),
    1728: (AShirt, 'sa_ss19', 200),
    1729: (AShirt, 'sa_ss20', 200),
    1730: (AShirt, 'sa_ss21', 200),
    1731: (AShirt, 'sa_ss22', 200),
    1732: (AShirt, 'sa_ss23', 200),
    1733: (ABoysShorts, 'sa_bs6', 200),
    1734: (ABoysShorts, 'sa_bs7', 250),
    1735: (ABoysShorts, 'sa_bs8', 250),
    1736: (ABoysShorts, 'sa_bs9', 200),
    1737: (ABoysShorts, 'sa_bs10', 200),
    1738: (AGirlsSkirt, 'sa_gs6', 200),
    1739: (AGirlsSkirt, 'sa_gs7', 250),
    1740: (AGirlsSkirt, 'sa_gs8', 250),
    1741: (AGirlsSkirt, 'sa_gs9', 200),
    1742: (AGirlsSkirt, 'sa_gs10', 200),
    1743: (AShirt, 'sa_ss24', 250),
    1744: (AShirt, 'sa_ss25', 250),
    1745: (ABoysShorts, 'sa_bs11', 250),
    1746: (ABoysShorts, 'sa_bs12', 250),
    1747: (AGirlsSkirt, 'sa_gs11', 250),
    1748: (AGirlsSkirt, 'sa_gs12', 250),
    1749: (AShirt, 'sil_1', 1),
    1750: (AShirt, 'sil_2', 1),
    1751: (AShirt, 'sil_3', 1),
    1752: (AShirt, 'sil_4', 5000),
    1753: (AShirt, 'sil_5', 5000),
    1754: (AShirt, 'sil_6', 1),
    1755: (ABoysShorts, 'sil_bs1', 1),
    1756: (AGirlsShorts, 'sil_gs1', 1),
    1757: (AShirt, 'sil_7', 20),
    1758: (AShirt, 'sil_8', 20),
    1759: (AShirt,
    'emb_us1',
    0,
    (20, 5)),
    1760: (AShirt,
    'emb_us2',
    234,
    (0, 7)),
    1761: (AShirt,
    'emb_us3',
    345,
    (8, 0)),
    1762: (AShirt, 'sa_ss26', 5000),
    1763: (AShirt, 'sb_1', 20),
    1764: (AShirt, 'sa_ss27', 5000),
    1765: (AShirt, 'sa_ss28', 5000),
    1766: (ABoysShorts, 'sa_bs13', 5000),
    1767: (AGirlsShorts, 'sa_gs13', 5000),
    1768: (AShirt, 'jb_1', 20),
    1769: (AShirt, 'jb_2', 20),
    1770: (AShirt, 'hw_ss3', 250),
    1771: (AShirt, 'hw_ss4', 250),
    1772: (ABoysShorts, 'hw_bs3', 250),
    1773: (AGirlsShorts, 'hw_gs3', 250),
    1774: (ABoysShorts, 'hw_bs4', 250),
    1775: (AGirlsShorts, 'hw_gs4', 250),
    1776: (AShirt, 'ugcms', 15000),
    1777: (AShirt, 'lb_1', 20),
    1778: (AShirt, 'sa_ss29', 5000),
    1779: (AShirt, 'sa_ss30', 5000),
    1780: (ABoysShorts, 'sa_bs14', 5000),
    1781: (AGirlsShorts, 'sa_gs14', 5000),
    1782: (AShirt, 'sa_ss31', 5000),
    1783: (ABoysShorts, 'sa_bs15', 5000),
    1784: (AGirlsSkirt, 'sa_gs15', 5000),
    1785: (AShirt, 'sa_ss32', 5000),
    1786: (AShirt, 'sa_ss33', 5000),
    1787: (AShirt, 'sa_ss34', 5000),
    1788: (AShirt, 'sa_ss35', 5000),
    1789: (AShirt, 'sa_ss36', 5000),
    1790: (AShirt, 'sa_ss37', 5000),
    1791: (ABoysShorts, 'sa_bs16', 5000),
    1792: (ABoysShorts, 'sa_bs17', 5000),
    1793: (AGirlsSkirt, 'sa_gs16', 5000),
    1794: (AGirlsSkirt, 'sa_gs17', 5000),
    1795: (AShirt, 'sa_ss38', 5000),
    1796: (AShirt, 'sa_ss39', 5000),
    1797: (ABoysShorts, 'sa_bs18', 5000),
    1798: (AGirlsSkirt, 'sa_gs18', 5000),
    1799: (AShirt, 'sa_ss40', 5000),
    1800: (AShirt, 'sa_ss41', 5000),
    1801: (AShirt, 'sa_ss42', 250),
    1802: (AGirlsShirt, 'sa_ss43', 250),
    1803: (AShirt, 'sa_ss44', 5000),
    1804: (AShirt, 'sa_ss45', 5000),
    1805: (AShirt, 'sa_ss46', 5000),
    1806: (AShirt, 'sa_ss47', 5000),
    1807: (AShirt, 'sa_ss48', 5000),
    1808: (AShirt, 'sa_ss49', 5000),
    1809: (AShirt, 'sa_ss50', 5000),
    1810: (AShirt, 'sa_ss51', 5000),
    1811: (AShirt, 'sa_ss52', 5000),
    1812: (AShirt, 'sa_ss53', 5000),
    1813: (AShirt, 'sa_ss54', 5000),
    1814: (ABoysShorts, 'sa_bs19', 5000),
    1815: (ABoysShorts, 'sa_bs20', 5000),
    1816: (ABoysShorts, 'sa_bs21', 5000),
    1817: (AGirlsSkirt, 'sa_gs19', 5000),
    1818: (AGirlsSkirt, 'sa_gs20', 5000),
    1819: (AGirlsSkirt, 'sa_gs21', 5000),
    1820: (AShirt, 'sa_ss55', 5000)}
class CatalogClothingItem(CatalogItem.CatalogItem):
def makeNewItem(self, clothingType, colorIndex, isSpecial = False):
self.clothingType = clothingType
self.colorIndex = colorIndex
self.isSpecial = isSpecial
CatalogItem.CatalogItem.makeNewItem(self)
def storedInCloset(self):
return 1
def notOfferedTo(self, avatar):
article = ClothingTypes[self.clothingType][CTArticle]
if article == AShirt or article == AShorts:
return 0
forBoys = (article == ABoysShirt or article == ABoysShorts)
if avatar.getStyle().getGender() == 'm':
return not forBoys
else:
return forBoys
def forBoysOnly(self):
article = ClothingTypes[self.clothingType][CTArticle]
if article == ABoysShirt or article == ABoysShorts:
return 1
else:
return 0
def forGirlsOnly(self):
article = ClothingTypes[self.clothingType][CTArticle]
if article == AGirlsShirt or article == AGirlsSkirt or article == AGirlsShorts:
return 1
else:
return 0
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
if avatar.onOrder.count(self) != 0:
return 1
if avatar.onGiftOrder.count(self) != 0:
return 1
if avatar.mailboxContents.count(self) != 0:
return 1
if self in avatar.awardMailboxContents or self in avatar.onAwardOrder:
return 1
str = ClothingTypes[self.clothingType][CTString]
dna = avatar.getStyle()
if self.isShirt():
defn = ToonDNA.ShirtStyles[str]
if dna.topTex == defn[0] and dna.topTexColor == defn[2][self.colorIndex][0] and dna.sleeveTex == defn[1] and dna.sleeveTexColor == defn[2][self.colorIndex][1]:
return 1
l = avatar.clothesTopsList
for i in range(0, len(l), 4):
if l[i] == defn[0] and l[i + 1] == defn[2][self.colorIndex][0] and l[i + 2] == defn[1] and l[i + 3] == defn[2][self.colorIndex][1]:
return 1
else:
defn = ToonDNA.BottomStyles[str]
if dna.botTex == defn[0] and dna.botTexColor == defn[1][self.colorIndex]:
return 1
l = avatar.clothesBottomsList
for i in range(0, len(l), 2):
if l[i] == defn[0] and l[i + 1] == defn[1][self.colorIndex]:
return 1
return 0
def getTypeName(self):
return TTLocalizer.ClothingTypeName
def getName(self):
typeName = TTLocalizer.ClothingTypeNames.get(self.clothingType, 0)
if typeName:
return typeName
else:
article = ClothingTypes[self.clothingType][CTArticle]
return TTLocalizer.ClothingArticleNames[article]
def recordPurchase(self, avatar, optional):
if avatar.isClosetFull():
return ToontownGlobals.P_NoRoomForItem
str = ClothingTypes[self.clothingType][CTString]
dna = avatar.getStyle()
if self.isShirt():
added = avatar.addToClothesTopsList(dna.topTex, dna.topTexColor, dna.sleeveTex, dna.sleeveTexColor)
if added:
avatar.b_setClothesTopsList(avatar.getClothesTopsList())
self.notify.info('Avatar %s put shirt %d,%d,%d,%d in closet.' % (avatar.doId,
dna.topTex,
dna.topTexColor,
dna.sleeveTex,
dna.sleeveTexColor))
else:
self.notify.warning('Avatar %s %s lost current shirt; closet full.' % (avatar.doId, dna.asTuple()))
defn = ToonDNA.ShirtStyles[str]
dna.topTex = defn[0]
dna.topTexColor = defn[2][self.colorIndex][0]
dna.sleeveTex = defn[1]
dna.sleeveTexColor = defn[2][self.colorIndex][1]
else:
added = avatar.addToClothesBottomsList(dna.botTex, dna.botTexColor)
if added:
avatar.b_setClothesBottomsList(avatar.getClothesBottomsList())
self.notify.info('Avatar %s put bottoms %d,%d in closet.' % (avatar.doId, dna.botTex, dna.botTexColor))
else:
self.notify.warning('Avatar %s %s lost current bottoms; closet full.' % (avatar.doId, dna.asTuple()))
defn = ToonDNA.BottomStyles[str]
dna.botTex = defn[0]
dna.botTexColor = defn[1][self.colorIndex]
if dna.getGender() == 'f':
try:
bottomPair = ToonDNA.GirlBottoms[dna.botTex]
except:
bottomPair = ToonDNA.GirlBottoms[0]
if dna.torso[1] == 's' and bottomPair[1] == ToonDNA.SKIRT:
dna.torso = dna.torso[0] + 'd'
elif dna.torso[1] == 'd' and bottomPair[1] == ToonDNA.SHORTS:
dna.torso = dna.torso[0] + 's'
avatar.b_setDNAString(dna.makeNetString())
avatar.d_catalogGenClothes()
return ToontownGlobals.P_ItemAvailable
def getDeliveryTime(self):
return 1
def getPicture(self, avatar):
from toontown.toon import Toon
self.hasPicture = True
dna = ToonDNA.ToonDNA(type='t', dna=avatar.style)
str = ClothingTypes[self.clothingType][CTString]
if self.isShirt():
defn = ToonDNA.ShirtStyles[str]
dna.topTex = defn[0]
dna.topTexColor = defn[2][self.colorIndex][0]
dna.sleeveTex = defn[1]
dna.sleeveTexColor = defn[2][self.colorIndex][1]
pieceNames = ('**/1000/**/torso-top', '**/1000/**/sleeves')
else:
defn = ToonDNA.BottomStyles[str]
dna.botTex = defn[0]
dna.botTexColor = defn[1][self.colorIndex]
pieceNames = ('**/1000/**/torso-bot',)
toon = Toon.Toon()
toon.setDNA(dna)
model = NodePath('clothing')
for name in pieceNames:
for piece in toon.findAllMatches(name):
piece.wrtReparentTo(model)
model.setH(135)
toon.delete()
base.graphicsEngine.renderFrame()
return self.makeFrameModel(model)
def requestPurchase(self, phone, callback):
from toontown.toontowngui import TTDialog
avatar = base.localAvatar
clothesOnOrder = 0
for item in avatar.onOrder + avatar.mailboxContents:
if item.storedInCloset():
clothesOnOrder += 1
if avatar.isClosetFull(clothesOnOrder):
self.requestPurchaseCleanup()
buttonCallback = PythonUtil.Functor(self.__handleFullPurchaseDialog, phone, callback)
self.dialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.CatalogPurchaseClosetFull, text_wordwrap=15, command=buttonCallback)
self.dialog.show()
else:
CatalogItem.CatalogItem.requestPurchase(self, phone, callback)
def requestPurchaseCleanup(self):
if hasattr(self, 'dialog'):
self.dialog.cleanup()
del self.dialog
def __handleFullPurchaseDialog(self, phone, callback, buttonValue):
from toontown.toontowngui import TTDialog
self.requestPurchaseCleanup()
if buttonValue == DGG.DIALOG_OK:
CatalogItem.CatalogItem.requestPurchase(self, phone, callback)
else:
callback(ToontownGlobals.P_UserCancelled, self)
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
if self.isShirt():
return TTLocalizer.CatalogAcceptShirt
elif self.isSkirt():
return TTLocalizer.CatalogAcceptSkirt
else:
return TTLocalizer.CatalogAcceptShorts
elif retcode == ToontownGlobals.P_NoRoomForItem:
return TTLocalizer.CatalogAcceptClosetFull
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def getColorChoices(self):
str = ClothingTypes[self.clothingType][CTString]
if self.isShirt():
return ToonDNA.ShirtStyles[str][2]
else:
return ToonDNA.BottomStyles[str][1]
def isShirt(self):
article = ClothingTypes[self.clothingType][CTArticle]
return article < ABoysShorts
def isSkirt(self):
article = ClothingTypes[self.clothingType][CTArticle]
return article == AGirlsSkirt
def output(self, store = -1):
return 'CatalogClothingItem(%s, %s%s)' % (self.clothingType, self.colorIndex, self.formatOptionalData(store))
def getFilename(self):
str = ClothingTypes[self.clothingType][CTString]
if self.isShirt():
defn = ToonDNA.ShirtStyles[str]
topTex = defn[0]
return ToonDNA.Shirts[topTex]
else:
defn = ToonDNA.BottomStyles[str]
botTex = defn[0]
article = ClothingTypes[self.clothingType][CTArticle]
if article == ABoysShorts:
return ToonDNA.BoyShorts[botTex]
else:
return ToonDNA.GirlBottoms[botTex][0]
def getColor(self):
str = ClothingTypes[self.clothingType][CTString]
if self.isShirt():
defn = ToonDNA.ShirtStyles[str]
topTexColor = defn[2][self.colorIndex][0]
return ToonDNA.ClothesColors[topTexColor]
else:
defn = ToonDNA.BottomStyles[str]
botTexColor = defn[1][self.colorIndex]
return ToonDNA.ClothesColors[botTexColor]
def compareTo(self, other):
if self.clothingType != other.clothingType:
return self.clothingType - other.clothingType
return self.colorIndex - other.colorIndex
    def getHashContents(self):
        # The fields that uniquely identify this item for hashing purposes.
        return (self.clothingType, self.colorIndex)
    def getBasePrice(self):
        # Price comes from the static ClothingTypes table entry.
        return ClothingTypes[self.clothingType][CTBasePrice]
def getEmblemPrices(self):
result = ()
info = ClothingTypes[self.clothingType]
if CTEmblemPrices <= len(info) - 1:
result = info[CTEmblemPrices]
return result
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.clothingType = di.getUint16()
self.colorIndex = di.getUint8()
self.isSpecial = di.getBool()
str = ClothingTypes[self.clothingType][CTString]
if self.isShirt():
color = ToonDNA.ShirtStyles[str][2][self.colorIndex]
else:
color = ToonDNA.BottomStyles[str][1][self.colorIndex]
    def encodeDatagram(self, dg, store):
        """Append this item's fields to the datagram.

        Field order must match decodeDatagram exactly.
        """
        CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
        dg.addUint16(self.clothingType)
        dg.addUint8(self.colorIndex)
        dg.addBool(self.isSpecial)
def isGift(self):
if self.getEmblemPrices():
return 0
else:
return 1
def getAllClothes(*clothingTypes):
    """Return a CatalogClothingItem for every color of each given type.

    Color index 0 is created first to discover how many color choices the
    type offers; the remaining color indices are then filled in.
    """
    # Renamed local: the original shadowed the builtin `list`.
    items = []
    for clothingType in clothingTypes:
        base = CatalogClothingItem(clothingType, 0)
        items.append(base)
        for colorIndex in range(1, len(base.getColorChoices())):
            items.append(CatalogClothingItem(clothingType, colorIndex))
    return items
| |
import hashlib
import pytest
import aiohttpretty
from waterbutler.core import exceptions
from waterbutler.providers.gitlab import GitLabProvider
from waterbutler.providers.gitlab.path import GitLabPath
from waterbutler.providers.gitlab.metadata import GitLabFileMetadata
from waterbutler.providers.gitlab.metadata import GitLabFolderMetadata
from tests.providers.gitlab.fixtures import (simple_tree, simple_file_metadata, subfolder_tree,
revisions_for_file, default_branches, )
@pytest.fixture
def auth():
    """Minimal auth blob used to construct a GitLabProvider."""
    return dict(name='cat', email='cat@cat.com')
@pytest.fixture
def credentials():
    """Fake token credentials."""
    return dict(token='naps')
@pytest.fixture
def settings():
    """Provider settings pointing at a fake GitLab host."""
    return dict(
        owner='cat',
        repo='food',
        repo_id='123',
        host='http://base.url',
    )
@pytest.fixture
def repo_metadata():
    """Repository metadata as GitLab would report it (read-only perms)."""
    permissions = dict(push=False, admin=False, pull=True)
    return dict(full_name='octocat/Hello-World', settings=permissions)
@pytest.fixture
def provider(auth, credentials, settings, repo_metadata):
    """A GitLabProvider wired up with the fake fixtures above."""
    return GitLabProvider(auth, credentials, settings)
@pytest.fixture
def other(auth, credentials, settings, repo_metadata):
    """A second, independently constructed GitLabProvider."""
    return GitLabProvider(auth, credentials, settings)
class TestHelpers:
    """Tests for small GitLabProvider helper methods."""

    def test_build_repo_url(self, provider, settings):
        # _build_repo_url joins the API base, the project id, and segments.
        built = provider._build_repo_url('contents')
        assert built == 'http://base.url/api/v4/projects/123/contents'
class TestValidatePath:
    """Tests for ``GitLabProvider.validate_v1_path``.

    A path may be qualified by ``commitSha``, ``branch``, or ``revision``
    keyword arguments; these tests exercise each resolution route, plus the
    error cases for a missing or uninitialized repository.
    """

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_root(self, provider, default_branches):
        # No ref given: the provider must first look up the repo's default
        # branch, then resolve that branch name to its head commit sha.
        path = '/'
        default_branch_url = 'http://base.url/api/v4/projects/123'
        body = default_branches['default_branch']
        aiohttpretty.register_json_uri('GET', default_branch_url, body=body, status=200)
        commit_sha_url = 'http://base.url/api/v4/projects/123/repository/branches/master'
        commit_sha_body = default_branches['get_commit_sha']
        aiohttpretty.register_json_uri('GET', commit_sha_url, body=commit_sha_body, status=200)
        root_path = await provider.validate_v1_path(path)
        assert root_path.is_dir
        assert root_path.is_root
        assert root_path.commit_sha == '5e4718bd52874cf373dad0e9ca602a9a36f87e5c'
        assert root_path.branch_name == 'master'
        assert root_path.extra == {
            'commitSha': '5e4718bd52874cf373dad0e9ca602a9a36f87e5c',
            'branchName': 'master',
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_root_by_branch(self, provider, default_branches):
        # Explicit branch kwarg: only the branch -> sha lookup is needed.
        commit_sha_url = 'http://base.url/api/v4/projects/123/repository/branches/otherbranch'
        commit_sha_body = default_branches['get_commit_sha']
        aiohttpretty.register_json_uri('GET', commit_sha_url, body=commit_sha_body, status=200)
        root_path = await provider.validate_v1_path('/', branch='otherbranch')
        assert root_path.is_dir
        assert root_path.is_root
        assert root_path.commit_sha == '5e4718bd52874cf373dad0e9ca602a9a36f87e5c'
        assert root_path.branch_name == 'otherbranch'
        assert root_path.extra == {
            'commitSha': '5e4718bd52874cf373dad0e9ca602a9a36f87e5c',
            'branchName': 'otherbranch',
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_root_by_commit_sha(self, provider):
        # Explicit commitSha kwarg: no HTTP lookups at all; branch is unknown.
        path = '/'
        root_path = await provider.validate_v1_path(path, commitSha='a1b2c3d4')
        assert root_path.is_dir
        assert root_path.is_root
        assert root_path.commit_sha == 'a1b2c3d4'
        assert root_path.branch_name is None
        assert root_path.extra == {
            'commitSha': 'a1b2c3d4',
            'branchName': None,
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_root_by_revision_sha(self, provider):
        # A sha-like `revision` is treated as a commit sha (no lookups).
        path = '/'
        root_path = await provider.validate_v1_path(path, revision='a1b2c3d4')
        assert root_path.is_dir
        assert root_path.is_root
        assert root_path.commit_sha == 'a1b2c3d4'
        assert root_path.branch_name is None
        assert root_path.extra == {
            'commitSha': 'a1b2c3d4',
            'branchName': None,
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_root_by_revision_branch(self, provider, default_branches):
        # A non-sha `revision` is treated as a branch name and resolved.
        commit_sha_url = 'http://base.url/api/v4/projects/123/repository/branches/otherbranch'
        commit_sha_body = default_branches['get_commit_sha']
        aiohttpretty.register_json_uri('GET', commit_sha_url, body=commit_sha_body, status=200)
        root_path = await provider.validate_v1_path('/', revision='otherbranch')
        assert root_path.is_dir
        assert root_path.is_root
        assert root_path.commit_sha == '5e4718bd52874cf373dad0e9ca602a9a36f87e5c'
        assert root_path.branch_name == 'otherbranch'
        assert root_path.extra == {
            'commitSha': '5e4718bd52874cf373dad0e9ca602a9a36f87e5c',
            'branchName': 'otherbranch',
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_file(self, provider, simple_tree):
        # A file path is validated by listing its parent folder's tree; the
        # commitSha/branch pair must propagate to the path and all ancestors.
        path = '/folder1/file1'
        url = ('http://base.url/api/v4/projects/123/repository/tree'
               '?path=folder1/&page=1&per_page={}&ref=a1b2c3d4'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', url, body=simple_tree)
        try:
            file_path = await provider.validate_v1_path(path, commitSha='a1b2c3d4',
                                                        branch='master')
        except Exception as exc:
            pytest.fail(str(exc))
        assert file_path.is_file
        assert not file_path.is_root
        assert file_path.commit_sha == 'a1b2c3d4'
        assert file_path.branch_name == 'master'
        assert file_path.extra == {
            'commitSha': 'a1b2c3d4',
            'branchName': 'master',
        }
        parent_path = file_path.parent
        assert parent_path.commit_sha == 'a1b2c3d4'
        assert parent_path.branch_name == 'master'
        assert parent_path.extra == {
            'commitSha': 'a1b2c3d4',
            'branchName': 'master',
        }
        root_path = parent_path.parent
        assert root_path.commit_sha == 'a1b2c3d4'
        assert root_path.branch_name == 'master'
        assert root_path.extra == {
            'commitSha': 'a1b2c3d4',
            'branchName': 'master',
        }
        # v1 semantics: asking for the same entity with a trailing slash
        # (i.e. as a folder) must 404.
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path(path + '/', commitSha='a1b2c3d4')

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_folder(self, provider, subfolder_tree):
        # Same as above, but for a folder path (trailing slash).
        path = '/files/lfs/'
        url = ('http://base.url/api/v4/projects/123/repository/tree'
               '?path=files/&page=1&per_page={}&ref=a1b2c3d4'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', url, body=subfolder_tree)
        try:
            folder_path = await provider.validate_v1_path(path, commitSha='a1b2c3d4',
                                                          branch='master')
        except Exception as exc:
            pytest.fail(str(exc))
        assert folder_path.is_folder
        assert not folder_path.is_root
        assert folder_path.commit_sha == 'a1b2c3d4'
        assert folder_path.branch_name == 'master'
        parent_path = folder_path.parent
        assert parent_path.commit_sha == 'a1b2c3d4'
        assert parent_path.branch_name == 'master'
        root_path = parent_path.parent
        assert root_path.commit_sha == 'a1b2c3d4'
        assert root_path.branch_name == 'master'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_no_such_repository(self, provider):
        # A 404 from the project endpoint becomes a NotFoundError.
        provider.repo_id = '456'
        path = '/'
        default_branch_url = 'http://base.url/api/v4/projects/456'
        aiohttpretty.register_json_uri('GET', default_branch_url, body={}, status=404)
        with pytest.raises(exceptions.NotFoundError) as exc:
            _ = await provider.validate_v1_path(path)
        assert exc.value.code == 404

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_uninitialized_repository(self, provider):
        # A repo with no default branch (never pushed to) is a 400.
        provider.repo_id = '456'
        path = '/'
        default_branch_url = 'http://base.url/api/v4/projects/456'
        aiohttpretty.register_json_uri('GET', default_branch_url, body={"default_branch": None})
        with pytest.raises(exceptions.UninitializedRepositoryError) as exc:
            _ = await provider.validate_v1_path(path)
        assert exc.value.code == 400
class TestMetadata:
    """Tests for ``GitLabProvider.metadata`` on files and folders."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_with_default_ref(self, provider, simple_file_metadata,
                                                  revisions_for_file):
        # File metadata plus commit history, addressed by commit sha.
        path = '/folder1/folder2/file'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/files/'
               'folder1%2Ffolder2%2Ffile?ref=a1b2c3d4')
        aiohttpretty.register_json_uri('GET', url, body=simple_file_metadata)
        history_url = ('http://base.url/api/v4/projects/123/repository/commits'
                       '?path=folder1/folder2/file&ref_name=a1b2c3d4&page=1'
                       '&per_page={}'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', history_url, body=revisions_for_file)
        # The etag is derived from provider name, path, and ref.
        etag = hashlib.sha256('{}::{}::{}'.format('gitlab', path, 'a1b2c3d4').encode('utf-8'))\
            .hexdigest()
        result = await provider.metadata(gl_path)
        assert result.serialized() == {
            'name': 'file',
            'kind': 'file',
            'size': 123,
            'sizeInt': 123,
            'provider':'gitlab',
            'path': path,
            'materialized': path,
            'modified': '2017-07-24T16:02:17.000-04:00',
            'modified_utc': '2017-07-24T20:02:17+00:00',
            'created_utc': '2016-11-30T18:30:23+00:00',
            'contentType': None,
            'etag': etag,
            'extra': {
                'commitSha': 'a1b2c3d4',
                'branch': 'master',
                'webView': 'http://base.url/cat/food/blob/master/folder1/folder2/file',
            },
        }
        # Read-only provider: upload/delete links must be absent.
        assert result.json_api_serialized('mst3k')['links'] == {
            'move': ('http://localhost:7777/v1/resources/mst3k/providers/gitlab'
                     '/folder1/folder2/file?commitSha=a1b2c3d4'),
            'upload': None,
            'download': ('http://localhost:7777/v1/resources/mst3k/providers/gitlab'
                         '/folder1/folder2/file?commitSha=a1b2c3d4'),
            'delete': None,
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_with_branch(self, provider,
                                             simple_file_metadata, revisions_for_file):
        # When only a branch is known, links are qualified by branch instead
        # of commit sha.
        path = '/folder1/folder2/file'
        gl_path = GitLabPath(path, _ids=([(None, 'my-branch')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/files/'
               'folder1%2Ffolder2%2Ffile?ref=my-branch')
        aiohttpretty.register_json_uri('GET', url, body=simple_file_metadata)
        history_url = ('http://base.url/api/v4/projects/123/repository/commits'
                       '?path=folder1/folder2/file&ref_name=my-branch&page=1'
                       '&per_page={}'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', history_url, body=revisions_for_file)
        result = await provider.metadata(gl_path)
        assert result.json_api_serialized('mst3k')['links'] == {
            'move': ('http://localhost:7777/v1/resources/mst3k/providers/gitlab'
                     '/folder1/folder2/file?branch=my-branch'),
            'upload': None,
            'download': ('http://localhost:7777/v1/resources/mst3k/providers/gitlab'
                         '/folder1/folder2/file?branch=my-branch'),
            'delete': None,
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file_no_such_file(self, provider):
        path = '/folder1/folder2/file'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/files/'
               'folder1%2Ffolder2%2Ffile?ref=a1b2c3d4')
        aiohttpretty.register_json_uri('GET', url, body={}, status=404)
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.metadata(gl_path)
        assert exc.value.code == 404

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder(self, provider):
        path = '/folder1/folder2/folder3/'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/tree'
               '?path=folder1/folder2/folder3/&ref=a1b2c3d4&page=1'
               '&per_page={}'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', url, body=[
            {
                'id': '123',
                'type': 'tree',
                'name': 'my folder'
            },
            {
                'id': '1234',
                'type': 'file',
                'name': 'my file'
            }
        ])
        result = await provider.metadata(gl_path)
        assert isinstance(result[0], GitLabFolderMetadata)
        assert result[0].name == 'my folder'
        assert result[0].json_api_serialized('mst3k')['links'] == {
            'move': ('http://localhost:7777/v1/resources/mst3k/providers/gitlab'
                     '/folder1/folder2/folder3/my%20folder/?commitSha=a1b2c3d4'),
            'upload': None,
            'delete': None,
            'new_folder': None,
        }
        assert result[1].name == 'my file'
        assert isinstance(result[1], GitLabFileMetadata)
        child_path = provider.path_from_metadata(gl_path, result[1])
        # BUGFIX: these three checks were bare comparison expressions with no
        # `assert`, so they never actually tested anything.
        assert child_path.name == 'my file'
        assert child_path.commit_sha == 'a1b2c3d4'
        assert child_path.branch_name == 'master'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_no_such_folder_200(self, provider):
        # GitLab returns 200 with an empty tree for a nonexistent folder.
        path = '/folder1/folder2/folder3/'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/tree'
               '?path=folder1/folder2/folder3/&ref=a1b2c3d4&page=1'
               '&per_page={}'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', url, body=[])
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.metadata(gl_path)
        assert exc.value.code == 404

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_no_such_folder_404(self, provider):
        path = '/folder1/folder2/folder3/'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/tree'
               '?path=folder1/folder2/folder3/&ref=a1b2c3d4&page=1'
               '&per_page={}'.format(provider.MAX_PAGE_SIZE))
        aiohttpretty.register_json_uri('GET', url, body={}, status=404)
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.metadata(gl_path)
        assert exc.value.code == 404
class TestRevisions:
    """Tests for ``GitLabProvider.revisions`` (a file's commit history)."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_revisions(self, provider, revisions_for_file):
        path = '/folder1/folder2/file'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/commits'
               '?path=folder1/folder2/file&ref_name=a1b2c3d4')
        aiohttpretty.register_json_uri('GET', url, body=revisions_for_file)
        revisions = await provider.revisions(gl_path)
        assert len(revisions) == 3
        # Revisions are identified by commit sha; spot-check the newest one.
        assert revisions[0].serialized() == {
            'version': '931aece9275c0d084dfa7f6e0b3b2bb250e4b089',
            'modified': '2017-07-24T16:02:17.000-04:00',
            'modified_utc': '2017-07-24T20:02:17+00:00',
            'versionIdentifier': 'commitSha',
            'extra': {
                'user': {
                    'name': 'Fitz Elliott',
                },
            },
        }

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_no_such_revision(self, provider):
        # An empty commit list surfaces as a 404 RevisionsError.
        path = '/folder1/folder2/file'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 4))
        url = ('http://base.url/api/v4/projects/123/repository/commits'
               '?path=folder1/folder2/file&ref_name=a1b2c3d4')
        aiohttpretty.register_json_uri('GET', url, body=[])
        with pytest.raises(exceptions.RevisionsError) as exc:
            await provider.revisions(gl_path)
        assert exc.value.code == 404
class TestDownload:
    """Tests for ``GitLabProvider.download``."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download(self, provider):
        # Raw file contents are fetched from the /raw endpoint; the
        # X-Gitlab-Size header supplies the content length.
        path = '/folder1/file.py'
        gl_path = GitLabPath(path, _ids=([('a1b2c3d4', 'master')] * 3))
        url = ('http://base.url/api/v4/projects/123/repository/files'
               '/folder1%2Ffile.py/raw?ref=a1b2c3d4')
        aiohttpretty.register_uri('GET', url, body=b'hello', headers={'X-Gitlab-Size': '5'})
        result = await provider.download(gl_path, branch='master')
        assert await result.read() == b'hello'
class TestReadOnlyProvider:
    """The GitLab provider is read-only: every mutating operation must raise
    ReadOnlyProviderError (HTTP 501)."""

    def test_can_duplicate_names(self, provider):
        # PEP 8 / E712: identity check instead of `== False`.
        assert provider.can_duplicate_names() is False

    @pytest.mark.asyncio
    async def test_upload(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.upload('/foo-file.txt')
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_create_folder(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.create_folder('foo')
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_delete(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.delete()
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_move(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.move()
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_copy_to(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.copy(provider)
        assert e.value.code == 501

    def test_can_intra_move(self, provider):
        assert provider.can_intra_move(provider) is False

    def test_can_intra_copy(self, provider):
        assert provider.can_intra_copy(provider) is False
| |
from contentbase.attachment import ItemWithAttachment
from contentbase.schema_utils import (
load_schema,
)
from contentbase import (
calculated_property,
collection,
)
from pyramid.traversal import find_root
from .base import (
Item,
paths_filtered_by_status,
)
import json
def includeme(config):
    """Pyramid include hook: scan this module so its collections register."""
    config.scan()
### New collections added for handling curation data (06/19/2015)
@collection(
    name='genes',
    unique_key='gene:symbol',
    properties={
        'title': 'HGNC Genes',
        'description': 'List of genes',
    })
class Gene(Item):
    # Gene records are addressed by their HGNC symbol.
    item_type = 'gene'
    schema = load_schema('clincoded:schemas/gene.json')
    name_key = 'symbol'
@collection(
    name='diseases',
    unique_key='orphaPhenotype:orphaNumber',
    properties={
        'title': 'Orphanet Diseases',
        'description': 'List of Orphanet diseases (phenotypes)',
    })
class OrphaPhenotype(Item):
    # Orphanet disease records are addressed by their ORPHA number.
    item_type = 'orphaPhenotype'
    schema = load_schema('clincoded:schemas/orphaPhenotype.json')
    name_key = 'orphaNumber'
# NOTE(review): the triple-quoted string below is commented-out legacy code
# (the Disease, Statistic, and ControlGroup collections), kept for reference.
# It is evaluated as a bare string expression and discarded at import time.
'''
@collection(
    name='diseases',
    unique_key='orphaPhenotype:uuid',
    properties={
        'title': 'diseases',
        'description': 'List of all diseases',
    })
class Disease(Item):
    item_type = 'disease'
    schema = load_schema('clincoded:schemas/disease.json')
    name_key = 'uuid'
@collection(
    name='statistics',
    unique_key='statistic:uuid',
    properties={
        'title': 'Statistical Study',
        'description': 'List of statistical studies in all gdm pairs',
    })
class Statistic(Item):
    item_type = 'statistic'
    schema = load_schema('clincoded:schemas/statistic.json')
    name_key = 'uuid'
    embedded = [
        'variants',
        'assessments'
    ]
@collection(
    name='controlgroups',
    unique_key='controlGroup:uuid',
    properties={
        'title': 'Control Groups',
        'description': 'List of control groups in all gdm pairs',
    })
class ControlGroup(Item):
    item_type = 'controlGroup'
    schema = load_schema('clincoded:schemas/controlGroup.json')
    name_key = 'uuid'
'''
@collection(
    name='articles',
    unique_key='article:pmid',
    properties={
        'title': 'References',
        'description': 'List of PubMed references stored locally',
    })
class Article(Item):
    # PubMed reference records are addressed by their PMID.
    item_type = 'article'
    schema = load_schema('clincoded:schemas/article.json')
    name_key = 'pmid'
    # Linked objects embedded when rendering an article.
    embedded = [
        'submitted_by'
    ]
@collection(
    name='variants',
    unique_key='variant:uuid',
    properties={
        'title': 'Variants',
        'description': 'List of variants stored locally',
    })
class Variant(Item):
    # Variant records have no natural accession here; keyed by uuid.
    item_type = 'variant'
    schema = load_schema('clincoded:schemas/variant.json')
    name_key = 'uuid'
    # Linked objects embedded when rendering a variant.
    embedded = [
        'submitted_by',
        'associatedPathogenicities',
        'associatedPathogenicities.assessments',
        'associatedPathogenicities.assessments.submitted_by',
        'associatedPathogenicities.variant',
        'associatedPathogenicities.submitted_by'
    ]
    # Reverse link: pathogenicity items whose 'variant' field points here.
    rev = {
        'associatedPathogenicities': ('pathogenicity', 'variant')
    }

    @calculated_property(schema={
        "title": "Associated pathogenicities",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "pathogenicity.variant",
        },
    })
    def associatedPathogenicities(self, request, associatedPathogenicities):
        # Filter the rev-linked pathogenicities down to visible statuses.
        return paths_filtered_by_status(request, associatedPathogenicities)
@collection(
    name='gdm',
    unique_key='gdm:uuid',
    properties={
        'title': 'Gene:Disease:Mode',
        'description': 'List of Gene:Disease:Mode pairs',
    })
class Gdm(Item):
    """A Gene:Disease:Mode-of-inheritance (GDM) curation record.

    The large ``embedded`` list tells the renderer/indexer which linked
    objects to embed when a GDM is serialized.
    """
    item_type = 'gdm'
    schema = load_schema('clincoded:schemas/gdm.json')
    name_key = 'uuid'
    embedded = [
        'gene',
        'disease',
        'submitted_by',
        'variantPathogenicity',
        'variantPathogenicity.submitted_by',
        'variantPathogenicity.variant',
        'variantPathogenicity.variant.submitted_by',
        'variantPathogenicity.assessments',
        'variantPathogenicity.assessments.submitted_by',
        'provisionalClassifications',
        'provisionalClassifications.submitted_by',
        'annotations',
        'annotations.article',
        'annotations.article.submitted_by',
        'annotations.submitted_by',
        'annotations.groups',
        'annotations.groups.commonDiagnosis',
        'annotations.groups.submitted_by',
        'annotations.groups.otherGenes',
        'annotations.groups.otherPMIDs',
        'annotations.groups.otherPMIDs.submitted_by',
        #'annotations.groups.statistic',
        #'annotations.groups.statistic.variants',
        'annotations.groups.familyIncluded',
        'annotations.groups.familyIncluded.associatedGroups',
        'annotations.groups.familyIncluded.commonDiagnosis',
        'annotations.groups.familyIncluded.submitted_by',
        'annotations.groups.familyIncluded.otherPMIDs',
        'annotations.groups.familyIncluded.otherPMIDs.submitted_by',
        'annotations.groups.familyIncluded.segregation.variants',
        'annotations.groups.familyIncluded.segregation.variants.submitted_by',
        'annotations.groups.familyIncluded.segregation.variants.associatedPathogenicities',
        'annotations.groups.familyIncluded.segregation.variants.associatedPathogenicities.submitted_by',
        'annotations.groups.familyIncluded.segregation.assessments',
        'annotations.groups.familyIncluded.segregation.assessments.submitted_by',
        'annotations.groups.familyIncluded.individualIncluded',
        'annotations.groups.familyIncluded.individualIncluded.associatedGroups',
        'annotations.groups.familyIncluded.individualIncluded.associatedFamilies',
        'annotations.groups.familyIncluded.individualIncluded.associatedFamilies.associatedGroups',
        'annotations.groups.familyIncluded.individualIncluded.diagnosis',
        'annotations.groups.familyIncluded.individualIncluded.submitted_by',
        'annotations.groups.familyIncluded.individualIncluded.variants',
        'annotations.groups.familyIncluded.individualIncluded.variants.submitted_by',
        'annotations.groups.familyIncluded.individualIncluded.variants.associatedPathogenicities',
        'annotations.groups.familyIncluded.individualIncluded.variants.associatedPathogenicities.submitted_by',
        'annotations.groups.familyIncluded.individualIncluded.otherPMIDs',
        'annotations.groups.familyIncluded.individualIncluded.otherPMIDs.submitted_by',
        'annotations.groups.individualIncluded',
        'annotations.groups.individualIncluded.associatedGroups',
        'annotations.groups.individualIncluded.diagnosis',
        'annotations.groups.individualIncluded.submitted_by',
        'annotations.groups.individualIncluded.variants',
        'annotations.groups.individualIncluded.variants.submitted_by',
        'annotations.groups.individualIncluded.variants.associatedPathogenicities',
        'annotations.groups.individualIncluded.variants.associatedPathogenicities.submitted_by',
        'annotations.groups.individualIncluded.otherPMIDs',
        'annotations.groups.individualIncluded.otherPMIDs.submitted_by',
        #'annotations.groups.control',
        'annotations.families',
        'annotations.families.associatedGroups',
        'annotations.families.commonDiagnosis',
        'annotations.families.submitted_by',
        'annotations.families.otherPMIDs',
        'annotations.families.otherPMIDs.submitted_by',
        'annotations.families.segregation.variants',
        'annotations.families.segregation.variants.submitted_by',
        'annotations.families.segregation.variants.associatedPathogenicities',
        'annotations.families.segregation.variants.associatedPathogenicities.submitted_by',
        'annotations.families.segregation.assessments',
        'annotations.families.segregation.assessments.submitted_by',
        'annotations.families.individualIncluded',
        'annotations.families.individualIncluded.associatedGroups',
        'annotations.families.individualIncluded.associatedFamilies',
        'annotations.families.individualIncluded.associatedFamilies.associatedGroups',
        'annotations.families.individualIncluded.diagnosis',
        'annotations.families.individualIncluded.submitted_by',
        'annotations.families.individualIncluded.variants',
        'annotations.families.individualIncluded.variants.submitted_by',
        'annotations.families.individualIncluded.variants.associatedPathogenicities',
        'annotations.families.individualIncluded.variants.associatedPathogenicities.submitted_by',
        'annotations.families.individualIncluded.otherPMIDs',
        'annotations.families.individualIncluded.otherPMIDs.submitted_by',
        'annotations.individuals',
        'annotations.individuals.associatedGroups',
        'annotations.individuals.associatedFamilies',
        'annotations.individuals.associatedFamilies.associatedGroups',
        'annotations.individuals.diagnosis',
        'annotations.individuals.submitted_by',
        'annotations.individuals.variants',
        'annotations.individuals.variants.submitted_by',
        'annotations.individuals.variants.associatedPathogenicities',
        'annotations.individuals.variants.associatedPathogenicities.submitted_by',
        'annotations.individuals.otherPMIDs',
        'annotations.individuals.otherPMIDs.submitted_by',
        'annotations.experimentalData',
        'annotations.experimentalData.submitted_by',
        'annotations.experimentalData.variants',
        'annotations.experimentalData.variants.submitted_by',
        'annotations.experimentalData.biochemicalFunction.geneWithSameFunctionSameDisease.genes',
        'annotations.experimentalData.biochemicalFunction.geneWithSameFunctionSameDisease.assessments',
        'annotations.experimentalData.biochemicalFunction.geneWithSameFunctionSameDisease.assessments.submitted_by',
        'annotations.experimentalData.biochemicalFunction.geneFunctionConsistentWithPhenotype.assessments',
        'annotations.experimentalData.biochemicalFunction.geneFunctionConsistentWithPhenotype.assessments.submitted_by',
        # NOTE: 'proteinIneractions' (sic) matches the schema's field spelling.
        'annotations.experimentalData.proteinIneractions.assessments',
        'annotations.experimentalData.proteinIneractions.assessments.submitted_by',
        'annotations.experimentalData.expression.normalExpression.assessments',
        'annotations.experimentalData.expression.normalExpression.assessments.submitted_by',
        'annotations.experimentalData.expression.alteredExpression.assessments',
        'annotations.experimentalData.expression.alteredExpression.assessments.submitted_by',
        'annotations.experimentalData.functionalAleration.assessments',
        'annotations.experimentalData.functionalAleration.assessments.submitted_by',
        'annotations.experimentalData.modelSystems.assessments',
        'annotations.experimentalData.modelSystems.assessments.submitted_by',
        'annotations.experimentalData.rescue.assessments',
        'annotations.experimentalData.rescue.assessments.submitted_by'
    ]

    @calculated_property(schema={
        "title": "Status",
        "type": "string",
    })
    def status(self, finalClassification, draftClassification, provisionalClassifications, annotations):
        # Curation progresses: Created -> In Progress -> Provisional ->
        # Draft -> Final; report the furthest stage reached.
        if finalClassification != '':
            return 'Final Classification'
        elif draftClassification != '':
            return 'Draft Classification'
        elif len(provisionalClassifications) > 0:
            return 'Summary/Provisional Classifications'
        elif len(annotations) > 0:
            return 'In Progress'
        else:
            return 'Created'

    @calculated_property(schema={
        "title": "Number of Articles",
        "type": "string",
    })
    def number_article(self, annotations):
        # Fixed parameter-name typo: 'seft' -> 'self'.
        # Counts are rendered as strings (schema type is "string").
        if len(annotations) > 0:
            return str(len(annotations))
        return ""

    @calculated_property(schema={
        "title": "Number of Pathogenicity",
        "type": "string",
    })
    def number_pathogenicity(self, variantPathogenicity):
        # Fixed parameter-name typo: 'seft' -> 'self'.
        if len(variantPathogenicity) > 0:
            return str(len(variantPathogenicity))
        return ""

    @calculated_property(schema={
        "title": "Number of Provisional",
        "type": "string",
    })
    def number_provisional(self, provisionalClassifications):
        # Fixed parameter-name typo: 'seft' -> 'self'.
        if len(provisionalClassifications) > 0:
            return str(len(provisionalClassifications))
        return ""

    @calculated_property(schema={
        "title": "GDM",
        "type": "string",
    })
    def gdm_title(self, gene, disease, modeCode):
        """Build the display title 'SYMBOL-ORPHAID-modeCode' from the linked
        gene and disease paths."""
        # Fixed parameter-name typo: 'seft' -> 'self'.
        gene_symbol = gene.replace('/genes/', '').replace('/', '')
        orpha_id = disease.replace('/diseases/', '').replace('/', '')
        return gene_symbol + '-' + orpha_id + '-' + modeCode
@collection(
    name='evidence',
    unique_key='annotation:uuid',
    properties={
        'title': 'Evidence',
        'description': 'List of evidence for all G:D:M pairs',
    })
class Annotation(Item):
    """A piece of evidence (annotation) attached to a GDM, tied to one
    PubMed article."""
    item_type = 'annotation'
    schema = load_schema('clincoded:schemas/annotation.json')
    name_key = 'uuid'
    embedded = [
        'article',
        'article.submitted_by',
        'submitted_by',
        'groups',
        'groups.commonDiagnosis',
        'groups.submitted_by',
        'groups.otherGenes',
        'groups.otherPMIDs',
        'groups.otherPMIDs.submitted_by',
        'groups.familyIncluded.commonDiagnosis',
        'groups.familyIncluded.submitted_by',
        'groups.familyIncluded.otherPMIDs',
        'groups.familyIncluded.otherPMIDs.submitted_by',
        'groups.familyIncluded.segregation.variants',
        'groups.familyIncluded.segregation.variants.submitted_by',
        'groups.familyIncluded.segregation.assessments',
        'groups.familyIncluded.segregation.assessments.submitted_by',
        'groups.familyIncluded.individualIncluded',
        'groups.familyIncluded.individualIncluded.diagnosis',
        'groups.familyIncluded.individualIncluded.submitted_by',
        'groups.familyIncluded.individualIncluded.variants',
        'groups.familyIncluded.individualIncluded.variants.submitted_by',
        'groups.familyIncluded.individualIncluded.otherPMIDs',
        'groups.familyIncluded.individualIncluded.otherPMIDs.submitted_by',
        'groups.individualIncluded',
        'groups.individualIncluded.diagnosis',
        'groups.individualIncluded.submitted_by',
        'groups.individualIncluded.variants',
        'groups.individualIncluded.variants.submitted_by',
        'groups.individualIncluded.otherPMIDs',
        'groups.individualIncluded.otherPMIDs.submitted_by',
        #'groups.control',
        'families',
        'families.associatedGroups',
        'families.commonDiagnosis',
        'families.submitted_by',
        'families.otherPMIDs',
        'families.otherPMIDs.submitted_by',
        'families.segregation.variants',
        'families.segregation.variants.submitted_by',
        'families.segregation.assessments',
        'families.segregation.assessments.submitted_by',
        'families.individualIncluded',
        'families.individualIncluded.diagnosis',
        'families.individualIncluded.submitted_by',
        'families.individualIncluded.variants',
        'families.individualIncluded.variants.submitted_by',
        'families.individualIncluded.otherPMIDs',
        'families.individualIncluded.otherPMIDs.submitted_by',
        'individuals',
        'individuals.associatedGroups',
        'individuals.associatedFamilies',
        'individuals.diagnosis',
        'individuals.submitted_by',
        'individuals.variants',
        'individuals.variants.submitted_by',
        'individuals.otherPMIDs',
        'individuals.otherPMIDs.submitted_by',
        'experimentalData',
        'experimentalData.submitted_by',
        'experimentalData.variants',
        'experimentalData.variants.submitted_by',
        'experimentalData.biochemicalFunction.geneWithSameFunctionSameDisease.genes',
        'experimentalData.biochemicalFunction.geneWithSameFunctionSameDisease.assessments',
        'experimentalData.biochemicalFunction.geneWithSameFunctionSameDisease.assessments.submitted_by',
        'experimentalData.biochemicalFunction.geneFunctionConsistentWithPhenotype.assessments',
        'experimentalData.biochemicalFunction.geneFunctionConsistentWithPhenotype.assessments.submitted_by',
        # NOTE: 'proteinIneractions' (sic) matches the schema's field spelling.
        'experimentalData.proteinIneractions.assessments',
        'experimentalData.proteinIneractions.assessments.submitted_by',
        'experimentalData.expression.normalExpression.assessments',
        'experimentalData.expression.normalExpression.assessments.submitted_by',
        'experimentalData.expression.alteredExpression.assessments',
        'experimentalData.expression.alteredExpression.assessments.submitted_by',
        'experimentalData.functionalAleration.assessments',
        'experimentalData.functionalAleration.assessments.submitted_by',
        'experimentalData.modelSystems.assessments',
        'experimentalData.modelSystems.assessments.submitted_by',
        'experimentalData.rescue.assessments',
        'experimentalData.rescue.assessments.submitted_by'
    ]

    @calculated_property(schema={
        "title": "Number of Group",
        "type": "string",
    })
    def number_group(self, groups):
        # Fixed parameter-name typo: 'selft' -> 'self'.
        # Return the count as a string: the schema declares "string" and the
        # matching Gdm properties already do so (was a bare int).
        if len(groups) > 0:
            return str(len(groups))
        return ""

    @calculated_property(schema={
        "title": "Number of Family",
        "type": "string",
    })
    def number_family(self, families):
        # Fixed parameter-name typo: 'selft' -> 'self'; stringified count.
        if len(families) > 0:
            return str(len(families))
        return ""

    @calculated_property(schema={
        "title": "Number of Provisioinal Individual",
        "type": "string",
    })
    def number_individual(self, individuals):
        # Fixed parameter-name typo: 'selft' -> 'self'; stringified count.
        if len(individuals) > 0:
            return str(len(individuals))
        return ""

    @calculated_property(schema={
        "title": "Number of Experimental",
        "type": "string",
    })
    def number_experimental(self, experimentalData):
        # Fixed parameter-name typo: 'selft' -> 'self'; stringified count.
        if len(experimentalData) > 0:
            return str(len(experimentalData))
        return ""
@collection(
    name='groups',
    unique_key='group:uuid',
    properties={
        'title': 'Groups',
        'description': 'List of groups in all gdm pairs',
    })
class Group(Item):
    """A group of individuals/families evaluated for a gene-disease pair."""
    item_type = 'group'
    schema = load_schema('clincoded:schemas/group.json')
    name_key = 'uuid'
    # Linked objects expanded inline when a group is rendered or indexed.
    embedded = [
        'commonDiagnosis',
        'submitted_by',
        'otherGenes',
        'otherPMIDs',
        'otherPMIDs.submitted_by',
        #'statistic',
        'familyIncluded',
        'familyIncluded.commonDiagnosis',
        'familyIncluded.submitted_by',
        'familyIncluded.otherPMIDs',
        'familyIncluded.otherPMIDs.submitted_by',
        'familyIncluded.segregation.variants',
        'familyIncluded.segregation.variants.submitted_by',
        'familyIncluded.segregation.assessments',
        'familyIncluded.segregation.assessments.submitted_by',
        'familyIncluded.individualIncluded',
        'familyIncluded.individualIncluded.diagnosis',
        'familyIncluded.individualIncluded.submitted_by',
        'familyIncluded.individualIncluded.variants',
        'familyIncluded.individualIncluded.variants.submitted_by',
        'familyIncluded.individualIncluded.otherPMIDs',
        'familyIncluded.individualIncluded.otherPMIDs.submitted_by',
        'individualIncluded',
        'individualIncluded.diagnosis',
        'individualIncluded.submitted_by',
        'individualIncluded.otherPMIDs',
        'individualIncluded.otherPMIDs.submitted_by',
        'individualIncluded.variants',
        'individualIncluded.variants.submitted_by',
        'associatedAnnotations',
        'associatedAnnotations.article'
        #'control'
    ]
    # Reverse link: annotations whose 'groups' field points at this item.
    rev = {
        'associatedAnnotations': ('annotation', 'groups')
    }
    @calculated_property(schema={
        "title": "Associated annotations",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "annotation.groups",
        },
    })
    def associatedAnnotations(self, request, associatedAnnotations):
        """Annotations referencing this group, filtered by status visibility."""
        return paths_filtered_by_status(request, associatedAnnotations)
@collection(
    name='families',
    unique_key='family:uuid',
    properties={
        'title': 'Families',
        'description': 'List of families in all gdm pairs',
    })
class Family(Item):
    """A family segregating a phenotype, curated for a gene-disease pair."""
    item_type = 'family'
    schema = load_schema('clincoded:schemas/family.json')
    name_key = 'uuid'
    # Linked objects expanded inline when a family is rendered or indexed.
    embedded = [
        'commonDiagnosis',
        'submitted_by',
        'segregation.variants',
        'segregation.variants.submitted_by',
        'segregation.assessments',
        'segregation.assessments.submitted_by',
        'otherPMIDs',
        'otherPMIDs.submitted_by',
        'individualIncluded',
        'individualIncluded.diagnosis',
        'individualIncluded.associatedFamilies',
        'individualIncluded.associatedGroups',
        'individualIncluded.otherPMIDs',
        'individualIncluded.submitted_by',
        'individualIncluded.variants',
        'individualIncluded.variants.submitted_by',
        'associatedGroups',
        'associatedGroups.commonDiagnosis',
        'associatedGroups.associatedAnnotations',
        'associatedGroups.associatedAnnotations.article',
        'associatedAnnotations',
        'associatedAnnotations.article',
    ]
    # Reverse links: groups listing this family, annotations listing it.
    rev = {
        'associatedGroups': ('group', 'familyIncluded'),
        'associatedAnnotations': ('annotation', 'families'),
    }
    @calculated_property(schema={
        "title": "Associated groups",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "group.familyIncluded",
        },
    })
    def associatedGroups(self, request, associatedGroups):
        """Groups whose familyIncluded references this family."""
        return paths_filtered_by_status(request, associatedGroups)
    @calculated_property(schema={
        "title": "Associated annotations",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "annotation.families",
        },
    })
    def associatedAnnotations(self, request, associatedAnnotations):
        """Annotations referencing this family, filtered by status visibility."""
        return paths_filtered_by_status(request, associatedAnnotations)
@collection(
    name='individuals',
    unique_key='individual:uuid',
    properties={
        'title': 'Individuals',
        'description': 'List of individuals in gdm pair',
    })
class Individual(Item):
    """A single proband/individual curated for a gene-disease pair."""
    item_type = 'individual'
    schema = load_schema('clincoded:schemas/individual.json')
    name_key = 'uuid'
    # Linked objects expanded inline when an individual is rendered/indexed.
    embedded = [
        'diagnosis',
        'submitted_by',
        'variants',
        'variants.submitted_by',
        'otherPMIDs',
        'otherPMIDs.submitted_by',
        'associatedGroups',
        'associatedGroups.commonDiagnosis',
        'associatedGroups.associatedAnnotations',
        'associatedGroups.associatedAnnotations.article',
        'associatedFamilies',
        'associatedFamilies.associatedGroups',
        'associatedFamilies.associatedGroups.associatedAnnotations',
        'associatedFamilies.associatedGroups.associatedAnnotations.article',
        'associatedFamilies.associatedAnnotations',
        'associatedFamilies.associatedAnnotations.article',
        'associatedFamilies.commonDiagnosis',
        'associatedAnnotations',
        'associatedAnnotations.article'
    ]
    # Reverse links from groups, families and annotations that include
    # this individual.
    rev = {
        'associatedGroups': ('group', 'individualIncluded'),
        'associatedFamilies': ('family', 'individualIncluded'),
        'associatedAnnotations': ('annotation', 'individuals')
    }
    @calculated_property(schema={
        "title": "Associated groups",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "group.individualIncluded",
        },
    })
    def associatedGroups(self, request, associatedGroups):
        """Groups whose individualIncluded references this individual."""
        return paths_filtered_by_status(request, associatedGroups)
    @calculated_property(schema={
        "title": "Associated families",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "family.individualIncluded",
        },
    })
    def associatedFamilies(self, request, associatedFamilies):
        """Families whose individualIncluded references this individual."""
        return paths_filtered_by_status(request, associatedFamilies)
    @calculated_property(schema={
        "title": "Associated annotations",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "annotation.individuals",
        },
    })
    def associatedAnnotations(self, request, associatedAnnotations):
        """Annotations referencing this individual, filtered by status."""
        return paths_filtered_by_status(request, associatedAnnotations)
@collection(
    name='experimental',
    unique_key='experimental:uuid',
    properties={
        'title': 'Experimental Studies',
        'description': 'List of all experimental studies',
    })
class Experimental(Item):
    """An experimental-evidence record attached to a gene-disease pair."""
    item_type = 'experimental'
    schema = load_schema('clincoded:schemas/experimental.json')
    name_key = 'uuid'
    # Linked objects expanded inline when rendered or indexed.
    # NOTE(review): 'proteinIneractions' and 'functionalAleration' look
    # misspelled, but they must match the field names in the experimental
    # schema — confirm against experimental.json before renaming.
    embedded = [
        'submitted_by',
        'variants',
        'variants.submitted_by',
        'biochemicalFunction.geneWithSameFunctionSameDisease.genes',
        'biochemicalFunction.geneWithSameFunctionSameDisease.assessments',
        'biochemicalFunction.geneWithSameFunctionSameDisease.assessments.submitted_by',
        'biochemicalFunction.geneFunctionConsistentWithPhenotype.assessments',
        'biochemicalFunction.geneFunctionConsistentWithPhenotype.assessments.submitted_by',
        'proteinIneractions.assessments',
        'proteinIneractions.assessments.submitted_by',
        'expression.normalExpression.assessments',
        'expression.normalExpression.assessments.submitted_by',
        'expression.alteredExpression.assessments',
        'expression.alteredExpression.assessments.submitted_by',
        'functionalAleration.assessments',
        'functionalAleration.assessments.submitted_by',
        'modelSystems.assessments',
        'modelSystems.assessments.submitted_by',
        'rescue.assessments',
        'rescue.assessments.submitted_by'
    ]
@collection(
    name='pathogenicity',
    unique_key='pathogenicity:uuid',
    properties={
        'title': 'Pathogenicity',
        'description': 'List of variant pathogenicity',
    })
class Pathogenicity(Item):
    """A pathogenicity evaluation of a variant within a GDM."""
    item_type = 'pathogenicity'
    schema = load_schema('clincoded:schemas/pathogenicity.json')
    name_key = 'uuid'
    # Linked objects expanded inline when rendered or indexed.
    embedded = [
        'submitted_by',
        'variant',
        'variant.associatedPathogenicities',
        'variant.associatedPathogenicities.assessments',
        'variant.associatedPathogenicities.assessments.submitted_by',
        'variant.associatedPathogenicities.submitted_by',
        'variant.associatedPathogenicities.variant',
        'assessments',
        'assessments.submitted_by',
        'associatedGdm',
    ]
    # Reverse link: the GDM whose variantPathogenicity lists this item.
    rev = {
        'associatedGdm': ('gdm', 'variantPathogenicity'),
    }
    @calculated_property(schema={
        "title": "Associated GDM",
        "type": "object",
        "linkFrom": "gdm.variantPathogenicity"
    })
    def associatedGdm(self, request, associatedGdm):
        """GDM(s) referencing this pathogenicity, filtered by status."""
        return paths_filtered_by_status(request, associatedGdm)
    @calculated_property(schema={
        "title": "Number of Assessment",
        "type": "integer"
    })
    def numberOfAssessment(self, assessments):
        """Count of assessments attached to this pathogenicity."""
        return len(assessments)
@collection(
    name='assessments',
    unique_key='assessment:uuid',
    properties={
        'title': 'Assessments',
        'description': 'List of assessments',
    })
class Assessment(Item):
    """A curator's assessment attached to evidence (e.g. a pathogenicity)."""
    item_type = 'assessment'
    schema = load_schema('clincoded:schemas/assessment.json')
    name_key = 'uuid'
    # Linked objects expanded inline when rendered or indexed.
    embedded = [
        'submitted_by',
        'pathogenicity_assessed',
    ]
    # Reverse link: pathogenicities whose assessments list this item.
    rev = {
        'pathogenicity_assessed': ('pathogenicity', 'assessments'),
    }
    @calculated_property(schema={
        "title": "Pathogenicity Assessed",
        "type": ["string", "object"],
        "linkFrom": "pathogenicity.assessments"
    })
    def pathogenicity_assessed(self, request, pathogenicity_assessed):
        """Pathogenicities referencing this assessment, filtered by status."""
        return paths_filtered_by_status(request, pathogenicity_assessed)
@collection(
    name='provisional',
    unique_key='provisionalClassification:uuid',
    properties={
        'title': 'Provisional Classifications',
        'description': 'List of provisional classifications',
    })
class Provisional(Item):
    """A provisional classification recorded against a GDM."""
    item_type = 'provisionalClassification'
    schema = load_schema('clincoded:schemas/provisionalClassification.json')
    name_key = 'uuid'
    # Linked objects expanded inline when rendered or indexed.
    embedded = [
        'submitted_by',
        'gdm_associated',
    ]
    # Reverse link: GDMs whose provisionalClassifications list this item.
    rev = {
        'gdm_associated': ('gdm', 'provisionalClassifications'),
    }
    @calculated_property(schema={
        "title": "GDM Associated",
        "type": ["string", "object"],
        "linkFrom": "gdm.provisionalClassifications"
    })
    def gdm_associated(self, request, gdm_associated):
        """GDMs referencing this classification, filtered by status."""
        return paths_filtered_by_status(request, gdm_associated)
### end of new collections for curation data
@collection(
    name='labs',
    unique_key='lab:name',
    properties={
        'title': 'Labs',
        'description': 'Listing of ENCODE DCC labs',
    })
class Lab(Item):
    """A lab; keyed by name rather than uuid."""
    item_type = 'lab'
    schema = load_schema('clincoded:schemas/lab.json')
    name_key = 'name'
    # Awards (grants) are expanded inline when a lab is rendered.
    embedded = ['awards']
@collection(
    name='awards',
    unique_key='award:name',
    properties={
        'title': 'Awards (Grants)',
        'description': 'Listing of awards (aka grants)',
    })
class Award(Item):
    """A funding award (grant); keyed by name."""
    item_type = 'award'
    schema = load_schema('clincoded:schemas/award.json')
    name_key = 'name'
@collection(
    name='organisms',
    unique_key='organism:name',
    properties={
        'title': 'Organisms',
        'description': 'Listing of all registered organisms',
    })
class Organism(Item):
    """A registered organism; keyed by name."""
    item_type = 'organism'
    schema = load_schema('clincoded:schemas/organism.json')
    name_key = 'name'
@collection(
    name='sources',
    unique_key='source:name',
    properties={
        'title': 'Sources',
        'description': 'Listing of sources and vendors for ENCODE material',
    })
class Source(Item):
    """A source/vendor of material; keyed by name."""
    item_type = 'source'
    schema = load_schema('clincoded:schemas/source.json')
    name_key = 'name'
@collection(
    name='documents',
    properties={
        'title': 'Documents',
        'description': 'Listing of Biosample Documents',
    })
class Document(ItemWithAttachment, Item):
    """A document with a file attachment (via ItemWithAttachment)."""
    item_type = 'document'
    schema = load_schema('clincoded:schemas/document.json')
    # Expand provenance links when a document is rendered.
    embedded = ['lab', 'award', 'submitted_by']
| |
# Copyright (c) 2016 by Gilbert Ramirez <gramirez@a10networks.com>
"""
Show individual records, using logical syntax similar to the "find" command
"""
from instmakelib import instmake_log as LOG
import sys
import re
import os
# Short description shown by the instmake report framework.
description = "Find records using a syntax similar to 'find'"
def usage():
    # Print CLI help for the 'find' report (Python 2 print statements).
    print "find:", description
    print "\t[(] [FIELD] regex [)] [-o|-a] ..."
    print
    print "\tFIELD: --cmdline, --tool, --target, --cwd, --retval"
# Token type identifiers for the 'find' CLI mini-language.
TOK_REGEX = "regular expression"
TOK_TOOL = "--tool"
TOK_TARGET = "--target"
TOK_CWD = "--cwd"
TOK_RETVAL = "--retval"
TOK_CMDLINE = "--cmdline"
# Tokens that name a record field; each must be followed by a regex.
FIELDNAME_TOKENS = (TOK_TOOL, TOK_TARGET, TOK_CWD, TOK_RETVAL, TOK_CMDLINE)
# Boolean connectives and grouping tokens.
TOK_AND = "-a"
TOK_OR = "-o"
TOK_LPAREN = "("
TOK_RPAREN = ")"
class Token:
    """A single lexical token from the 'find' command line.

    Pairs a type (one of the TOK_* identifiers) with an optional value;
    fieldname tokens later absorb their compiled regex as the value.
    """
    def __init__(self, type_, value):
        self.type_ = type_
        self.value = value

    def Type(self):
        """Return the token's type identifier."""
        return self.type_

    def Value(self):
        """Return the token's value (may be None)."""
        return self.value

    def SetValue(self, value):
        """Attach a value to this token; the value may only be set once."""
        if self.value is not None:
            message = "%s has value %s; cannot set to %s" % (
                self, self.value, value)
            raise ValueError(message)
        self.value = value

    def __str__(self):
        return "<Token %s/%s>" % (self.type_, self.value)
class TokenStream:
    """Records tokens coming in from the CLI processor"""
    def __init__(self):
        # Tokens in the order they were appended.
        self.tokens = []
    def __len__(self):
        return len(self.tokens)
    def Append(self, token):
        """Will raise ValueError if the token type is not
        valid in terms of the sequence of other tokens"""
        assert isinstance(token, Token)
        if len(self.tokens) == 0:
            # A query cannot start with a boolean connective.
            if token.Type() in (TOK_AND, TOK_OR):
                raise ValueError("%s cannot be the first option" % token.Type())
        else:
            # If this doesn't raise an exception, then we can continue
            self._CheckNewToken(token)
        self.tokens.append(token)
    def _CheckNewToken(self, token):
        # Enforce which token types may legally follow the previous one.
        last_token = self.tokens[-1]
        if last_token.Type() in FIELDNAME_TOKENS:
            # A fieldname option must be immediately followed by its regex.
            if token.Type() != TOK_REGEX:
                raise ValueError("After %s, a regex is required, not %s" % (
                    last_token.Type(), token.Type()))
        elif last_token.Type() in (TOK_AND, TOK_OR, TOK_LPAREN):
            # Connectives and '(' must be followed by a field option or '('.
            if token.Type() not in FIELDNAME_TOKENS + (TOK_LPAREN,):
                raise ValueError("After %s, a field option is required, not %s" % (
                    last_token.Type(), token.Type()))
        elif last_token.Type() == TOK_REGEX:
            # After a complete field test: connective, ')' or a new field
            # (adjacent fields are treated like an implicit sequence).
            if token.Type() not in (TOK_AND, TOK_OR, TOK_RPAREN) + FIELDNAME_TOKENS:
                raise ValueError("After a regex, %s is not allowed" % (
                    token.Type()))
        elif last_token.Type() == TOK_RPAREN:
            if token.Type() not in (TOK_AND, TOK_OR, TOK_RPAREN) + FIELDNAME_TOKENS:
                raise ValueError("After a ), %s is not allowed" % (
                    token.Type()))
    def CheckFinal(self):
        # The stream must end on a completed test or a closing paren.
        last_token = self.tokens[-1]
        if last_token.Type() not in (TOK_REGEX, TOK_RPAREN):
            raise ValueError("%s cannot be the final option" % (last_token.Type(),))
    def Parse(self):
        """Parse the token stream, and return a SyntaxTreeNode, which is the
        root node of the overall syntax tree"""
        # Combine a fieldname token with its TOK_REGEX value
        new_token_stream = []
        for token in self.tokens:
            self._SetFieldnameValues(new_token_stream, token)
        # Parse into a syntax tree
        node_stack = []
        for token in new_token_stream:
            self._Parse(node_stack, token)
        if len(node_stack) != 1:
            # Dump the leftover nodes to aid debugging (Python 2 prints).
            print >> sys.stderr, "Nodes:"
            for node in node_stack:
                print >> sys.stderr, " ", node
            raise ValueError("Expected 1 node after parsing")
        return node_stack[0]
    def _SetFieldnameValues(self, new_token_stream, token):
        # Fold each TOK_REGEX into the preceding fieldname token's value.
        if len(new_token_stream) == 0:
            new_token_stream.append(token)
        elif token.Type() == TOK_REGEX:
            last_token = new_token_stream[-1]
            assert last_token.Type() in FIELDNAME_TOKENS
            last_token.SetValue(token.Value())
        else:
            new_token_stream.append(token)
    def _Parse(self, stack, token):
        # Shift/reduce-style construction of the syntax tree on a stack.
        assert token.Type() != TOK_REGEX
        node = Node(token)
        if len(stack) == 0:
            stack.append(node)
            return
        last_node = stack[-1]
        if token.Type() in (TOK_AND, TOK_OR):
            # A connective absorbs the node on top of the stack as its
            # left operand and takes its place.
            node.SetLeft(last_node)
            stack[-1] = node
        elif token.Type() == TOK_LPAREN:
            stack.append(node)
        elif token.Type() in FIELDNAME_TOKENS:
            last_node = stack[-1]
            if last_node.Type() in (TOK_AND, TOK_OR):
                # Complete the pending connective with this field test.
                last_node.SetRight(node)
            else:
                stack.append(node)
        elif token.Type() == TOK_RPAREN:
            self._CloseParens(stack)
        else:
            raise ValueError("Unexpected token %s" % token)
    def _CloseParens(self, stack):
        # Replace "( expr" on the stack with just "expr", then attach it
        # to a pending AND/OR below, if any.
        assert len(stack) >= 2
        last_node = stack[-1]
        lparen_node = stack[-2]
        assert lparen_node.Type() == TOK_LPAREN
        stack[-2] = last_node
        del stack[-1]
        if len(stack) == 1:
            return
        # AND(X, None) Y -> AND(X, Y)
        # OR(X, None) Y -> OR(X, Y)
        prev_node = stack[-2]
        if prev_node.Type() in (TOK_AND, TOK_OR):
            prev_node.SetRight(last_node)
            del stack[-1]
class Node:
"""One node in a syntax tree."""
def __init__(self, token):
assert isinstance(token, Token)
self.token = token
self.left = None
self.right = None
def Type(self):
return self.token.Type()
def __str__(self):
return "<Node %s>" % (self.token)
def Dump(self, indent=0, tag=""):
spaces = " " * indent + tag
print "%s%s%s" % (spaces, tag, self.token)
if self.left is not None:
self.left.Dump(indent + 1, "L:")
if self.right is not None:
self.right.Dump(indent + 1, "R:")
def SetLeft(self, node):
assert isinstance(node, Node)
if self.left is not None:
raise ValueError("%s already has left %s, can't set to %s" % (
self, self.left, node))
self.left = node
def SetRight(self, node):
assert isinstance(node, Node)
if self.right is not None:
raise ValueError("%s already has right %s, can't set to %s" % (
self, self.right, node))
self.right = node
def Apply(self, rec):
"""Apply the syntax node logic to a record.
Returns True/False, if the record matches the syntax node or not."""
if self.token.Type() == TOK_AND:
assert self.left is not None
assert self.right is not None
if not self.left.Apply(rec):
# Short-circuit
return False
return self.right.Apply(rec)
elif self.token.Type() == TOK_OR:
assert self.left is not None
assert self.right is not None
if self.left.Apply(rec):
# Short-circuit
return True
return self.right.Apply(rec)
elif self.token.Type() in FIELDNAME_TOKENS:
if self.token.Type() == TOK_TOOL:
field = rec.tool
elif self.token.Type() == TOK_TARGET:
# Can be None
field = rec.make_target or ""
elif self.token.Type() == TOK_CWD:
field = rec.cwd
elif self.token.Type() == TOK_RETVAL:
field = str(rec.retval)
elif self.token.Type() == TOK_CMDLINE:
field = rec.cmdline
else:
raise ValueError("Unexpected field name %s" % self.token.Type())
assert self.token.Value() is not None
m = self.token.Value().search(field)
return m is not None
else:
raise ValueError("Unexpected token type: %s" % self.token.Type())
def report(log_file_names, args):
    """Run the 'find' report: print every log record matching the query.

    log_file_names must contain exactly one instmake log path; args is
    the raw CLI search expression. Exits via sys.exit on usage errors.
    """
    # Exactly one log file is supported by this report.
    if len(log_file_names) != 1:
        sys.exit("'find' report uses one log file.")
    log_file_name = log_file_names[0]
    # Tokenize and validate the search expression; report any syntax
    # problem through sys.exit with the ValueError text.
    try:
        token_stream = ParseCLI(args)
        if not token_stream:
            sys.exit("No search options given")
        token_stream.CheckFinal()
    except ValueError as e:
        sys.exit(e)
    syntax_tree = token_stream.Parse()
    # syntax_tree.Dump()
    # Scan every record in the log, printing those the tree accepts.
    log = LOG.LogFile(log_file_name)
    while True:
        try:
            rec = log.read_record()
        except EOFError:
            log.close()
            break
        if syntax_tree.Apply(rec):
            rec.Print()
def ParseCLI(args):
    """Convert raw CLI arguments into a validated TokenStream.

    Every fieldname option must be followed by a regular expression,
    which is compiled here. Raises ValueError for a bad regex or for an
    argument that is not a recognized option.
    """
    STATE_ANY = 0
    STATE_REGEX = 1
    state = STATE_ANY
    token_stream = TokenStream()
    for arg_num, arg in enumerate(args, start=1):
        if state == STATE_REGEX:
            # The previous option named a field; this arg is its regex.
            try:
                regex = re.compile(arg)
            except re.error as e:
                raise ValueError("Incorrect regex %s: %s" % (arg, e))
            token_stream.Append(Token(TOK_REGEX, regex))
            state = STATE_ANY
        elif arg in (TOK_AND, TOK_OR, TOK_LPAREN, TOK_RPAREN):
            token_stream.Append(Token(arg, None))
        elif arg in FIELDNAME_TOKENS:
            token_stream.Append(Token(arg, None))
            state = STATE_REGEX
        else:
            raise ValueError("%s unexpected as argument #%d" % (
                arg, arg_num))
    return token_stream
| |
import yaml
import logging
import os
from flask import _app_ctx_stack
from werkzeug.local import LocalProxy
from moxie.places.importers.loaders import OrderedDictYAMLLoader
logger = logging.getLogger(__name__)
# IDs are used to identify the doc; it doesn't make sense to merge them.
# Solr will complain if this is done, as we also return the _version_ of the
# doc; this must align with the correct "ID" or a 409 (conflict) error is
# raised by Solr.
SPECIAL_KEYS = ['id', '_version_']
# Keys overwritten wholesale when the new doc wins on precedence.
MANAGED_KEYS = ['name', 'name_sort', 'location']
# Keys whose list values are unioned between the existing and new doc.
MERGABLE_KEYS = ['identifiers', 'tags', 'type', 'type_name']
# Field recording how trustworthy the stored doc's source importer is.
PRECEDENCE_KEY = 'meta_precedence'
# Keys we don't want to copy over to the merged doc
PROTECTED_KEYS = MANAGED_KEYS + MERGABLE_KEYS + SPECIAL_KEYS
class ACIDException(Exception):
    """Raised when more than one existing document matches a search, so a
    safe (unambiguous) merge cannot be performed."""
    pass
def get_types_dict():
    """Return the POI types tree, loading and caching it on the app context.

    The YAML tree (poi-types.yaml, one directory up) is parsed once per
    Flask application context and memoized as ``ctx.places_types``.
    """
    ctx = _app_ctx_stack.top
    types = getattr(ctx, 'places_types', None)
    if types is None:
        path = os.path.join(os.path.dirname(__file__), '..', 'poi-types.yaml')
        # Use a context manager so the file handle is closed promptly
        # (the original relied on the garbage collector to close it).
        with open(path) as f:
            types = yaml.load(f, OrderedDictYAMLLoader)
        ctx.places_types = types
    return types
types = LocalProxy(get_types_dict)
def prepare_document(doc, results, precedence):
    """
    Prepare a document to be inserted in a datastore
    @param doc: doc to be inserted
    @param results: results of the search concerning this doc
    @param precedence: precedence to be applied depending on importers
    @return: document updated to fill
    @raise ACIDException: if more than one existing document matches
    """
    # TODO this function shouldn't be called by importers but should be part of the import process at a lower level
    # Add the "friendly" name of the type to the full-text search field
    try:
        doc['type_name'] = find_type_name(doc["type"])
    except KeyError:
        # Bug fix: use .get() here. If the KeyError came from a missing
        # "type" key, doc["type"] inside this handler would raise again.
        logger.warning("Couldn't find name for type '{0}'.".format(doc.get("type")))
    # Attempt to merge documents
    if len(results.results) == 0:
        # No existing doc: stamp the precedence and insert as-is.
        doc[PRECEDENCE_KEY] = precedence
        return doc
    elif len(results.results) == 1:
        # Exactly one match: merge the new data into the stored doc.
        return merge_docs(results.results[0], doc, precedence)
    else:
        # Ambiguous match: refuse to guess which doc to merge into.
        raise ACIDException()
def find_type_name(type_paths, singular=True):
    """
    Find the name of the type from its path
    @param type_paths: a list of types a single POI might be e.g.
        ["/amenity/hospital", "/university/site"] (e.g. JR)
        also supports being passed a single type_path e.g.
        "/transport/bus-stop"
    @param singular: optional parameter whether it should be a singular or plural name
    @return: list of names (singular or plural) e.g. ["Hospital", "Site"]
    """
    # Normalise: accept a bare string as a one-element list.
    paths = type_paths if isinstance(type_paths, list) else [type_paths]
    name_key = "name_singular" if singular else "name_plural"
    names = []
    for path in paths:
        leaf = path.split("/")[-1]
        node = find_type(types, path, leaf, 1)
        names.append(node[name_key])
    return names
def find_type(data, path, to_find, count):
    """
    Recursive function used by find_type_name to find a node in a tree
    @param data: dictionary to use
    @param path: path to traverse in the dictionary
    @param to_find: actual node to find
    @param count: current part of the path
    @return: dictionary for the node, or None if the path dead-ends
    """
    segment = path.split("/")[count]
    if segment == to_find:
        # Reached the target segment: its entry is the node we want.
        return data[segment]
    subtree = data[segment]
    # Descend only when this node has children under 'types'; otherwise
    # fall through and return None (preserved from the original).
    if 'types' in subtree:
        return find_type(subtree["types"], path, to_find, count + 1)
def merge_docs(current_doc, new_doc, new_precedence):
    """Given two documents, attempt to merge according to the precedence rules
    So if the new_precedence is greater than our existing we overwrite the
    managed keys in the updated document.
    @param new_precedence Integer proportional to the reliability of new data
    """
    # Union the list-valued keys (identifiers, tags, ...) first.
    new_doc = merge_keys(current_doc, new_doc, MERGABLE_KEYS)
    # A higher-precedence source may overwrite the managed fields.
    if new_precedence > current_doc.get(PRECEDENCE_KEY, -1):
        current_doc[PRECEDENCE_KEY] = new_precedence
        for key in (k for k in MANAGED_KEYS if k in new_doc):
            current_doc[key] = new_doc[key]
    # Remove any protected keys, this includes all MERGABLE_KEYS, MANAGED_KEYS
    # and SPECIAL_KEYS; copy every remaining item over to the stored doc.
    unprotected = set(new_doc) - set(PROTECTED_KEYS)
    current_doc.update({key: new_doc[key] for key in unprotected})
    return current_doc
def merge_keys(current_doc, new_doc, keys):
    """For each key in *keys*, union the current and new values (treated
    as lists) and store the result on *new_doc*. A scalar new value is
    wrapped in a single-element list first."""
    for key in keys:
        incoming = new_doc.get(key, [])
        if not isinstance(incoming, list):
            incoming = [incoming]
        existing = current_doc.get(key, [])
        new_doc[key] = merge_values(existing, incoming)
    return new_doc
def merge_values(current_vals, new_vals):
    """Return the deduplicated union of the two lists.

    NOTE(review): *current_vals* is extended in place — a side effect
    preserved from the original; merge_docs appears to rely on the
    mutated list surviving on the stored document. Confirm before
    making this non-mutating.
    """
    current_vals += new_vals  # in-place extend, same effect as .extend()
    return list(set(current_vals))
def format_uk_telephone(value):
    """
    Formats UK telephone numbers to E.123 format (national notation)
    University number ranges are also formatted according to internal guidelines
    """
    # Normalise a number: strip the optional "(0)", spaces and hyphens,
    # and convert a leading national "0" to the +44 country code.
    # (Removed the unused 'normalised' local from the original.)
    value = value.replace("(0)", "").replace(" ", "").replace("-", "")
    if value.startswith("0"):
        value = "+44" + value[1:]
    # Convert UK numbers into national format
    if value.startswith("+44"):
        value = "0" + value[3:]
    # Now apply rules on how to split up area codes
    if value[:8] in ('01332050', '01382006'):
        # Direct dial only
        value = value[:5] + " " + value[5:]
    elif value[:7] in ('0141005', '0117101') or value[:6] in ('011800',):
        # Direct dial only
        value = value[:4] + " " + value[4:7] + " " + value[7:]
    elif value[:7] in ('0200003',):
        # Direct dial only
        value = value[:3] + " " + value[3:7] + " " + value[7:]
    elif value.startswith('01'):
        if value[2] == '1' or value[3] == '1':
            # 4 digit area codes
            area_code = value[:4]
            local_part = value[4:7] + " " + value[7:]
        elif value[:6] in (
            '013873', # Langholm
            '015242', # Hornby
            '015394', # Hawkshead
            '015395', # Grange-over-Sands
            '015396', # Sedbergh
            '016973', # Wigton
            '016974', # Raughton Head
            '016977', # Brampton
            '017683', # Appleby
            '017684', # Pooley Bridge
            '017687', # Keswick
            '019467', # Gosforth
        ):
            # 6 digit area codes
            area_code = value[:4] + " " + value[4:6]
            local_part = value[6:]
        else:
            # 5 digit
            area_code = value[:5]
            local_part = value[5:]
        value = "(%s) %s" % (area_code, local_part)
    elif value.startswith('02'):
        # 3 digit area codes
        value = "(%s) %s %s" % (value[:3], value[3:7], value[7:])
    elif value.startswith(('0500', '0800')):
        # direct dial - 4 digit prefix, short following
        value = "%s %s" % (value[:4], value[4:])
    elif value.startswith(('03', '08', '09')):
        # direct dial - 4 digit prefix
        value = "%s %s %s" % (value[:4], value[4:7], value[7:])
    elif value.startswith(('05', '070')):
        # direct dial - 3 digit prefix
        value = "%s %s %s" % (value[:3], value[3:7], value[7:])
    elif value.startswith('07'):
        # direct dial - 5 digit prefix, short following
        value = "%s %s" % (value[:5], value[5:])
    # Now apply University rules:
    if value[:10] in ('(01865) 27', '(01865) 28', '(01865) 43', '(01865) 61'):
        # Oxford - list of internal number prefixes here:
        # http://www.oucs.ox.ac.uk/telecom/directories/intdiraccess.xml
        value = "(01865 " + value[8] + ")" + value[9:]
    return value
| |
import unittest, os, os.path, sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))
import backend
backend.load()
class GCCBackendTest(unittest.TestCase):
    """Compile-and-run smoke test for the gcc backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'gcc',
            'files': {
                'main.c': {'data': """
#include <stdio.h>
int main(void) {
    printf("Hello World");
    return 0;
}
"""}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World')
class CPPBackendTest(unittest.TestCase):
    """Compile-and-run smoke test for the cpp backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'cpp',
            'files': {
                'main.cpp': {'data': """
#include <iostream>
using namespace std;
int main() {
    cout << "Hello World" << endl;
    return 0;
}
"""}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class Python2BackendTest(unittest.TestCase):
    """Run smoke test for the python2 backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'python2',
            'files': {
                'main.py': {'data': "print 'Hello World'"}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class Python3BackendTest(unittest.TestCase):
    """Run smoke test for the python3 backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'python3',
            'files': {
                'main.py': {'data': "print('Hello World')"}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class HaskellBackendTest(unittest.TestCase):
    """Compile-and-run smoke test for the haskell backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'haskell',
            'files': {
                'main.hs': {'data': 'main = print "Hello World"\n'}}})
        # assertEqual: assertEquals is a deprecated alias.
        # Haskell's print shows the string with its quotes.
        self.assertEqual(res['run']['stdout'], '"Hello World"\n')
class RubyBackendTest(unittest.TestCase):
    """Run smoke test for the ruby backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'ruby',
            'files': {
                'main.rb': {'data': "puts 'Hello World'"}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class ScalaBackendTest(unittest.TestCase):
    """Run smoke test for the scala backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'scala',
            'files': {
                'main.sc': {'data': 'println("Hello World")'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class TclBackendTest(unittest.TestCase):
    """Run smoke test for the tcl backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'tcl',
            'files': {
                'main.tcl': {'data': 'puts "Hello World"'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class JavaBackendTest(unittest.TestCase):
    """Compile-and-run smoke test for the java backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'java',
            'files': {
                'Main.java': {'data': """
class Main {
    public static void main(String[] args) {
        System.out.println("Hello World");
    }
}
"""}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class PerlBackendTest(unittest.TestCase):
    """Run smoke test for the perl backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'perl',
            'files': {
                'main.pl': {'data': 'print "Hello World"'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World')
class BashBackendTest(unittest.TestCase):
    """Run smoke test for the bash backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'bash',
            'files': {
                'main.sh': {'data': 'echo "Hello World"'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class PHPBackendTest(unittest.TestCase):
    """Run smoke test for the php backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'php',
            'files': {
                'main.php': {'data': '<?php echo "Hello World";'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World')
class ClojureBackendTest(unittest.TestCase):
    """Run smoke test for the clojure backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'clojure',
            'files': {
                'main.clj': {'data': '(println "Hello World")'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class CommonLispBackendTest(unittest.TestCase):
    """Run smoke test for the commonlisp backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'commonlisp',
            'files': {
                'main.lisp': {'data': '(format t "Hello World")'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World')
class JavaScriptBackendTest(unittest.TestCase):
    """Run smoke test for the javascript backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'javascript',
            'files': {
                'main.js': {'data': 'console.log("Hello World")'}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World\n')
class GoBackendTest(unittest.TestCase):
    """Compile-and-run smoke test for the go backend."""
    def setUp(self):
        self.runner = backend.Runner()
    def test_compile(self):
        res = self.runner.delegate({
            'backend': 'go',
            'files': {
                'main.go': {'data': """
package main
import "fmt"; func main() { fmt.Printf("Hello World") }
"""}}})
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(res['run']['stdout'], 'Hello World')
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| |
'''Skype client user interface control.
'''
from enums import *
from errors import ISkypeError
from utils import *
import weakref
class IClient(object):
'''Represents a Skype client. Access using L{ISkype.Client<skype.ISkype.Client>}.
'''
    def __init__(self, Skype):
        '''Initialise the client wrapper.

        Only a weak reference to the Skype object is stored, so this
        object does not keep the ISkype instance alive.
        @param Skype: Skype application object this client belongs to.
        @type Skype: L{ISkype}
        '''
        # NOTE(review): the _Skype accessor used by the other methods is
        # presumably a property (defined outside this view) that
        # dereferences this weakref — confirm.
        self._SkypeRef = weakref.ref(Skype)
    def ButtonPressed(self, Key):
        '''Sends a button-pressed event to the client.
        @param Key: Key
        @type Key: unicode
        '''
        self._Skype._DoCommand('BTN_PRESSED %s' % Key)
    def ButtonReleased(self, Key):
        '''Sends a button-released event to the client.
        @param Key: Key
        @type Key: unicode
        '''
        self._Skype._DoCommand('BTN_RELEASED %s' % Key)
    def CreateEvent(self, EventId, Caption, Hint):
        '''Creates a custom event displayed in Skype client's events pane.
        @param EventId: Unique identifier for the event.
        @type EventId: unicode
        @param Caption: Caption text.
        @type Caption: unicode
        @param Hint: Hint text. Shown when mouse hovers over the event.
        @type Hint: unicode
        @return: Event object.
        @rtype: L{IPluginEvent}
        '''
        # Caption and hint are quoted for the Skype command protocol.
        self._Skype._DoCommand('CREATE EVENT %s CAPTION %s HINT %s' % (EventId, quote(Caption), quote(Hint)))
        return IPluginEvent(EventId, self._Skype)
    def CreateMenuItem(self, MenuItemId, PluginContext, CaptionText, HintText=u'', IconPath='', Enabled=True,
                       ContactType=pluginContactTypeAll, MultipleContacts=False):
        '''Creates custom menu item in Skype client's "Do More" menus.
        @param MenuItemId: Unique identifier for the menu item.
        @type MenuItemId: unicode
        @param PluginContext: Menu item context. Allows to choose in which client windows will
        the menu item appear.
        @type PluginContext: L{Plug-in context<enums.pluginContextUnknown>}
        @param CaptionText: Caption text.
        @type CaptionText: unicode
        @param HintText: Hint text (optional). Shown when mouse hovers over the menu item.
        @type HintText: unicode
        @param IconPath: Path to the icon (optional).
        @type IconPath: unicode
        @param Enabled: Initial state of the menu item. True by default.
        @type Enabled: bool
        @param ContactType: In case of L{pluginContextContact<enums.pluginContextContact>} tells which contacts
        the menu item should appear for. Defaults to L{pluginContactTypeAll<enums.pluginContactTypeAll>}.
        @type ContactType: L{Plug-in contact type<enums.pluginContactTypeUnknown>}
        @param MultipleContacts: Set to True if multiple contacts should be allowed (defaults to False).
        @type MultipleContacts: bool
        @return: Menu item object.
        @rtype: L{IPluginMenuItem}
        '''
        # Build the CREATE MENU_ITEM protocol command, appending optional
        # clauses only when the caller supplied them.
        com = 'CREATE MENU_ITEM %s CONTEXT %s CAPTION %s ENABLED %s' % (MenuItemId, PluginContext, quote(CaptionText), cndexp(Enabled, 'true', 'false'))
        if HintText:
            com += ' HINT %s' % quote(HintText)
        if IconPath:
            com += ' ICON %s' % quote(IconPath)
        if MultipleContacts:
            com += ' ENABLE_MULTIPLE_CONTACTS true'
        if PluginContext == pluginContextContact:
            # The contact-type filter only applies in the contact context.
            com += ' CONTACT_TYPE_FILTER %s' % ContactType
        self._Skype._DoCommand(com)
        return IPluginMenuItem(MenuItemId, self._Skype, CaptionText, HintText, Enabled)
def Focus(self):
'''Brings the client window into focus.
'''
self._Skype._DoCommand('FOCUS')
def Minimize(self):
'''Hides Skype application window.
'''
self._Skype._DoCommand('MINIMIZE')
def OpenAddContactDialog(self, Username=''):
'''Opens "Add a Contact" dialog.
@param Username: Optional Skypename of the contact.
@type Username: unicode
'''
self.OpenDialog('ADDAFRIEND', Username)
def OpenAuthorizationDialog(self, Username):
'''Opens authorization dialog.
@param Username: Skypename of the user to authenticate.
@type Username: unicode
'''
self.OpenDialog('AUTHORIZATION', Username)
def OpenBlockedUsersDialog(self):
'''Opens blocked users dialog.
'''
self.OpenDialog('BLOCKEDUSERS')
def OpenCallHistoryTab(self):
'''Opens call history tab.
'''
self.OpenDialog('CALLHISTORY')
def OpenConferenceDialog(self):
'''Opens create conference dialog.
'''
self.OpenDialog('CONFERENCE')
def OpenContactsTab(self):
'''Opens contacts tab.
'''
self.OpenDialog('CONTACTS')
def OpenDialog(self, Name, *Params):
'''Open dialog. Use this method to open dialogs added in newer Skype versions if there is no
dedicated method in Skype4Py.
@param Name: Dialog name.
@type Name: unicode
@param Params: One or more optional parameters.
@type Params: unicode
'''
self._Skype._DoCommand('OPEN %s %s' % (Name, ' '.join(Params)))
def OpenDialpadTab(self):
'''Opens dial pad tab.
'''
self.OpenDialog('DIALPAD')
def OpenFileTransferDialog(self, Username, Folder):
'''Opens file transfer dialog.
@param Username: Skypename of the user.
@type Username: unicode
@param Folder: Path to initial directory.
@type Folder: unicode
'''
self.OpenDialog('FILETRANSFER', Username, 'IN %s' % Folder)
def OpenGettingStartedWizard(self):
'''Opens getting started wizard.
'''
self.OpenDialog('GETTINGSTARTED')
def OpenImportContactsWizard(self):
'''Opens import contacts wizard.
'''
self.OpenDialog('IMPORTCONTACTS')
def OpenLiveTab(self):
'''OpenLiveTab.
'''
self.OpenDialog('LIVETAB')
def OpenMessageDialog(self, Username, Text=''):
'''Opens "Send an IM Message" dialog.
@param Username: Message target.
@type Username: unicode
@param Text: Message text.
@type Text: unicode
'''
self.OpenDialog('IM', Username, Text)
def OpenOptionsDialog(self, Page=''):
'''Opens options dialog.
@param Page: Page name to open.
@type Page: unicode
'''
self.OpenDialog('OPTIONS', Page)
def OpenProfileDialog(self):
'''Opens current user profile dialog.
'''
self.OpenDialog('PROFILE')
def OpenSearchDialog(self):
'''Opens search dialog.
'''
self.OpenDialog('SEARCH')
def OpenSendContactsDialog(self, Username=''):
'''Opens send contacts dialog.
@param Username: Optional Skypename of the user.
@type Username: unicode
'''
self.OpenDialog('SENDCONTACTS', Username)
def OpenSmsDialog(self, SmsId):
'''Opens SMS window
@param SmsId: SMS message Id.
@type SmsId: int
'''
self.OpenDialog('SMS', SmsId)
def OpenUserInfoDialog(self, Username):
'''Opens user information dialog.
@param Username: Skypename of the user.
@type Username: unicode
'''
self.OpenDialog('USERINFO', Username)
def OpenVideoTestDialog(self):
'''Opens video test dialog.
'''
self.OpenDialog('VIDEOTEST')
def Shutdown(self):
'''Closes Skype application.
'''
self._Skype._API.Shutdown()
def Start(self, Minimized=False, Nosplash=False):
'''Starts Skype application.
@param Minimized: If True, Skype is started minized in system tray.
@type Minimized: bool
@param Nosplash: If True, no splash screen is displayed upon startup.
@type Nosplash: bool
'''
self._Skype._API.Start(Minimized, Nosplash)
def _Get_Skype(self):
skype = self._SkypeRef()
if skype:
return skype
raise ISkypeError('Skype4Py internal error')
_Skype = property(_Get_Skype)
def _GetIsRunning(self):
return self._Skype._API.IsRunning()
IsRunning = property(_GetIsRunning,
doc='''Tells if Skype client is running.
@type: bool
''')
def _GetWallpaper(self):
return self._Skype.Variable('WALLPAPER')
def _SetWallpaper(self, value):
self._Skype.Variable('WALLPAPER', value)
Wallpaper = property(_GetWallpaper, _SetWallpaper,
doc='''Path to client wallpaper bitmap.
@type: unicode
''')
def _GetWindowState(self):
return self._Skype.Variable('WINDOWSTATE')
def _SetWindowState(self, value):
self._Skype.Variable('WINDOWSTATE', value)
WindowState = property(_GetWindowState, _SetWindowState,
doc='''Client window state.
@type: L{Window state<enums.wndUnknown>}
''')
class IPluginEvent(Cached):
    '''A custom event shown in the events pane of the Skype client.
    '''
    def _Init(self, Id, Skype):
        self._Skype = Skype
        self._Id = unicode(Id)
    def __repr__(self):
        inner = Cached.__repr__(self)[1:-1]
        return '<%s with Id=%s>' % (inner, repr(self.Id))
    def Delete(self):
        '''Removes this event from the events pane in the Skype client.
        '''
        self._Skype._DoCommand('DELETE EVENT %s' % self._Id)
    def _GetId(self):
        return self._Id
    Id = property(_GetId,
    doc='''Unique event Id.
    @type: unicode
    ''')
class IPluginMenuItem(Cached):
    '''A custom menu item placed in the "Do More" menus of the Skype client.
    '''
    def _Init(self, Id, Skype, Caption=None, Hint=None, Enabled=None):
        self._Skype = Skype
        self._Id = unicode(Id)
        # Seed the local property cache with whatever was supplied at
        # creation time so reads need not round-trip to the client.
        cache = {}
        if Caption is not None:
            cache['CAPTION'] = unicode(Caption)
        if Hint is not None:
            cache['HINT'] = unicode(Hint)
        if Enabled is not None:
            cache['ENABLED'] = cndexp(Enabled, u'TRUE', u'FALSE')
        self._CacheDict = cache
    def __repr__(self):
        inner = Cached.__repr__(self)[1:-1]
        return '<%s with Id=%s>' % (inner, repr(self.Id))
    def _Property(self, PropName, Set=None):
        # No new value: serve the property from the cache. Otherwise push
        # it to the client and refresh the cached copy.
        if Set is None:
            return self._CacheDict[PropName]
        self._Skype._Property('MENU_ITEM', self._Id, PropName, Set)
        self._CacheDict[PropName] = unicode(Set)
    def Delete(self):
        '''Removes this menu item from the "Do More" menus.
        '''
        self._Skype._DoCommand('DELETE MENU_ITEM %s' % self._Id)
    def _GetCaption(self):
        return self._Property('CAPTION')
    def _SetCaption(self, value):
        self._Property('CAPTION', value)
    Caption = property(_GetCaption, _SetCaption,
    doc='''Menu item caption text.
    @type: unicode
    ''')
    def _GetEnabled(self):
        return self._Property('ENABLED') == 'TRUE'
    def _SetEnabled(self, value):
        self._Property('ENABLED', cndexp(value, 'TRUE', 'FALSE'))
    Enabled = property(_GetEnabled, _SetEnabled,
    doc='''Defines whether the menu item is enabled when a user launches Skype. If no value is defined,
    the menu item will be enabled.
    @type: bool
    ''')
    def _GetHint(self):
        return self._Property('HINT')
    def _SetHint(self, value):
        self._Property('HINT', value)
    Hint = property(_GetHint, _SetHint,
    doc='''Menu item hint text.
    @type: unicode
    ''')
    def _GetId(self):
        return self._Id
    Id = property(_GetId,
    doc='''Unique menu item Id.
    @type: unicode
    ''')
| |
"""Tests for classes defining properties of ground domains, e.g. ZZ, QQ, ZZ[x] ... """
from sympy import S, sqrt, sin, oo, Poly, Float
from sympy.abc import x, y, z
from sympy.polys.domains import ZZ, QQ, RR, CC, FF, GF, EX
from sympy.polys.domains.realfield import RealField
from sympy.polys.rings import ring
from sympy.polys.fields import field
from sympy.polys.polyerrors import (
UnificationFailed,
GeneratorsNeeded,
GeneratorsError,
CoercionFailed,
NotInvertible,
DomainError)
from sympy.utilities.pytest import raises
# QQ(sqrt(2), sqrt(3)): the algebraic extension field shared by the tests below.
ALG = QQ.algebraic_field(sqrt(2), sqrt(3))
def unify(K0, K1):
    """Shorthand for ``K0.unify(K1)`` used throughout these tests."""
    return K0.unify(K1)
def test_Domain_unify():
    """Unification of the basic ground domains.

    Expected results follow the tower GF(3) < ZZ < QQ < ALG < RR < CC < EX,
    with polynomial rings and fraction fields absorbing scalar domains.
    """
    F3 = GF(3)
    # GF(3) against every other domain.
    assert unify(F3, F3) == F3
    assert unify(F3, ZZ) == ZZ
    assert unify(F3, QQ) == QQ
    assert unify(F3, ALG) == ALG
    assert unify(F3, RR) == RR
    assert unify(F3, CC) == CC
    assert unify(F3, ZZ[x]) == ZZ[x]
    assert unify(F3, ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(F3, EX) == EX
    # ZZ against every other domain.
    assert unify(ZZ, F3) == ZZ
    assert unify(ZZ, ZZ) == ZZ
    assert unify(ZZ, QQ) == QQ
    assert unify(ZZ, ALG) == ALG
    assert unify(ZZ, RR) == RR
    assert unify(ZZ, CC) == CC
    assert unify(ZZ, ZZ[x]) == ZZ[x]
    assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ, EX) == EX
    # QQ against every other domain.
    assert unify(QQ, F3) == QQ
    assert unify(QQ, ZZ) == QQ
    assert unify(QQ, QQ) == QQ
    assert unify(QQ, ALG) == ALG
    assert unify(QQ, RR) == RR
    assert unify(QQ, CC) == CC
    assert unify(QQ, ZZ[x]) == QQ[x]
    assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ, EX) == EX
    # RR against every other domain.
    assert unify(RR, F3) == RR
    assert unify(RR, ZZ) == RR
    assert unify(RR, QQ) == RR
    assert unify(RR, ALG) == RR
    assert unify(RR, RR) == RR
    assert unify(RR, CC) == CC
    assert unify(RR, ZZ[x]) == RR[x]
    assert unify(RR, ZZ.frac_field(x)) == RR.frac_field(x)
    assert unify(RR, EX) == EX
    # Mixed generators: the unified domain collects both x and y.
    assert RR[x].unify(ZZ.frac_field(y)) == RR.frac_field(x, y)
    # CC against every other domain.
    assert unify(CC, F3) == CC
    assert unify(CC, ZZ) == CC
    assert unify(CC, QQ) == CC
    assert unify(CC, ALG) == CC
    assert unify(CC, RR) == CC
    assert unify(CC, CC) == CC
    assert unify(CC, ZZ[x]) == CC[x]
    assert unify(CC, ZZ.frac_field(x)) == CC.frac_field(x)
    assert unify(CC, EX) == EX
    # ZZ[x] against every other domain.
    assert unify(ZZ[x], F3) == ZZ[x]
    assert unify(ZZ[x], ZZ) == ZZ[x]
    assert unify(ZZ[x], QQ) == QQ[x]
    assert unify(ZZ[x], ALG) == ALG[x]
    assert unify(ZZ[x], RR) == RR[x]
    assert unify(ZZ[x], CC) == CC[x]
    assert unify(ZZ[x], ZZ[x]) == ZZ[x]
    assert unify(ZZ[x], ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ[x], EX) == EX
    # ZZ(x) against every other domain.
    assert unify(ZZ.frac_field(x), F3) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x), ALG) == ALG.frac_field(x)
    assert unify(ZZ.frac_field(x), RR) == RR.frac_field(x)
    assert unify(ZZ.frac_field(x), CC) == CC.frac_field(x)
    assert unify(ZZ.frac_field(x), ZZ[x]) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), EX) == EX
    # EX absorbs everything.
    assert unify(EX, F3) == EX
    assert unify(EX, ZZ) == EX
    assert unify(EX, QQ) == EX
    assert unify(EX, ALG) == EX
    assert unify(EX, RR) == EX
    assert unify(EX, CC) == EX
    assert unify(EX, ZZ[x]) == EX
    assert unify(EX, ZZ.frac_field(x)) == EX
    assert unify(EX, EX) == EX
def test_Domain_unify_composite():
    """Unification of composite domains (poly rings / fraction fields).

    Ground domains are unified, generator sets are merged, and a fraction
    field always wins over a polynomial ring. Note the ground-domain
    simplification: unifying a poly ring with a fraction field keeps ZZ
    as ground unless both sides already have QQ ground.
    """
    # Poly ring (one generator) against scalar domains, both orders.
    assert unify(ZZ.poly_ring(x), ZZ) == ZZ.poly_ring(x)
    assert unify(ZZ.poly_ring(x), QQ) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), ZZ) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), QQ) == QQ.poly_ring(x)
    assert unify(ZZ, ZZ.poly_ring(x)) == ZZ.poly_ring(x)
    assert unify(QQ, ZZ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(ZZ, QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(QQ, QQ.poly_ring(x)) == QQ.poly_ring(x)
    # Poly ring (two generators) against scalar domains.
    assert unify(ZZ.poly_ring(x, y), ZZ) == ZZ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), ZZ) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
    assert unify(ZZ, ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
    assert unify(QQ, ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(ZZ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(QQ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    # Fraction field (one generator) against scalar domains.
    assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), ZZ) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), QQ) == QQ.frac_field(x)
    assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ, QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ, QQ.frac_field(x)) == QQ.frac_field(x)
    # Fraction field (two generators) against scalar domains.
    assert unify(ZZ.frac_field(x, y), ZZ) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), ZZ) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
    assert unify(ZZ, ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ, ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(QQ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    # Poly ring against poly ring: ground domains unify, generators merge.
    assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x)) == ZZ.poly_ring(x)
    assert unify(ZZ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), ZZ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x)) == ZZ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x), ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x, z)) == ZZ.poly_ring(x, y, z)
    assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
    assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
    assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
    # Fraction field against fraction field.
    assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), ZZ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), ZZ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x), ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(ZZ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), ZZ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    # Poly ring against fraction field: the result is a fraction field,
    # with the ground domain simplified to ZZ unless both sides are QQ.
    assert unify(ZZ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ.poly_ring(x), QQ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(QQ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(QQ.poly_ring(x), QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x), QQ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.poly_ring(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    # Fraction field against poly ring: same rules, mirrored.
    assert unify(ZZ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ.poly_ring(x)) == ZZ.frac_field(x)
    assert unify(QQ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
    assert unify(QQ.frac_field(x), QQ.poly_ring(x)) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), QQ.poly_ring(x)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), QQ.poly_ring(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x), QQ.poly_ring(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), QQ.poly_ring(x, z)) == QQ.frac_field(x, y, z)
def test_Domain_unify_algebraic():
    """Unifying algebraic extension fields merges their generators."""
    A5 = QQ.algebraic_field(sqrt(5))
    A7 = QQ.algebraic_field(sqrt(7))
    A57 = QQ.algebraic_field(sqrt(5), sqrt(7))
    assert A5.unify(A7) == A57
    # An extension field absorbs its own polynomial ring / fraction field.
    assert A5.unify(A5[x, y]) == A5[x, y]
    assert A5[x, y].unify(A5) == A5[x, y]
    assert A5.unify(A5.frac_field(x, y)) == A5.frac_field(x, y)
    assert A5.frac_field(x, y).unify(A5) == A5.frac_field(x, y)
    # Distinct extensions also merge inside composite domains.
    assert A5.unify(A7[x, y]) == A57[x, y]
    assert A5[x, y].unify(A7) == A57[x, y]
    assert A5.unify(A7.frac_field(x, y)) == A57.frac_field(x, y)
    assert A5.frac_field(x, y).unify(A7) == A57.frac_field(x, y)
def test_Domain_unify_with_symbols():
    """unify_with_symbols fails when the requested symbols collide with
    generators of either domain (here y appears on both sides)."""
    raises(UnificationFailed, lambda: ZZ.unify_with_symbols(ZZ[x, y], (y, z)))
    raises(UnificationFailed, lambda: ZZ[x, y].unify_with_symbols(ZZ, (y, z)))
def test_Domain__contains__():
    """Membership (``in``) for representative elements across all domains."""
    # Zero belongs everywhere.
    assert (0 in EX) is True
    assert (0 in ZZ) is True
    assert (0 in QQ) is True
    assert (0 in RR) is True
    assert (0 in CC) is True
    assert (0 in ALG) is True
    assert (0 in ZZ[x, y]) is True
    assert (0 in QQ[x, y]) is True
    assert (0 in RR[x, y]) is True
    # Negative integers belong everywhere.
    assert (-7 in EX) is True
    assert (-7 in ZZ) is True
    assert (-7 in QQ) is True
    assert (-7 in RR) is True
    assert (-7 in CC) is True
    assert (-7 in ALG) is True
    assert (-7 in ZZ[x, y]) is True
    assert (-7 in QQ[x, y]) is True
    assert (-7 in RR[x, y]) is True
    # Positive integers belong everywhere.
    assert (17 in EX) is True
    assert (17 in ZZ) is True
    assert (17 in QQ) is True
    assert (17 in RR) is True
    assert (17 in CC) is True
    assert (17 in ALG) is True
    assert (17 in ZZ[x, y]) is True
    assert (17 in QQ[x, y]) is True
    assert (17 in RR[x, y]) is True
    # Proper rationals are excluded from ZZ and ZZ[x, y] only.
    assert (-S(1)/7 in EX) is True
    assert (-S(1)/7 in ZZ) is False
    assert (-S(1)/7 in QQ) is True
    assert (-S(1)/7 in RR) is True
    assert (-S(1)/7 in CC) is True
    assert (-S(1)/7 in ALG) is True
    assert (-S(1)/7 in ZZ[x, y]) is False
    assert (-S(1)/7 in QQ[x, y]) is True
    assert (-S(1)/7 in RR[x, y]) is True
    assert (S(3)/5 in EX) is True
    assert (S(3)/5 in ZZ) is False
    assert (S(3)/5 in QQ) is True
    assert (S(3)/5 in RR) is True
    assert (S(3)/5 in CC) is True
    assert (S(3)/5 in ALG) is True
    assert (S(3)/5 in ZZ[x, y]) is False
    assert (S(3)/5 in QQ[x, y]) is True
    assert (S(3)/5 in RR[x, y]) is True
    # Integral floats count as integers.
    assert (3.0 in EX) is True
    assert (3.0 in ZZ) is True
    assert (3.0 in QQ) is True
    assert (3.0 in RR) is True
    assert (3.0 in CC) is True
    assert (3.0 in ALG) is True
    assert (3.0 in ZZ[x, y]) is True
    assert (3.0 in QQ[x, y]) is True
    assert (3.0 in RR[x, y]) is True
    # Non-integral floats behave like rationals.
    assert (3.14 in EX) is True
    assert (3.14 in ZZ) is False
    assert (3.14 in QQ) is True
    assert (3.14 in RR) is True
    assert (3.14 in CC) is True
    assert (3.14 in ALG) is True
    assert (3.14 in ZZ[x, y]) is False
    assert (3.14 in QQ[x, y]) is True
    assert (3.14 in RR[x, y]) is True
    # Infinities live only in EX and floating-point domains.
    assert (oo in EX) is True
    assert (oo in ZZ) is False
    assert (oo in QQ) is False
    assert (oo in RR) is True
    assert (oo in CC) is True
    assert (oo in ALG) is False
    assert (oo in ZZ[x, y]) is False
    assert (oo in QQ[x, y]) is False
    assert (oo in RR[x, y]) is True
    assert (-oo in EX) is True
    assert (-oo in ZZ) is False
    assert (-oo in QQ) is False
    assert (-oo in RR) is True
    assert (-oo in CC) is True
    assert (-oo in ALG) is False
    assert (-oo in ZZ[x, y]) is False
    assert (-oo in QQ[x, y]) is False
    assert (-oo in RR[x, y]) is True
    # sqrt(7) is not in ALG because ALG only contains sqrt(2), sqrt(3).
    assert (sqrt(7) in EX) is True
    assert (sqrt(7) in ZZ) is False
    assert (sqrt(7) in QQ) is False
    assert (sqrt(7) in RR) is True
    assert (sqrt(7) in CC) is True
    assert (sqrt(7) in ALG) is False
    assert (sqrt(7) in ZZ[x, y]) is False
    assert (sqrt(7) in QQ[x, y]) is False
    assert (sqrt(7) in RR[x, y]) is True
    # 2*sqrt(3) + 1 lies in ALG = QQ(sqrt(2), sqrt(3)).
    assert (2*sqrt(3) + 1 in EX) is True
    assert (2*sqrt(3) + 1 in ZZ) is False
    assert (2*sqrt(3) + 1 in QQ) is False
    assert (2*sqrt(3) + 1 in RR) is True
    assert (2*sqrt(3) + 1 in CC) is True
    assert (2*sqrt(3) + 1 in ALG) is True
    assert (2*sqrt(3) + 1 in ZZ[x, y]) is False
    assert (2*sqrt(3) + 1 in QQ[x, y]) is False
    assert (2*sqrt(3) + 1 in RR[x, y]) is True
    # Transcendental numbers are only in EX and floating-point domains.
    assert (sin(1) in EX) is True
    assert (sin(1) in ZZ) is False
    assert (sin(1) in QQ) is False
    assert (sin(1) in RR) is True
    assert (sin(1) in CC) is True
    assert (sin(1) in ALG) is False
    assert (sin(1) in ZZ[x, y]) is False
    assert (sin(1) in QQ[x, y]) is False
    assert (sin(1) in RR[x, y]) is True
    # Polynomials need a composite domain containing all their generators.
    assert (x**2 + 1 in EX) is True
    assert (x**2 + 1 in ZZ) is False
    assert (x**2 + 1 in QQ) is False
    assert (x**2 + 1 in RR) is False
    assert (x**2 + 1 in CC) is False
    assert (x**2 + 1 in ALG) is False
    assert (x**2 + 1 in ZZ[x]) is True
    assert (x**2 + 1 in QQ[x]) is True
    assert (x**2 + 1 in RR[x]) is True
    assert (x**2 + 1 in ZZ[x, y]) is True
    assert (x**2 + 1 in QQ[x, y]) is True
    assert (x**2 + 1 in RR[x, y]) is True
    assert (x**2 + y**2 in EX) is True
    assert (x**2 + y**2 in ZZ) is False
    assert (x**2 + y**2 in QQ) is False
    assert (x**2 + y**2 in RR) is False
    assert (x**2 + y**2 in CC) is False
    assert (x**2 + y**2 in ALG) is False
    assert (x**2 + y**2 in ZZ[x]) is False
    assert (x**2 + y**2 in QQ[x]) is False
    assert (x**2 + y**2 in RR[x]) is False
    assert (x**2 + y**2 in ZZ[x, y]) is True
    assert (x**2 + y**2 in QQ[x, y]) is True
    assert (x**2 + y**2 in RR[x, y]) is True
    # A rational function is not a member of a polynomial ring.
    assert (S(3)/2*x/(y + 1) - z in QQ[x, y, z]) is False
def test_Domain_get_ring():
    """has_assoc_Ring / get_ring: which domains expose an associated ring
    and what it is (e.g. ZZ for QQ, ZZ[x] for ZZ(x))."""
    assert ZZ.has_assoc_Ring is True
    assert QQ.has_assoc_Ring is True
    assert ZZ[x].has_assoc_Ring is True
    assert QQ[x].has_assoc_Ring is True
    assert ZZ[x, y].has_assoc_Ring is True
    assert QQ[x, y].has_assoc_Ring is True
    assert ZZ.frac_field(x).has_assoc_Ring is True
    assert QQ.frac_field(x).has_assoc_Ring is True
    assert ZZ.frac_field(x, y).has_assoc_Ring is True
    assert QQ.frac_field(x, y).has_assoc_Ring is True
    assert EX.has_assoc_Ring is False
    assert RR.has_assoc_Ring is False
    assert ALG.has_assoc_Ring is False
    assert ZZ.get_ring() == ZZ
    # QQ's ring of integers is ZZ.
    assert QQ.get_ring() == ZZ
    assert ZZ[x].get_ring() == ZZ[x]
    assert QQ[x].get_ring() == QQ[x]
    assert ZZ[x, y].get_ring() == ZZ[x, y]
    assert QQ[x, y].get_ring() == QQ[x, y]
    # A fraction field's associated ring is the polynomial ring.
    assert ZZ.frac_field(x).get_ring() == ZZ[x]
    assert QQ.frac_field(x).get_ring() == QQ[x]
    assert ZZ.frac_field(x, y).get_ring() == ZZ[x, y]
    assert QQ.frac_field(x, y).get_ring() == QQ[x, y]
    assert EX.get_ring() == EX
    assert RR.get_ring() == RR
    # XXX: This should also be like RR
    raises(DomainError, lambda: ALG.get_ring())
def test_Domain_get_field():
    """has_assoc_Field / get_field: every listed domain has an associated
    field (QQ for ZZ, the fraction field for a polynomial ring)."""
    assert EX.has_assoc_Field is True
    assert ZZ.has_assoc_Field is True
    assert QQ.has_assoc_Field is True
    assert RR.has_assoc_Field is True
    assert ALG.has_assoc_Field is True
    assert ZZ[x].has_assoc_Field is True
    assert QQ[x].has_assoc_Field is True
    assert ZZ[x, y].has_assoc_Field is True
    assert QQ[x, y].has_assoc_Field is True
    assert EX.get_field() == EX
    assert ZZ.get_field() == QQ
    assert QQ.get_field() == QQ
    assert RR.get_field() == RR
    assert ALG.get_field() == ALG
    assert ZZ[x].get_field() == ZZ.frac_field(x)
    assert QQ[x].get_field() == QQ.frac_field(x)
    assert ZZ[x, y].get_field() == ZZ.frac_field(x, y)
    assert QQ[x, y].get_field() == QQ.frac_field(x, y)
def test_Domain_get_exact():
    """get_exact is the identity for exact domains; the inexact RR maps
    to QQ."""
    assert EX.get_exact() == EX
    assert ZZ.get_exact() == ZZ
    assert QQ.get_exact() == QQ
    assert RR.get_exact() == QQ
    assert ALG.get_exact() == ALG
    assert ZZ[x].get_exact() == ZZ[x]
    assert QQ[x].get_exact() == QQ[x]
    assert ZZ[x, y].get_exact() == ZZ[x, y]
    assert QQ[x, y].get_exact() == QQ[x, y]
    assert ZZ.frac_field(x).get_exact() == ZZ.frac_field(x)
    assert QQ.frac_field(x).get_exact() == QQ.frac_field(x)
    assert ZZ.frac_field(x, y).get_exact() == ZZ.frac_field(x, y)
    assert QQ.frac_field(x, y).get_exact() == QQ.frac_field(x, y)
def test_Domain_convert():
    """convert() coerces floats and foreign ring elements into a domain."""
    # The binary double nearest to 10e-52, expressed as an exact rational.
    assert QQ.convert(10e-52) == QQ(1684996666696915, 1684996666696914987166688442938726917102321526408785780068975640576)
    R, x = ring("x", ZZ)
    # x - x is the ring's zero element; it converts to ZZ's zero both with
    # and without the source domain given explicitly.
    assert ZZ.convert(x - x) == 0
    assert ZZ.convert(x - x, R.to_domain()) == 0
def test_PolynomialRing__init():
    """A polynomial ring cannot be built without at least one generator."""
    raises(GeneratorsNeeded, lambda: ZZ.poly_ring())
def test_FractionField__init():
    """A fraction field cannot be built without at least one generator."""
    raises(GeneratorsNeeded, lambda: ZZ.frac_field())
def test_inject():
    """inject() appends new generators to a domain; injecting an existing
    generator is an error."""
    assert ZZ.inject(x, y, z) == ZZ[x, y, z]
    assert ZZ[x].inject(y, z) == ZZ[x, y, z]
    assert ZZ.frac_field(x).inject(y, z) == ZZ.frac_field(x, y, z)
    raises(GeneratorsError, lambda: ZZ[x].inject(x))
def test_Domain_map():
    """Domain.map converts flat and nested lists to domain elements."""
    flat = ZZ.map([1, 2, 3, 4])
    assert all(ZZ.of_type(elem) for elem in flat)
    nested = ZZ.map([[1, 2, 3, 4]])
    assert len(nested) == 1 and all(ZZ.of_type(elem) for elem in nested[0])
def test_Domain___eq__():
    """Composite domains compare equal iff their ground domains (and
    generators) match."""
    assert (ZZ[x, y] == ZZ[x, y]) is True
    assert (QQ[x, y] == QQ[x, y]) is True
    assert (ZZ[x, y] == QQ[x, y]) is False
    assert (QQ[x, y] == ZZ[x, y]) is False
    assert (ZZ.frac_field(x, y) == ZZ.frac_field(x, y)) is True
    assert (QQ.frac_field(x, y) == QQ.frac_field(x, y)) is True
    assert (ZZ.frac_field(x, y) == QQ.frac_field(x, y)) is False
    assert (QQ.frac_field(x, y) == ZZ.frac_field(x, y)) is False
def test_Domain__algebraic_field():
    """algebraic_field builds an extension over QQ with the correct
    minimal polynomial, even when called on ZZ or on another extension."""
    alg = ZZ.algebraic_field(sqrt(2))
    assert alg.ext.minpoly == Poly(x**2 - 2)
    assert alg.dom == QQ
    alg = QQ.algebraic_field(sqrt(2))
    assert alg.ext.minpoly == Poly(x**2 - 2)
    assert alg.dom == QQ
    # Extending QQ(sqrt(2)) by sqrt(3) gives the degree-4 field whose
    # primitive element has minpoly x**4 - 10*x**2 + 1.
    alg = alg.algebraic_field(sqrt(3))
    assert alg.ext.minpoly == Poly(x**4 - 10*x**2 + 1)
    assert alg.dom == QQ
def test_PolynomialRing_from_FractionField():
    """Converting a fraction-field element to a polynomial ring works only
    when the denominator is a ground number; otherwise None is returned."""
    F, x,y = field("x,y", ZZ)
    R, X,Y = ring("x,y", ZZ)
    f = (x**2 + y**2)/(x + 1)
    g = (x**2 + y**2)/4
    h = x**2 + y**2
    # f has a polynomial denominator -> not convertible.
    assert R.to_domain().from_FractionField(f, F.to_domain()) is None
    assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
    assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2
    # Same checks over QQ.
    F, x,y = field("x,y", QQ)
    R, X,Y = ring("x,y", QQ)
    f = (x**2 + y**2)/(x + 1)
    g = (x**2 + y**2)/4
    h = x**2 + y**2
    assert R.to_domain().from_FractionField(f, F.to_domain()) is None
    assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
    assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2
def test_FractionField_from_PolynomialRing():
    """Converting QQ polynomials to a ZZ fraction field clears
    denominators into a common one."""
    R, x,y = ring("x,y", QQ)
    F, X,Y = field("x,y", ZZ)
    f = 3*x**2 + 5*y**2
    g = x**2/3 + y**2/5
    assert F.to_domain().from_PolynomialRing(f, R.to_domain()) == 3*X**2 + 5*Y**2
    # Rational coefficients become a single denominator (lcm(3, 5) = 15).
    assert F.to_domain().from_PolynomialRing(g, R.to_domain()) == (5*X**2 + 3*Y**2)/15
def test_FF_of_type():
    """of_type accepts an element of the same prime field only."""
    assert FF(3).of_type(FF(3)(1)) is True
    assert FF(5).of_type(FF(5)(3)) is True
    # An element of GF(7) is not of GF(5)'s type.
    assert FF(5).of_type(FF(7)(3)) is False
def test___eq__():
    """Composite domains over different ground domains never compare equal."""
    assert not QQ[x] == ZZ[x]
    assert not QQ.frac_field(x) == ZZ.frac_field(x)
def test_RealField_from_sympy():
    """RR.convert handles exact numbers, Floats, numerically evaluable
    expressions and infinities; a bare symbol raises CoercionFailed."""
    assert RR.convert(S(0)) == RR.dtype(0)
    assert RR.convert(S(0.0)) == RR.dtype(0.0)
    assert RR.convert(S(1)) == RR.dtype(1)
    assert RR.convert(S(1.0)) == RR.dtype(1.0)
    # Non-rational but evaluable expressions go through evalf().
    assert RR.convert(sin(1)) == RR.dtype(sin(1).evalf())
    assert RR.convert(oo) == RR("+inf")
    assert RR.convert(-oo) == RR("-inf")
    raises(CoercionFailed, lambda: RR.convert(x))
def test_ModularInteger():
    """Construction, arithmetic, comparison and error behaviour of GF(p)
    elements."""
    F3 = FF(3)
    # Construction from ints reduces modulo 3.
    a = F3(0)
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(1)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(3)
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(4)
    assert isinstance(a, F3.dtype) and a == 1
    # Construction from existing modular integers is idempotent.
    a = F3(F3(0))
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(F3(1))
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(F3(2))
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(F3(3))
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(F3(4))
    assert isinstance(a, F3.dtype) and a == 1
    # Negation.
    a = -F3(1)
    assert isinstance(a, F3.dtype) and a == 2
    a = -F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # Addition: int+mod, mod+int, mod+mod.
    a = 2 + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # Subtraction.
    a = 3 - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # Multiplication.
    a = 2*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # Division (multiplication by the modular inverse).
    a = 2/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # Modulo.
    a = 1 % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # Exponentiation.
    a = F3(2)**0
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)**1
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(2)**2
    assert isinstance(a, F3.dtype) and a == 1
    # Truthiness follows the reduced residue, not the constructor arg.
    assert bool(F3(3)) is False
    assert bool(F3(4)) is True
    F5 = FF(5)
    # Inverses modulo 5 via the -1 power.
    a = F5(1)**(-1)
    assert isinstance(a, F5.dtype) and a == 1
    a = F5(2)**(-1)
    assert isinstance(a, F5.dtype) and a == 3
    a = F5(3)**(-1)
    assert isinstance(a, F5.dtype) and a == 2
    a = F5(4)**(-1)
    assert isinstance(a, F5.dtype) and a == 4
    # Comparisons against modular integers (reduced values are compared).
    assert (F5(1) < F5(2)) is True
    assert (F5(1) <= F5(2)) is True
    assert (F5(1) > F5(2)) is False
    assert (F5(1) >= F5(2)) is False
    assert (F5(3) < F5(2)) is False
    assert (F5(3) <= F5(2)) is False
    assert (F5(3) > F5(2)) is True
    assert (F5(3) >= F5(2)) is True
    assert (F5(1) < F5(7)) is True
    assert (F5(1) <= F5(7)) is True
    assert (F5(1) > F5(7)) is False
    assert (F5(1) >= F5(7)) is False
    assert (F5(3) < F5(7)) is False
    assert (F5(3) <= F5(7)) is False
    assert (F5(3) > F5(7)) is True
    assert (F5(3) >= F5(7)) is True
    # Comparisons against plain ints (also reduced modulo 5).
    assert (F5(1) < 2) is True
    assert (F5(1) <= 2) is True
    assert (F5(1) > 2) is False
    assert (F5(1) >= 2) is False
    assert (F5(3) < 2) is False
    assert (F5(3) <= 2) is False
    assert (F5(3) > 2) is True
    assert (F5(3) >= 2) is True
    assert (F5(1) < 7) is True
    assert (F5(1) <= 7) is True
    assert (F5(1) > 7) is False
    assert (F5(1) >= 7) is False
    assert (F5(3) < 7) is False
    assert (F5(3) <= 7) is False
    assert (F5(3) > 7) is True
    assert (F5(3) >= 7) is True
    # Error cases: zero has no inverse mod p; invalid moduli are rejected.
    raises(NotInvertible, lambda: F5(0)**(-1))
    raises(NotInvertible, lambda: F5(5)**(-1))
    raises(ValueError, lambda: FF(0))
    raises(ValueError, lambda: FF(2.1))
def test_QQ_int():
    """int() on a rational truncates the quotient towards zero."""
    assert int(QQ(2**2000, 3**1250)) == 455431
    assert int(QQ(2**100, 3)) == 422550200076076467165567735125
def test_RR_double():
    """Magnitudes well below 1 still convert to nonzero RR elements."""
    for value in (3.14, 1e-13, 1e-14, 1e-15, 1e-20, 1e-40):
        assert RR(value) > 1e-50
def test_RR_Float():
    """RR conversion honours the precision of the source Float."""
    coarse = Float("1.01")
    fine = Float("1.0000000000000000000001")
    assert coarse._prec == 53
    assert fine._prec == 80
    # The default RR carries 53 bits, so the tiny excess in ``fine`` is lost.
    assert RR(coarse) - 1 > 1e-50
    assert RR(fine) - 1 < 1e-50  # RR's precision is lower than fine's
    # A real field widened to ``fine``'s precision preserves both values.
    wide = RealField(prec=fine._prec)
    assert wide(coarse) - 1 > 1e-50
    assert wide(fine) - 1 > 1e-50  # precision now matches fine's
def test_CC_double():
    """Small magnitudes survive conversion to CC in both components."""
    for value in (3.14, 1e-13, 1e-14, 1e-15, 1e-20, 1e-40):
        assert CC(value).real > 1e-50
        assert CC(value * 1j).imag > 1e-50
| |
import datetime
import hashlib
import itertools
import os
import json
import random
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
from django.conf import settings
import pydenticon
import requests
import mkt
from mkt.constants.applications import DEVICE_CHOICES_IDS
from mkt.constants.base import STATUS_CHOICES_API_LOOKUP
from mkt.constants.categories import CATEGORY_CHOICES
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller)
from mkt.developers.tasks import resize_preview, save_icon
from mkt.prices.models import AddonPremium, Price
from mkt.ratings.models import Review
from mkt.ratings.tasks import addon_review_aggregates
from mkt.site.utils import app_factory, slugify, version_factory
from mkt.users.models import UserProfile
from mkt.users.utils import create_user
from mkt.webapps.models import AddonUser, AppManifest, Preview
# Word pools combined below into human-readable fake app names.
adjectives = ['Exquisite', 'Delicious', 'Elegant', 'Swanky', 'Spicy',
              'Food Truck', 'Artisanal', 'Tasty']
nouns = ['Sandwich', 'Pizza', 'Curry', 'Pierogi', 'Sushi', 'Salad', 'Stew',
         'Pasta', 'Barbeque', 'Bacon', 'Pancake', 'Waffle', 'Chocolate',
         'Gyro', 'Cookie', 'Burrito', 'Pie']
# Every (adjective, noun) pair except the last; each entry is a tuple of
# words that gets joined (and possibly suffixed) in generate_app_data().
fake_app_names = list(itertools.product(adjectives, nouns))[:-1]
def generate_app_data(num):
    """Yield ``num`` (app name, category slug) pairs.

    Names come from ``fake_app_names``; once the pool is exhausted a
    numeric suffix keeps them unique.  The first apps are spread evenly
    over the categories (aiming for at least 3 per category when there
    are enough apps); any remainder is assigned round-robin.
    """
    repeats, tailsize = divmod(num, len(fake_app_names))
    if repeats:
        apps = fake_app_names[:]
        # Suffix each full extra pass with "1", "2", ..., and the final
        # partial pass with the next number.  BUG FIX: the original read
        # the loop variable after ``for i in range(repeats - 1)``, which
        # raised NameError whenever ``repeats == 1`` (the loop body never
        # ran, so ``i`` was unbound).
        for extra in range(1, repeats):
            for a in fake_app_names:
                apps.append(a + (str(extra),))
        for a in fake_app_names[:tailsize]:
            apps.append(a + (str(repeats),))
    else:
        apps = random.sample(fake_app_names, tailsize)
    # Let's have at least 3 apps in each category, if we can.
    if num < (len(CATEGORY_CHOICES) * 3):
        num_cats = max(num // 3, 1)
    else:
        num_cats = len(CATEGORY_CHOICES)
    catsize = num // num_cats
    ia = iter(apps)
    for cat_slug, cat_name in CATEGORY_CHOICES[:num_cats]:
        for n in range(catsize):
            appname = ' '.join(next(ia))
            yield (appname, cat_slug)
    # Whatever is left after the even split goes round-robin over all
    # categories.
    for i, app in enumerate(ia):
        appname = ' '.join(app)
        cat_slug, cat_name = CATEGORY_CHOICES[i % len(CATEGORY_CHOICES)]
        yield (appname, cat_slug)
# Fixed identicon palette so generated icons/previews are deterministic
# for a given app name.
foreground = ["rgb(45,79,255)",
              "rgb(254,180,44)",
              "rgb(226,121,234)",
              "rgb(30,179,253)",
              "rgb(232,77,65)",
              "rgb(49,203,115)",
              "rgb(141,69,170)"]
def generate_icon(app):
    """Render a deterministic 128x128 identicon PNG and save it as the
    icon for ``app``."""
    generator = pydenticon.Generator(8, 8, foreground=foreground)
    seed = unicode(app.name).encode('utf8')
    save_icon(app, generator.generate(seed, 128, 128, output_format="png"))
def generate_previews(app, n=1):
    """Attach ``n`` identicon-based preview screenshots to ``app``."""
    generator = pydenticon.Generator(8, 12, foreground=foreground,
                                     digest=hashlib.sha512)
    for position in range(n):
        # Vary the seed per preview so each screenshot is distinct.
        seed = unicode(app.name).encode('utf8') + chr(position)
        image = generator.generate(seed, 320, 480, output_format="png")
        preview = Preview.objects.create(addon=app, filetype="image/png",
                                         thumbtype="image/png",
                                         caption="screenshot " + str(position),
                                         position=position)
        scratch = tempfile.NamedTemporaryFile(suffix='.png')
        scratch.write(image)
        scratch.flush()
        resize_preview(scratch.name, preview)
def generate_localized_names(name, n):
    """Return a {locale: localized name} dict for ``name``.

    Includes up to ``n`` translated-prefix locales plus 'en-us'.
    """
    prefixes = [('fr', u'fran\xe7ais'),
                ('es', u'espa\xf1ol'),
                ('ru', u'\u0420\u0443\u0441\u0441\u043a\u0438\u0439'),
                ('ja', u'\u65e5\u672c\u8a9e'),
                ('pt', u'portugu\xeas')]
    names = {}
    for lang, prefix in prefixes[:n]:
        names[lang] = u'%s %s' % (prefix, name)
    names['en-us'] = unicode(name)
    return names
def generate_ratings(app, num):
    """Create ``num`` reviews for ``app``, each from a distinct fake user."""
    for idx in range(num):
        email = 'testuser%s@example.com' % (idx,)
        user, _created = UserProfile.objects.get_or_create(
            email=email, source=mkt.LOGIN_SOURCE_UNKNOWN,
            display_name=email)
        Review.objects.create(
            addon=app, user=user, rating=random.randrange(1, 6),
            title="Test Review " + str(idx), body="review text")
def generate_hosted_app(name, categories, developer_name,
                        privacy_policy=None, device_types=(), status=4,
                        **spec):
    """Create a reviewed hosted app plus its stored manifest.

    ``spec`` may carry 'manifest_url' (defaults to a generated
    testmanifest.com URL) and 'manifest_file' (path whose contents become
    the stored AppManifest; otherwise one is generated).
    """
    generated_url = 'http://%s.testmanifest.com/manifest.webapp' % (
        slugify(name),)
    # BUG FIX: the explicit ``privacy_policy`` keyword absorbs the caller's
    # value, so ``spec.get('privacy_policy')`` was always None and the
    # parameter was silently dropped.  Pass the parameter through instead.
    a = app_factory(categories=categories, name=name, complete=False,
                    privacy_policy=privacy_policy,
                    file_kw={'status': status},
                    rated=True, manifest_url=spec.get('manifest_url',
                                                      generated_url))
    if device_types:
        for dt in device_types:
            a.addondevicetype_set.create(device_type=DEVICE_CHOICES_IDS[dt])
    else:
        # Ensure every app supports at least one device type.
        a.addondevicetype_set.create(device_type=1)
    a.versions.latest().update(reviewed=datetime.datetime.now(),
                               _developer_name=developer_name)
    if 'manifest_file' in spec:
        # Close the manifest file handle (the original leaked it).
        with open(spec['manifest_file']) as fh:
            AppManifest.objects.create(
                version=a._latest_version, manifest=fh.read())
    else:
        generate_hosted_manifest(a)
    return a
def generate_hosted_manifest(app):
    """Store a minimal valid webapp manifest for the hosted ``app``."""
    manifest = {
        'name': unicode(app.name),
        'description': 'This app has been automatically generated',
        'version': '1.0',
        'icons': {
            '16': 'http://testmanifest.com/icon-16.png',
            '48': 'http://testmanifest.com/icon-48.png',
            '128': 'http://testmanifest.com/icon-128.png'
        },
        'installs_allowed_from': ['*'],
        'developer': {
            'name': 'Marketplace Team',
            'url': 'https://marketplace.firefox.com/credits'
        }
    }
    serialized = json.dumps(manifest)
    AppManifest.objects.create(version=app._latest_version,
                               manifest=serialized)
def generate_app_package(app, out, apptype, permissions, version='1.0',
                         num_locales=2):
    """Write a packaged-app zip to the file object ``out`` and store its
    manifest.

    ``version`` must be a Version model instance (its ``.version`` string
    is embedded in the manifest and the AppManifest row is attached to
    it) -- the string default is unusable in practice.  ``permissions``
    is an iterable of webapp permission names.
    """
    manifest = {
        'version': version.version,
        'name': unicode(app.name),
        'description': ('This packaged app has been automatically generated'
                        ' (version %s)' % (version.version,)),
        'icons': {
            '16': '/icons/16.png',
            '32': '/icons/32.png',
            '256': '/icons/256.png'
        },
        'developer': {
            'name': 'Marketplace Team',
            'url': 'https://marketplace.firefox.com/credits'
        },
        # BUG FIX: these two keys were garbled ('installs_allowed_launch'
        # and 'from_path'); the webapp manifest format uses
        # 'installs_allowed_from' and 'launch_path' (compare the hosted
        # manifest above and the index.html packaged below).
        'installs_allowed_from': ['*'],
        'launch_path': 'index.html',
        'locales': dict((lang, {
            'name': name,
            'description': 'This packaged app has been automatically generated'
        }) for lang, name in generate_localized_names(
            app.name, num_locales).items()),
        'permissions': dict(((k, {"description": k})
                             for k in permissions)),
        'default_locale': 'en',
        'orientation': 'landscape',
        'type': 'web' if apptype == 'packaged' else apptype,
        'fullscreen': 'true'
    }
    outz = ZipFile(file=out, mode='w', compression=ZIP_DEFLATED)
    try:
        for size in ('32', 'med'):
            # Close each icon handle (the original leaked them).
            icon_path = os.path.join(settings.MEDIA_ROOT,
                                     'img/app-icons/%s/generic.png' % (size,))
            with open(icon_path) as icon:
                outz.writestr('icons/%s.png' % (size,), icon.read())
        outz.writestr('script.js',
                      'document.onload=function() {alert("Hello!");};')
        outz.writestr(
            'index.html',
            '<title>Packaged app</title><script src="script.js"></script>'
            '<h1>Test packaged app</h1>')
        outz.writestr("manifest.webapp", json.dumps(manifest))
    finally:
        outz.close()
    AppManifest.objects.create(
        version=version, manifest=json.dumps(manifest))
def generate_packaged_app(name, apptype, categories, developer_name,
                          privacy_policy=None, device_types=(),
                          permissions=(), versions=None, num_locales=2,
                          package_file=None, status=4, **kw):
    """Create a packaged app with one or more versions.

    ``versions`` is a list of status names (STATUS_CHOICES_API_LOOKUP
    keys); the first entry describes the initial "1.0" version and each
    subsequent entry creates a follow-up "1.<i>" version.  Returns the
    app.
    """
    if versions is None:
        versions = [status]
    now = datetime.datetime.now()
    app = app_factory(categories=categories, name=name, complete=False,
                      rated=True, is_packaged=True,
                      privacy_policy=privacy_policy,
                      version_kw={
                          'version': '1.0',
                          'reviewed': now if status >= 4 else None},
                      file_kw={'status': status})
    if device_types:
        for dt in device_types:
            app.addondevicetype_set.create(device_type=DEVICE_CHOICES_IDS[dt])
    else:
        app.addondevicetype_set.create(device_type=1)
    f = app.latest_version.all_files[0]
    f.update(filename=f.generate_filename())
    fp = os.path.join(app.latest_version.path_prefix, f.filename)
    try:
        os.makedirs(os.path.dirname(fp))
    except OSError:
        # Directory already exists.
        pass
    if package_file:
        # NOTE(review): the supplied package file is not copied into place
        # here -- presumably the caller arranges for it to exist; confirm.
        return app
    with open(fp, 'w') as out:
        generate_app_package(app, out, apptype, permissions=permissions,
                             version=app.latest_version,
                             num_locales=num_locales)
    for i, f_status in enumerate(versions[1:], 1):
        st = STATUS_CHOICES_API_LOOKUP[f_status]
        rtime = (now + datetime.timedelta(i)) if st >= 4 else None
        v = version_factory(version="1." + str(i), addon=app,
                            reviewed=rtime, created=rtime,
                            file_kw={'status': st},
                            _developer_name=developer_name)
        # BUG FIX: the original wrote follow-up packages to ``out`` after
        # its ``with`` block had already closed the file, which raises
        # "ValueError: I/O operation on closed file".  Reopen the package
        # path for each extra version instead.
        with open(fp, 'w') as vout:
            generate_app_package(app, vout, apptype, permissions, v,
                                 num_locales=num_locales)
    app.update_version()
    return app
def get_or_create_payment_account():
    """Return a shared fake PaymentAccount owned by a fake developer."""
    email = 'fakedeveloper@example.com'
    user = UserProfile.objects.get_or_create(
        email=email,
        source=mkt.LOGIN_SOURCE_UNKNOWN,
        display_name=email)[0]
    seller = SolitudeSeller.objects.get_or_create(user=user)[0]
    account = PaymentAccount.objects.get_or_create(
        user=user,
        solitude_seller=seller,
        uri='/bango/package/123',
        name='fake data payment account',
        agreed_tos=True)[0]
    return account
def get_or_create_price(tier):
    """Return an active Price for ``tier``, creating it if necessary."""
    price, _created = Price.objects.get_or_create(price=tier, active=True)
    return price
def generate_apps(hosted=0, packaged=0, privileged=0, versions=('public',)):
    """Generate simple spec dicts for the requested app counts and create
    the apps.  Privileged apps come first, then packaged, then hosted."""
    total = hosted + packaged + privileged
    specs = []
    for i, (appname, cat_slug) in enumerate(generate_app_data(total)):
        spec = {'name': appname,
                'status': versions[0],
                'categories': [cat_slug],
                'num_ratings': 5,
                'num_previews': 2}
        if i < privileged:
            spec.update(type='privileged',
                        permissions=['camera', 'storage'],
                        versions=versions)
        elif i < (privileged + packaged):
            spec.update(type='packaged', versions=versions)
        else:
            spec.update(type='hosted')
        specs.append(spec)
    return generate_apps_from_specs(specs, None)
def generate_apps_from_specs(specs, specdir):
    """Create one app per spec dict.

    Relative file paths in the specs are resolved against ``specdir``;
    missing names/categories fall back to generated fake data.
    """
    created = []
    fallback_data = generate_app_data(len(specs))
    for spec, (appname, cat_slug) in zip(specs, fallback_data):
        if spec.get('preview_files'):
            spec['preview_files'] = [os.path.join(specdir, p)
                                     for p in spec['preview_files']]
        for key in ('package_file', 'manifest_file'):
            if spec.get(key):
                spec[key] = os.path.join(specdir, spec[key])
        spec.setdefault('name', appname)
        spec.setdefault('categories', [cat_slug])
        created.append(generate_app_from_spec(**spec))
    return created
def generate_app_from_spec(name, categories, type, status, num_previews=1,
                           num_ratings=1, num_locales=0, preview_files=(),
                           author='fakedeveloper@example.com',
                           premium_type='free', description=None, **spec):
    """Create a fully-populated fake app (hosted or packaged) from a spec.

    Remaining keyword arguments are forwarded to generate_hosted_app() /
    generate_packaged_app().  Returns the created app.
    """
    status = STATUS_CHOICES_API_LOOKUP[status]
    if type == 'hosted':
        app = generate_hosted_app(name, categories, author,
                                  status=status, **spec)
    else:
        app = generate_packaged_app(
            name, type, categories, author,
            status=status, **spec)
    generate_icon(app)
    if not preview_files:
        generate_previews(app, num_previews)
    if preview_files:
        for i, f in enumerate(preview_files):
            p = Preview.objects.create(addon=app, filetype="image/png",
                                       thumbtype="image/png",
                                       caption="screenshot " + str(i),
                                       position=i)
            resize_preview(f, p)
    generate_ratings(app, num_ratings)
    app.name = generate_localized_names(app.name, num_locales)
    if not description:
        # BUG FIX: the query string contained a literal pilcrow
        # ('...filler¶s=2'), an HTML-unescaping artifact of '&paras=2'
        # ('&para' was decoded to the ¶ entity), so the paragraph-count
        # parameter never reached the API.
        description = requests.get('http://baconipsum.com/api/'
                                   '?type=meat-and-filler&paras=2'
                                   '&start-with-lorem=1').json()[0]
    app.description = description
    app.support_email = author
    premium_type = mkt.ADDON_PREMIUM_API_LOOKUP[premium_type]
    app.premium_type = premium_type
    if premium_type != mkt.ADDON_FREE:
        acct = get_or_create_payment_account()
        AddonPaymentAccount.objects.create(addon=app, payment_account=acct,
                                           account_uri=acct.uri,
                                           product_uri=app.app_slug)
        price = get_or_create_price(spec.get('price', '0.99'))
        AddonPremium.objects.create(addon=app, price=price)
        app.solitude_public_id = 'fake'
    # Status has to be updated at the end because STATUS_DELETED apps can't
    # be saved.
    app.status = status
    app.save()
    addon_review_aggregates(app.pk)
    u = create_user(author)
    AddonUser.objects.create(user=u, addon=app)
    return app
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-21 22:17
from __future__ import unicode_literals
import autoslug.fields
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_project.mixins
import django_project.models
import smart_selects.db_fields
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``django_project`` app.

    Creates the Comment, Component, Membership, Milestone, ObjectTask,
    Priority, Project, Status, Task, TaskType and Transition models, then
    adds the remaining foreign keys and unique constraints in a second
    pass (to break circular model references).
    """

    initial = True

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Generic (content-type keyed) comments on arbitrary objects.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_pk', models.TextField(verbose_name='object ID')),
                ('comment', models.TextField(max_length=3000, verbose_name='comment')),
                ('submit_date', models.DateTimeField(auto_now_add=True, verbose_name='date/time submitted')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment_comments', to=settings.AUTH_USER_MODEL, verbose_name='author')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='content_type_set_for_comment', to='contenttypes.ContentType', verbose_name='content type')),
            ],
            options={
                'permissions': [('can_moderate', 'Can moderate comments')],
                'verbose_name': 'comment',
                'verbose_name_plural': 'comments',
                'ordering': ('-submit_date',),
            },
            bases=(django_project.mixins.CommentMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Component',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, max_length=64, populate_from='name', unique_with=('project',))),
                ('description', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'component',
                'verbose_name_plural': 'components',
                'ordering': ('name',),
            },
        ),
        # Through-model linking users to projects.
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('joined_at', models.DateTimeField(auto_now_add=True, verbose_name='joined at')),
                ('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='member')),
            ],
        ),
        migrations.CreateModel(
            name='Milestone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, max_length=64, populate_from='name', unique_with=('project',))),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='modified at')),
                # Default deadline frozen at makemigrations time (2016-05-01).
                ('deadline', models.DateField(default=datetime.date(2016, 5, 1), verbose_name='deadline')),
                ('date_completed', models.DateField(blank=True, null=True, verbose_name='date completed')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='author')),
            ],
            options={
                'verbose_name': 'milestone',
                'verbose_name_plural': 'milestones',
                'ordering': ('created_at',),
            },
        ),
        # Generic link between tasks and arbitrary objects.
        migrations.CreateModel(
            name='ObjectTask',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.TextField(verbose_name='object ID')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='content_type_set_for_objecttask', to='contenttypes.ContentType', verbose_name='content type')),
            ],
            options={
                'verbose_name': 'objecttask',
                'verbose_name_plural': 'objecttasks',
            },
        ),
        migrations.CreateModel(
            name='Priority',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='name')),
                ('description', models.TextField(blank=True, null=True, verbose_name='description')),
                ('order', models.IntegerField(verbose_name='order')),
                ('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, max_length=64, populate_from='name', unique_with=('project',))),
            ],
            options={
                'verbose_name': 'priority level',
                'verbose_name_plural': 'priority levels',
            },
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, verbose_name='name')),
                ('slug', autoslug.fields.AutoSlugField(editable=False, max_length=128, populate_from='name', unique_with=('author',))),
                ('description', models.TextField(blank=True, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('modified_at', models.DateTimeField(auto_now=True, verbose_name='modified at')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_projects', to=settings.AUTH_USER_MODEL)),
                ('members', models.ManyToManyField(through='django_project.Membership', to=settings.AUTH_USER_MODEL, verbose_name='members')),
            ],
            options={
                'permissions': (('view_project', 'Can view project'), ('admin_project', 'Can administer project'), ('can_read_repository', 'Can read repository'), ('can_write_to_repository', 'Can write to repository'), ('can_add_task', 'Can add task'), ('can_change_task', 'Can change task'), ('can_delete_task', 'Can delete task'), ('can_view_tasks', 'Can view tasks'), ('can_add_member', 'Can add member'), ('can_change_member', 'Can change member'), ('can_delete_member', 'Can delete member')),
            },
            bases=(django_project.mixins.ProjectMixin, models.Model),
        ),
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='name')),
                ('description', models.TextField(blank=True, null=True, verbose_name='description')),
                ('order', models.IntegerField(verbose_name='order')),
                ('is_resolved', models.BooleanField(default=False, verbose_name='is resolved')),
                ('is_initial', models.BooleanField(default=False, verbose_name='is initial')),
                ('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, max_length=64, populate_from='name', unique_with=('project',))),
            ],
            options={
                'verbose_name': 'status',
                'verbose_name_plural': 'statuses',
                'ordering': ['order'],
            },
        ),
        # Chained foreign keys restrict choices to the task's project.
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('summary', models.CharField(max_length=64, verbose_name='summary')),
                ('description', models.TextField(verbose_name='description')),
                ('deadline', models.DateField(blank=True, help_text='YYYY-MM-DD', null=True, verbose_name='deadline')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
                ('author', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_tasks', to=settings.AUTH_USER_MODEL, verbose_name='author')),
                ('component', smart_selects.db_fields.ChainedForeignKey(chained_field='project', chained_model_field='project', on_delete=django.db.models.deletion.CASCADE, to='django_project.Component', verbose_name='component')),
                ('milestone', smart_selects.db_fields.ChainedForeignKey(blank=True, chained_field='project', chained_model_field='project', null=True, on_delete=django.db.models.deletion.CASCADE, to='django_project.Milestone', verbose_name='milestone')),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owned_tasks', to=settings.AUTH_USER_MODEL, verbose_name='owner')),
                ('priority', smart_selects.db_fields.ChainedForeignKey(chained_field='project', chained_model_field='project', on_delete=django.db.models.deletion.CASCADE, to='django_project.Priority', verbose_name='priority')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project', verbose_name='project')),
                ('status', smart_selects.db_fields.ChainedForeignKey(chained_field='project', chained_model_field='project', on_delete=django.db.models.deletion.CASCADE, to='django_project.Status', verbose_name='status')),
            ],
            bases=(django_project.mixins.TaskMixin, models.Model),
        ),
        migrations.CreateModel(
            name='TaskType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, verbose_name='name')),
                ('description', models.TextField(blank=True, null=True, verbose_name='description')),
                ('order', models.IntegerField(verbose_name='order')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project')),
            ],
            options={
                'verbose_name': 'task type',
                'verbose_name_plural': 'task types',
            },
        ),
        # Allowed source -> destination moves in the status workflow.
        migrations.CreateModel(
            name='Transition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('destination', django_project.models.ChainedForeignKeyTransition(chained_field='source', chained_model_field='project', on_delete=django.db.models.deletion.CASCADE, to='django_project.Status', verbose_name='destination status')),
                ('source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sources', to='django_project.Status', verbose_name='source status')),
            ],
            options={
                'verbose_name': 'transition',
                'verbose_name_plural': 'transitions',
            },
        ),
        # Second pass: fields added after all models exist to avoid
        # forward references.
        migrations.AddField(
            model_name='task',
            name='type',
            field=smart_selects.db_fields.ChainedForeignKey(chained_field='project', chained_model_field='project', on_delete=django.db.models.deletion.CASCADE, to='django_project.TaskType', verbose_name='task type'),
        ),
        migrations.AddField(
            model_name='status',
            name='destinations',
            field=models.ManyToManyField(blank=True, through='django_project.Transition', to='django_project.Status', verbose_name='destinations'),
        ),
        migrations.AddField(
            model_name='status',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project'),
        ),
        migrations.AddField(
            model_name='priority',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project'),
        ),
        migrations.AddField(
            model_name='objecttask',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='objecttask_tasks', to='django_project.Task', verbose_name='task'),
        ),
        migrations.AddField(
            model_name='milestone',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project', verbose_name='project'),
        ),
        migrations.AddField(
            model_name='membership',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project', verbose_name='project'),
        ),
        migrations.AddField(
            model_name='component',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_project.Project'),
        ),
        # Uniqueness constraints that depend on the second-pass FKs.
        migrations.AlterUniqueTogether(
            name='transition',
            unique_together=set([('source', 'destination')]),
        ),
        migrations.AlterUniqueTogether(
            name='tasktype',
            unique_together=set([('project', 'name')]),
        ),
        migrations.AlterUniqueTogether(
            name='status',
            unique_together=set([('project', 'name')]),
        ),
        migrations.AlterUniqueTogether(
            name='priority',
            unique_together=set([('project', 'name')]),
        ),
        migrations.AlterUniqueTogether(
            name='milestone',
            unique_together=set([('project', 'name')]),
        ),
        migrations.AlterUniqueTogether(
            name='membership',
            unique_together=set([('project', 'member')]),
        ),
        migrations.AlterUniqueTogether(
            name='component',
            unique_together=set([('project', 'name')]),
        ),
    ]
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the epsf module.
"""
import itertools
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.nddata import NDData
from astropy.table import Table
from astropy.stats import SigmaClip
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from ..epsf import EPSFBuilder, EPSFFitter
from ..epsf_stars import extract_stars, EPSFStars
from ..models import IntegratedGaussianPRF, EPSFModel
from ...datasets import make_gaussian_prf_sources_image
from ...utils._optional_deps import HAS_SCIPY # noqa
@pytest.mark.skipif('not HAS_SCIPY')
class TestEPSFBuild:
    """End-to-end tests for star extraction and EPSFBuilder on a
    simulated image of Gaussian PRF sources."""

    def setup_class(self):
        """
        Create a simulated image for testing.
        """
        from scipy.spatial import cKDTree

        shape = (750, 750)

        # define random star positions
        nstars = 100

        rng = np.random.default_rng(0)
        xx = rng.uniform(low=0, high=shape[1], size=nstars)
        yy = rng.uniform(low=0, high=shape[0], size=nstars)

        # enforce a minimum separation: keep only stars farther than
        # min_dist from every previously accepted star
        min_dist = 25
        coords = [(yy[0], xx[0])]
        for xxi, yyi in zip(xx, yy):
            newcoord = [yyi, xxi]
            dist, _ = cKDTree([newcoord]).query(coords, 1)
            if np.min(dist) > min_dist:
                coords.append(newcoord)
        yy, xx = np.transpose(coords)
        zz = rng.uniform(low=0, high=200000., size=len(xx))

        # define a table of model parameters
        self.stddev = 2.
        sources = Table()
        sources['amplitude'] = zz
        sources['x_0'] = xx
        sources['y_0'] = yy
        sources['sigma'] = np.zeros(len(xx)) + self.stddev
        sources['theta'] = 0.

        self.data = make_gaussian_prf_sources_image(shape, sources)
        self.nddata = NDData(self.data)

        # integer positions used as initial star locations for extraction
        init_stars = Table()
        init_stars['x'] = xx.astype(int)
        init_stars['y'] = yy.astype(int)
        self.init_stars = init_stars

    def test_extract_stars(self):
        """Cutouts are extracted with the requested size; stars too close
        to the image edge are dropped (hence the warning)."""
        size = 25
        with pytest.warns(AstropyUserWarning, match='were not extracted'):
            stars = extract_stars(self.nddata, self.init_stars, size=size)

        assert len(stars) == 81
        assert isinstance(stars, EPSFStars)
        assert isinstance(stars[0], EPSFStars)
        assert stars[0].data.shape == (size, size)

    def test_epsf_build(self):
        """
        This is an end-to-end test of EPSFBuilder on a simulated image.
        """
        size = 25
        oversampling = 4.
        with pytest.warns(AstropyUserWarning, match='were not extracted'):
            stars = extract_stars(self.nddata, self.init_stars, size=size)
        epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=15,
                                   progress_bar=False, norm_radius=25,
                                   recentering_maxiters=15)
        epsf, fitted_stars = epsf_builder(stars)

        # built ePSF grid is (size * oversampling) + 1 on a side
        ref_size = (size * oversampling) + 1
        assert epsf.data.shape == (ref_size, ref_size)

        # compare the built ePSF against the analytic input PRF evaluated
        # on the oversampled grid
        y0 = (ref_size - 1) / 2 / oversampling
        y = np.arange(ref_size, dtype=float) / oversampling
        psf_model = IntegratedGaussianPRF(sigma=self.stddev)
        z = epsf.data
        x = psf_model.evaluate(y.reshape(-1, 1), y.reshape(1, -1), 1, y0, y0, self.stddev)
        assert_allclose(z, x, rtol=1e-2, atol=1e-5)

        # the fitted-star residual should integrate to ~zero flux
        resid_star = fitted_stars[0].compute_residual_image(epsf)
        assert_almost_equal(np.sum(resid_star)/fitted_stars[0].flux, 0, decimal=3)

    def test_epsf_fitting_bounds(self):
        """A fit box larger than the star cutouts must make fitting fail."""
        size = 25
        oversampling = 4.
        with pytest.warns(AstropyUserWarning, match='were not extracted'):
            stars = extract_stars(self.nddata, self.init_stars, size=size)
        epsf_builder = EPSFBuilder(oversampling=oversampling, maxiters=8,
                                   progress_bar=True, norm_radius=25,
                                   recentering_maxiters=5,
                                   fitter=EPSFFitter(fit_boxsize=30),
                                   smoothing_kernel='quadratic')
        # With a boxsize larger than the cutout we expect the fitting to
        # fail for all stars, due to star._fit_error_status
        with pytest.raises(ValueError), pytest.warns(AstropyUserWarning):
            epsf_builder(stars)

    def test_epsf_build_invalid_fitter(self):
        """
        Test that the input fitter is an EPSFFitter instance.
        """
        # class (not instance), wrong-type instance, and wrong-type class
        with pytest.raises(TypeError):
            EPSFBuilder(fitter=EPSFFitter, maxiters=3)

        with pytest.raises(TypeError):
            EPSFBuilder(fitter=LevMarLSQFitter(), maxiters=3)

        with pytest.raises(TypeError):
            EPSFBuilder(fitter=LevMarLSQFitter, maxiters=3)
def test_epsfbuilder_inputs():
    """EPSFBuilder constructor argument validation."""
    # invalid oversampling / maxiters values are rejected
    for bad_kwargs in ({'oversampling': None},
                       {'oversampling': -1},
                       {'maxiters': -1},
                       {'oversampling': [-1, 4]}):
        with pytest.raises(ValueError):
            EPSFBuilder(**bad_kwargs)

    # scalar and 2-element oversampling are accepted
    EPSFBuilder(oversampling=6)
    EPSFBuilder(oversampling=[4, 6])

    # invalid sigma-clip inputs are rejected
    for sigclip in [None, [], 'a']:
        with pytest.raises(ValueError):
            EPSFBuilder(flux_residual_sigclip=sigclip)

    # a SigmaClip instance is accepted
    EPSFBuilder(flux_residual_sigclip=SigmaClip(sigma=2.5, cenfunc='mean',
                                                maxiters=2))
def test_epsfmodel_inputs():
    """EPSFModel constructor argument validation."""
    # empty data is rejected
    with pytest.raises(ValueError):
        EPSFModel(np.array([[], []]))

    # non-finite data is rejected
    data = np.ones((5, 5), dtype=float)
    data[2, 2] = np.inf
    with pytest.raises(ValueError):
        EPSFModel(data)

    # flux=None with data whose normalization overflows is rejected
    with pytest.warns(RuntimeWarning,
                      match='overflow encountered in double_scalars'):
        data[2, 2] = np.finfo(float).max * 2
        with pytest.raises(ValueError):
            EPSFModel(data, flux=None)

    # invalid oversampling values are rejected
    data[2, 2] = 1
    for oversampling in [-1, [-2, 4], (1, 4, 8), ((1, 2), (3, 4)),
                         np.ones((2, 2, 2)), 2.1, np.nan, (1, np.inf)]:
        with pytest.raises(ValueError):
            EPSFModel(data, oversampling=oversampling)

    # origin must be a 2-element position
    with pytest.raises(TypeError):
        EPSFModel(data, origin=(1, 2, 3))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('oversamp', [3, 4])
def test_epsf_build_oversampling(oversamp):
    """
    Build an ePSF from a grid of sub-pixel-dithered stars and compare it
    with the analytic input PRF at the oversampled resolution.
    """
    # dither offsets that evenly sample a pixel at the given oversampling
    offsets = np.arange(oversamp) * 1./oversamp - 0.5 + 1./(2. * oversamp)
    xydithers = np.array(list(itertools.product(offsets, offsets)))
    xdithers = np.transpose(xydithers)[0]
    ydithers = np.transpose(xydithers)[1]

    # one star per dither combination on a regular grid
    nstars = oversamp**2
    sigma = 3.0
    sources = Table()
    offset = 50
    size = oversamp * offset + offset
    y, x = np.mgrid[0:oversamp, 0:oversamp] * offset + offset
    sources['amplitude'] = np.full((nstars,), 100.0)
    sources['x_0'] = x.ravel() + xdithers
    sources['y_0'] = y.ravel() + ydithers
    sources['sigma'] = np.full((nstars,), sigma)

    data = make_gaussian_prf_sources_image((size, size), sources)
    nddata = NDData(data=data)
    stars_tbl = Table()
    stars_tbl['x'] = sources['x_0']
    stars_tbl['y'] = sources['y_0']
    stars = extract_stars(nddata, stars_tbl, size=25)
    epsf_builder = EPSFBuilder(oversampling=oversamp, maxiters=15,
                               progress_bar=False, recentering_maxiters=20)
    epsf, fitted_stars = epsf_builder(stars)

    # input PSF shape
    size = epsf.data.shape[0]
    cen = (size - 1) / 2
    # the ePSF grid is ``oversamp`` times finer, so the reference
    # Gaussian width scales by the same factor
    sigma2 = oversamp * sigma
    m = IntegratedGaussianPRF(sigma2, x_0=cen, y_0=cen, flux=1)
    yy, xx = np.mgrid[0:size, 0:size]
    psf = m(xx, yy)
    assert_allclose(epsf.data, psf*epsf.data.sum(), atol=2.5e-4)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os.path
import re
import signal
import time
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from ducktape.cluster.remoteaccount import RemoteCommandError
from config import KafkaConfig
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import config_property
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.services.security.minikdc import MiniKdc
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH, LATEST_0_10_0
# A Kafka listener port: its logical name, port number, and whether it is
# currently expected to be open on the broker node.
Port = collections.namedtuple('Port', ['name', 'number', 'open'])
class KafkaService(KafkaPathResolverMixin, JmxMixin, Service):
    """Ducktape service that provisions and drives a cluster of Kafka brokers.

    One broker runs per service node. Brokers register with the ZookeeperService
    passed to the constructor; most admin operations (topic creation, partition
    reassignment, leader/ISR queries) shell out to the Kafka CLI tools or query
    Zookeeper directly over ssh.
    """

    # All broker state and logs live under this directory on each node.
    PERSISTENT_ROOT = "/mnt/kafka"
    STDOUT_STDERR_CAPTURE = os.path.join(PERSISTENT_ROOT, "server-start-stdout-stderr.log")
    LOG4J_CONFIG = os.path.join(PERSISTENT_ROOT, "kafka-log4j.properties")
    # Logs such as controller.log, server.log, etc all go here
    OPERATIONAL_LOG_DIR = os.path.join(PERSISTENT_ROOT, "kafka-operational-logs")
    OPERATIONAL_LOG_INFO_DIR = os.path.join(OPERATIONAL_LOG_DIR, "info")
    OPERATIONAL_LOG_DEBUG_DIR = os.path.join(OPERATIONAL_LOG_DIR, "debug")
    # Kafka log segments etc go here
    DATA_LOG_DIR_PREFIX = os.path.join(PERSISTENT_ROOT, "kafka-data-logs")
    DATA_LOG_DIR_1 = "%s-1" % (DATA_LOG_DIR_PREFIX)
    DATA_LOG_DIR_2 = "%s-2" % (DATA_LOG_DIR_PREFIX)
    CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "kafka.properties")
    # Kafka Authorizer
    SIMPLE_AUTHORIZER = "kafka.security.auth.SimpleAclAuthorizer"

    # Files/directories ducktape collects from each node after a test run.
    # "collect_default" controls whether collection happens without opt-in.
    logs = {
        "kafka_server_start_stdout_stderr": {
            "path": STDOUT_STDERR_CAPTURE,
            "collect_default": True},
        "kafka_operational_logs_info": {
            "path": OPERATIONAL_LOG_INFO_DIR,
            "collect_default": True},
        "kafka_operational_logs_debug": {
            "path": OPERATIONAL_LOG_DEBUG_DIR,
            "collect_default": False},
        "kafka_data_1": {
            "path": DATA_LOG_DIR_1,
            "collect_default": False},
        "kafka_data_2": {
            "path": DATA_LOG_DIR_2,
            "collect_default": False}
    }

    def __init__(self, context, num_nodes, zk, security_protocol=SecurityConfig.PLAINTEXT, interbroker_security_protocol=SecurityConfig.PLAINTEXT,
                 client_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI, interbroker_sasl_mechanism=SecurityConfig.SASL_MECHANISM_GSSAPI,
                 authorizer_class_name=None, topics=None, version=DEV_BRANCH, jmx_object_names=None,
                 jmx_attributes=None, zk_connect_timeout=5000, zk_session_timeout=6000, server_prop_overides=None, zk_chroot=None):
        """
        :param context: ducktape test context
        :type zk: ZookeeperService
        :param topics: dict of topic name -> topic config dict (or None);
            every entry is created via create_topic() after start()
        :type topics: dict
        :param server_prop_overides: list of [property_name, value] pairs
            applied on top of the rendered kafka.properties template
        :param zk_chroot: optional Zookeeper chroot path (must start with '/')
        """
        Service.__init__(self, context, num_nodes)
        JmxMixin.__init__(self, num_nodes=num_nodes, jmx_object_names=jmx_object_names, jmx_attributes=(jmx_attributes or []),
                          root=KafkaService.PERSISTENT_ROOT)
        self.zk = zk
        self.security_protocol = security_protocol
        self.interbroker_security_protocol = interbroker_security_protocol
        self.client_sasl_mechanism = client_sasl_mechanism
        self.interbroker_sasl_mechanism = interbroker_sasl_mechanism
        self.topics = topics
        self.minikdc = None
        self.authorizer_class_name = authorizer_class_name
        self.zk_set_acl = False
        # Avoid sharing a mutable default between instances.
        if server_prop_overides is None:
            self.server_prop_overides = []
        else:
            self.server_prop_overides = server_prop_overides
        self.log_level = "DEBUG"
        self.zk_chroot = zk_chroot
        #
        # In a heavily loaded and not very fast machine, it is
        # sometimes necessary to give more time for the zk client
        # to have its session established, especially if the client
        # is authenticating and waiting for the SaslAuthenticated
        # in addition to the SyncConnected event.
        #
        # The default value for zookeeper.connect.timeout.ms is
        # 2 seconds and here we increase it to 5 seconds, but
        # it can be overridden by setting the corresponding parameter
        # for this constructor.
        self.zk_connect_timeout = zk_connect_timeout
        # Also allow the session timeout to be provided explicitly,
        # primarily so that test cases can depend on it when waiting
        # e.g. brokers to deregister after a hard kill.
        self.zk_session_timeout = zk_session_timeout
        # One well-known port per security protocol; listeners start closed
        # and are opened on demand (see open_port()/start()).
        self.port_mappings = {
            'PLAINTEXT': Port('PLAINTEXT', 9092, False),
            'SSL': Port('SSL', 9093, False),
            'SASL_PLAINTEXT': Port('SASL_PLAINTEXT', 9094, False),
            'SASL_SSL': Port('SASL_SSL', 9095, False)
        }
        for node in self.nodes:
            node.version = version
            # Broker id is derived from the node's 1-based index in the service.
            node.config = KafkaConfig(**{config_property.BROKER_ID: self.idx(node)})

    def set_version(self, version):
        """Set the Kafka version every node will run (effective on next start)."""
        for node in self.nodes:
            node.version = version

    @property
    def security_config(self):
        """Build a SecurityConfig reflecting the currently open listeners.

        Recomputed on every access so that ports opened after construction
        are picked up.
        """
        config = SecurityConfig(self.context, self.security_protocol, self.interbroker_security_protocol,
                                zk_sasl=self.zk.zk_sasl,
                                client_sasl_mechanism=self.client_sasl_mechanism, interbroker_sasl_mechanism=self.interbroker_sasl_mechanism)
        for protocol in self.port_mappings:
            port = self.port_mappings[protocol]
            if port.open:
                config.enable_security_protocol(port.name)
        return config

    def open_port(self, protocol):
        """Mark the listener for `protocol` as enabled (Port is immutable, so replace it)."""
        self.port_mappings[protocol] = self.port_mappings[protocol]._replace(open=True)

    def close_port(self, protocol):
        """Mark the listener for `protocol` as disabled."""
        self.port_mappings[protocol] = self.port_mappings[protocol]._replace(open=False)

    def start_minikdc(self, add_principals=""):
        """Start a MiniKdc (once) if any SASL protocol is in use; otherwise clear it.

        :param add_principals: extra Kerberos principals to create in the KDC
        """
        if self.security_config.has_sasl:
            if self.minikdc is None:
                self.minikdc = MiniKdc(self.context, self.nodes, extra_principals = add_principals)
                self.minikdc.start()
        else:
            self.minikdc = None

    def alive(self, node):
        """Return True if at least one broker JVM process is running on `node`."""
        return len(self.pids(node)) > 0

    def start(self, add_principals=""):
        """Open the configured listeners, start KDC/brokers, wait for ZK registration, create topics."""
        self.open_port(self.security_protocol)
        self.open_port(self.interbroker_security_protocol)
        self.start_minikdc(add_principals)
        self._ensure_zk_chroot()
        Service.start(self)
        self.logger.info("Waiting for brokers to register at ZK")
        # NOTE(review): `retries` is never decremented, so the branch below is
        # dead code — wait_until() itself raises on timeout (30s, 1s polls).
        retries = 30
        expected_broker_ids = set(self.nodes)
        wait_until(lambda: {node for node in self.nodes if self.is_registered(node)} == expected_broker_ids, 30, 1)
        if retries == 0:
            raise RuntimeError("Kafka servers didn't register at ZK within 30 seconds")
        # Create topics if necessary
        if self.topics is not None:
            for topic, topic_cfg in self.topics.items():
                if topic_cfg is None:
                    topic_cfg = {}
                topic_cfg["topic"] = topic
                self.create_topic(topic_cfg)

    def _ensure_zk_chroot(self):
        """Create every path component of self.zk_chroot in Zookeeper (no-op if unset)."""
        self.logger.info("Ensuring zk_chroot %s exists", self.zk_chroot)
        if self.zk_chroot:
            if not self.zk_chroot.startswith('/'):
                raise Exception("Zookeeper chroot must start with '/' but found " + self.zk_chroot)
            # Create parents before children, e.g. /a, then /a/b, then /a/b/c.
            parts = self.zk_chroot.split('/')[1:]
            for i in range(len(parts)):
                self.zk.create('/' + '/'.join(parts[:i+1]))

    def set_protocol_and_port(self, node):
        """Compute `listeners`/`advertised.listeners` strings from the open ports for `node`."""
        listeners = []
        advertised_listeners = []
        for protocol in self.port_mappings:
            port = self.port_mappings[protocol]
            if port.open:
                listeners.append(port.name + "://:" + str(port.number))
                advertised_listeners.append(port.name + "://" + node.account.hostname + ":" + str(port.number))
        self.listeners = ','.join(listeners)
        self.advertised_listeners = ','.join(advertised_listeners)

    def prop_file(self, node):
        """Render the kafka.properties contents for `node` (template + per-test overrides)."""
        self.set_protocol_and_port(node)
        # Load template configs as a dictionary, skipping comments/non key=value lines.
        config_template = self.render('kafka.properties', node=node, broker_id=self.idx(node),
                                      security_config=self.security_config, num_nodes=self.num_nodes)
        configs = dict( l.rstrip().split('=', 1) for l in config_template.split('\n')
                        if not l.startswith("#") and "=" in l )
        # Load specific test override configs.
        override_configs = KafkaConfig(**node.config)
        override_configs[config_property.ADVERTISED_HOSTNAME] = node.account.hostname
        override_configs[config_property.ZOOKEEPER_CONNECT] = self.zk_connect_setting()
        for prop in self.server_prop_overides:
            override_configs[prop[0]] = prop[1]
        # Update template configs with test override configs (overrides win).
        configs.update(override_configs)
        prop_file = self.render_configs(configs)
        return prop_file

    def render_configs(self, configs):
        """Render self as a series of lines key=val\n, and do so in a consistent order. """
        keys = [k for k in configs.keys()]
        keys.sort()
        s = ""
        for k in keys:
            s += "%s=%s\n" % (k, str(configs[k]))
        return s

    def start_cmd(self, node):
        """Build the shell command that launches the broker in the background on `node`."""
        cmd = "export JMX_PORT=%d; " % self.jmx_port
        cmd += "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % self.LOG4J_CONFIG
        cmd += "export KAFKA_OPTS=%s; " % self.security_config.kafka_opts
        # Append stdout/stderr to the capture file so restarts don't clobber earlier output.
        cmd += "%s %s 1>> %s 2>> %s &" % \
               (self.path.script("kafka-server-start.sh", node),
                KafkaService.CONFIG_FILE,
                KafkaService.STDOUT_STDERR_CAPTURE,
                KafkaService.STDOUT_STDERR_CAPTURE)
        return cmd

    def start_node(self, node):
        """Write config files, set up security, launch the broker, and wait for startup."""
        node.account.mkdirs(KafkaService.PERSISTENT_ROOT)
        prop_file = self.prop_file(node)
        self.logger.info("kafka.properties:")
        self.logger.info(prop_file)
        node.account.create_file(KafkaService.CONFIG_FILE, prop_file)
        node.account.create_file(self.LOG4J_CONFIG, self.render('log4j.properties', log_dir=KafkaService.OPERATIONAL_LOG_DIR))
        self.security_config.setup_node(node)
        self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=True)
        cmd = self.start_cmd(node)
        self.logger.debug("Attempting to start KafkaService on %s with command: %s" % (str(node.account), cmd))
        with node.account.monitor_log(KafkaService.STDOUT_STDERR_CAPTURE) as monitor:
            node.account.ssh(cmd)
            # Kafka 1.0.0 and higher don't have a space between "Kafka" and "Server"
            monitor.wait_until("Kafka\s*Server.*started", timeout_sec=30, backoff_sec=.25, err_msg="Kafka server didn't finish startup")
        # Credentials for inter-broker communication are created before starting Kafka.
        # Client credentials are created after starting Kafka so that both loading of
        # existing credentials from ZK and dynamic update of credentials in Kafka are tested.
        self.security_config.setup_credentials(node, self.path, self.zk_connect_setting(), broker=False)
        self.start_jmx_tool(self.idx(node), node)
        if len(self.pids(node)) == 0:
            raise Exception("No process ids recorded on node %s" % node.account.hostname)

    def pids(self, node):
        """Return process ids associated with running processes on the given node."""
        try:
            # jcmd lists JVMs with their main class; filter to the Kafka entry point.
            cmd = "jcmd | grep -e %s | awk '{print $1}'" % self.java_class_name()
            pid_arr = [pid for pid in node.account.ssh_capture(cmd, allow_fail=True, callback=int)]
            return pid_arr
        except (RemoteCommandError, ValueError) as e:
            # Treat "can't list processes" the same as "no processes running".
            return []

    def signal_node(self, node, sig=signal.SIGTERM):
        """Send `sig` to every broker process on `node`."""
        pids = self.pids(node)
        for pid in pids:
            node.account.signal(pid, sig)

    def signal_leader(self, topic, partition=0, sig=signal.SIGTERM):
        """Send `sig` to the broker currently leading the given topic-partition."""
        leader = self.leader(topic, partition)
        self.signal_node(leader, sig)

    def stop_node(self, node, clean_shutdown=True):
        """Stop the broker on `node`; SIGTERM for clean shutdown, SIGKILL otherwise.

        Dumps JVM threads before re-raising if the process fails to exit in 60s.
        """
        pids = self.pids(node)
        sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
        for pid in pids:
            node.account.signal(pid, sig, allow_fail=False)
        try:
            wait_until(lambda: len(self.pids(node)) == 0, timeout_sec=60, err_msg="Kafka node failed to stop")
        except Exception:
            self.thread_dump(node)
            raise

    def thread_dump(self, node):
        """Best-effort JVM thread dump (SIGQUIT) for every broker pid on `node`."""
        for pid in self.pids(node):
            try:
                node.account.signal(pid, signal.SIGQUIT, allow_fail=True)
            except:
                self.logger.warn("Could not dump threads on node")

    def clean_node(self, node):
        """Kill any leftover broker processes and remove all persisted state on `node`."""
        JmxMixin.clean_node(self, node)
        self.security_config.clean_node(node)
        node.account.kill_java_processes(self.java_class_name(),
                                         clean_shutdown=False, allow_fail=True)
        node.account.ssh("sudo rm -rf -- %s" % KafkaService.PERSISTENT_ROOT, allow_fail=False)

    def create_topic(self, topic_cfg, node=None):
        """Run the admin tool create topic command.
        Specifying node is optional, and may be done if for different kafka nodes have different versions,
        and we care where command gets run.
        If the node is not specified, run the command from self.nodes[0]

        :param topic_cfg: dict with at least "topic"; optionally "partitions",
            "replication-factor", "replica-assignment", "if-not-exists", "configs"
        """
        if node is None:
            node = self.nodes[0]
        self.logger.info("Creating topic %s with settings %s",
                         topic_cfg["topic"], topic_cfg)
        kafka_topic_script = self.path.script("kafka-topics.sh", node)
        cmd = kafka_topic_script + " "
        cmd += "--zookeeper %(zk_connect)s --create --topic %(topic)s " % {
                'zk_connect': self.zk_connect_setting(),
                'topic': topic_cfg.get("topic"),
            }
        # An explicit replica assignment is mutually exclusive with
        # partitions/replication-factor.
        if 'replica-assignment' in topic_cfg:
            cmd += " --replica-assignment %(replica-assignment)s" % {
                'replica-assignment': topic_cfg.get('replica-assignment')
            }
        else:
            cmd += " --partitions %(partitions)d --replication-factor %(replication-factor)d" % {
                'partitions': topic_cfg.get('partitions', 1),
                'replication-factor': topic_cfg.get('replication-factor', 1)
            }
        if topic_cfg.get('if-not-exists', False):
            cmd += ' --if-not-exists'
        if "configs" in topic_cfg.keys() and topic_cfg["configs"] is not None:
            for config_name, config_value in topic_cfg["configs"].items():
                cmd += " --config %s=%s" % (config_name, str(config_value))
        self.logger.info("Running topic creation command...\n%s" % cmd)
        node.account.ssh(cmd)
        time.sleep(1)
        self.logger.info("Checking to see if topic was properly created...\n%s" % cmd)
        for line in self.describe_topic(topic_cfg["topic"]).split("\n"):
            self.logger.info(line)

    def describe_topic(self, topic, node=None):
        """Return the raw output of `kafka-topics.sh --describe` for `topic`."""
        if node is None:
            node = self.nodes[0]
        cmd = "%s --zookeeper %s --topic %s --describe" % \
              (self.path.script("kafka-topics.sh", node), self.zk_connect_setting(), topic)
        output = ""
        for line in node.account.ssh_capture(cmd):
            output += line
        return output

    def list_topics(self, topic, node=None):
        """Yield topic names from `kafka-topics.sh --list`, skipping SLF4J noise."""
        if node is None:
            node = self.nodes[0]
        cmd = "%s --zookeeper %s --list" % \
              (self.path.script("kafka-topics.sh", node), self.zk_connect_setting())
        for line in node.account.ssh_capture(cmd):
            if not line.startswith("SLF4J"):
                yield line.rstrip()

    def alter_message_format(self, topic, msg_format_version, node=None):
        """Set message.format.version on `topic` via kafka-configs.sh."""
        if node is None:
            node = self.nodes[0]
        self.logger.info("Altering message format version for topic %s with format %s", topic, msg_format_version)
        cmd = "%s --zookeeper %s --entity-name %s --entity-type topics --alter --add-config message.format.version=%s" % \
              (self.path.script("kafka-configs.sh", node), self.zk_connect_setting(), topic, msg_format_version)
        self.logger.info("Running alter message format command...\n%s" % cmd)
        node.account.ssh(cmd)

    def parse_describe_topic(self, topic_description):
        """Parse output of kafka-topics.sh --describe (or describe_topic() method above), which is a string of form
        PartitionCount:2\tReplicationFactor:2\tConfigs:
        Topic: test_topic\tPartition: 0\tLeader: 3\tReplicas: 3,1\tIsr: 3,1
        Topic: test_topic\tPartition: 1\tLeader: 1\tReplicas: 1,2\tIsr: 1,2
        into a dictionary structure appropriate for use with reassign-partitions tool:
        {
            "partitions": [
                {"topic": "test_topic", "partition": 0, "replicas": [3, 1]},
                {"topic": "test_topic", "partition": 1, "replicas": [1, 2]}
            ]
        }
        """
        lines = map(lambda x: x.strip(), topic_description.split("\n"))
        partitions = []
        for line in lines:
            # Only per-partition lines contain "Leader:"; skip the header line.
            m = re.match(".*Leader:.*", line)
            if m is None:
                continue
            fields = line.split("\t")
            # ["Partition: 4", "Leader: 0"] -> ["4", "0"]
            # NOTE(review): indexing `fields` after map() assumes Python 2
            # semantics where map() returns a list — confirm if porting to py3.
            fields = map(lambda x: x.split(" ")[1], fields)
            partitions.append(
                {"topic": fields[0],
                 "partition": int(fields[1]),
                 "replicas": map(int, fields[3].split(','))})
        return {"partitions": partitions}

    def verify_reassign_partitions(self, reassignment, node=None):
        """Run the reassign partitions admin tool in "verify" mode

        :returns: False if any partition failed or is still in progress, else True
        """
        if node is None:
            node = self.nodes[0]
        json_file = "/tmp/%s_reassign.json" % str(time.time())
        # reassignment to json; double-dump so the shell echo carries a quoted string
        json_str = json.dumps(reassignment)
        json_str = json.dumps(json_str)
        # create command
        cmd = "echo %s > %s && " % (json_str, json_file)
        cmd += "%s " % self.path.script("kafka-reassign-partitions.sh", node)
        cmd += "--zookeeper %s " % self.zk_connect_setting()
        cmd += "--reassignment-json-file %s " % json_file
        cmd += "--verify "
        cmd += "&& sleep 1 && rm -f %s" % json_file
        # send command
        self.logger.info("Verifying parition reassignment...")
        self.logger.debug(cmd)
        output = ""
        for line in node.account.ssh_capture(cmd):
            output += line
        self.logger.debug(output)
        if re.match(".*Reassignment of partition.*failed.*",
                    output.replace('\n', '')) is not None:
            return False
        if re.match(".*is still in progress.*",
                    output.replace('\n', '')) is not None:
            return False
        return True

    def execute_reassign_partitions(self, reassignment, node=None,
                                    throttle=None):
        """Run the reassign partitions admin tool in "execute" mode

        :param throttle: optional replication throttle (bytes/sec)
        """
        if node is None:
            node = self.nodes[0]
        json_file = "/tmp/%s_reassign.json" % str(time.time())
        # reassignment to json; double-dump so the shell echo carries a quoted string
        json_str = json.dumps(reassignment)
        json_str = json.dumps(json_str)
        # create command
        cmd = "echo %s > %s && " % (json_str, json_file)
        cmd += "%s " % self.path.script( "kafka-reassign-partitions.sh", node)
        cmd += "--zookeeper %s " % self.zk_connect_setting()
        cmd += "--reassignment-json-file %s " % json_file
        cmd += "--execute"
        if throttle is not None:
            cmd += " --throttle %d" % throttle
        cmd += " && sleep 1 && rm -f %s" % json_file
        # send command
        self.logger.info("Executing parition reassignment...")
        self.logger.debug(cmd)
        output = ""
        for line in node.account.ssh_capture(cmd):
            output += line
        self.logger.debug("Verify partition reassignment:")
        self.logger.debug(output)

    def search_data_files(self, topic, messages):
        """Check if a set of messages made it into the Kakfa data files. Note that
        this method takes no account of replication. It simply looks for the
        payload in all the partition files of the specified topic. 'messages' should be
        an array of numbers. The list of missing messages is returned.
        """
        # Anchor each payload with '$' so e.g. "payload: 1" doesn't match "payload: 10".
        payload_match = "payload: " + "$|payload: ".join(str(x) for x in messages) + "$"
        found = set([])
        self.logger.debug("number of unique missing messages we will search for: %d",
                          len(messages))
        for node in self.nodes:
            # Grab all .log files in directories prefixed with this topic
            files = node.account.ssh_capture("find %s* -regex '.*/%s-.*/[^/]*.log'" % (KafkaService.DATA_LOG_DIR_PREFIX, topic))
            # Check each data file to see if it contains the messages we want
            for log in files:
                cmd = "%s kafka.tools.DumpLogSegments --print-data-log --files %s | grep -E \"%s\"" % \
                      (self.path.script("kafka-run-class.sh", node), log.strip(), payload_match)
                for line in node.account.ssh_capture(cmd, allow_fail=True):
                    for val in messages:
                        if line.strip().endswith("payload: "+str(val)):
                            self.logger.debug("Found %s in data-file [%s] in line: [%s]" % (val, log.strip(), line.strip()))
                            found.add(val)
        self.logger.debug("Number of unique messages found in the log: %d",
                          len(found))
        missing = list(set(messages) - found)
        if len(missing) > 0:
            self.logger.warn("The following values were not found in the data files: " + str(missing))
        return missing

    def restart_node(self, node, clean_shutdown=True):
        """Restart the given node."""
        self.stop_node(node, clean_shutdown)
        self.start_node(node)

    def isr_idx_list(self, topic, partition=0):
        """ Get in-sync replica list the given topic and partition.

        :returns: list of broker ids in the ISR, read from the partition's
            ZK state znode
        """
        self.logger.debug("Querying zookeeper to find in-sync replicas for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
        partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
        if partition_state is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
        partition_state = json.loads(partition_state)
        self.logger.info(partition_state)
        isr_idx_list = partition_state["isr"]
        self.logger.info("Isr for topic %s and partition %d is now: %s" % (topic, partition, isr_idx_list))
        return isr_idx_list

    def replicas(self, topic, partition=0):
        """ Get the assigned replicas for the given topic and partition.

        :returns: list of service nodes hosting replicas of the partition
        """
        self.logger.debug("Querying zookeeper to find assigned replicas for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s" % (topic)
        assignemnt = self.zk.query(zk_path, chroot=self.zk_chroot)
        if assignemnt is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
        assignemnt = json.loads(assignemnt)
        self.logger.info(assignemnt)
        replicas = assignemnt["partitions"][str(partition)]
        self.logger.info("Assigned replicas for topic %s and partition %d is now: %s" % (topic, partition, replicas))
        return [self.get_node(replica) for replica in replicas]

    def leader(self, topic, partition=0):
        """ Get the leader replica for the given topic and partition.

        :returns: the service node running the leader broker
        """
        self.logger.debug("Querying zookeeper to find leader replica for topic %s and partition %d" % (topic, partition))
        zk_path = "/brokers/topics/%s/partitions/%d/state" % (topic, partition)
        partition_state = self.zk.query(zk_path, chroot=self.zk_chroot)
        if partition_state is None:
            raise Exception("Error finding partition state for topic %s and partition %d." % (topic, partition))
        partition_state = json.loads(partition_state)
        self.logger.info(partition_state)
        leader_idx = int(partition_state["leader"])
        self.logger.info("Leader for topic %s and partition %d is now: %d" % (topic, partition, leader_idx))
        return self.get_node(leader_idx)

    def cluster_id(self):
        """ Get the current cluster id

        :returns: the cluster id string, or None if the znode is absent
        """
        self.logger.debug("Querying ZooKeeper to retrieve cluster id")
        cluster = self.zk.query("/cluster/id", chroot=self.zk_chroot)
        try:
            return json.loads(cluster)['id'] if cluster else None
        except:
            self.logger.debug("Data in /cluster/id znode could not be parsed. Data = %s" % cluster)
            raise

    def list_consumer_groups(self, node=None, command_config=None):
        """ Get list of consumer groups.

        :param command_config: optional path to a --command-config properties file
        """
        if node is None:
            node = self.nodes[0]
        consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
        if command_config is None:
            command_config = ""
        else:
            command_config = "--command-config " + command_config
        cmd = "%s --bootstrap-server %s %s --list" % \
              (consumer_group_script,
               self.bootstrap_servers(self.security_protocol),
               command_config)
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            if not line.startswith("SLF4J"):
                output += line
        self.logger.debug(output)
        return output

    def describe_consumer_group(self, group, node=None, command_config=None):
        """ Describe a consumer group.

        :param command_config: optional path to a --command-config properties file
        """
        if node is None:
            node = self.nodes[0]
        consumer_group_script = self.path.script("kafka-consumer-groups.sh", node)
        if command_config is None:
            command_config = ""
        else:
            command_config = "--command-config " + command_config
        cmd = "%s --bootstrap-server %s %s --group %s --describe" % \
              (consumer_group_script,
               self.bootstrap_servers(self.security_protocol),
               command_config, group)
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            # Filter SLF4J noise, the column header, and transient offset errors.
            if not (line.startswith("SLF4J") or line.startswith("TOPIC") or line.startswith("Could not fetch offset")):
                output += line
        self.logger.debug(output)
        return output

    def zk_connect_setting(self):
        """Return the zookeeper.connect string (including chroot, if any)."""
        return self.zk.connect_setting(self.zk_chroot)

    def bootstrap_servers(self, protocol='PLAINTEXT', validate=True, offline_nodes=[]):
        """Return comma-delimited list of brokers in this cluster formatted as HOSTNAME1:PORT1,HOSTNAME:PORT2,...
        This is the format expected by many config files.
        """
        port_mapping = self.port_mappings[protocol]
        self.logger.info("Bootstrap client port is: " + str(port_mapping.number))
        if validate and not port_mapping.open:
            raise ValueError("We are retrieving bootstrap servers for the port: %s which is not currently open. - " % str(port_mapping))
        return ','.join([node.account.hostname + ":" + str(port_mapping.number) for node in self.nodes if node not in offline_nodes])

    def controller(self):
        """ Get the controller node

        :returns: the service node currently acting as cluster controller
        """
        self.logger.debug("Querying zookeeper to find controller broker")
        controller_info = self.zk.query("/controller", chroot=self.zk_chroot)
        if controller_info is None:
            raise Exception("Error finding controller info")
        controller_info = json.loads(controller_info)
        self.logger.debug(controller_info)
        controller_idx = int(controller_info["brokerid"])
        self.logger.info("Controller's ID: %d" % (controller_idx))
        return self.get_node(controller_idx)

    def is_registered(self, node):
        """
        Check whether a broker is registered in Zookeeper
        """
        self.logger.debug("Querying zookeeper to see if broker %s is registered", node)
        broker_info = self.zk.query("/brokers/ids/%s" % self.idx(node), chroot=self.zk_chroot)
        self.logger.debug("Broker info: %s", broker_info)
        return broker_info is not None

    def get_offset_shell(self, topic, partitions, max_wait_ms, offsets, time):
        """Run kafka.tools.GetOffsetShell and return its captured output.

        NOTE(review): the `time` parameter shadows the imported `time` module
        within this method.
        """
        node = self.nodes[0]
        cmd = self.path.script("kafka-run-class.sh", node)
        cmd += " kafka.tools.GetOffsetShell"
        cmd += " --topic %s --broker-list %s --max-wait-ms %s --offsets %s --time %s" % (topic, self.bootstrap_servers(self.security_protocol), max_wait_ms, offsets, time)
        if partitions:
            cmd += ' --partitions %s' % partitions
        cmd += " 2>> %s/get_offset_shell.log" % KafkaService.PERSISTENT_ROOT
        cmd += " | tee -a %s/get_offset_shell.log &" % KafkaService.PERSISTENT_ROOT
        output = ""
        self.logger.debug(cmd)
        for line in node.account.ssh_capture(cmd):
            output += line
        self.logger.debug(output)
        return output

    def java_class_name(self):
        """Main class name used to find/kill broker JVM processes."""
        return "kafka.Kafka"
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import netaddr
from oslo_config import cfg
from oslo_utils import strutils
from neutron_lib._i18n import _
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from neutron_lib.placement import utils as pl_utils
from neutron_lib.utils import net as net_utils
def convert_to_boolean(data):
    """Coerce a value (string, bool or int representation) to a python bool.

    :param data: The value to coerce.
    :returns: The bool value of 'data' if it can be coerced.
    :raises InvalidInput: If the value can't be coerced to a python bool.
    """
    try:
        return strutils.bool_from_string(data, strict=True)
    except ValueError as exc:
        raise n_exc.InvalidInput(
            error_message=_("'%s' cannot be converted to boolean") % data
        ) from exc
def convert_to_boolean_if_not_none(data):
    """Apply convert_to_boolean() unless the value is None.

    :param data: The value to convert.
    :returns: convert_to_boolean(data) for non-None input; None otherwise.
    """
    return None if data is None else convert_to_boolean(data)
def convert_to_int(data):
    """Coerce a value to a python int via the built-in int() constructor.

    :param data: The value to coerce.
    :returns: The int value of the data.
    :raises InvalidInput: If the value can't be converted to an int.
    """
    try:
        return int(data)
    except (TypeError, ValueError) as exc:
        raise n_exc.InvalidInput(
            error_message=_("'%s' is not an integer") % data) from exc
def convert_to_int_if_not_none(data):
    """Apply convert_to_int() unless the value is None.

    :param data: The value to convert.
    :returns: convert_to_int(data) for non-None input; None otherwise.
    """
    return data if data is None else convert_to_int(data)
def convert_to_positive_float_or_none(val):
    """Convert a value to a non-negative python float, passing None through.

    :param val: The value to convert; ``None`` is returned unchanged.
    :returns: The value as a python float, or None when 'val' is None.
    :raises InvalidInput: If 'val' can't be converted to a float or the
        converted value is negative.
    """
    # Originally added for a vendor-specific extension (port RXTX factor in
    # neutron.plugins.vmware.extensions.qos) but generic enough to keep here
    # with the other converters.
    if val is None:
        return None
    try:
        # Rebind so the error message below shows the float form when the
        # conversion succeeded but the value was negative.
        val = float(val)
        if val < 0:
            raise ValueError()
    except (ValueError, TypeError) as exc:
        raise n_exc.InvalidInput(
            error_message=_("'%s' must be a non negative decimal") % val
        ) from exc
    return val
def convert_kvp_str_to_list(data):
    """Convert a value of the form 'key=value' to ['key', 'value'].

    :param data: The string to parse for a key value pair.
    :returns: A list where element 0 is the key and element 1 is the value.
    :raises InvalidInput: If 'data' is not a key value string.
    """
    key, sep, value = data.partition('=')
    key = key.strip()
    # A valid pair needs both a '=' separator and a non-empty key.
    if sep and key:
        return [key, value.strip()]
    msg = _("'%s' is not of the form <key>=[value]") % data
    raise n_exc.InvalidInput(error_message=msg)
def convert_kvp_list_to_dict(kvp_list):
    """Convert a list of 'key=value' strings to a dict.

    :param kvp_list: A list of key value pair strings. For the pair format
        see convert_kvp_str_to_list().
    :returns: A dict mapping each key to the list of its (de-duplicated)
        values.
    :raises InvalidInput: If any of the key value strings are malformed.
    """
    if kvp_list == ['True']:
        # No values were provided (i.e. '--flag-name')
        return {}
    kvp_map = {}
    for pair in kvp_list:
        key, value = convert_kvp_str_to_list(pair)
        # Accumulate into a set so repeated key=value pairs collapse.
        kvp_map.setdefault(key, set()).add(value)
    return {key: list(values) for key, values in kvp_map.items()}
def convert_none_to_empty_list(value):
    """Map None to an empty list, leaving any other value untouched.

    :param value: The value to convert.
    :returns: An empty list if 'value' is None, otherwise 'value'.
    """
    if value is None:
        return []
    return value
def convert_none_to_empty_dict(value):
    """Map None to an empty dict, leaving any other value untouched.

    :param value: The value to convert.
    :returns: An empty dict if 'value' is None, otherwise 'value'.
    """
    if value is None:
        return {}
    return value
def convert_none_to_empty_string(value):
    """Map None to the empty string, leaving any other value untouched.

    :param value: The value to convert.
    :returns: An empty string if 'value' is None, otherwise 'value'.
    """
    if value is None:
        return ''
    return value
def convert_to_list(data):
    """Wrap a value in a list.

    :param data: The value to convert.
    :return: [] for None; list(data) for non-string iterables; otherwise a
        single-element list containing 'data'.
    """
    if data is None:
        return []
    # Strings are iterable but must be treated as scalars here.
    if isinstance(data, str) or not hasattr(data, '__iter__'):
        return [data]
    return list(data)
def convert_ip_to_canonical_format(value):
    """Return an IP address in canonical form when possible.

    :param value: The IP Address which needs to be checked.
    :returns: - None if 'value' is None,
              - 'value' unchanged for IPv4 addresses or non-IP input,
              - the compact canonical form for IPv6 addresses.
    """
    try:
        addr = netaddr.IPAddress(value)
    except (netaddr.core.AddrFormatError, ValueError):
        # Not an IP address at all: pass the value through untouched.
        return value
    if addr.version == constants.IP_VERSION_6:
        return str(addr.format(dialect=netaddr.ipv6_compact))
    return value
def convert_cidr_to_canonical_format(value):
    """Validate a CIDR and return it in canonical form.

    :param value: The CIDR which needs to be checked.
    :returns: - 'value' if 'value' is CIDR with IPv4 address,
              - CIDR with canonical IPv6 address if 'value' is IPv6 CIDR.
    :raises InvalidInput: if 'value' is None, not a valid CIDR or
        an invalid IP format.
    """
    error_message = _("%s is not in a CIDR format") % value
    try:
        cidr = netaddr.IPNetwork(value)
    except netaddr.core.AddrFormatError as exc:
        raise n_exc.InvalidInput(error_message=error_message) from exc
    return "%s/%s" % (convert_ip_to_canonical_format(cidr.ip), cidr.prefixlen)
def convert_string_to_case_insensitive(data):
    """Lower-case a string, effectively making it case-insensitive.

    :param data: The value to convert.
    :return: The lower-cased string representation of the value, or None if
        'data' is None.
    :raises InvalidInput: If the value is not a string.
    """
    try:
        return data.lower()
    except AttributeError as exc:
        raise n_exc.InvalidInput(
            error_message=_("Input value %s must be string type") % data
        ) from exc
def convert_to_protocol(data):
    """Validate that a specified IP protocol is valid.

    For the authoritative list mapping protocol names to numbers, see the
    IANA protocol-numbers registry.

    :param data: The value to verify is an IP protocol.
    :returns: If data is an int between 0 and 255 or None, return that; if
        data is a string then return it lower-cased if it matches one of the
        allowed protocol names.
    :raises exceptions.InvalidInput: If data is an int < 0, an int > 255, or
        a string that does not match one of the allowed protocol names.
    """
    if data is None:
        return None
    if convert_string_to_case_insensitive(data) in constants.IPTABLES_PROTOCOL_MAP:
        return data
    error_message = _("IP protocol '%s' is not supported. Only protocol "
                      "names and their integer representation (0 to "
                      "255) are supported") % data
    try:
        if 0 <= convert_to_int(data) <= 255:
            return data
        # Out of range: route through the except clause below so the raised
        # exception is chained, matching the conversion-failure path.
        raise n_exc.InvalidInput(error_message=error_message)
    except n_exc.InvalidInput as exc:
        raise n_exc.InvalidInput(error_message=error_message) from exc
def convert_to_string(data):
    """Convert a value to its string representation.

    :param data: The value to convert.
    :returns: str(data), or None when 'data' is None.
    """
    if data is None:
        return None
    return str(data)
def convert_prefix_forced_case(data, prefix):
    """Force the case of a leading prefix of 'data' to match 'prefix'.

    Example, with prefix="Foo":

    * 'foobar' converted into 'Foobar'
    * 'fOozar' converted into 'Foozar'
    * 'FOObaz' converted into 'Foobaz'

    :param data: The data to convert.
    :param prefix: The prefix whose exact casing should be enforced.
    :returns: When 'data' is a string starting with 'prefix' in a
        case-insensitive comparison, 'data' with that leading portion
        replaced by 'prefix'; any other value is returned unchanged.
    """
    if not isinstance(data, str):
        return data
    size = len(prefix)
    if len(data) < size or data[:size].lower() != prefix.lower():
        return data
    return prefix + data[size:]
def convert_uppercase_ip(data):
    """Upper-case a leading "ip" in 'data', case-insensitively.

    Allows accepting, for instance, both "ipv4" and "IPv4" by normalizing
    the first two letters to "IP".

    :param data: The data to convert.
    :returns: 'data' with its first two letters replaced by "IP" when it is
        a string starting with "ip" (any casing); otherwise 'data' unchanged.
    """
    normalized = convert_prefix_forced_case(data, "IP")
    return normalized
def convert_to_mac_if_none(data):
    """Return 'data', substituting a random MAC address when it is None.

    :param data: The data value, expected to be a MAC address or None.
    :return: 'data' unchanged when it is not None; otherwise a random MAC
        address generated from the configured base_mac.
    """
    if data is not None:
        return data
    base = cfg.CONF.base_mac.split(':')
    return net_utils.get_random_mac(base)
def convert_to_sanitized_mac_address(mac_address):
    """Return a MAC address with format xx:xx:xx:xx:xx:xx

    :param mac_address: The MAC address value.
    :return: The MAC address formatted with the unix-expanded dialect. If
        the input cannot be parsed as a MAC address it is returned as-is;
        this method does not aim to validate it.
    """
    try:
        eui = netaddr.EUI(mac_address, dialect=netaddr.mac_unix_expanded)
    except netaddr.core.AddrFormatError:
        return mac_address
    return str(eui)
def convert_to_sanitized_binding_profile_allocation(allocation, port_id,
                                                    min_bw_rules):
    """Return binding-profile.allocation in the new format.

    :param allocation: binding-profile.allocation attribute containing a
        string with an RP UUID (old format), or already a dict (new format).
    :param port_id: ID of the port that is being sanitized.
    :param min_bw_rules: A list of minimum bandwidth rules associated with
        the port.
    :return: A dict with the allocation in
        {'<group_uuid>': '<rp_uuid>'} format.
    """
    if isinstance(allocation, dict):
        # Already sanitized.
        return allocation
    group_uuid = pl_utils.resource_request_group_uuid(
        uuid.UUID(port_id), min_bw_rules)
    return {str(group_uuid): allocation}
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Float, ForeignKey
from sqlalchemy import Index, Integer, MetaData, String, Table, Text
from sqlalchemy import dialects
from sqlalchemy.types import NullType
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
    """Text column type that becomes MEDIUMTEXT on MySQL."""
    mysql_variant = dialects.mysql.MEDIUMTEXT()
    return Text().with_variant(mysql_variant, 'mysql')
def Inet():
    """String(43) column type that becomes a native INET on PostgreSQL."""
    pg_variant = dialects.postgresql.INET()
    return String(length=43).with_variant(pg_variant, 'postgresql')
def InetSmall():
    """String(39) column type that becomes a native INET on PostgreSQL."""
    pg_variant = dialects.postgresql.INET()
    return String(length=39).with_variant(pg_variant, 'postgresql')
# NOTE(dprince): We skip these columns for now.
# These get cleaned up in 184_fix_159_migration_sync_shadow_table
# (table, column) pairs whose reflected type is replaced with Inet() when
# building the shadow_* copies in _create_shadow_tables below.
_SHADOW_SKIPS = [
    ('instances', 'access_ip_v4'),
    ('instances', 'access_ip_v6'),
    ('networks', 'gateway'),
    ('networks', 'gateway_v6'),
    ('networks', 'netmask'),
    ('networks', 'netmask_v6'),
    ('networks', 'broadcast'),
    ('networks', 'dns1'),
    ('networks', 'dns2'),
    ('networks', 'vpn_public_address'),
    ('networks', 'vpn_private_address'),
    ('networks', 'dhcp_start'),
    ('fixed_ips', 'address'),
    ('floating_ips', 'address'),
    ('console_pools', 'address')]
def _create_shadow_tables(migrate_engine):
    """Create a 'shadow_<name>' copy of every table in the live schema."""
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    table_names = meta.tables.keys()
    meta.bind = migrate_engine
    for name in table_names:
        if name == 'security_group_default_rules':
            # NOTE(dprince): Skip for now. This is fixed in
            # 183_fix_157_migration_sync_shadow_table
            continue
        source = Table(name, meta, autoload=True)
        shadow_columns = []
        for col in source.columns:
            key = (name, col.name)
            if isinstance(col.type, NullType):
                # NOTE(boris-42): BigInteger is not supported by sqlite, so
                #                 after copy it will have NullType; other
                #                 types that are used in Nova are supported
                #                 by sqlite.
                clone = Column(col.name, BigInteger(), default=0)
            elif key in _SHADOW_SKIPS:
                clone = Column(col.name, Inet())
            elif key == ('cells', 'deleted'):
                # NOTE(dprince): Skip for now. This is fixed in
                # 181_fix_179_migration_sync_shadow_table
                clone = Column(col.name, Boolean())
            else:
                clone = col.copy()
            shadow_columns.append(clone)
        shadow = Table('shadow_' + name, meta, *shadow_columns,
                       mysql_engine='InnoDB')
        try:
            shadow.create()
        except Exception:
            LOG.info(repr(shadow))
            LOG.exception(_('Exception while creating table.'))
            raise
def _populate_instance_types(instance_types_table):
    """Seed the instance_types table with the default m1.* flavors."""
    default_inst_types = {
        'm1.tiny': dict(mem=512, vcpus=1, root_gb=1, eph_gb=0, flavid=1),
        'm1.small': dict(mem=2048, vcpus=1, root_gb=20, eph_gb=0, flavid=2),
        'm1.medium': dict(mem=4096, vcpus=2, root_gb=40, eph_gb=0, flavid=3),
        'm1.large': dict(mem=8192, vcpus=4, root_gb=80, eph_gb=0, flavid=4),
        'm1.xlarge': dict(mem=16384, vcpus=8, root_gb=160, eph_gb=0, flavid=5)
    }
    try:
        insert = instance_types_table.insert()
        for name, spec in default_inst_types.iteritems():
            row = {'name': name,
                   'memory_mb': spec["mem"],
                   'vcpus': spec["vcpus"],
                   'deleted': 0,
                   'root_gb': spec["root_gb"],
                   'ephemeral_gb': spec["eph_gb"],
                   'rxtx_factor': 1,
                   'swap': 0,
                   'flavorid': spec["flavid"],
                   'disabled': False,
                   'is_public': True}
            insert.execute(row)
    except Exception:
        LOG.info(repr(instance_types_table))
        LOG.exception(_('Exception while seeding instance_types table'))
        raise
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('hypervisor', String(length=255)),
Column('os', String(length=255)),
Column('architecture', String(length=255)),
Column('version', String(length=255)),
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('device_name', String(length=255), nullable=False),
Column('delete_on_termination', Boolean),
Column('virtual_name', String(length=255)),
Column('snapshot_id', String(length=36), nullable=True),
Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
Column('connection_info', MediumText()),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
Column('uuid', String(length=36)),
Column('last_ctr_in', BigInteger()),
Column('last_ctr_out', BigInteger()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = Table('cells', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('api_url', String(length=255)),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('weight_offset', Float),
Column('weight_scale', Float),
Column('name', String(length=255)),
Column('is_parent', Boolean),
Column('rpc_host', String(length=255)),
Column('rpc_port', Integer),
Column('rpc_virtual_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = Table('certificates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_node_stats = Table('compute_node_stats', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('compute_node_id', Integer, nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
Column('current_workload', Integer),
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('console_type', String(length=255)),
Column('public_hostname', String(length=255)),
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
consoles = Table('consoles', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
Column('instance_uuid', String(length=36),
ForeignKey('instances.uuid',
name='consoles_instance_uuid_fkey')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('network_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('fixed_ip_id', Integer),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('auto_assigned', Boolean),
Column('pool', String(length=255)),
Column('interface', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36)),
Column('code', Integer, nullable=False),
Column('message', String(length=255)),
Column('details', MediumText()),
Column('host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('network_info', MediumText()),
Column('instance_uuid', String(length=36), nullable=False,
unique=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_metadata = Table('instance_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = Table('instance_system_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36), nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('name', String(length=255)),
Column('id', Integer, primary_key=True, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('swap', Integer, nullable=False),
Column('vcpu_weight', Integer),
Column('flavorid', String(length=255)),
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('disabled', Boolean),
Column('is_public', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances = Table('instances', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('internal_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('image_ref', String(length=255)),
Column('kernel_id', String(length=255)),
Column('ramdisk_id', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
Column('access_ip_v6', InetSmall()),
Column('config_drive', String(length=255)),
Column('task_state', String(length=255)),
Column('default_ephemeral_device', String(length=255)),
Column('default_swap_device', String(length=255)),
Column('progress', Integer),
Column('auto_disk_config', Boolean),
Column('shutdown_terminate', Boolean),
Column('disable_terminate', Boolean),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
Column('node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('action', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('request_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('message', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = Table('instance_actions_events', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('event', String(length=255)),
Column('action_id', Integer, ForeignKey('instance_actions.id')),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('result', String(length=255)),
Column('traceback', Text),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
Column('volume_id', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = Table('migrations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('status', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('source_node', String(length=255)),
Column('dest_node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('injected', Boolean),
Column('cidr', Inet()),
Column('netmask', InetSmall()),
Column('bridge', String(length=255)),
Column('gateway', InetSmall()),
Column('broadcast', InetSmall()),
Column('dns1', InetSmall()),
Column('vlan', Integer),
Column('vpn_public_address', InetSmall()),
Column('vpn_public_port', Integer),
Column('vpn_private_address', InetSmall()),
Column('dhcp_start', InetSmall()),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('cidr_v6', Inet()),
Column('gateway_v6', InetSmall()),
Column('label', String(length=255)),
Column('netmask_v6', InetSmall()),
Column('bridge_interface', String(length=255)),
Column('multi_host', Boolean),
Column('dns2', InetSmall()),
Column('uuid', String(length=36)),
Column('priority', Integer),
Column('rxtx_base', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('class_name', String(length=255)),
Column('resource', String(length=255)),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('in_use', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('until_refresh', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table('quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('project_id', String(length=255)),
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
reservations = Table('reservations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('usage_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('delta', Integer, nullable=False),
Column('expire', DateTime),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = \
Table('security_group_instance_association', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
Column('protocol', String(length=255)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('group_id', Integer, ForeignKey('security_groups.id')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = Table('security_group_default_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
sm_backend_config = Table('sm_backend_config', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('flavor_id', Integer, ForeignKey('sm_flavors.id'),
nullable=False),
Column('sr_uuid', String(length=255)),
Column('sr_type', String(length=255)),
Column('config_params', String(length=2047)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
sm_flavors = Table('sm_flavors', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('label', String(length=255)),
Column('description', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
sm_volume = Table('sm_volume', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', String(length=36), primary_key=True,
nullable=False, autoincrement=False),
Column('backend_id', Integer, nullable=False),
Column('vdi_uuid', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('status', String(length=255)),
Column('progress', String(length=255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('task_name', String(length=255), nullable=False),
Column('state', String(length=255), nullable=False),
Column('host', String(length=255), nullable=False),
Column('period_beginning', DateTime, nullable=False),
Column('period_ending', DateTime, nullable=False),
Column('message', String(length=255), nullable=False),
Column('task_items', Integer),
Column('errors', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255), unique=True),
Column('network_id', Integer),
Column('uuid', String(length=36)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_storage_arrays = Table('virtual_storage_arrays', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('project_id', String(length=255)),
Column('availability_zone', String(length=255)),
Column('instance_type_id', Integer, nullable=False),
Column('image_ref', String(length=255)),
Column('vc_count', Integer, nullable=False),
Column('vol_count', Integer, nullable=False),
Column('status', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_metadata = Table('volume_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_type_extra_specs = Table('volume_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_type_id', Integer, ForeignKey('volume_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_types = Table('volume_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table('volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
Column('mountpoint', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
Column('instance_uuid', String(length=36)),
Column('attach_time', DateTime),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = Table('volume_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('tot_last_refreshed', DateTime(timezone=False)),
Column('tot_reads', BigInteger(), default=0),
Column('tot_read_bytes', BigInteger(), default=0),
Column('tot_writes', BigInteger(), default=0),
Column('tot_write_bytes', BigInteger(), default=0),
Column('curr_last_refreshed', DateTime(timezone=False)),
Column('curr_reads', BigInteger(), default=0),
Column('curr_read_bytes', BigInteger(), default=0),
Column('curr_writes', BigInteger(), default=0),
Column('curr_write_bytes', BigInteger(), default=0),
Column('deleted', Integer),
Column("instance_uuid", String(length=36)),
Column("project_id", String(length=36)),
Column("user_id", String(length=36)),
Column("availability_zone", String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances.create()
if migrate_engine.name == 'sqlite':
# NOTE(dprince): We should remove this conditional when we compress 201
# which also adds an index on instances.uuid
# NOTE(dprince): This is a bit of a hack to avoid changing migration
# 201... but also keep test_archive_deleted_rows_fk_constraint
# passing. Not having this cause fkey mismatch errors. We
# name it differently to avoid causing failures in 201.
# See comments here: https://review.openstack.org/#/c/54172/5
Index('uuid2', instances.c.uuid, unique=True).create(migrate_engine)
else:
Index('project_id', instances.c.project_id).create(migrate_engine)
Index('uuid', instances.c.uuid, unique=True).create(migrate_engine)
# create all tables
tables = [aggregates, console_pools, instance_types,
security_groups, sm_flavors, sm_backend_config,
snapshots, volume_types,
volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_node_stats, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events,
iscsi_targets, key_pairs, migrations, networks,
provider_fw_rules, quota_classes, quota_usages, quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, sm_volume, snapshot_id_mappings, task_log,
virtual_interfaces,
virtual_storage_arrays, volume_id_mappings, volume_metadata,
volume_type_extra_specs, volume_usage_cache]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception(_('Exception while creating table.'))
raise
# task log unique constraint
task_log_uc = "uniq_task_name_x_host_x_period_beginning_x_period_ending"
task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending')
uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc)
uc.create()
# networks unique constraint
UniqueConstraint('vlan', 'deleted', table=networks,
name='uniq_vlan_x_deleted').create()
# flavorid unique constraint
UniqueConstraint('flavorid', 'deleted', table=instance_types,
name='uniq_flavorid_x_deleted').create()
# instance_type_name constraint
UniqueConstraint('name', 'deleted', table=instance_types,
name='uniq_name_x_deleted').create()
# keypair contraint
UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
name='key_pairs_uniq_name_and_user_id').create()
# instance_type_projects constraint
inst_type_uc_name = 'uniq_instance_type_id_x_project_id_x_deleted'
UniqueConstraint('instance_type_id', 'project_id', 'deleted',
table=instance_type_projects,
name=inst_type_uc_name).create()
# floating_ips unique constraint
UniqueConstraint('address', 'deleted',
table=floating_ips,
name='uniq_address_x_deleted').create()
indexes = [
# agent_builds
Index('agent_builds_hypervisor_os_arch_idx',
agent_builds.c.hypervisor,
agent_builds.c.os,
agent_builds.c.architecture),
# aggregate_metadata
Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
# block_device_mapping
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index(
'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.virtual_name,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
# bw_usage_cache
Index('bw_usage_cache_uuid_start_period_idx',
bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
# compute_node_stats
Index('ix_compute_node_stats_compute_node_id',
compute_node_stats.c.compute_node_id),
Index('compute_node_stats_node_id_and_deleted_idx',
compute_node_stats.c.compute_node_id,
compute_node_stats.c.deleted),
# consoles
Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
# fixed_ips
Index('fixed_ips_host_idx', fixed_ips.c.host),
# floating_ips
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
# instance_type_extra_specs
Index('instance_type_extra_specs_instance_type_id_key_idx',
instance_type_extra_specs.c.instance_type_id,
instance_type_extra_specs.c.key),
# instance_id_mappings
Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
# instance_metadata
Index('instance_metadata_instance_uuid_idx',
instance_metadata.c.instance_uuid),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('networks_host_idx', networks.c.host),
# reservations
Index('ix_reservations_project_id', reservations.c.project_id),
# security_group_instance_association
Index('security_group_instance_association_instance_uuid_idx',
security_group_instance_association.c.instance_uuid),
# quota_classes
Index('ix_quota_classes_class_name', quota_classes.c.class_name),
# quota_usages
Index('ix_quota_usages_project_id', quota_usages.c.project_id),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
# task_log
Index('ix_task_log_period_beginning', task_log.c.period_beginning),
Index('ix_task_log_host', task_log.c.host),
Index('ix_task_log_period_ending', task_log.c.period_ending),
]
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
# TODO(dprince): review these for removal. Some of these indexes
# were automatically created by SQLAlchemy migrate and *may* no longer
# be in use
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('backend_id', sm_volume.c.backend_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
instance_faults.c.instance_uuid, instance_faults.c.created_at),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('volume_metadata_volume_id_fkey', volume_metadata.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
Index('instances_host_deleted_idx', instances.c.host),
Index('networks_bridge_deleted_idx', networks.c.bridge),
]
# created later (to preserve ordering for schema diffs)
mysql_post_indexes = [
Index('instances_uuid_deleted_idx', instances.c.uuid),
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_node_deleted_idx', instances.c.host,
instances.c.node),
Index('migrations_instance_uuid_and_status_idx',
migrations.c.instance_uuid, migrations.c.status),
Index('certificates_project_id_deleted_idx',
certificates.c.project_id),
Index('certificates_user_id_deleted_idx', certificates.c.user_id),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
fixed_ips.c.address, fixed_ips.c.reserved,
fixed_ips.c.network_id),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.allocated),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
floating_ips.c.pool, floating_ips.c.fixed_ip_id,
floating_ips.c.project_id),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id),
Index('networks_project_id_deleted_idx', networks.c.project_id),
Index('networks_uuid_project_id_deleted_idx',
networks.c.uuid, networks.c.project_id),
Index('networks_vlan_deleted_idx', networks.c.vlan),
Index('networks_cidr_v6_idx', networks.c.cidr_v6),
]
# Common indexes (indexes we apply to all databases)
common_indexes = [
# instances
Index('instances_reservation_id_idx',
instances.c.reservation_id),
Index('instances_terminated_at_launched_at_idx',
instances.c.terminated_at,
instances.c.launched_at),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
Index('request_id_idx', instance_actions.c.request_id),
# instance_faults
Index('instance_faults_host_idx', instance_faults.c.host)
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
for index in mysql_pre_indexes:
index.create(migrate_engine)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at).create()
Index('networks_cidr_v6_idx', networks.c.cidr_v6).create()
# MySQL/PostgreSQL indexes
if migrate_engine.name == 'mysql' or migrate_engine.name == 'postgresql':
for index in indexes:
index.create(migrate_engine)
for index in common_indexes:
index.create(migrate_engine)
# NOTE(dprince): We should remove this conditional when we compress 201
# which also adds an index on instances.uuid
# See comments here: https://review.openstack.org/#/c/54172/5
if migrate_engine.name == 'sqlite':
Index('project_id', dns_domains.c.project_id).drop
# special case for migrations_by_host_nodes_and_status_idx index
if migrate_engine.name == "mysql":
for index in mysql_post_indexes:
index.create(migrate_engine)
# mysql-specific index by leftmost 100 chars. (mysql gets angry if the
# index key length is too long.)
sql = ("create index migrations_by_host_nodes_and_status_idx ON "
"migrations (source_compute(100), dest_compute(100), "
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
fkeys = [
[[fixed_ips.c.instance_uuid],
[instances.c.uuid],
'fixed_ips_instance_uuid_fkey'],
[[block_device_mapping.c.instance_uuid],
[instances.c.uuid],
'block_device_mapping_instance_uuid_fkey'],
[[instance_info_caches.c.instance_uuid],
[instances.c.uuid],
'instance_info_caches_instance_uuid_fkey'],
[[instance_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_metadata_instance_uuid_fkey'],
[[instance_system_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_system_metadata_ibfk_1'],
[[instance_type_projects.c.instance_type_id],
[instance_types.c.id],
'instance_type_projects_ibfk_1'],
[[iscsi_targets.c.volume_id],
[volumes.c.id],
'iscsi_targets_volume_id_fkey'],
[[reservations.c.usage_id],
[quota_usages.c.id],
'reservations_ibfk_1'],
[[security_group_instance_association.c.instance_uuid],
[instances.c.uuid],
'security_group_instance_association_instance_uuid_fkey'],
[[security_group_instance_association.c.security_group_id],
[security_groups.c.id],
'security_group_instance_association_ibfk_1'],
[[sm_volume.c.backend_id],
[sm_backend_config.c.id],
'sm_volume_ibfk_2'],
[[sm_volume.c.id],
[volumes.c.id],
'sm_volume_id_fkey'],
[[virtual_interfaces.c.instance_uuid],
[instances.c.uuid],
'virtual_interfaces_instance_uuid_fkey'],
[[volume_metadata.c.volume_id],
[volumes.c.id],
'volume_metadata_volume_id_fkey'],
]
for fkey_pair in fkeys:
if migrate_engine.name == 'mysql':
# For MySQL we name our fkeys explicitly so they match Havana
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
# PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
if migrate_engine.name == "mysql":
# In Folsom we explicitly converted migrate_version to UTF8.
sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
# Set default DB charset to UTF8.
sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \
migrate_engine.url.database
migrate_engine.execute(sql)
_create_shadow_tables(migrate_engine)
# populate initial instance types
_populate_instance_types(instance_types)
def downgrade(migrate_engine):
    """Reject any attempt to migrate below the Havana baseline schema.

    :param migrate_engine: SQLAlchemy engine (unused).
    :raises NotImplementedError: always.
    """
    msg = 'Downgrade from Havana is unsupported.'
    raise NotImplementedError(msg)
| |
"""
HIPE functions.
"""
def mkLinemask(obs, freqRanges=None, lineFreqs=None, vexp=15, usevexp=True,
               lineWidths=None, vlsr=0):
    """
    mkLinemask(obs, freqRanges=None, lineFreqs=None, vexp=15, usevexp=True,
               lineWidths=None, vlsr=0)
    Make Linemask table for HIFI pointed-observation spectra by specifying the
    frequency ranges for masking. Alternatively, users can provide the line
    frequencies and the envelope expansion velocity, or the corresponding
    line widths for calculating the frequency ranges.
    If the Linemask created by this function is to be used with fitBaseline(),
    remember to set domask=0 and doreuse=False for the latter, i.e.,
    fitBaseline(..., domask=0, doreuse=False, maskTable=Linemask).
    It is alright to set domask=2 if one would like HIPE to determine
    automatically whether additional masks are needed. However, it is found
    that domask=1 may nullify some of the masks in Linemask. Reason unknown.
    PARAMETERS
    obs: HIPE ObservationContext
    freqRanges: list
        List of tuples (or lists) containing the frequency ranges for masking,
        i.e., [(freq_start1, freq_end1), (freq_start2, freq_end2), ... ].
        If this parameter is given, the other parameters below are not used.
        The default value is None.
    lineFreqs: list, array, or sequence-like of double
        The frequencies (GHz) of all the lines. The default value is None.
    vexp: double
        The envelope expansion velocity (km/s) of the object. If usevexp=True,
        this velocity will be used to calculate the line widths, and lineWidths
        will be neglected. If usevexp=False, then this parameter is neglected.
        The default value is 15 (km/s).
    usevexp: boolean
        If usevexp=True (default), then the line widths of the given lines in
        lineFreqs will be calculated with vexp.
    lineWidths: list or sequence-like of double
        The full-width-at-zero-intensity (GHz) of all the lines can be provided
        manually with a list in 1-to-1 correspondence to lineFreqs. Note that
        this parameter is neglected if usevexp=True (default).
    vlsr: double
        The local-standard-of-rest velocity (km/s) of the object. The default
        value is 0 (km/s).
    RETURN
    Tuple (Linemask, freq_pairs)
    Linemask: TableDataset
        The Linemask table
    freq_pairs: Double1d array
        Array containing pairs of frequencies defining the mask ranges.
        If freqRanges is used, then this is equivalent to the output of
        numpy.ravel(freqRanges) in Python.
    """
    ## Create an empty table
    Linemask = TableDataset(description="Line masks created by mkLinemask().")
    ## Create all the empty columns in the table
    col_list = ["freq_1", "freq_2", "weight", "origin", "peak", "median",
                "dataset", "scan"]
    for col_name in col_list:
        Linemask[col_name] = Column(Double1d())
    ## Create meta data
    Linemask.meta["HifiTimelineProduct"] = StringParameter()
    Linemask.meta["dataset"] = DoubleParameter()
    Linemask.meta["scan"] = DoubleParameter()
    Linemask.meta["subband"] = DoubleParameter()
    ## Define an array that will carry the freq1, freq2 pairs
    freq_pairs = Double1d()
    ## Speed of light (km/s) for doppler corrections
    c = 299792.458
    ## Average LO frequency: the mirror point into the other side band
    ## (hoisted out of the loop -- it does not change per line)
    loFreq = obs.meta["loFreqAvg"].double
    ## Use freqRanges when given, otherwise derive ranges from lineFreqs
    if freqRanges is not None:
        l = freqRanges
    else:
        l = lineFreqs
    for i in range(len(l)):
        ## If freqRanges is given, use it
        if freqRanges is not None:
            ## BUGFIX: the original indexed the loop counter (i[0] -> TypeError)
            ## and bound freq1a/freq2a, names never read below (-> NameError).
            freq_1a = l[i][0]
            freq_2a = l[i][1]
        ## Else, use lineFreqs
        else:
            ## Adjust doppler shift for each frequency
            freq_vlsr = (1 - vlsr / c) * lineFreqs[i]
            ## Calculate the starting and ending frequencies of each mask
            if usevexp:
                ## Use vexp to calculate line widths
                freq_1a = (1 - (vlsr + vexp) / c) * lineFreqs[i]
                freq_2a = (1 - (vlsr - vexp) / c) * lineFreqs[i]
            else:
                ## Use the lineWidths list
                freq_1a = freq_vlsr - lineWidths[i] / 2
                freq_2a = freq_vlsr + lineWidths[i] / 2
        ## Create another set of frequencies for the other side band.
        ## BUGFIX: the original assigned freq_1b/freq_2b only when the
        ## frequency differed from loFreq, so a frequency equal to loFreq
        ## raised NameError (or silently reused the previous iteration's
        ## value). The unconditional mirror is identical whenever they
        ## differ, and degenerates harmlessly to loFreq when they are equal.
        freq_1b = 2 * loFreq - freq_1a
        freq_2b = 2 * loFreq - freq_2a
        ## Append freq_pairs list
        for freq in [freq_1a, freq_2a, freq_2b, freq_1b]:
            freq_pairs.append(freq)
        ## Add rows of mask parameters to the table
        Linemask.addRow([freq_1a, freq_2a, 0.0, 2.0, 0.0, 0.0, 1.0, 0.0])
        Linemask.addRow([freq_2b, freq_1b, 0.0, 2.0, 0.0, 0.0, 1.0, 0.0])
    ## Return a tuple containing the Linemask table and freq_pairs array
    return (Linemask, freq_pairs)
def stat(sp, excludePairs=None, **kwargs):
    """
    stat(sp, excludePairs=None, **kwargs)
    Return three arrays in a list containing the rms, mean, and median values
    of different segments of the given spectral dataset. The values are
    computed by the built-in statistics() function. The definition of segments
    can be changed depending on the value of the "mode" parameter of
    statistics().
    PARAMETERS
    sp: HIPE SpectrumContainer
    excludePairs: list, array, or sequence-like of Double
        Pairs of starting and ending sky frequencies defining the range to be
        excluded from rms computation. The freq_pairs output from mkLinemask()
        can be put here. The format is [freq1a, freq1b, freq2a, freq2b, ...].
        Either this parameter or "exclude" in statistics() should be used.
        The default value is None.
    **kwargs: dict
        Additional keyword arguments are passed to statistics().
    RETURN
    list [rms_array, mean_array, median_array]
    rms_array: Double1d array
        Array containing the rms values of the segments.
    mean_array: Double1d array
        Array containing the mean values of the segments.
    median_array: Double1d array
        Array containing the median values of the segments.
    """
    if excludePairs is not None:
        ## Change the flat [f1a, f1b, f2a, f2b, ...] array into
        ## list-of-tuples form expected by statistics(exclude=...)
        freq_tuples_list = [(excludePairs[i], excludePairs[i + 1])
                            for i in range(0, len(excludePairs), 2)]
        ## Compute spectrum stat with the "exclude" parameter
        stats = statistics(ds=sp, exclude=freq_tuples_list, **kwargs)
    else:
        ## Compute spectrum stat
        stats = statistics(ds=sp, **kwargs)
    ## Collect the per-segment values by column-name prefix
    rms_list = \
        [stats[col].data[0] for col in stats.columnNames if 'rms_' in col]
    mean_list = \
        [stats[col].data[0] for col in stats.columnNames if 'mean_' in col]
    median_list = \
        [stats[col].data[0] for col in stats.columnNames if 'median_' in col]
    ## Return a list of arrays containing the stat values
    rms_array = Double1d(rms_list)
    mean_array = Double1d(mean_list)
    median_array = Double1d(median_list)
    return [rms_array, mean_array, median_array]
def unionIntervals(intervals):
    """
    unionIntervals(intervals)
    Return a list of union interval(s) of the input intervals, e.g.,
    given [[1,2], [4,6], [5,8]] will result in [[1,2], [4,8]].
    PARAMETERS
    intervals: list or sequence-like
        list of lists/tuples defining the intervals, e.g., [[0,1], [5,8], ...]
    RETURN
    union_intervals: list
        list of list(s) defining the union interval(s)
    """
    merged = []
    ## Walk the intervals in ascending order; each one either extends the
    ## last merged interval (when it overlaps or touches) or starts a new one
    for current in sorted(intervals):
        current = list(current)
        if not merged or merged[-1][1] < current[0]:
            merged.append(current)
        else:
            merged[-1][1] = max(merged[-1][1], current[1])
    return merged
def findMaskRanges(obs, backend, channelNum=100, sigmaNum=1.5, excludeNum=5,
                   widthFactor=2):
    """
    findMaskRanges(obs, backend, channelNum=100, sigmaNum=1.5, excludeNum=5,
                   widthFactor=2)
    Simple semi-automatic algorithm for line and line-like feature detection.
    Return the frequency ranges for making Linemasks.
    PARAMETERS
    obs: HIPE ObservationContext
    backend: str
        Must be one of the following: 'WBS-H-LSB', 'WBS-H-USB', 'WBS-V-LSB',
        'WBS-V-USB', 'HRS-H-LSB', 'HRS-H-USB', 'HRS-V-LSB', 'HRS-V-USB'.
    channelNum: int
        Number of channels to be included in one "channel group" when
        considering possible detections. The default value is 100.
    sigmaNum: double
        If the (mean) flux of a channel group is sigmaNum larger or smaller
        than the mean flux of all the channel groups, this group is labelled as
        line containing. The default value is 1.5.
    excludeNum: int
        The excludeNum channels with the largest flux and smallest flux will be
        excluded from the calculation of the mean channel group flux. The
        default value is 5.
    widthFactor: double
        The factor to be multiplied to the "line width" predicted from the
        channel groups. Note that the original predicted value may not reflect
        the real line width, it is better to be more conservative to make it
        wider especially when making Linemasks. The default value is 2.
    RETURN
    freqRanges: list
        List containing tuple(s) of the two frequencies defining a range where
        a mask should be applied, in the format of
        [(freq_start1, freq_end1), (freq_start2, freq_end2), ... ].
        This parameter can be directly input to mkLinemask.
    """
    ## Read Level 2.5 spectrum from obs.
    ## BUGFIX: the dataset was stored as sp_25 but read back through the
    ## undefined name sp0_25, raising NameError on every call.
    sp_25 = obs.refs["level2_5"].product.refs["spectrum"].product.\
        refs["spectrum_"+backend].product["dataset"].copy()
    freq_array = sp_25["wave"].data
    flux_array = sp_25["flux"].data
    ## Compute the average flux in each divided group of channels.
    ## The last group also absorbs the remaining channels (if any), i.e.,
    ## it may hold more channels than the other groups.
    flux_mean_array = Double1d()
    n = channelNum
    start_ch_list = range(0, len(flux_array), n)[:-1]
    for i in start_ch_list:
        if i != start_ch_list[-1]:
            group_mean = MEAN(flux_array[i:i+n])
        else:
            ## last group: include the remainder channels as well
            group_mean = MEAN(flux_array[i:])
        ## NaN groups (e.g. fully flagged channels) count as zero flux
        if IS_NAN(group_mean):
            flux_mean_array.append(0)
        else:
            flux_mean_array.append(group_mean)
    ## Compute mean and std of flux_mean_array after removing excludeNum (int)
    ## max and min values (respectively), then compare all the values of the
    ## original flux_mean_array to find the indices of the values deviating
    ## from mean by sigma*sigmaNum or larger
    flux_mean_list = list(flux_mean_array)
    for i in range(excludeNum):
        flux_mean_list.remove(max(flux_mean_list))
        flux_mean_list.remove(min(flux_mean_list))
    mean = MEAN(flux_mean_list)
    sigma = STDDEV(flux_mean_list)
    ## BUGFIX: the original located each deviant value with list.index(),
    ## which always returns the FIRST occurrence, so duplicate group-mean
    ## values mapped to wrong (repeated) channel ranges. enumerate() keeps
    ## the true position of every group.
    deviate_index_pair_list = [[i * n, i * n + n]
                               for i, f in enumerate(flux_mean_array)
                               if f > (mean + sigma * sigmaNum)
                               or f < (mean - sigma * sigmaNum)]
    ## Get the corresponding frequency ranges after applying the width factor
    freqRanges = []
    for index_pair in deviate_index_pair_list:
        half_width = abs(freq_array[index_pair[1]]-freq_array[index_pair[0]])/2
        cent_freq = (freq_array[index_pair[0]]+freq_array[index_pair[1]])/2
        start_freq = cent_freq - half_width * widthFactor
        end_freq = cent_freq + half_width * widthFactor
        freqRanges.append((start_freq, end_freq))
    ## Find the union ranges
    freqRanges = unionIntervals(freqRanges)
    ## Return freqRanges
    return freqRanges
| |
__author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import argmax, array
from random import sample, choice, shuffle
from pybrain.utilities import fListToString, Named
class Coevolution(Named):
    """ Population-based generational evolutionary algorithm
    with fitness being based (partially) on a relative measure. """
    # algorithm parameters
    populationSize = 50
    # fraction of the population kept after selection
    # (presumably read via _numSelected(), defined elsewhere -- TODO confirm)
    selectionProportion = 0.5
    # if True, the selected parents themselves survive into the next generation
    elitism = False
    parentChildAverage = 1. # proportion of the child
    # opponents faced per evaluation; None means populationSize (set in __init__)
    tournamentSize = None
    hallOfFameEvaluation = 0. # proportion of HoF evaluations in relative fitness
    # if True, opponents come from shared sampling over the previous population
    useSharedSampling = False
    # an external absolute evaluator
    absEvaluator = None
    # weight of absEvaluator's score when mixed into the relative fitness
    absEvalProportion = 0
    # execution settings
    maxGenerations = None
    maxEvaluations = None
    verbose = False
def __init__(self, relEvaluator, seeds, **args):
"""
:arg relevaluator: an anti-symmetric function that can evaluate 2 elements
:arg seeds: a list of initial guesses
"""
# set parameters
self.setArgs(**args)
self.relEvaluator = relEvaluator
if self.tournamentSize == None:
self.tournamentSize = self.populationSize
# initialize algorithm variables
self.steps = 0
self.generation = 0
# the best host and the best parasite from each generation
self.hallOfFame = []
# the relative fitnesses from each generation (of the selected individuals)
self.hallOfFitnesses = []
# this dictionary stores all the results between 2 players (first one starting):
# { (player1, player2): [games won, total games, cumulative score, list of scores] }
self.allResults = {}
# this dictionary stores the opponents a player has played against.
self.allOpponents = {}
# a list of all previous populations
self.oldPops = []
# build initial populations
self._initPopulation(seeds)
def learn(self, maxSteps=None):
""" Toplevel function, can be called iteratively.
:return: best evaluable found in the last generation. """
if maxSteps != None:
maxSteps += self.steps
while True:
if maxSteps != None and self.steps + self._stepsPerGeneration() > maxSteps:
break
if self.maxEvaluations != None and self.steps + self._stepsPerGeneration() > self.maxEvaluations:
break
if self.maxGenerations != None and self.generation >= self.maxGenerations:
break
self._oneGeneration()
return self.hallOfFame[-1]
    def _oneGeneration(self):
        # Evaluate the current population, archive the champion (and the top
        # fitnesses of the selected fraction) in the hall of fame, then build
        # the next generation via selection and reproduction.
        self.oldPops.append(self.pop)
        self.generation += 1
        fitnesses = self._evaluatePopulation()
        # store best in hall of fame
        besti = argmax(array(fitnesses))
        best = self.pop[besti]
        # fitnesses sorted descending, truncated to the selected count
        bestFits = sorted(fitnesses)[::-1][:self._numSelected()]
        self.hallOfFame.append(best)
        self.hallOfFitnesses.append(bestFits)
        if self.verbose:
            # Python 2 print statements
            print 'Generation', self.generation
            print ' relat. fits:', fListToString(sorted(fitnesses), 4)
            if len(best.params) < 20:
                print ' best params:', fListToString(best.params, 4)
        self.pop = self._selectAndReproduce(self.pop, fitnesses)
def _averageWithParents(self, pop, childportion):
for i, p in enumerate(pop[:]):
if p.parent != None:
tmp = p.copy()
tmp.parent = p.parent
tmp._setParameters(p.params * childportion + p.parent.params * (1 - childportion))
pop[i] = tmp
def _evaluatePopulation(self):
hoFtournSize = min(self.generation, int(self.tournamentSize * self.hallOfFameEvaluation))
tournSize = self.tournamentSize - hoFtournSize
if self.useSharedSampling:
opponents = self._sharedSampling(tournSize, self.pop, self.oldPops[-1])
else:
opponents = self.pop
if len(opponents) < tournSize:
tournSize = len(opponents)
self._doTournament(self.pop, opponents, tournSize)
if hoFtournSize > 0:
hoF = list(set(self.hallOfFame))
self._doTournament(self.pop, hoF, hoFtournSize)
fitnesses = []
for p in self.pop:
fit = 0
for opp in opponents:
fit += self._beats(p, opp)
if hoFtournSize > 0:
for opp in hoF:
fit += self._beats(p, opp)
if self.absEvalProportion > 0 and self.absEvaluator != None:
fit = (1 - self.absEvalProportion) * fit + self.absEvalProportion * self.absEvaluator(p)
fitnesses.append(fit)
return fitnesses
def _initPopulation(self, seeds):
if self.parentChildAverage < 1:
for s in seeds:
s.parent = None
self.pop = self._extendPopulation(seeds, self.populationSize)
def _extendPopulation(self, seeds, size):
""" build a population, with mutated copies from the provided
seed pool until it has the desired size. """
res = seeds[:]
for dummy in range(size - len(seeds)):
chosen = choice(seeds)
tmp = chosen.copy()
tmp.mutate()
if self.parentChildAverage < 1:
tmp.parent = chosen
res.append(tmp)
return res
def _selectAndReproduce(self, pop, fits):
    """Apply truncation selection and reproduction to the population
    according to the given fitness values.

    :param pop: the current population.
    :param fits: one fitness value per individual, same order as `pop`.
    :return: the new population.
    """
    # Pair individuals with their fitness; shuffle first so that the
    # subsequent *stable* sort breaks fitness ties randomly.
    # list() is required: on Python 3, zip() returns an iterator, which
    # random.shuffle() and .sort() cannot operate on (identical on Python 2).
    s = list(zip(fits, pop))
    shuffle(s)
    s.sort(key=lambda x: -x[0])
    # Select the top-scoring individuals...
    selected = [individual for _, individual in s[:self._numSelected()]]
    # ... and reproduce.
    if self.elitism:
        # Elitist: the selected parents stay in the new population.
        newpop = self._extendPopulation(selected, self.populationSize)
        if self.parentChildAverage < 1:
            self._averageWithParents(newpop, self.parentChildAverage)
    else:
        # Non-elitist: generate populationSize children, then drop the parents.
        newpop = self._extendPopulation(
            selected, self.populationSize + self._numSelected())[self._numSelected():]
        if self.parentChildAverage < 1:
            # NOTE(review): only the tail of newpop is averaged here even
            # though the parents were already sliced off above -- preserved
            # as-is; confirm whether the whole list was intended.
            self._averageWithParents(newpop[self._numSelected():], self.parentChildAverage)
    return newpop
def _beats(self, h, p):
    """Empirically observed score differential of h against p, averaged over
    all games they played in either role; 0 if they never played."""
    if (h, p) not in self.allResults:
        return 0
    _, hpgames, hscore = self.allResults[(h, p)][:3]
    _, phgames, pscore = self.allResults[(p, h)][:3]
    # float() keeps the division true (not integer) on Python 2 as well.
    return (hscore - pscore) / float(hpgames + phgames)
def _doTournament(self, pop1, pop2, tournamentSize=None):
    """ Play a tournament.

    Every player in pop1 plays against (a sample of) pop2, in both
    orderings.

    :key tournamentSize: If unspecified, play all-against-all
    """
    # TODO: Preferably select high-performing opponents?
    for p in pop1:
        # Candidate opponents: pop2 without the player itself.
        pop3 = pop2[:]
        while p in pop3:
            pop3.remove(p)
        if tournamentSize != None and tournamentSize < len(pop3):
            # Sub-sample the opponent pool down to the tournament size.
            opps = sample(pop3, tournamentSize)
        else:
            opps = pop3
        for opp in opps:
            # Evaluate both orderings (each side gets to "start").
            self._relEval(p, opp)
            self._relEval(opp, p)
def _globalScore(self, p):
    """Average score of player p over all recorded evaluations (0. if p
    never played)."""
    if p not in self.allOpponents:
        return 0.
    total = 0.
    games = 0
    for opp in self.allOpponents[p]:
        forward = self.allResults[(p, opp)]
        backward = self.allResults[(opp, p)]
        total += forward[2] - backward[2]
        games += forward[1] + backward[1]
    # The tiny additive constant tie-breaks in favor of players with more
    # games, and incidentally avoids division by zero.
    return total / (games + 0.01)
def _sharedSampling(self, numSelect, selectFrom, relativeTo):
    """Build a shared-sampling set of opponents.

    Greedily picks the player of selectFrom that beats the most members of
    relativeTo (ties broken by global score, then randomly), then recurses
    on the players that winner did NOT beat.

    :return: a list of up to numSelect selected players.
    """
    if numSelect < 1:
        return []
    # For every candidate, collect which members of relativeTo it beats.
    tmp = {}
    for p in selectFrom:
        beaten = []
        for opp in relativeTo:
            if self._beats(p, opp) > 0:
                beaten.append(opp)
        tmp[p] = beaten
    # List comprehension instead of map() with a tuple-unpacking lambda:
    # `lambda (p, beaten):` is Python-2-only syntax (a SyntaxError on
    # Python 3), and map() would return an unshufflable iterator there.
    beatlist = [(len(beaten), self._globalScore(p), p)
                for p, beaten in tmp.items()]
    # Shuffle first so the stable sort breaks complete ties randomly.
    shuffle(beatlist)
    beatlist.sort(key=lambda x: x[:2])
    best = beatlist[-1][2]
    # Recurse on the opponents the winner did not beat, without the winner.
    unBeaten = list(set(relativeTo).difference(tmp[best]))
    otherSelect = selectFrom[:]
    otherSelect.remove(best)
    return [best] + self._sharedSampling(numSelect - 1, otherSelect, unBeaten)
def _relEval(self, p, opp):
    """Run one relative evaluation of p against opp and record the result.

    Bookkeeping per (p, opp) pair: [wins, games, score sum, raw results].
    """
    self.allOpponents.setdefault(p, []).append(opp)
    key = (p, opp)
    if key not in self.allResults:
        self.allResults[key] = [0, 0, 0., []]
    res = self.relEvaluator(p, opp)
    record = self.allResults[key]
    if res > 0:
        record[0] += 1      # win counter
    record[1] += 1          # games played
    record[2] += res        # cumulative score
    record[3].append(res)   # raw per-game results
    self.steps += 1
def __str__(self):
    """Readable summary in (mu+lambda) / (mu,lambda) ES notation."""
    mu = self._numSelected()
    if self.elitism:
        tail = '+' + str(self.populationSize - mu)
    else:
        tail = ',' + str(self.populationSize)
    parts = ['Coevolution (', str(mu), tail, ')']
    if self.parentChildAverage < 1:
        parts.append(' p_c_avg=' + str(self.parentChildAverage))
    return ''.join(parts)
def _numSelected(self):
    """Number of individuals kept by truncation selection."""
    kept = self.populationSize * self.selectionProportion
    return int(kept)
def _stepsPerGeneration(self):
    """Single-game evaluations per generation: each of populationSize
    players meets tournamentSize opponents in both orderings."""
    return 2 * self.populationSize * self.tournamentSize
if __name__ == '__main__':
    # TODO: convert to unittest
    # Smoke test for _sharedSampling on a hand-built result table.
    # allResults entries are [wins, games, score sum, raw results];
    # allOpponents maps each player to the opponents it has met.
    x = Coevolution(None, [None], populationSize=1)
    x.allResults[(1, 2)] = [1, 1, 1, []]
    x.allResults[(2, 1)] = [-1, 1, -1, []]
    x.allResults[(2, 5)] = [1, 1, 2, []]
    x.allResults[(5, 2)] = [-1, 1, -1, []]
    x.allResults[(2, 3)] = [1, 1, 3, []]
    x.allResults[(3, 2)] = [-1, 1, -1, []]
    x.allResults[(4, 3)] = [1, 1, 4, []]
    x.allResults[(3, 4)] = [-1, 1, -1, []]
    x.allOpponents[1] = [2]
    x.allOpponents[2] = [1, 5]
    x.allOpponents[3] = [2, 4]
    x.allOpponents[4] = [3]
    x.allOpponents[5] = [2]
    # Python 2 print statements (this module predates Python 3).
    print x._sharedSampling(4, [1, 2, 3, 4, 5], [1, 2, 3, 4, 6, 7, 8, 9])
    print 'should be', [4, 1, 2, 5]
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generic utils."""
import codecs
import errno
import logging
import os
import Queue
import re
import stat
import sys
import tempfile
import threading
import time
import urlparse
import subprocess2
class Error(Exception):
    """Base exception class for gclient errors."""
def SplitUrlRevision(url):
    """Split "url[@revision]" into the two-tuple (url, revision).

    revision is None when no '@revision' suffix is present.
    """
    if url.startswith('ssh:'):
        # ssh urls may contain '@' in the user part, e.g.
        # ssh://user-name@example.com/~/test.git@stable, so parse with a
        # regex whose second group is None when no revision is given.
        pattern = r'(ssh://(?:[-\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
        return re.search(pattern, url).groups()
    parts = url.split('@', 1)
    if len(parts) == 1:
        parts.append(None)
    return tuple(parts)
def IsDateRevision(revision):
    """Return True when `revision` looks like a date revision: "{ ... }"."""
    if not revision:
        return False
    return re.match(r'^\{.+\}$', str(revision)) is not None
def MakeDateRevision(date):
    """Return a "{<date>}" revision string, representing the latest
    revision not later than the given date."""
    return ''.join(['{', date, '}'])
def SyntaxErrorToError(filename, e):
    """Raise a gclient_utils.Error carrying a human readable version of the
    given SyntaxError; re-raise the original if the message can't be built."""
    try:
        # Build a human readable message; e must carry lineno/offset/text.
        prefix = ('There is a syntax error in %s\n' % filename
                  if filename else 'There is a syntax error\n')
        error_message = prefix + 'Line #%s, character %s: "%s"' % (
            e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
    except:
        # Something went wrong while formatting; re-raise the original
        # exception instead of masking it. (Deliberate bare except.)
        raise e
    else:
        raise Error(error_message)
class PrintableObject(object):
    """Mixin whose str() lists every non-dunder attribute, one per line."""

    def __str__(self):
        lines = []
        for name in dir(self):
            if not name.startswith('__'):
                lines.append('%s = %s\n' % (name, str(getattr(self, name, ''))))
        return ''.join(lines)
def FileRead(filename, mode='rU'):
    """Read a file and return its content, decoded as utf-8 when possible.

    Returns the decoded (unicode) text on success, otherwise the raw string.
    NOTE(review): the .decode() call relies on Python 2 byte-string reads;
    on Python 3 a str has no .decode and this would raise -- confirm the
    module's target interpreter before reuse.
    """
    with open(filename, mode=mode) as f:
        # codecs.open() has different behavior than open() on python 2.6 so use
        # open() and decode manually.
        s = f.read()
        try:
            return s.decode('utf-8')
        except UnicodeDecodeError:
            # Not valid utf-8: hand back the undecoded content.
            return s
def FileWrite(filename, content, mode='w'):
    """Write `content` to `filename`, encoded as utf-8."""
    f = codecs.open(filename, mode=mode, encoding='utf-8')
    try:
        f.write(content)
    finally:
        f.close()
def rmtree(path):
    """shutil.rmtree() on steroids.

    Recursively removes a directory, even if it's marked read-only.

    shutil.rmtree() doesn't work on Windows if any of the files or directories
    are read-only, which svn repositories and some .svn files are. We need to
    be able to force the files to be writable (i.e., deletable) as we traverse
    the tree.

    Even with all this, Windows still sometimes fails to delete a file, citing
    a permission error (maybe something to do with antivirus scans or disk
    indexing). The best suggestion any of the user forums had was to wait a
    bit and try again, so we do that too. It's hand-waving, but sometimes it
    works. :/

    On POSIX systems, things are a little bit simpler. The modes of the files
    to be deleted doesn't matter, only the modes of the directories containing
    them are significant. As the directory tree is traversed, each directory
    has its mode set appropriately before descending into it. This should
    result in the entire tree being removed, with the possible exception of
    *path itself, because nothing attempts to change the mode of its parent.
    Doing so would be hazardous, as it's not a directory slated for removal.
    In the ordinary case, this is not a problem: for our purposes, the user
    will never lack write permission on *path's parent.
    """
    if not os.path.exists(path):
        return

    if os.path.islink(path) or not os.path.isdir(path):
        raise Error('Called rmtree(%s) in non-directory' % path)

    if sys.platform == 'win32':
        # Some people don't have the APIs installed. In that case we'll do without.
        win32api = None
        win32con = None
        try:
            # Unable to import 'XX'
            # pylint: disable=F0401
            import win32api, win32con
        except ImportError:
            pass
    else:
        # On POSIX systems, we need the x-bit set on the directory to access it,
        # the r-bit to see its contents, and the w-bit to remove files from it.
        # The actual modes of the files within the directory is irrelevant.
        os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    def remove(func, subpath):
        # Delete one entry; on Windows force it writable first and retry once
        # after a short sleep on EACCES (antivirus / indexing races).
        if sys.platform == 'win32':
            os.chmod(subpath, stat.S_IWRITE)
            if win32api and win32con:
                win32api.SetFileAttributes(subpath, win32con.FILE_ATTRIBUTE_NORMAL)
        try:
            func(subpath)
        except OSError as e:  # 'except ... as' is Python 2.6+/3 compatible,
            # unlike the old 'except OSError, e' form.
            if e.errno != errno.EACCES or sys.platform != 'win32':
                raise
            # Failed to delete, try again after a 100ms sleep.
            time.sleep(0.1)
            func(subpath)

    for fn in os.listdir(path):
        # If fullpath is a symbolic link that points to a directory, isdir will
        # be True, but we don't want to descend into that as a directory, we just
        # want to remove the link. Check islink and treat links as ordinary files
        # would be treated regardless of what they reference.
        fullpath = os.path.join(path, fn)
        if os.path.islink(fullpath) or not os.path.isdir(fullpath):
            remove(os.remove, fullpath)
        else:
            # Recurse.
            rmtree(fullpath)

    remove(os.rmdir, path)


# TODO(maruel): Rename the references.
RemoveDirectory = rmtree
def safe_makedirs(tree):
    """Creates the directory in a safe manner.

    Because multiple threads can create these directories concurently, trap the
    exception and pass on.
    """
    count = 0
    while not os.path.exists(tree):
        count += 1
        try:
            os.makedirs(tree)
        except OSError as e:  # 'except ... as' is Python 2.6+/3 compatible,
            # unlike the old 'except OSError, e' form.
            # 17 POSIX (EEXIST), 183 Windows (ERROR_ALREADY_EXISTS): another
            # thread won the race; loop and re-check existence.
            if e.errno not in (17, 183):
                raise
            if count > 40:
                # Give up.
                raise
def CheckCallAndFilterAndHeader(args, always=False, header=None, **kwargs):
    """Wrap CheckCallAndFilter() with a descriptive header line.

    When |always| is true the header is written up front; otherwise it is
    written lazily, just before the first line of output the call produces.
    """
    out = kwargs.setdefault('stdout', sys.stdout)
    if header is None:
        header = "\n________ running '%s' in '%s'\n" % (
            ' '.join(args), kwargs.get('cwd', '.'))

    if always:
        out.write(header)
    else:
        inner_filter = kwargs.get('filter_fn')

        def header_then_filter(line):
            # CheckCallAndFilter calls the filter with None before the first
            # real line (call_filter_on_first_line); that is when the header
            # goes out.
            if line is None:
                out.write(header)
            elif inner_filter:
                inner_filter(line)

        kwargs['filter_fn'] = header_then_filter
        kwargs['call_filter_on_first_line'] = True
    # Obviously.
    kwargs.setdefault('print_stdout', True)
    return CheckCallAndFilter(args, **kwargs)
class Wrapper(object):
    """Transparent attribute-forwarding proxy around an arbitrary object."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        # Only invoked for names not found on the proxy itself; everything
        # else is forwarded to the wrapped object.
        return getattr(self._wrapped, name)
class AutoFlush(Wrapper):
    """File-object proxy that flushes automatically every `delay` seconds."""

    def __init__(self, wrapped, delay):
        super(AutoFlush, self).__init__(wrapped)
        if not hasattr(self, 'lock'):
            # Only create a lock when the wrapped object doesn't already
            # expose one through the proxy.
            self.lock = threading.Lock()
        self.__last_flushed_at = time.time()
        self.delay = delay

    @property
    def autoflush(self):
        # Marker used by MakeFileAutoFlush() to detect an existing wrapper.
        return self

    def write(self, out, *args, **kwargs):
        self._wrapped.write(out, *args, **kwargs)
        should_flush = False
        with self.lock:
            if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
                should_flush = True
                self.__last_flushed_at = time.time()
        if should_flush:
            self.flush()
class Annotated(Wrapper):
    """Creates a file object clone to automatically prepends every line in worker
    threads with a NN> prefix.
    """
    def __init__(self, wrapped, include_zero=False):
        super(Annotated, self).__init__(wrapped)
        if not hasattr(self, 'lock'):
            # Only create a lock when the wrapped object doesn't already
            # expose one through the proxy.
            self.lock = threading.Lock()
        # Per-thread-index pending output (text up to the last newline).
        self.__output_buffers = {}
        # Whether the unindexed (index 0) thread is buffered/annotated too.
        self.__include_zero = include_zero

    @property
    def annotated(self):
        # Marker used by MakeFileAnnotated() to detect an existing wrapper.
        return self

    def write(self, out):
        # The thread's 'index' attribute is set by ExecutionQueue._Worker.
        index = getattr(threading.currentThread(), 'index', 0)
        if not index and not self.__include_zero:
            # Unindexed threads aren't buffered.
            return self._wrapped.write(out)
        self.lock.acquire()
        try:
            # Use a dummy array to hold the string so the code can be lockless.
            # Strings are immutable, requiring to keep a lock for the whole dictionary
            # otherwise. Using an array is faster than using a dummy object.
            if not index in self.__output_buffers:
                obj = self.__output_buffers[index] = ['']
            else:
                obj = self.__output_buffers[index]
        finally:
            self.lock.release()

        # Continue lockless.
        obj[0] += out
        while '\n' in obj[0]:
            # Emit every complete line with the "NN>" prefix; keep the rest.
            line, remaining = obj[0].split('\n', 1)
            if line:
                self._wrapped.write('%d>%s\n' % (index, line))
            obj[0] = remaining

    def flush(self):
        """Flush buffered output."""
        orphans = []
        self.lock.acquire()
        try:
            # Detect threads no longer existing.
            indexes = (getattr(t, 'index', None) for t in threading.enumerate())
            indexes = filter(None, indexes)
            # NOTE(review): on Python 3, filter() returns a one-shot iterator,
            # so the repeated 'in indexes' membership tests below would
            # misbehave; fine under Python 2 where filter() returns a list.
            for index in self.__output_buffers:
                if not index in indexes:
                    orphans.append((index, self.__output_buffers[index][0]))
            for orphan in orphans:
                del self.__output_buffers[orphan[0]]
        finally:
            self.lock.release()

        # Don't keep the lock while writting. Will append \n when it shouldn't.
        for orphan in orphans:
            if orphan[1]:
                self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
        return self._wrapped.flush()
def MakeFileAutoFlush(fileobj, delay=10):
    """Return a version of fileobj that auto-flushes every `delay` seconds,
    re-tuning and reusing an existing AutoFlush wrapper when present."""
    existing = getattr(fileobj, 'autoflush', None)
    if not existing:
        return AutoFlush(fileobj, delay)
    existing.delay = delay
    return fileobj
def MakeFileAnnotated(fileobj, include_zero=False):
    """Return a version of fileobj that prefixes worker-thread output with
    "NN>", reusing an existing Annotated wrapper when present.

    :param include_zero: also buffer/annotate the unindexed (main) thread.
    """
    if getattr(fileobj, 'annotated', None):
        return fileobj
    # Bug fix: include_zero used to be silently dropped here, so callers
    # could never enable annotation of the unindexed thread.
    return Annotated(fileobj, include_zero)
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       **kwargs):
    """Runs a command and calls back a filter function if needed.

    Accepts all subprocess2.Popen() parameters plus:
      print_stdout: If True, the command's stdout is forwarded to stdout.
      filter_fn: A function taking a single string argument called with each line
                 of the subprocess2's output. Each line has the trailing newline
                 character trimmed.
      stdout: Can be any bufferable output.

    stderr is always redirected to stdout.

    Returns 0 on success; raises subprocess2.CalledProcessError on a
    non-zero exit code.
    """
    assert print_stdout or filter_fn
    stdout = stdout or sys.stdout
    filter_fn = filter_fn or (lambda x: None)
    kid = subprocess2.Popen(
        args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
        **kwargs)

    # Do a flush of stdout before we begin reading from the subprocess2's stdout
    stdout.flush()

    # Also, we need to forward stdout to prevent weird re-ordering of output.
    # This has to be done on a per byte basis to make sure it is not buffered:
    # normally buffering is done for each line, but if svn requests input, no
    # end-of-line character is output after the prompt and it would not show up.
    try:
        in_byte = kid.stdout.read(1)
        if in_byte:
            if call_filter_on_first_line:
                # Signal "about to emit the first line" with a None argument.
                filter_fn(None)
            in_line = ''
            while in_byte:
                if in_byte != '\r':
                    if print_stdout:
                        stdout.write(in_byte)
                    if in_byte != '\n':
                        in_line += in_byte
                    else:
                        filter_fn(in_line)
                        in_line = ''
                else:
                    # '\r' terminates the line for the filter but is never
                    # forwarded to stdout.
                    filter_fn(in_line)
                    in_line = ''
                in_byte = kid.stdout.read(1)
            # Flush the rest of buffered output. This is only an issue with
            # stdout/stderr not ending with a \n.
            if len(in_line):
                filter_fn(in_line)
        rv = kid.wait()
    except KeyboardInterrupt:
        # Python 2 print statement; kept as-is.
        print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
        raise

    if rv:
        raise subprocess2.CalledProcessError(
            rv, args, kwargs.get('cwd', None), None, None)
    return 0
def FindGclientRoot(from_dir, filename='.gclient'):
    """Tries to find the gclient root.

    Walks up from from_dir looking for `filename`; returns the containing
    directory, or None when not found (or when from_dir is not listed in
    the configuration's entries file).
    """
    real_from_dir = os.path.realpath(from_dir)
    path = real_from_dir
    # Walk upwards until a directory containing `filename` is found.
    while not os.path.exists(os.path.join(path, filename)):
        split_path = os.path.split(path)
        if not split_path[1]:
            # Reached the filesystem root without finding it.
            return None
        path = split_path[0]

    # If we did not find the file in the current directory, make sure we are in a
    # sub directory that is controlled by this configuration.
    if path != real_from_dir:
        entries_filename = os.path.join(path, filename + '_entries')
        if not os.path.exists(entries_filename):
            # If .gclient_entries does not exist, a previous call to gclient sync
            # might have failed. In that case, we cannot verify that the .gclient
            # is the one we want to use. In order to not to cause too much trouble,
            # just issue a warning and return the path anyway.
            # (Python 2 print statement; kept as-is.)
            print >> sys.stderr, ("%s file in parent directory %s might not be the "
                                  "file you want to use" % (filename, path))
            return path
        scope = {}
        try:
            # The entries file is Python source defining an 'entries' dict.
            exec(FileRead(entries_filename), scope)
        # NOTE(review): 'except X, e' is Python-2-only syntax.
        except SyntaxError, e:
            SyntaxErrorToError(filename, e)
        all_directories = scope['entries'].keys()
        # Accept only directories that the configuration actually controls.
        path_to_check = real_from_dir[len(path)+1:]
        while path_to_check:
            if path_to_check in all_directories:
                return path
            path_to_check = os.path.dirname(path_to_check)
        return None

    logging.info('Found gclient root at ' + path)
    return path
def PathDifference(root, subpath):
    """Return `subpath` relative to `root`, or None when subpath does not
    live under root. Both paths are canonicalized first."""
    real_root = os.path.realpath(root)
    real_sub = os.path.realpath(subpath)
    if not real_sub.startswith(real_root):
        return None
    # Joining with '' appends a trailing separator when missing, so the
    # slice below starts right after it regardless of how root was spelled.
    prefix_len = len(os.path.join(real_root, ''))
    return real_sub[prefix_len:]
def FindFileUpwards(filename, path=None):
    """Walk up from `path` (default: current directory) looking for
    `filename`; return the nearest ancestor directory containing it,
    or None when the filesystem root is reached without a hit."""
    current = os.path.realpath(path or os.getcwd())
    while True:
        if os.path.exists(os.path.join(current, filename)):
            return current
        parent = os.path.split(current)[0]
        if parent == current:
            # os.path.split() is a fixed point only at the root.
            return None
        current = parent
def GetGClientRootAndEntries(path=None):
    """Returns the gclient root and the dict of entries.

    Returns (config_dir, entries) on success; None when no
    .gclient_entries file is found above `path`.
    """
    config_file = '.gclient_entries'
    root = FindFileUpwards(config_file, path)
    if not root:
        # Python 2 print statement; kept as-is.
        print "Can't find %s" % config_file
        return None
    config_path = os.path.join(root, config_file)
    env = {}
    # execfile (Python-2-only) runs the entries file, which is expected to
    # define an 'entries' dict in env.
    execfile(config_path, env)
    config_dir = os.path.dirname(config_path)
    return config_dir, env['entries']
def lockedmethod(method):
    """Method decorator that holds self.lock for the duration of the call."""
    def inner(self, *args, **kwargs):
        # Acquire OUTSIDE the try/finally: the original code ran acquire()
        # inside the guarded region, so an interrupted acquire() still hit
        # the finally clause and released a lock it never held.
        try:
            self.lock.acquire()
        except KeyboardInterrupt:
            sys.stderr.write('Was deadlocked\n')
            raise
        try:
            return method(self, *args, **kwargs)
        finally:
            self.lock.release()
    return inner
class WorkItem(object):
    """A single unit of work for ExecutionQueue."""

    # On cygwin, creating a lock throwing randomly when nearing ~100 locks.
    # As a workaround, use a single lock. Yep you read it right. Single lock for
    # all the 100 objects.
    lock = threading.Lock()

    def __init__(self, name):
        # A unique string identifying this work item.
        self._name = name

    def run(self, work_queue):
        """Override in subclasses; keep work_queue as the last parameter,
        since it is passed as a keyword argument. Default does nothing."""

    @property
    def name(self):
        return self._name
class ExecutionQueue(object):
    """Runs a set of WorkItem that have interdependencies and were WorkItem are
    added as they are processed.

    In gclient's case, Dependencies sometime needs to be run out of order due to
    From() keyword. This class manages that all the required dependencies are run
    before running each one.

    Methods of this class are thread safe.
    """
    def __init__(self, jobs, progress, ignore_requirements):
        """jobs specifies the number of concurrent tasks to allow. progress is a
        Progress instance."""
        # Set when a thread is done or a new item is enqueued.
        self.ready_cond = threading.Condition()
        # Maximum number of concurrent tasks.
        self.jobs = jobs
        # List of WorkItem, for gclient, these are Dependency instances.
        self.queued = []
        # List of strings representing each Dependency.name that was run.
        self.ran = []
        # List of items currently running.
        self.running = []
        # Exceptions thrown if any.
        self.exceptions = Queue.Queue()
        # Progress status
        self.progress = progress
        if self.progress:
            self.progress.update(0)
        self.ignore_requirements = ignore_requirements

    def enqueue(self, d):
        """Enqueue one Dependency to be executed later once its requirements are
        satisfied.
        """
        assert isinstance(d, WorkItem)
        self.ready_cond.acquire()
        try:
            self.queued.append(d)
            total = len(self.queued) + len(self.ran) + len(self.running)
            logging.debug('enqueued(%s)' % d.name)
            if self.progress:
                self.progress._total = total + 1
                self.progress.update(0)
            # Wake up the flush() loop so it can consider the new item.
            self.ready_cond.notifyAll()
        finally:
            self.ready_cond.release()

    def flush(self, *args, **kwargs):
        """Runs all enqueued items until all are executed."""
        kwargs['work_queue'] = self
        self.ready_cond.acquire()
        try:
            while True:
                # Check for task to run first, then wait.
                while True:
                    if not self.exceptions.empty():
                        # Systematically flush the queue when an exception logged.
                        self.queued = []
                    self._flush_terminated_threads()
                    if (not self.queued and not self.running or
                            self.jobs == len(self.running)):
                        logging.debug('No more worker threads or can\'t queue anything.')
                        break

                    # Check for new tasks to start.
                    # NOTE(review): xrange is Python-2-only.
                    for i in xrange(len(self.queued)):
                        # Verify its requirements.
                        if (self.ignore_requirements or
                                not (set(self.queued[i].requirements) - set(self.ran))):
                            # Start one work item: all its requirements are satisfied.
                            self._run_one_task(self.queued.pop(i), args, kwargs)
                            break
                    else:
                        # Couldn't find an item that could run. Break out the outher loop.
                        break

                if not self.queued and not self.running:
                    # We're done.
                    break
                # We need to poll here otherwise Ctrl-C isn't processed.
                try:
                    self.ready_cond.wait(10)
                except KeyboardInterrupt:
                    # Help debugging by printing some information:
                    # (Python 2 print statements; kept as-is.)
                    print >> sys.stderr, (
                        ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
                         'Running: %d') % (
                        self.jobs,
                        len(self.queued),
                        ', '.join(self.ran),
                        len(self.running)))
                    for i in self.queued:
                        print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
                    raise
                # Something happened: self.enqueue() or a thread terminated. Loop again.
        finally:
            self.ready_cond.release()

        assert not self.running, 'Now guaranteed to be single-threaded'
        if not self.exceptions.empty():
            # To get back the stack location correctly, the raise a, b, c form must be
            # used, passing a tuple as the first argument doesn't work.
            # NOTE(review): 'raise a, b, c' is Python-2-only syntax.
            e = self.exceptions.get()
            raise e[0], e[1], e[2]
        if self.progress:
            self.progress.end()

    def _flush_terminated_threads(self):
        """Flush threads that have terminated."""
        running = self.running
        self.running = []
        for t in running:
            if t.isAlive():
                # Still working: keep it in the running list.
                self.running.append(t)
            else:
                t.join()
                sys.stdout.flush()
                if self.progress:
                    self.progress.update(1, t.item.name)
                if t.item.name in self.ran:
                    raise Error(
                        'gclient is confused, "%s" is already in "%s"' % (
                            t.item.name, ', '.join(self.ran)))
                if not t.item.name in self.ran:
                    self.ran.append(t.item.name)

    def _run_one_task(self, task_item, args, kwargs):
        # Dispatch one ready WorkItem, threaded or inline depending on jobs.
        if self.jobs > 1:
            # Start the thread.
            index = len(self.ran) + len(self.running) + 1
            new_thread = self._Worker(task_item, index, args, kwargs)
            self.running.append(new_thread)
            new_thread.start()
        else:
            # Run the 'thread' inside the main thread. Don't try to catch any
            # exception.
            task_item.run(*args, **kwargs)
            self.ran.append(task_item.name)
            if self.progress:
                self.progress.update(1, ', '.join(t.item.name for t in self.running))

    class _Worker(threading.Thread):
        """One thread to execute one WorkItem."""
        def __init__(self, item, index, args, kwargs):
            threading.Thread.__init__(self, name=item.name or 'Worker')
            logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
            self.item = item
            # 'index' is also read by Annotated.write() for the "NN>" prefix.
            self.index = index
            self.args = args
            self.kwargs = kwargs

        def run(self):
            """Runs in its own thread."""
            logging.debug('_Worker.run(%s)' % self.item.name)
            work_queue = self.kwargs['work_queue']
            try:
                self.item.run(*self.args, **self.kwargs)
            except Exception:
                # Catch exception location.
                logging.info('Caught exception in thread %s' % self.item.name)
                logging.info(str(sys.exc_info()))
                work_queue.exceptions.put(sys.exc_info())
            logging.info('_Worker.run(%s) done' % self.item.name)

            # Wake up the parent flush() loop: this item is done.
            work_queue.ready_cond.acquire()
            try:
                work_queue.ready_cond.notifyAll()
            finally:
                work_queue.ready_cond.release()
def GetEditor(git):
    """Pick the most plausible editor: the VCS-specific override first,
    then $EDITOR, then a platform default."""
    vcs_var = 'GIT_EDITOR' if git else 'SVN_EDITOR'
    editor = os.environ.get(vcs_var) or os.environ.get('EDITOR')
    if editor:
        return editor
    return 'notepad' if sys.platform.startswith('win') else 'vim'
def RunEditor(content, git):
    """Opens up the default editor in the system to get the CL description.

    Returns the edited text, or None when the editor exits with an error.
    The temporary file is always removed.
    """
    file_handle, filename = tempfile.mkstemp(text=True)
    # Make sure CRLF is handled properly by requiring none.
    if '\r' in content:
        # Python 2 print statement; kept as-is.
        print >> sys.stderr, (
            '!! Please remove \\r from your change description !!')
    fileobj = os.fdopen(file_handle, 'w')
    # Still remove \r if present.
    fileobj.write(re.sub('\r?\n', '\n', content))
    fileobj.close()

    try:
        cmd = '%s %s' % (GetEditor(git), filename)
        if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
            # Msysgit requires the usage of 'env' to be present.
            cmd = 'env ' + cmd
        try:
            # shell=True to allow the shell to handle all forms of quotes in
            # $EDITOR.
            subprocess2.check_call(cmd, shell=True)
        except subprocess2.CalledProcessError:
            # Editor exited non-zero: treat as "no description entered".
            return None
        return FileRead(filename)
    finally:
        os.remove(filename)
def UpgradeToHttps(url):
    """Upgrade plain http (and scheme-less) urls to https://.

    Leaves other schemes (ssh://, git://, ...) untouched, and leaves
    http:// urls that carry an explicit port number alone. Fixes invalid
    GAE url.
    """
    if not url:
        return url
    if not re.match(r'[a-z\-]+\://.*', url):
        # Naked urls like "localhost:8080" would otherwise be treated as a
        # relative path by urlparse() (yielding http:///foo); default them
        # to http:// first.
        url = 'http://%s' % url
    parts = list(urlparse.urlparse(url))
    is_plain_http = parts[0] == 'http'
    has_port = re.match(r'^.+?\:\d+$', parts[1])
    if is_plain_http and not has_port:
        parts[0] = 'https'
    return urlparse.urlunparse(parts)
def ParseCodereviewSettingsContent(content):
    """Parse a codereview.settings file into a {key: value} dict.

    Comment lines (starting with '#') and blank lines are skipped, and
    url-valued settings are upgraded to https where applicable.
    """
    meaningful = (line for line in content.splitlines()
                  if not line.strip().startswith("#"))
    try:
        keyvals = dict([part.strip() for part in line.split(':', 1)]
                       for line in meaningful if line)
    except ValueError:
        # A line had no ':' separator, so the 2-element unpack failed.
        raise Error(
            'Failed to process settings, please fix. Content:\n\n%s' % content)
    for url_key in ('CODE_REVIEW_SERVER', 'VIEW_VC'):
        if keyvals.get(url_key):
            keyvals[url_key] = UpgradeToHttps(keyvals[url_key])
    return keyvals
def NumLocalCpus():
    """Return the number of processors on this machine.

    multiprocessing.cpu_count() can raise (e.g. NotImplementedError on
    Python/OSX 10.6); fall back to sysconf in that case.
    """
    try:
        import multiprocessing
        cpu_count = multiprocessing.cpu_count()
    except:  # pylint: disable=W0702
        # Mac OS 10.6 only
        # pylint: disable=E1101
        cpu_count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
    return cpu_count
| |
"""Transport implementation."""
# Copyright (C) 2009 Barry Pederson <bp@barryp.org>
from __future__ import absolute_import, unicode_literals
import errno
import re
import socket
import ssl
from contextlib import contextmanager
from .exceptions import UnexpectedFrame
from .five import items
from .platform import KNOWN_TCP_OPTS, SOL_TCP, pack, unpack
from .utils import get_errno, set_cloexec
# Fall back to a stub exception so the 'except SSLError' clauses below keep
# working on Python builds compiled without SSL support.
try:
    from ssl import SSLError
except ImportError:  # pragma: no cover
    class SSLError(Exception):  # noqa
        """Dummy SSL exception."""
# errno values treated as "temporarily unavailable" rather than fatal.
_UNAVAIL = {errno.EAGAIN, errno.EINTR, errno.ENOENT, errno.EWOULDBLOCK}

# Default AMQP broker port, used when the host string carries no port.
AMQP_PORT = 5672

EMPTY_BUFFER = bytes()

# Maximum signed 32-bit int; reads larger than this are split in two because
# the size argument to sock.recv is signed (see read_frame()).
SIGNED_INT_MAX = 0x7FFFFFFF

# Yes, Advanced Message Queuing Protocol Protocol is redundant
AMQP_PROTOCOL_HEADER = 'AMQP\x00\x00\x09\x01'.encode('latin_1')

# Match things like: [fe80::1]:5432, from RFC 2732
IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?')

# TCP options applied to every socket (where the platform supports them).
DEFAULT_SOCKET_SETTINGS = {
    'TCP_NODELAY': 1,
    'TCP_USER_TIMEOUT': 1000,
    'TCP_KEEPIDLE': 60,
    'TCP_KEEPINTVL': 10,
    'TCP_KEEPCNT': 9,
}
def to_host_port(host, default=AMQP_PORT):
    """Split a "host[:port]" string (bracketed IPv6 literals included)
    into a (host, port) tuple, using `default` when no port is given."""
    ipv6_match = IPV6_LITERAL.match(host)
    if ipv6_match:
        # Bracketed IPv6 literal, e.g. [fe80::1]:5432 (RFC 2732).
        host = ipv6_match.group(1)
        port = int(ipv6_match.group(2)) if ipv6_match.group(2) else default
    elif ':' in host:
        host, port_str = host.rsplit(':', 1)
        port = int(port_str)
    else:
        port = default
    return host, port
class _AbstractTransport(object):
    """Common superclass for TCP and SSL transports.

    Subclasses must implement _read() and _write(); the
    _setup_transport()/_shutdown_transport() hooks are optional.
    """
def __init__(self, host, connect_timeout=None,
             read_timeout=None, write_timeout=None,
             socket_settings=None, raise_on_initial_eintr=True, **kwargs):
    # Not connected until connect() completes successfully.
    self.connected = False
    self.sock = None
    # Presumably controls EINTR handling on the initial read of a frame
    # (see _read()'s `initial` flag) -- confirm against subclasses.
    self.raise_on_initial_eintr = raise_on_initial_eintr
    # Bytes received but not yet consumed by read_frame().
    self._read_buffer = EMPTY_BUFFER
    # host may be "host", "host:port" or a bracketed IPv6 literal.
    self.host, self.port = to_host_port(host)
    self.connect_timeout = connect_timeout
    self.read_timeout = read_timeout
    self.write_timeout = write_timeout
    # Optional {TCP option: value} overrides applied in _init_socket().
    self.socket_settings = socket_settings
def connect(self):
    """Open the TCP/SSL connection and send the protocol header.

    No-op when already connected. On failure, any half-open socket is
    closed before the error is re-raised.
    """
    try:
        # are we already connected?
        if self.connected:
            return
        self._connect(self.host, self.port, self.connect_timeout)
        self._init_socket(
            self.socket_settings, self.read_timeout, self.write_timeout,
        )
        # we've sent the banner; signal connect
        # EINTR, EAGAIN, EWOULDBLOCK would signal that the banner
        # has _not_ been sent
        self.connected = True
    except (OSError, IOError, SSLError):
        # if not fully connected, close socket, and reraise error
        if self.sock and not self.connected:
            self.sock.close()
            self.sock = None
        raise
@contextmanager
def having_timeout(self, timeout):
    """Context manager applying `timeout` to the socket for the duration
    of the block and restoring the previous timeout afterwards.

    Timeout-ish SSL errors and EWOULDBLOCK are normalized to
    socket.timeout. A timeout of None leaves the socket untouched.
    """
    if timeout is None:
        yield self.sock
    else:
        sock = self.sock
        prev = sock.gettimeout()
        if prev != timeout:
            sock.settimeout(timeout)
        try:
            yield self.sock
        except SSLError as exc:
            if 'timed out' in str(exc):
                # http://bugs.python.org/issue10272
                raise socket.timeout()
            elif 'The operation did not complete' in str(exc):
                # Non-blocking SSL sockets can throw SSLError
                raise socket.timeout()
            raise
        except socket.error as exc:
            if get_errno(exc) == errno.EWOULDBLOCK:
                raise socket.timeout()
            raise
        finally:
            # Restore the caller's timeout even when an exception escaped.
            if timeout != prev:
                sock.settimeout(prev)
def _connect(self, host, port, timeout):
    """Resolve `host` one address family at a time (IPv4 first) and
    connect self.sock to the first address that accepts the connection.

    Raises the last socket.error when every candidate fails.
    """
    e = None
    # Below we are trying to avoid additional DNS requests for AAAA if A
    # succeeds. This helps a lot in case when a hostname has an IPv4 entry
    # in /etc/hosts but not IPv6. Without the (arguably somewhat twisted)
    # logic below, getaddrinfo would attempt to resolve the hostname for
    # both IP versions, which would make the resolver talk to configured
    # DNS servers. If those servers are for some reason not available
    # during resolution attempt (either because of system misconfiguration,
    # or network connectivity problem), resolution process locks the
    # _connect call for extended time.
    addr_types = (socket.AF_INET, socket.AF_INET6)
    addr_types_num = len(addr_types)
    for n, family in enumerate(addr_types):
        # first, resolve the address for a single address family
        try:
            entries = socket.getaddrinfo(
                host, port, family, socket.SOCK_STREAM, SOL_TCP)
            entries_num = len(entries)
        except socket.gaierror:
            # we may have depleted all our options
            if n + 1 >= addr_types_num:
                # if getaddrinfo succeeded before for another address
                # family, reraise the previous socket.error since it's more
                # relevant to users
                raise (e
                       if e is not None
                       else socket.error(
                           "failed to resolve broker hostname"))
            continue  # pragma: no cover
        # now that we have address(es) for the hostname, connect to broker
        for i, res in enumerate(entries):
            af, socktype, proto, _, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                try:
                    set_cloexec(self.sock, True)
                except NotImplementedError:
                    pass
                self.sock.settimeout(timeout)
                self.sock.connect(sa)
            except socket.error as ex:
                # Remember the failure so it can be re-raised above if the
                # other family fails to resolve entirely.
                e = ex
                if self.sock is not None:
                    self.sock.close()
                    self.sock = None
                # we may have depleted all our options
                if i + 1 >= entries_num and n + 1 >= addr_types_num:
                    raise
            else:
                # hurray, we established connection
                return
def _init_socket(self, socket_settings, read_timeout, write_timeout):
    """Configure the freshly connected socket (keepalive, TCP options,
    send/receive timeouts) and send the AMQP protocol header."""
    self.sock.settimeout(None)  # set socket back to blocking mode
    self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    self._set_socket_options(socket_settings)

    # set socket timeouts
    for timeout, interval in ((socket.SO_SNDTIMEO, write_timeout),
                              (socket.SO_RCVTIMEO, read_timeout)):
        if interval is not None:
            # SO_SNDTIMEO/SO_RCVTIMEO take a packed timeval (sec, usec).
            sec = int(interval)
            usec = int((interval - sec) * 1000000)
            self.sock.setsockopt(
                socket.SOL_SOCKET, timeout,
                pack('ll', sec, usec),
            )
    # Subclass hook (e.g. SSL wrapping) before any bytes go out.
    self._setup_transport()

    self._write(AMQP_PROTOCOL_HEADER)
def _get_tcp_socket_defaults(self, sock):
    """Build the initial {option number: value} map of TCP options: our
    defaults where we have them, otherwise the socket's current values."""
    defaults = {}
    for name in KNOWN_TCP_OPTS:
        if name == 'TCP_USER_TIMEOUT':
            try:
                from socket import TCP_USER_TIMEOUT as number
            except ImportError:
                # should be in Python 3.6+ on Linux; hardcoded fallback.
                number = 18
        elif hasattr(socket, name):
            number = getattr(socket, name)
        else:
            number = None
        if not number:
            continue
        if name in DEFAULT_SOCKET_SETTINGS:
            defaults[number] = DEFAULT_SOCKET_SETTINGS[name]
        elif hasattr(socket, name):
            defaults[number] = sock.getsockopt(
                SOL_TCP, getattr(socket, name))
    return defaults
def _set_socket_options(self, socket_settings):
    """Apply TCP options, merging caller overrides over the defaults."""
    tcp_opts = self._get_tcp_socket_defaults(self.sock)
    if socket_settings:
        tcp_opts.update(socket_settings)
    # ``items`` is the module's py2/py3 dict-iteration compat helper.
    for opt, val in items(tcp_opts):
        self.sock.setsockopt(SOL_TCP, opt, val)
def _read(self, n, initial=False):
"""Read exactly n bytes from the peer."""
raise NotImplementedError('Must be overriden in subclass')
def _setup_transport(self):
    """Do any additional initialization of the class.

    Hook for subclasses; the base implementation is a no-op.
    """
    pass
def _shutdown_transport(self):
    """Do any preliminary work in shutting down the connection.

    Hook for subclasses; the base implementation is a no-op.
    """
    pass
def _write(self, s):
"""Completely write a string to the peer."""
raise NotImplementedError('Must be overriden in subclass')
def close(self):
    """Shut down and close the underlying socket, if one is open.

    Safe to call repeatedly; a second call is a no-op apart from
    (re)clearing ``connected``.
    """
    if self.sock is not None:
        self._shutdown_transport()
        # Call shutdown first to make sure that pending messages
        # reach the AMQP broker if the program exits after
        # calling this method.
        self.sock.shutdown(socket.SHUT_RDWR)
        self.sock.close()
        self.sock = None
    self.connected = False
def read_frame(self, unpack=unpack):
    """Read one AMQP frame and return ``(frame_type, channel, payload)``.

    On a timeout, whatever was already read is pushed back onto the
    read buffer so a retry can resume mid-frame.

    :raises UnexpectedFrame: when the frame-end octet is not 0xCE.
    """
    read = self._read
    read_frame_buffer = EMPTY_BUFFER
    try:
        # 7-byte frame header: type (1), channel (2), payload size (4).
        frame_header = read(7, True)
        read_frame_buffer += frame_header
        frame_type, channel, size = unpack('>BHI', frame_header)
        # >I is an unsigned int, but the argument to sock.recv is signed,
        # so we know the size can be at most 2 * SIGNED_INT_MAX
        if size > SIGNED_INT_MAX:
            part1 = read(SIGNED_INT_MAX)
            part2 = read(size - SIGNED_INT_MAX)
            payload = b''.join([part1, part2])
        else:
            payload = read(size)
        read_frame_buffer += payload
        # Trailing frame-end octet.
        ch = ord(read(1))
    except socket.timeout:
        # Preserve the partial frame for a later retry.
        self._read_buffer = read_frame_buffer + self._read_buffer
        raise
    except (OSError, IOError, SSLError, socket.error) as exc:
        # Don't disconnect for ssl read time outs
        # http://bugs.python.org/issue10272
        if isinstance(exc, SSLError) and 'timed out' in str(exc):
            raise socket.timeout()
        if get_errno(exc) not in _UNAVAIL:
            self.connected = False
        raise
    if ch == 206:  # '\xce'
        return frame_type, channel, payload
    else:
        raise UnexpectedFrame(
            'Received {0:#04x} while expecting 0xce'.format(ch))
def write(self, s):
    """Write *s* fully, marking the connection dead on hard errors.

    Timeouts propagate unchanged; transient errno values listed in
    ``_UNAVAIL`` leave ``connected`` untouched.
    """
    try:
        self._write(s)
    except socket.timeout:
        raise
    except (OSError, IOError, socket.error) as exc:
        if get_errno(exc) not in _UNAVAIL:
            self.connected = False
        raise
class SSLTransport(_AbstractTransport):
    """Transport that works over SSL."""

    def __init__(self, host, connect_timeout=None, ssl=None, **kwargs):
        # ``ssl`` is either a dict of SSL options or a plain truthy flag;
        # only a dict contributes options to the socket wrapping.
        self.sslopts = ssl if isinstance(ssl, dict) else {}
        self._read_buffer = EMPTY_BUFFER
        super(SSLTransport, self).__init__(
            host, connect_timeout=connect_timeout, **kwargs)

    def _setup_transport(self):
        """Wrap the socket in an SSL object."""
        self.sock = self._wrap_socket(self.sock, **self.sslopts)
        self.sock.do_handshake()
        # SSLSocket.read() is the fast-path reader used by _read().
        self._quick_recv = self.sock.read

    def _wrap_socket(self, sock, context=None, **sslopts):
        # A caller-supplied ``context`` option dict takes precedence over
        # plain wrap_socket()-style keyword options.
        if context:
            return self._wrap_context(sock, sslopts, **context)
        return self._wrap_socket_sni(sock, **sslopts)

    def _wrap_context(self, sock, sslopts, check_hostname=None, **ctx_options):
        """Wrap *sock* with an ``ssl.SSLContext`` built from *ctx_options*."""
        ctx = ssl.create_default_context(**ctx_options)
        ctx.check_hostname = check_hostname
        return ctx.wrap_socket(sock, **sslopts)

    def _wrap_socket_sni(self, sock, keyfile=None, certfile=None,
                         server_side=False, cert_reqs=ssl.CERT_NONE,
                         ca_certs=None, do_handshake_on_connect=True,
                         suppress_ragged_eofs=True, server_hostname=None,
                         ciphers=None, ssl_version=None):
        """Socket wrap with SNI headers.

        Default `ssl.wrap_socket` method augmented with support for
        setting the server_hostname field required for SNI hostname header
        """
        opts = dict(sock=sock, keyfile=keyfile, certfile=certfile,
                    server_side=server_side, cert_reqs=cert_reqs,
                    ca_certs=ca_certs,
                    do_handshake_on_connect=do_handshake_on_connect,
                    suppress_ragged_eofs=suppress_ragged_eofs,
                    ciphers=ciphers)
        # Setup the right SSL version; default to optimal versions across
        # ssl implementations
        if ssl_version is not None:
            opts['ssl_version'] = ssl_version
        else:
            # older versions of python 2.7 and python 2.6 do not have the
            # ssl.PROTOCOL_TLS defined the equivalent is ssl.PROTOCOL_SSLv23
            # we default to PROTOCOL_TLS and fallback to PROTOCOL_SSLv23
            if hasattr(ssl, 'PROTOCOL_TLS'):
                opts['ssl_version'] = ssl.PROTOCOL_TLS
            else:
                opts['ssl_version'] = ssl.PROTOCOL_SSLv23
        sock = ssl.wrap_socket(**opts)
        # Set SNI headers if supported
        # NOTE(review): when SNI is requested this wraps the already
        # SSL-wrapped socket a second time with a fresh SSLContext —
        # confirm this double wrap is the intended behaviour before
        # relying on the certificate options set here.
        if (server_hostname is not None) and (
                hasattr(ssl, 'HAS_SNI') and ssl.HAS_SNI) and (
                hasattr(ssl, 'SSLContext')):
            context = ssl.SSLContext(opts['ssl_version'])
            context.verify_mode = cert_reqs
            context.check_hostname = True
            context.load_cert_chain(certfile, keyfile)
            sock = context.wrap_socket(sock, server_hostname=server_hostname)
        return sock

    def _shutdown_transport(self):
        """Unwrap a Python 2.6 SSL socket, so we can call shutdown()."""
        if self.sock is not None:
            try:
                unwrap = self.sock.unwrap
            except AttributeError:
                return
            self.sock = unwrap()

    def _read(self, n, initial=False,
              _errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR)):
        """Read exactly *n* bytes from the SSL socket."""
        # According to SSL_read(3), it can at most return 16kb of data.
        # Thus, we use an internal read buffer like TCPTransport._read
        # to get the exact number of bytes wanted.
        recv = self._quick_recv
        rbuf = self._read_buffer
        try:
            while len(rbuf) < n:
                try:
                    s = recv(n - len(rbuf))  # see note above
                except socket.error as exc:
                    # ssl.sock.read may cause a SSLerror without errno
                    # http://bugs.python.org/issue10272
                    if isinstance(exc, SSLError) and 'timed out' in str(exc):
                        raise socket.timeout()
                    # ssl.sock.read may cause ENOENT if the
                    # operation couldn't be performed (Issue celery#1414).
                    if exc.errno in _errnos:
                        if initial and self.raise_on_initial_eintr:
                            raise socket.timeout()
                        continue
                    raise
                if not s:
                    raise IOError('Server unexpectedly closed connection')
                rbuf += s
        except:  # noqa
            # Preserve partial data so a retry can resume mid-read.
            self._read_buffer = rbuf
            raise
        result, self._read_buffer = rbuf[:n], rbuf[n:]
        return result

    def _write(self, s):
        """Write a string out to the SSL socket fully."""
        write = self.sock.write
        while s:
            try:
                n = write(s)
            except (ValueError, AttributeError):
                # AG: sock._sslobj might become null in the meantime if the
                # remote connection has hung up.
                # In python 3.2, an AttributeError is raised because the SSL
                # module tries to access self._sslobj.write (w/ self._sslobj ==
                # None)
                # In python 3.4, a ValueError is raised is self._sslobj is
                # None. So much for portability... :/
                n = 0
            if not n:
                raise IOError('Socket closed')
            s = s[n:]
class TCPTransport(_AbstractTransport):
    """Transport that deals directly with TCP socket."""

    def _setup_transport(self):
        # Setup to _write() directly to the socket, and
        # do our own buffered reads.
        self._write = self.sock.sendall
        self._read_buffer = EMPTY_BUFFER
        self._quick_recv = self.sock.recv

    def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)):
        """Read exactly n bytes from the socket."""
        recv = self._quick_recv
        rbuf = self._read_buffer
        try:
            while len(rbuf) < n:
                try:
                    s = recv(n - len(rbuf))
                except socket.error as exc:
                    # Transient errors are retried; on the very first
                    # read they may surface as a timeout instead.
                    if exc.errno in _errnos:
                        if initial and self.raise_on_initial_eintr:
                            raise socket.timeout()
                        continue
                    raise
                if not s:
                    raise IOError('Server unexpectedly closed connection')
                rbuf += s
        except:  # noqa
            # Keep whatever was read so a retry can resume mid-frame.
            self._read_buffer = rbuf
            raise
        result, self._read_buffer = rbuf[:n], rbuf[n:]
        return result
def Transport(host, connect_timeout=None, ssl=False, **kwargs):
    """Create transport.

    Given a few parameters from the Connection constructor,
    select and create a subclass of _AbstractTransport: an
    :class:`SSLTransport` when *ssl* is truthy, otherwise a
    :class:`TCPTransport`.
    """
    if ssl:
        transport_cls = SSLTransport
    else:
        transport_cls = TCPTransport
    return transport_cls(host, connect_timeout=connect_timeout,
                         ssl=ssl, **kwargs)
| |
# -*- coding: utf-8 -*-
"""
===============================================================================
module __OrdinaryPercolation__: Ordinary Percolation Algorithm
===============================================================================
"""
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
from OpenPNM.Algorithms import GenericAlgorithm
from OpenPNM.Base import logging
logger = logging.getLogger(__name__)
class OrdinaryPercolation(GenericAlgorithm):
    r"""
    Simulates a capillary drainage experiment by applying a list of increasing
    capillary pressures.

    Parameters
    ----------
    network : OpenPNM Network Object
        The network upon which the simulation will be run

    name : string, optional
        The name to assign to the Algorithm Object
    """

    def __init__(self, network, name=None, **kwargs):
        super().__init__(network=network, name=name)
        # Any extra keyword arguments are forwarded straight to setup().
        if len(kwargs.keys()) > 0:
            self.setup(**kwargs)

    def setup(self,
              invading_phase,
              defending_phase=None,
              t_entry='throat.capillary_pressure',
              **kwargs):
        r"""
        Set up the algorithm for a given pair of phases.

        Parameters
        ----------
        invading_phase : OpenPNM Phase Object
            The invading phase to be injected into the Network

        defending_phase : OpenPNM Phase Object, optional
            The phase displaced by the invading phase.

        t_entry : string
            Dictionary key on the invading phase holding the throat entry
            pressures (default ``'throat.capillary_pressure'``).

        Notes
        -----
        The 'inlet' pores are initially filled with invading fluid to start the
        simulation. To avoid the capillary pressure curve showing a non-zero
        starting saturation at low pressures, it is necessary to apply boundary
        pores that have zero-volume, and set these as the inlets.
        """
        self['throat.entry_pressure'] = invading_phase[t_entry]
        # inf marks "not yet invaded" for pressures and saturations.
        self['pore.inv_Pc'] = sp.inf
        self['throat.inv_Pc'] = sp.inf
        self['pore.inv_sat'] = sp.inf
        self['throat.inv_sat'] = sp.inf
        self._inv_phase = invading_phase
        self._def_phase = defending_phase
        self._trapping = False

    def set_inlets(self, pores):
        r"""
        Specify inlet locations

        Parameters
        ----------
        pores : array_like
            The injection points from which the invading phase accesses the
            Network. If no inlets are specified then the algorithm assumes
            no access limitations apply to the invading phase, which is
            equivalent to performing a standard bond ordinary percolation.

        Notes
        -----
        The 'inlet' pores are initially filled with invading fluid to start the
        simulation. To avoid the capillary pressure curve showing a non-zero
        starting saturation at low pressures, it is necessary to apply boundary
        pores that have zero-volume, and set these as the inlets.
        """
        Ps = sp.array(pores)
        if sp.size(Ps) > 0:
            # Accept boolean masks as well as index arrays.
            if Ps.dtype == bool:
                Ps = self._net.Ps[Ps]
            self['pore.inlets'] = False
            self['pore.inlets'][Ps] = True

    def set_outlets(self, pores, defending_phase=None):
        r"""
        Specify outlet locations

        Parameters
        ----------
        pores : array_like
            The pores through which the defending phase exits the Network.

        defending_phase : OpenPNM Phase Object
            The Phase object defining the defending phase. The defending Phase
            may be specified during the ``setup`` step, or through this method.
        """
        if defending_phase is not None:
            self._def_phase = defending_phase
        # Declaring outlets implies trapping will be evaluated after run.
        self._trapping = True
        Ps = sp.array(pores)
        if sp.size(Ps) > 0:
            if Ps.dtype == bool:
                Ps = self._net.Ps[Ps]
            self['pore.outlets'] = False
            self['pore.outlets'][Ps] = True

    def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):
        r"""
        Run the percolation simulation over a list of capillary pressures.

        Parameters
        ----------
        npts : int (default = 25)
            The number of pressure points to apply. The list of pressures
            is logarithmically spaced between the lowest and highest throat
            entry pressures in the network.

        inv_points : array_like, optional
            A list of specific pressure point(s) to apply.

        access_limited : bool
            If True (default) only clusters connected to the inlet pores
            can be invaded.
        """
        if 'inlets' in kwargs.keys():
            print('Inlets recieved, passing to set_inlets')
            self.set_inlets(pores=kwargs['inlets'])
        if 'outlets' in kwargs.keys():
            print('Outlets recieved, passing to set_outlets')
            self.set_outlets(pores=kwargs['outlets'])
        self._AL = access_limited
        if inv_points is None:
            logger.info('Generating list of invasion pressures')
            min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
            max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
            inv_points = sp.logspace(sp.log10(min_p),
                                     sp.log10(max_p),
                                     npts)
        self._npts = sp.size(inv_points)
        # Execute calculation
        self._do_outer_iteration_stage(inv_points)

    def _do_outer_iteration_stage(self, inv_points):
        r"""Apply each capillary pressure in *inv_points* in turn."""
        # Generate curve from points
        for inv_val in inv_points:
            # Apply one applied pressure and determine invaded pores
            logger.info('Applying capillary pressure: ' + str(inv_val))
            self._do_one_inner_iteration(inv_val)
        # Find invasion sequence values (to correspond with IP algorithm)
        self['pore.inv_seq'] = sp.searchsorted(sp.unique(self['pore.inv_Pc']),
                                               self['pore.inv_Pc'])
        self['throat.inv_seq'] = sp.searchsorted(sp.unique(self['throat.inv_Pc']),
                                                 self['throat.inv_Pc'])
        if self._trapping:
            self.evaluate_trapping(self['pore.outlets'])

    def _do_one_inner_iteration(self, inv_val):
        r"""
        Determine which throats are invaded at a given applied capillary
        pressure.
        """
        # Generate a tlist containing boolean values for throat state
        Tinvaded = self['throat.entry_pressure'] <= inv_val
        # Find all pores that can be invaded at specified pressure
        [pclusters, tclusters] = self._net.find_clusters2(mask=Tinvaded,
                                                          t_labels=True)
        if self._AL:
            # Identify clusters connected to invasion sites
            inv_clusters = sp.unique(pclusters[self['pore.inlets']])
            inv_clusters = inv_clusters[inv_clusters >= 0]
        else:
            # All clusters are invasion sites
            inv_clusters = pclusters
        # Find pores on the invading clusters
        pmask = np.in1d(pclusters, inv_clusters)
        # Store current applied pressure in newly invaded pores
        pinds = (self['pore.inv_Pc'] == sp.inf) * (pmask)
        self['pore.inv_Pc'][pinds] = inv_val
        # Find throats on the invading clusters
        tmask = np.in1d(tclusters, inv_clusters)
        # Store current applied pressure in newly invaded throats
        tinds = (self['throat.inv_Pc'] == sp.inf) * (tmask)
        self['throat.inv_Pc'][tinds] = inv_val
        # Store total network saturation
        tsat = sp.sum(self._net['throat.volume'][self['throat.inv_Pc'] <= inv_val])
        psat = sp.sum(self._net['pore.volume'][self['pore.inv_Pc'] <= inv_val])
        total = sp.sum(self._net['throat.volume']) + sp.sum(self._net['pore.volume'])
        self['pore.inv_sat'][pinds] = (tsat + psat)/total
        self['throat.inv_sat'][tinds] = (tsat + psat)/total

    def evaluate_trapping(self, p_outlets):
        r"""
        Finds trapped pores and throats after a full ordinary
        percolation simulation has been run.

        Parameters
        ----------
        p_outlets : array_like
            A list of pores that define the wetting phase outlets.
            Disconnection from these outlets results in trapping.

        Returns
        -------
        It creates arrays called ``pore.trapped`` and ``throat.trapped``, but
        also adjusts the ``pore.inv_Pc`` and ``throat.inv_Pc`` arrays to set
        trapped locations to have infinite invasion pressure.
        """
        self['pore.trapped'] = sp.zeros([self.Np, ], dtype=float)
        self['throat.trapped'] = sp.zeros([self.Nt, ], dtype=float)
        try:
            # Get points used in OP
            inv_points = sp.unique(self['pore.inv_Pc'])
        # NOTE(review): bare except — masks any error, not only "not run".
        except:
            raise Exception('Orindary percolation has not been run!')
        tind = self._net.throats()
        conns = self._net.find_connected_pores(tind)
        for inv_val in inv_points[0:-1]:
            # Find clusters of defender pores
            Pinvaded = self['pore.inv_Pc'] <= inv_val
            Cstate = sp.sum(Pinvaded[conns], axis=1)
            Tinvaded = self['throat.inv_Pc'] <= inv_val
            # 0 = all open, 1=1 pore filled,
            # 2=2 pores filled 3=2 pores + 1 throat filled
            Cstate = Cstate + Tinvaded
            clusters = self._net.find_clusters(Cstate == 0)
            # Clean up clusters (invaded = -1, defended >=0)
            clusters = clusters * (~Pinvaded) - (Pinvaded)
            # Identify clusters connected to outlet sites
            out_clusters = sp.unique(clusters[p_outlets])
            trapped_pores = ~sp.in1d(clusters, out_clusters)
            trapped_pores[Pinvaded] = False
            if sum(trapped_pores) > 0:
                inds = (self['pore.trapped'] == 0) * trapped_pores
                self['pore.trapped'][inds] = inv_val
                trapped_throats = self._net.find_neighbor_throats(trapped_pores)
                trapped_throat_array = np.asarray([False] * len(Cstate))
                trapped_throat_array[trapped_throats] = True
                inds = (self['throat.trapped'] == 0) * trapped_throat_array
                self['throat.trapped'][inds] = inv_val
                inds = (self['throat.trapped'] == 0) * (Cstate == 2)
                self['throat.trapped'][inds] = inv_val
        # Trapped locations get infinite invasion pressure.
        self['pore.trapped'][self['pore.trapped'] > 0] = sp.inf
        self['throat.trapped'][self['throat.trapped'] > 0] = sp.inf
        self['pore.inv_Pc'][self['pore.trapped'] > 0] = sp.inf
        self['throat.inv_Pc'][self['throat.trapped'] > 0] = sp.inf

    def evaluate_late_pore_filling(self, Pc, Swp_init=0.75, eta=3.0,
                                   wetting_phase=False):
        r"""
        Compute the volume fraction of the phase in each pore given an initial
        wetting phase fraction (Swp_init) and a growth exponent (eta)
        returns the fraction of the pore volume occupied by wetting or
        non-wetting phase.
        Assumes Non-wetting phase displaces wetting phase
        """
        Swp = Swp_init*(self['pore.inv_Pc']/Pc)**eta
        Swp[self['pore.inv_Pc'] > Pc] = 1.0
        Snwp = 1-Swp
        if wetting_phase:
            return Swp
        else:
            return Snwp

    def return_results(self, Pc=0, seq=None, sat=None, occupancy='occupancy'):
        r"""
        Updates the occupancy status of invading and defending phases
        as determined by the OP algorithm.

        Exactly one of ``sat``, ``seq`` or ``Pc`` selects the threshold;
        ``sat`` takes precedence over ``seq``, which takes precedence
        over ``Pc``.
        """
        p_inv = self['pore.inv_Pc']
        self._inv_phase['pore.inv_Pc'] = p_inv
        t_inv = self['throat.inv_Pc']
        self._inv_phase['throat.inv_Pc'] = t_inv
        # Apply invasion sequence values (to correspond with IP algorithm)
        p_seq = self['pore.inv_seq']
        self._inv_phase['pore.inv_seq'] = p_seq
        t_seq = self['throat.inv_seq']
        self._inv_phase['throat.inv_seq'] = t_seq
        # Apply saturation to pores and throats
        self._inv_phase['pore.inv_sat'] = self['pore.inv_sat']
        self._inv_phase['throat.inv_sat'] = self['throat.inv_sat']
        if sat is not None:
            p_inv = self['pore.inv_sat'] <= sat
            t_inv = self['throat.inv_sat'] <= sat
            # Apply occupancy to invading phase
            temp = sp.array(p_inv, dtype=sp.float_, ndmin=1)
            self._inv_phase['pore.' + occupancy] = temp
            temp = sp.array(t_inv, dtype=sp.float_, ndmin=1)
            self._inv_phase['throat.' + occupancy] = temp
            # Apply occupancy to defending phase
            if self._def_phase is not None:
                temp = sp.array(~p_inv, dtype=sp.float_, ndmin=1)
                self._def_phase['pore.' + occupancy] = temp
                temp = sp.array(~t_inv, dtype=sp.float_, ndmin=1)
                self._def_phase['throat.' + occupancy] = temp
        elif seq is not None:
            p_seq = self['pore.inv_seq'] <= seq
            t_seq = self['throat.inv_seq'] <= seq
            # Apply occupancy to invading phase
            temp = sp.array(p_seq, dtype=sp.float_, ndmin=1)
            self._inv_phase['pore.' + occupancy] = temp
            temp = sp.array(t_seq, dtype=sp.float_, ndmin=1)
            self._inv_phase['throat.' + occupancy] = temp
            # Apply occupancy to defending phase
            if self._def_phase is not None:
                temp = sp.array(~p_seq, dtype=sp.float_, ndmin=1)
                self._def_phase['pore.' + occupancy] = temp
                temp = sp.array(~t_seq, dtype=sp.float_, ndmin=1)
                self._def_phase['throat.' + occupancy] = temp
        else:
            p_inv = self['pore.inv_Pc'] <= Pc
            t_inv = self['throat.inv_Pc'] <= Pc
            # Apply occupancy to invading phase
            temp = sp.array(p_inv, dtype=sp.float_, ndmin=1)
            self._inv_phase['pore.' + occupancy] = temp
            temp = sp.array(t_inv, dtype=sp.float_, ndmin=1)
            self._inv_phase['throat.' + occupancy] = temp
            # Apply occupancy to defending phase
            if self._def_phase is not None:
                temp = sp.array(~p_inv, dtype=sp.float_, ndmin=1)
                self._def_phase['pore.' + occupancy] = temp
                temp = sp.array(~t_inv, dtype=sp.float_, ndmin=1)
                self._def_phase['throat.' + occupancy] = temp

    def plot_drainage_curve(self, pore_volume='volume', throat_volume='volume',
                            pore_label='all', throat_label='all'):
        r"""
        Plot drainage capillary pressure curve
        """
        try:
            PcPoints = sp.unique(self['pore.inv_Pc'])
        # NOTE(review): bare except — masks any error, not only "not run".
        except:
            raise Exception('Cannot print drainage curve: ordinary percolation \
simulation has not been run')
        pores = self._net.pores(labels=pore_label)
        throats = self._net.throats(labels=throat_label)
        Snwp_t = sp.zeros_like(PcPoints)
        Snwp_p = sp.zeros_like(PcPoints)
        Snwp_all = sp.zeros_like(PcPoints)
        Pvol = self._net['pore.' + pore_volume]
        Tvol = self._net['throat.' + throat_volume]
        Pvol_tot = sp.sum(Pvol)
        Tvol_tot = sp.sum(Tvol)
        vol_tot = Pvol_tot + Tvol_tot
        for i in range(0, sp.size(PcPoints)):
            Pc = PcPoints[i]
            Snwp_p[i] = sp.sum(Pvol[self['pore.inv_Pc'][pores] <= Pc]) / vol_tot
            Snwp_t[i] = sp.sum(Tvol[self['throat.inv_Pc'][throats] <= Pc]) / vol_tot
            Snwp_all[i] = (sp.sum(Tvol[self['throat.inv_Pc'][throats] <= Pc]) +
                           sp.sum(Pvol[self['pore.inv_Pc'][pores] <= Pc])) / vol_tot
        # For a wetting invading phase, plot wetting saturation vs -Pc.
        if sp.mean(self._inv_phase['pore.contact_angle']) < 90:
            Snwp_p = 1 - Snwp_p
            Snwp_t = 1 - Snwp_t
            Snwp_all = 1 - Snwp_all
            PcPoints *= -1
        fig = plt.figure()
        plt.plot(PcPoints, Snwp_all, 'g.-')
        plt.plot(PcPoints, Snwp_p, 'r.-')
        plt.plot(PcPoints, Snwp_t, 'b.-')
        r"""
        TODO: Add legend to distinguish the pore and throat curves
        """
        return fig

    def plot_primary_drainage_curve(self, pore_volume='volume',
                                    throat_volume='volume', pore_label='all',
                                    throat_label='all'):
        r"""
        Plot the primary drainage curve as the capillary pressure on ordinate
        and total saturation of the wetting phase on the abscissa.
        This is the preferred style in the petroleum engineering
        """
        try:
            PcPoints = sp.unique(self['pore.inv_Pc'])
        # NOTE(review): bare except — masks any error, not only "not run".
        except:
            raise Exception('Cannot print drainage curve: ordinary percolation \
simulation has not been run')
        pores = self._net.pores(labels=pore_label)
        throats = self._net.throats(labels=throat_label)
        p_inv = self['pore.inv_Pc']
        t_inv = self['throat.inv_Pc']
        Snwp_t = sp.zeros_like(PcPoints)
        Snwp_p = sp.zeros_like(PcPoints)
        Snwp_all = sp.zeros_like(PcPoints)
        Swp_all = sp.zeros_like(PcPoints)
        Pvol = self._net['pore.' + pore_volume]
        Tvol = self._net['throat.' + throat_volume]
        Pvol_tot = sp.sum(Pvol)
        Tvol_tot = sp.sum(Tvol)
        for i in range(0, sp.size(PcPoints)):
            Pc = PcPoints[i]
            Snwp_p[i] = sp.sum(Pvol[p_inv[pores] <= Pc]) / Pvol_tot
            Snwp_t[i] = sp.sum(Tvol[t_inv[throats] <= Pc]) / Tvol_tot
            Snwp_all[i] = (sp.sum(Tvol[t_inv[throats] <= Pc]) +
                           sp.sum(Pvol[p_inv[pores] <= Pc])) / \
                          (Tvol_tot + Pvol_tot)
            Swp_all[i] = 1 - Snwp_all[i]
        fig = plt.figure()
        plt.plot(Swp_all, PcPoints, 'k.-')
        plt.xlim(xmin=0)
        plt.xlabel('Saturation of wetting phase')
        plt.ylabel('Capillary Pressure [Pa]')
        plt.title('Primay Drainage Curve')
        plt.grid(True)
        return fig
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utils: Various utilities methods.
"""
__author__ = "Andrea Ballatore"
__copyright__ = "Copyright 2014"
__license__ = ""
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__date__ = "December 2014"
__status__ = "Development"
import logging
import math
import re
def _init_log(module_name):
assert module_name
return logging.getLogger(module_name)
# Module-level logger shared by the helpers in this module.
log = _init_log("utils")
def _json_pretty_print( jsonObj ):
s = json.dumps(jsonObj, indent=3, sort_keys=True)
return s
def _to_unicode(s):
    """Best-effort conversion of a byte string to unicode.

    NOTE(review): Python-2-only — relies on the ``unicode`` builtin,
    which does not exist on Python 3. On decode failure the original
    string is returned unchanged.
    """
    if (type(s) is str):
        try:
            return unicode(s)
        #return s
        except UnicodeDecodeError:
            #print "UnicodeDecodeError on string: "+s
            return s
    else: return s
def _read_file( filePath ):
""" Loads file into a string """
with open (filePath, "r") as f:
res = f.read()#.replace('\n', '')
return res
return None
def _is_str(*objs):
    """ Checks if objs are strings

    NOTE(review): Python-2-oriented — references the ``unicode``
    builtin, though short-circuiting means plain ``str`` inputs are
    still handled on Python 3.
    """
    for i in range(len(objs)):
        b = (type(objs[i]) is str) or (type(objs[i]) is unicode)
        if not b: return False
    return True
def _is_nan(*objs):
""" Checks if objs are not-a-number """
for i in range(len(objs)):
b = math.isnan(objs[i])
if not b: return False
return True
def _is_number(*objs):
    """ Checks if objs are numbers

    NOTE(review): Python-2-only — references the ``long`` builtin,
    which raises NameError on Python 3.
    """
    for i in range(len(objs)):
        b = isinstance(objs[i], (int, long, float, complex))
        if not b: return False
    return True
def _write_str_to_file( s, fn ):
    """Write string *s* to file path *fn*, logging the character count."""
    assert _is_str(s,fn)
    with open(fn, "w") as text_file: text_file.write(s)
    log.info(str(len(s))+" chars written in "+fn)
def _wrap_cdata_text( s ):
ss = "<![CDATA[\n" + s + "\n]]>"
return ss
def _read_str_from_file( fn ):
content = False
with open(fn) as f:
content = f.readlines()
return "".join(content)
def _cut_str( s, maxchar ):
    """Truncate *s* to *maxchar* characters, appending '...' when cut.

    ``None`` is passed through unchanged.
    """
    if s is None:
        return s
    assert _is_str(s)
    return s[:maxchar] + "..." if len(s) > maxchar else s
def _get_ellipse_coords( x, y, a, b, angle=0.0, k=2):
""" Draws an ellipse using (360*k + 1) discrete points; based on pseudo code
given at http://en.wikipedia.org/wiki/Ellipse
k = 1 means 361 points (degree by degree)
a = major axis distance,
b = minor axis distance,
x = offset along the x-axis
y = offset along the y-axis
angle = clockwise rotation [in degrees] of the ellipse;
* angle=0 : the ellipse is aligned with the positive x-axis
* angle=30 : rotated 30 degrees clockwise from positive x-axis
"""
pts = np.zeros((360*k+1, 2))
beta = -angle * np.pi/180.0
sin_beta = np.sin(beta)
cos_beta = np.cos(beta)
alpha = np.radians(np.r_[0.:360.:1j*(360*k+1)])
sin_alpha = np.sin(alpha)
cos_alpha = np.cos(alpha)
pts[:, 0] = x + (a * cos_alpha * cos_beta - b * sin_alpha * sin_beta)
pts[:, 1] = y + (a * cos_alpha * sin_beta + b * sin_alpha * cos_beta)
return pts
def _sort_dict_by_value(d, asc=True):
s = sorted(d.items(), key=itemgetter(1), reverse=not asc)
return s
def _valid_XML_char_ordinal(i):
return ( # conditions ordered by presumed frequency
0x20 <= i <= 0xD7FF
or i in (0x9, 0xA, 0xD)
or 0xE000 <= i <= 0xFFFD
or 0x10000 <= i <= 0x10FFFF
)
def _clean_str_for_xml( s ):
    """Strip characters that are illegal in XML 1.0 from *s*.

    NOTE(review): Python-2-only — calls ``.decode('utf-8')`` on the
    filtered string, which fails for ``str`` on Python 3.
    """
    clean_s = ''.join(c for c in s if _valid_XML_char_ordinal(ord(c)))
    #print clean_s
    clean_s = clean_s.decode('utf-8')
    #print clean_s
    return clean_s
def _str_to_ascii( a ):
    """
    Decode any string to ASCII.
    This avoids many unicode problems, but loses non English characters.

    NOTE(review): Python-2-only — ``str.decode`` does not exist on
    Python 3.

    @param a a string
    @return an ASCII string
    """
    assert _is_str(a)
    return a.decode('ascii', 'ignore')
def _split_list(alist, wanted_parts):
length = len(alist)
sublists = [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
i = 0
for s in sublists: i+=len(s)
assert i == len(alist)
return sublists
def float_eq( a, b, err=1e-08):
    """
    Check if floats a and b are equal within absolute tolerance err
    @return boolean
    """
    delta = a - b
    if delta < 0:
        delta = -delta
    return delta <= err
"""
Utility functions for fields.py
"""
def _pixel_to_coords(col, row, transform):
"""Returns the geographic coordinate pair (lon, lat) for the given col, row, and geotransform."""
lon = transform[0] + (col * transform[1]) + (row * transform[2])
lat = transform[3] + (col * transform[4]) + (row * transform[2])
return lon, lat
def _coords_to_pixel(y, x, transform):
"""Returns raster coordinate pair (col, row) for the given lon, lat, and geotransform."""
col = int((y - transform[0]) / transform[1])
row = int((x - transform[3]) / transform[5])
return col, row
def _rasterize_layer(layer, reference=None, ncols=None, nrows=None, projection=None, transform=None):
    """Returns a 2d numpy array of the rasterized layer.

    The output grid is taken either from *reference* (a gdal.Dataset or
    a fields.GeoTiffField) or from the explicit ncols/nrows/projection/
    transform parameters.
    """
    import gdal, fields
    if isinstance(reference, gdal.Dataset):
        # NOTE(review): ncols is taken from RasterYSize and nrows from
        # RasterXSize here — looks transposed; confirm against callers.
        ncols = reference.RasterYSize
        nrows = reference.RasterXSize
        projection = reference.GetProjection()
        transform = reference.GetGeoTransform()
    elif isinstance(reference, fields.GeoTiffField):
        nrows, ncols = reference.data.shape
        projection = reference.projection
        transform = reference.transform
    elif not all([ncols, nrows, projection, transform]):
        raise ValueError("Must specify either a reference raster/field or pass the nrows, ncols, projection, and transform parameters.")
    # Burn value 1 into an in-memory byte raster, touching every pixel
    # intersected by a geometry (ALL_TOUCHED).
    raster = gdal.GetDriverByName('MEM').Create('', ncols, nrows, 1, gdal.GDT_Byte)
    raster.SetProjection(projection)
    raster.SetGeoTransform(transform)
    raster.GetRasterBand(1).Fill(0)
    gdal.RasterizeLayer(raster, [1], layer, None, None, [1], ['ALL_TOUCHED=TRUE'])
    return raster.ReadAsArray()
| |
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from toscaparser import tosca_template
from tacker.common import exceptions
from tacker.common import utils
from tacker.extensions import nfvo
from tacker import objects
from tacker.objects import fields
from tacker.tosca import utils as toscautils
from tacker.vnfm.lcm_user_data import utils as userdata_utils
from tacker.vnfm import vim_client
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def _get_vim(context, vim_connection_info):
    """Resolve VIM details for a VNF.

    Uses the first entry of *vim_connection_info* when present
    (vim id, region from its access info, and extra data), otherwise
    falls back to the default VIM. Raises VimConnectionNotFound when
    the VIM cannot be resolved.
    """
    vim_client_obj = vim_client.VimClient()
    vim_id = None
    region_name = None
    extra = {}
    if vim_connection_info:
        conn = vim_connection_info[0]
        vim_id = conn.vim_id
        extra = conn.extra
        access_info = conn.access_info
        if access_info:
            region_name = access_info.get('region')
    try:
        vim_res = vim_client_obj.get_vim(
            context, vim_id, region_name=region_name)
    except nfvo.VimNotFoundException:
        raise exceptions.VimConnectionNotFound(vim_id=vim_id)
    vim_res['vim_auth'].update({'region': region_name})
    # Merge any per-connection extras over the stored VIM extras.
    for key, value in (extra or {}).items():
        vim_res['extra'][key] = value
    return {'id': vim_res['vim_id'], 'vim_id': vim_res['vim_id'],
            'vim_type': vim_res['vim_type'],
            'access_info': vim_res['vim_auth'],
            'tenant_id': vim_res['tenant'],
            'extra': vim_res.get('extra', {})}
def _get_vnfd_dict(context, vnfd_id, flavour_id):
    """Load the flavour-specific VNFD dict for *vnfd_id* from its package.

    NOTE(review): assumes 'topology_template', 'substitution_mappings'
    and 'requirements' all exist in the parsed VNFD — raises otherwise;
    confirm callers guarantee this.
    """
    vnfd_dict = _get_flavour_based_vnfd(
        _get_vnf_package_path(context, vnfd_id), flavour_id)
    # Remove requirements from substitution mapping
    vnfd_dict.get('topology_template').get(
        'substitution_mappings').pop('requirements')
    return vnfd_dict
def get_vnfd_dict(context, vnfd_id, flavour_id):
    """Public wrapper around the private VNFD loader."""
    return _get_vnfd_dict(context, vnfd_id, flavour_id)
def _get_vnflcm_interface(context, interface, vnf_instance, flavour_id):
    '''Look up *interface* under the Vnflcm interfaces of the VNFD.

    The value is searched at::

        topology_template:
          node_templates:
            VNF:
              interfaces:
                Vnflcm:
                  <interface>

    Returns None when any level of that path is missing or empty.
    '''
    vnfd_dict = _get_vnfd_dict(context, vnf_instance.vnfd_id, flavour_id)
    if not isinstance(vnfd_dict, dict):
        raise exceptions.InvalidContentType(msg="VNFD not valid")
    # Walk the nested path, treating any missing level as empty.
    vnflcm = ((((vnfd_dict.get('topology_template') or {})
                .get('node_templates') or {})
               .get('VNF') or {})
              .get('interfaces') or {}).get('Vnflcm')
    return vnflcm.get(interface) if vnflcm else None
def _build_affected_resources(vnf_instance,
                              change_type=fields.ResourceChangeType.ADDED):
    '''Build affected resources from vnf_instance instantiated info.

    Produces camelCase dicts for affected VNFCs, virtual links and
    virtual storages, each tagged with *change_type*. Returns an empty
    dict (converted) when the instance has no instance_id.
    '''
    affected_resources = {}
    instantiated_vnf_info = vnf_instance.instantiated_vnf_info
    if hasattr(instantiated_vnf_info, 'instance_id'):
        if instantiated_vnf_info.instance_id:
            affected_resources['affectedVnfcs'] = []
            affected_resources['affectedVirtualLinks'] = []
            affected_resources['affectedVirtualStorages'] = []
            # build AffectedVnfc
            vnfc_resource_info = \
                instantiated_vnf_info.vnfc_resource_info
            for vnfc_resource in vnfc_resource_info:
                data = {}
                data['id'] = vnfc_resource.id
                data['vduId'] = vnfc_resource.vdu_id
                data['changeType'] = change_type
                data['computeResource'] = \
                    vnfc_resource.compute_resource.to_dict()
                data['metadata'] = vnfc_resource.metadata
                affected_resources['affectedVnfcs'].append(data)
            # build AffectedVirtualLink
            vnf_virtual_link = \
                instantiated_vnf_info.vnf_virtual_link_resource_info
            for vnf_vl_info in vnf_virtual_link:
                data = {}
                data['id'] = vnf_vl_info.id
                data['vnfVirtualLinkDescId'] = \
                    vnf_vl_info.vnf_virtual_link_desc_id
                data['changeType'] = change_type
                data['networkResource'] = \
                    vnf_vl_info.network_resource.to_dict()
                data['metadata'] = {}
                affected_resources['affectedVirtualLinks'].append(data)
            # build affectedVirtualStorages
            virtual_storage = \
                instantiated_vnf_info.virtual_storage_resource_info
            for vnf_storage_info in virtual_storage:
                data = {}
                data['id'] = vnf_storage_info.id
                data['virtualStorageDescId'] = \
                    vnf_storage_info.virtual_storage_desc_id
                data['changeType'] = change_type
                data['storageResource'] = \
                    vnf_storage_info.storage_resource.to_dict()
                data['metadata'] = {}
                affected_resources['affectedVirtualStorages'].append(data)
    # Keys above are already camelCase; this normalizes any remaining
    # snake_case keys in the nested dicts.
    return utils.convert_snakecase_to_camelcase(affected_resources)
def _get_affected_resources(old_vnf_instance=None,
                            new_vnf_instance=None, extra_list=None):
    '''Compute affected resources between two VNF instance snapshots.

    Returns the resources in new_vnf_instance that are not present in
    old_vnf_instance. With only new_vnf_instance the whole instance is
    reported as ADDED; with only old_vnf_instance as REMOVED; with both,
    resources new to new_vnf_instance are reported as MODIFIED.
    If extra_list (list of physical resource ids) is present, those ids
    are treated as new even when they exist in the old instance.
    '''
    def _get_affected_cpids(affected_vnfc, vnf_instance):
        # Collect cpd ids and external CP ids of the VNFC matching
        # affected_vnfc['id'] in the given instance.
        affected_cpids = []
        instantiated_vnf_info = vnf_instance.instantiated_vnf_info
        for vnfc_resource in instantiated_vnf_info.vnfc_resource_info:
            if vnfc_resource.id == affected_vnfc['id']:
                for vnfc_cp in vnfc_resource.vnfc_cp_info:
                    if vnfc_cp.cpd_id:
                        affected_cpids.append(vnfc_cp.cpd_id)
                    if vnfc_cp.vnf_ext_cp_id:
                        affected_cpids.append(vnfc_cp.vnf_ext_cp_id)
        return affected_cpids
    def _get_added_storageids(affected_vnfc, vnf_instance):
        # Resolve the storage descriptor ids referenced by the VNFC
        # matching affected_vnfc['id'].
        affected_storage_ids = []
        instantiated_vnf_info = vnf_instance.instantiated_vnf_info
        for vnfc_resource in instantiated_vnf_info.vnfc_resource_info:
            if vnfc_resource.id == affected_vnfc['id']:
                for storage_resource_id in vnfc_resource.storage_resource_ids:
                    virtual_storage = \
                        instantiated_vnf_info.virtual_storage_resource_info
                    for virt_storage_res_info in virtual_storage:
                        if virt_storage_res_info.id == storage_resource_id:
                            affected_storage_ids.append(
                                virt_storage_res_info.virtual_storage_desc_id)
        return affected_storage_ids
    def diff_list(old_list, new_list):
        # Items of new_list absent from old_list, order preserved.
        diff = []
        for item in new_list:
            if item not in old_list:
                diff.append(item)
        return diff
    affected_resources = {}
    affected_resources['affectedVnfcs'] = []
    affected_resources['affectedVirtualLinks'] = []
    affected_resources['affectedVirtualStorages'] = []
    if not old_vnf_instance:
        affected_resources = _build_affected_resources(
            new_vnf_instance, fields.ResourceChangeType.ADDED)
        # add affected cpids and add added storageids
        for affected_vnfc in affected_resources['affectedVnfcs']:
            affected_vnfc['affectedVnfcCpIds'] = _get_affected_cpids(
                affected_vnfc, new_vnf_instance)
            affected_vnfc['addedStorageResourceIds'] = _get_added_storageids(
                affected_vnfc, new_vnf_instance)
    elif not new_vnf_instance:
        affected_resources = _build_affected_resources(old_vnf_instance,
            fields.ResourceChangeType.REMOVED)
        # add affected cpids and add remove storageids
        for affected_vnfc in affected_resources['affectedVnfcs']:
            affected_vnfc['affectedVnfcCpIds'] = _get_affected_cpids(
                affected_vnfc, old_vnf_instance)
            affected_vnfc['removedStorageResourceIds'] = _get_added_storageids(
                affected_vnfc, old_vnf_instance)
    elif old_vnf_instance and new_vnf_instance:
        old_affected_resources = _build_affected_resources(old_vnf_instance)
        new_affected_resources = _build_affected_resources(new_vnf_instance,
            fields.ResourceChangeType.MODIFIED)
        # get resource_ids
        old_vnfc_resource_ids = []
        for vnfc_resource in old_affected_resources.get('affectedVnfcs', []):
            old_vnfc_resource_ids.append(
                vnfc_resource['computeResource']['resourceId'])
        # remove extra_list items in old_vnfc_resource_ids
        # so that this items will be considered new
        if extra_list:
            for item in extra_list:
                if item in old_vnfc_resource_ids:
                    index = old_vnfc_resource_ids.index(item)
                    old_vnfc_resource_ids.pop(index)
        new_vnfc_resource_ids = []
        for vnfc_resource in new_affected_resources.get('affectedVnfcs', []):
            resource_id = vnfc_resource['computeResource']['resourceId']
            new_vnfc_resource_ids.append(resource_id)
        old_vnf_vl_resource_ids = []
        for vnf_vl_info in old_affected_resources.get(
                'affectedVirtualLinks', []):
            resource_id = vnf_vl_info['networkResource']['resourceId']
            old_vnf_vl_resource_ids.append(resource_id)
        new_vnf_vl_resource_ids = []
        for vnf_vl_info in new_affected_resources.get(
                'affectedVirtualLinks', []):
            resource_id = vnf_vl_info['networkResource']['resourceId']
            new_vnf_vl_resource_ids.append(resource_id)
        old_vnf_storage_resource_ids = []
        for vnf_storage_info in old_affected_resources.get(
                'affectedVirtualStorages', []):
            resource_id = vnf_storage_info['storageResource']['resourceId']
            old_vnf_storage_resource_ids.append(resource_id)
        new_vnf_storage_resource_ids = []
        for vnf_storage_info in new_affected_resources.get(
                'affectedVirtualStorages', []):
            resource_id = vnf_storage_info['storageResource']['resourceId']
            new_vnf_storage_resource_ids.append(resource_id)
        # get difference between resource_ids
        vnfc_resource_ids = diff_list(old_vnfc_resource_ids,
                                      new_vnfc_resource_ids)
        vnf_vl_resource_ids = diff_list(old_vnf_vl_resource_ids,
                                        new_vnf_vl_resource_ids)
        vnf_storage_resource_ids = diff_list(old_vnf_storage_resource_ids,
                                             new_vnf_storage_resource_ids)
        # return new affected resources
        for affected_vls in new_affected_resources.get(
                'affectedVirtualLinks', []):
            if (affected_vls['networkResource']
                    ['resourceId'] in vnf_vl_resource_ids):
                affected_resources['affectedVirtualLinks'].append(affected_vls)
        for affected_storage in new_affected_resources.get(
                'affectedVirtualStorages', []):
            if (affected_storage['storageResource']
                    ['resourceId'] in vnf_storage_resource_ids):
                affected_resources['affectedVirtualStorages'].append(
                    affected_storage)
        for affected_vnfc in new_affected_resources.get('affectedVnfcs', []):
            if (affected_vnfc['computeResource']
                    ['resourceId'] in vnfc_resource_ids):
                # update affected affectedVnfcCpIds
                affected_vnfc['affectedVnfcCpIds'] = _get_affected_cpids(
                    affected_vnfc, new_vnf_instance)
                affected_resources['affectedVnfcs'].append(affected_vnfc)
    return affected_resources
def _get_vnf_package_id(context, vnfd_id):
    """Return the UUID of the VNF package that registered *vnfd_id*."""
    return objects.VnfPackageVnfd.get_by_id(context, vnfd_id).package_uuid
def _create_grant_request(vnfd_dict, package_uuid):
    """Collect software-image data for every Compute/BlockStorage node.

    NOTE(review): despite the name, this function does not build a grant
    request object; it returns a mapping of node template name ->
    VnfSoftwareImage, built from 'sw_image_data' properties and the
    matching artifact entries. Confirm the name against callers.

    :param vnfd_dict: parsed VNFD dict (topology_template read)
    :param package_uuid: package directory name used to resolve local
        artifact paths under the CSAR store
    :returns: dict node name -> objects.VnfSoftwareImage
    """
    node_templates = vnfd_dict.get('topology_template',
                                   {}).get('node_templates', {})
    vnf_software_images = {}
    if not node_templates:
        return vnf_software_images
    def _build_vnf_software_image(sw_image_data, artifact_image_path):
        # Map one 'sw_image_data' property block onto a VnfSoftwareImage;
        # min_disk is normalized to GB and min_ram to MB, defaulting to 0.
        vnf_sw_image = objects.VnfSoftwareImage()
        vnf_sw_image.image_path = artifact_image_path
        vnf_sw_image.name = sw_image_data.get('name')
        vnf_sw_image.version = sw_image_data.get('version')
        if sw_image_data.get('checksum'):
            checksum = sw_image_data.get('checksum')
            if checksum.get('algorithm'):
                vnf_sw_image.algorithm = checksum.get('algorithm')
            if checksum.get('hash'):
                vnf_sw_image.hash = checksum.get('hash')
        vnf_sw_image.container_format = sw_image_data.get(
            'container_format')
        vnf_sw_image.disk_format = sw_image_data.get('disk_format')
        if sw_image_data.get('min_disk'):
            min_disk = utils.MemoryUnit.convert_unit_size_to_num(
                sw_image_data.get('min_disk'), 'GB')
            vnf_sw_image.min_disk = min_disk
        else:
            vnf_sw_image.min_disk = 0
        if sw_image_data.get('min_ram'):
            min_ram = utils.MemoryUnit.convert_unit_size_to_num(
                sw_image_data.get('min_ram'), 'MB')
            vnf_sw_image.min_ram = min_ram
        else:
            vnf_sw_image.min_ram = 0
        return vnf_sw_image
    def _get_image_path(artifact_image_path, package_uuid):
        # Resolve a CSAR-relative artifact path ('../...' prefixes are
        # stripped) to an absolute path under the package store.
        vnf_package_path = CONF.vnf_package.vnf_package_csar_path
        artifact_image_path = os.path.join(
            vnf_package_path, package_uuid,
            artifact_image_path.split('../')[-1])
        return artifact_image_path
    for node, value in node_templates.items():
        if not value.get(
                'type') in ['tosca.nodes.nfv.Vdu.Compute',
                            'tosca.nodes.nfv.Vdu.VirtualBlockStorage']:
            continue
        sw_image_data = value.get('properties', {}).get('sw_image_data')
        artifacts = value.get('artifacts', {})
        for artifact, sw_image in artifacts.items():
            # Artifact may be a bare path string or a SwImage dict.
            artifact_image_path = None
            if isinstance(sw_image, str):
                artifact_image_path = sw_image
            elif sw_image.get('type') == 'tosca.artifacts.nfv.SwImage':
                artifact_image_path = sw_image.get('file', {})
            if sw_image_data and artifact_image_path:
                is_url = utils.is_url(artifact_image_path)
                if not is_url:
                    artifact_image_path = _get_image_path(artifact_image_path,
                                                          package_uuid)
                vnf_software_image = _build_vnf_software_image(
                    sw_image_data, artifact_image_path)
                vnf_software_images[node] = vnf_software_image
                # Only the first usable artifact per node is recorded.
                break
    return vnf_software_images
def _make_final_vnf_dict(vnfd_dict, id, name, param_values, vnf_dict=None):
if vnf_dict:
final_vnf_dict = vnf_dict
final_vnf_dict['vnfd']['attributes'].\
update({'vnfd': str(vnfd_dict)})
final_vnf_dict['attributes'].\
update({'param_values': str(param_values)})
final_vnf_dict['attributes'].\
update({'stack_name': name or ("vnflcm_" + id)})
return final_vnf_dict
else:
return {'vnfd': {
'attributes': {
'vnfd': str(vnfd_dict)}},
'id': id,
'name': name,
'attributes': {
'param_values': str(param_values),
'stack_name': name or ("vnflcm_" + id)}}
def _get_flavour_based_vnfd(csar_path, flavour_id):
    """Find the VNFD yaml whose substitution mapping matches *flavour_id*.

    Scans *csar_path* (one directory level deep plus top-level files) for
    a yaml/yml whose topology_template.substitution_mappings.properties
    .flavour_id equals *flavour_id*, rewrites its 'imports' entries to
    absolute paths and returns the parsed dict. Returns {} when nothing
    matches.

    :param csar_path: extracted CSAR root directory
    :param flavour_id: flavour identifier to match
    :returns: parsed VNFD dict with absolute import paths, or {}
    """
    ext = (".yaml", ".yml")
    file_path_and_data = {}
    imp_list = []
    for item in os.listdir(csar_path):
        src_path = os.path.join(csar_path, item)
        if os.path.isdir(src_path):
            for file in os.listdir(src_path):
                if file.endswith(ext):
                    source_file_path = os.path.join(src_path, file)
                    with open(source_file_path) as file_obj:
                        data = yaml.safe_load(file_obj)
                        substitution_map = data.get(
                            'topology_template',
                            {}).get('substitution_mappings', {})
                        if substitution_map.get(
                                'properties', {}).get('flavour_id') == flavour_id:
                            if data.get('imports'):
                                # Imports are resolved relative to the
                                # directory containing the matched file.
                                for imp in data.get('imports'):
                                    imp_path = os.path.join(src_path, imp)
                                    imp_list.append(imp_path)
                                data.update({'imports': imp_list})
                            return data
        elif src_path.endswith(ext):
            with open(src_path) as file_obj:
                file_data = yaml.safe_load(file_obj)
                substitution_map = file_data.get(
                    'topology_template', {}).get('substitution_mappings', {})
                if substitution_map.get(
                        'properties', {}).get('flavour_id') == flavour_id:
                    if file_data.get('imports'):
                        for imp in file_data.get('imports'):
                            # NOTE(review): src_path here is the yaml FILE
                            # itself, so this joins a file path with the
                            # import name (file.yaml/imp). Looks like it
                            # should join against csar_path — confirm
                            # against callers/fixtures before changing.
                            imp_list.append(os.path.join(src_path, imp))
                        file_data.update({'imports': imp_list})
                    return file_data
    return file_path_and_data
def _get_param_data(vnfd_dict, instantiate_vnf_req):
param_value = {}
additional_param = instantiate_vnf_req.additional_params
if additional_param is None:
additional_param = {}
substitution_map = vnfd_dict.get('topology_template',
{}).get('substitution_mappings', {})
input_attributes = vnfd_dict.get('topology_template', {}).get('inputs')
if substitution_map is not None:
subs_map_node_type = substitution_map.get('node_type')
# Get properties in lower-level VNFD for top-level VNFD
node_templates = vnfd_dict.get('topology_template',
{}).get('node_templates', {})
for node in node_templates.values():
if node.get('type') == subs_map_node_type:
node_property = node.get('properties', {})
if node_property:
param_value.update(node_property)
# Import `_type.yaml` file and get default properties.
# If new value provided in additional_param, the property is updated.
import_paths = vnfd_dict.get('imports', {})
for imp_path in import_paths:
with open(imp_path) as file_obj:
import_data = yaml.safe_load(file_obj)
imp_node_type = import_data.get('node_types')
if imp_node_type:
for key, value in imp_node_type.items():
if key == subs_map_node_type:
properties = value.get('properties')
if properties:
for key, prop in properties.items():
if additional_param.get(key):
param_value.update({
key: additional_param.get(key)})
# If the parameter is provided in lower-level
# VNFD, use it. Otherwise use the default.
elif not param_value.get(key):
param_value.update(
{key: prop.get('default')})
for input_attr, value in input_attributes.items():
if additional_param.get(input_attr):
param_value.update({input_attr: additional_param.get(
input_attr)})
return param_value
def _get_vim_connection_info_from_vnf_req(vnf_instance, instantiate_vnf_req):
    """Build the VimConnectionInfo list for an instantiate request.

    With no vim_connection_info on the request, the instance's first
    (default) vim connection is returned. Otherwise the request entries
    are copied, and the default vim is appended when not already present.

    :param vnf_instance: VnfInstance providing the default vim entry
    :param instantiate_vnf_req: InstantiateVnfRequest
    :returns: list of objects.VimConnectionInfo
    """
    vim_connection_obj_list = []
    if not instantiate_vnf_req.vim_connection_info:
        # add default vim
        if len(vnf_instance.vim_connection_info):
            vim_connection_obj_list.append(vnf_instance.vim_connection_info[0])
        return vim_connection_obj_list
    for vim_connection in instantiate_vnf_req.vim_connection_info:
        vim_conn = objects.VimConnectionInfo(id=vim_connection.id,
            vim_id=vim_connection.vim_id, vim_type=vim_connection.vim_type,
            access_info=vim_connection.access_info,
            interface_info=vim_connection.interface_info)
        vim_connection_obj_list.append(vim_conn)
    # add default vim
    # NOTE: 'vim_conn' below is the last entry created by the loop above;
    # the loop ran at least once because the early return handled the
    # empty-request case.
    if len(vnf_instance.vim_connection_info):
        if vim_conn.id and vnf_instance.vim_connection_info[0].id:
            is_default_vim_exist = [vim_conn for vim_conn
                in vim_connection_obj_list
                if vim_conn.id == vnf_instance.vim_connection_info[0].id]
            if not len(is_default_vim_exist):
                vim_connection_obj_list.append(vnf_instance.
                    vim_connection_info[0])
    return vim_connection_obj_list
def _build_instantiated_vnf_info(vnfd_dict, instantiate_vnf_req,
                                 vnf_instance, vim_id):
    """Populate vnf_instance.instantiated_vnf_info from VNFD + request.

    Fills vnfc/virtual-link/storage resource info, external CP info and
    (ext-managed) virtual link info in a fixed order: CP info is derived
    from a deep copy of the vnfc info so later mutations don't leak, and
    the ext-managed link info is derived from the already-built virtual
    link info.

    :param vnfd_dict: parsed VNFD dict
    :param instantiate_vnf_req: InstantiateVnfRequest
    :param vnf_instance: VnfInstance mutated in place
    :param vim_id: target VIM identifier (forwarded to helpers)
    """
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    inst_vnf_info.vnf_state = fields.VnfOperationalStateType.STARTED
    node_templates = vnfd_dict.get(
        'topology_template', {}).get('node_templates')
    vnfc_resource_info, virtual_storage_resource_info = \
        _get_vnfc_resource_info(vnfd_dict, instantiate_vnf_req, vim_id)
    inst_vnf_info.vnfc_resource_info = vnfc_resource_info
    # Snapshot the vnfc info so ext CP derivation sees a stable copy.
    tmp_insta_vnf_info = copy.deepcopy(inst_vnf_info)
    inst_vnf_info.ext_cp_info = _set_ext_cp_info(instantiate_vnf_req,
        inst_vnf_info=tmp_insta_vnf_info)
    inst_vnf_info.ext_virtual_link_info = _set_ext_virtual_link_info(
        instantiate_vnf_req, inst_vnf_info.ext_cp_info)
    inst_vnf_info.virtual_storage_resource_info = \
        virtual_storage_resource_info
    inst_vnf_info.vnf_virtual_link_resource_info = \
        _build_vnf_virtual_link_resource_info(
            node_templates, instantiate_vnf_req,
            inst_vnf_info.vnfc_resource_info, vim_id)
    inst_vnf_info.ext_managed_virtual_link_info = \
        _build_ext_managed_virtual_link_info(instantiate_vnf_req,
                                             inst_vnf_info)
    inst_vnf_info.additional_params = instantiate_vnf_req.additional_params
    vnf_instance.instantiated_vnf_info = inst_vnf_info
def _update_instantiated_vnf_info(change_ext_conn_req, vnf_instance):
    """Apply a change-ext-connectivity request to instantiated_vnf_info.

    Rebuilds ext CP info, ext virtual link info and vnf virtual link
    resource info; the first two work from a deep-copied snapshot so
    they see the pre-update state.

    :param change_ext_conn_req: ChangeExtVnfConnectivityRequest
    :param vnf_instance: VnfInstance mutated in place
    """
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    tmp_insta_vnf_info = copy.deepcopy(inst_vnf_info)
    inst_vnf_info.ext_cp_info = _update_ext_cp_info(change_ext_conn_req,
        inst_vnf_info=tmp_insta_vnf_info)
    inst_vnf_info.ext_virtual_link_info = _update_ext_virtual_link_info(
        change_ext_conn_req, inst_vnf_info=tmp_insta_vnf_info)
    inst_vnf_info.vnf_virtual_link_resource_info = \
        _update_vnf_virtual_link_resource_info(change_ext_conn_req,
                                               inst_vnf_info)
    vnf_instance.instantiated_vnf_info = inst_vnf_info
def _get_compute_nodes(vnfd_dict, instantiate_vnf_req):
"""Read the node templates and prepare VDU data in below format
{
'VDU1': {
'CP': [CP1, CP2],
'VIRTUAL_STORAGE': [virtual_storage1]
},
}
"""
node_templates = vnfd_dict.get(
'topology_template', {}).get('node_templates')
vdu_resources = {}
for key, value in node_templates.items():
if value.get('type') != 'tosca.nodes.nfv.Vdu.Compute':
continue
desired_capacity = _convert_desired_capacity(
instantiate_vnf_req.instantiation_level_id, vnfd_dict, key)
cp_list = _get_cp_for_vdu(key, node_templates)
virtual_storages = []
requirements = value.get('requirements', [])
for requirement in requirements:
if requirement.get('virtual_storage'):
virtual_storages.append(
requirement.get('virtual_storage'))
vdu_resources[key] = {"CP": cp_list,
"VIRTUAL_STORAGE": virtual_storages,
"COUNT": desired_capacity}
return vdu_resources
def _get_virtual_link_nodes(node_templates):
virtual_link_nodes = {}
for key, value in node_templates.items():
if value.get('type') == 'tosca.nodes.nfv.VnfVirtualLink':
cp_list = _get_cp_for_vl(key, node_templates)
virtual_link_nodes[key] = cp_list
return virtual_link_nodes
def _get_cp_for_vdu(vdu, node_templates):
cp_list = []
for key, value in node_templates.items():
if value.get('type') != 'tosca.nodes.nfv.VduCp':
continue
requirements = value.get('requirements', [])
for requirement in requirements:
if requirement.get('virtual_binding') and vdu == \
requirement.get('virtual_binding'):
cp_list.append(key)
return cp_list
def _get_cp_for_vl(vl, node_templates):
cp_list = []
for key, value in node_templates.items():
if value.get('type') != 'tosca.nodes.nfv.VduCp':
continue
requirements = value.get('requirements', [])
for requirement in requirements:
if requirement.get('virtual_link') and vl == \
requirement.get('virtual_link'):
cp_list.append(key)
return cp_list
def _build_vnf_virtual_link_resource_info(node_templates, instantiate_vnf_req,
                                          vnfc_resource_info, vim_id):
    """Build VnfVirtualLinkResourceInfo for VNFD links and external VLs.

    Links come from two sources: VnfVirtualLink nodes in *node_templates*
    and ext_virtual_links on the request. For each link a network
    ResourceHandle is resolved (ext-managed first, then ext virtual
    links) and per-CP link ports are created and cross-referenced into
    the matching VnfcCpInfo objects of *vnfc_resource_info*.

    :param node_templates: VNFD node_templates dict
    :param instantiate_vnf_req: InstantiateVnfRequest
    :param vnfc_resource_info: list of VnfcResourceInfo, mutated in place
        (vnf_link_port_id is set on matching CPs)
    :param vim_id: unused here; kept for interface parity with callers
    :returns: list of objects.VnfVirtualLinkResourceInfo
    """
    virtual_link_nodes_with_cp = _get_virtual_link_nodes(node_templates)
    # Read the external networks and extcps from InstantiateVnfRequest
    for ext_virt_link in instantiate_vnf_req.ext_virtual_links:
        virtual_link_nodes_with_cp[ext_virt_link.id] = [extcp.cpd_id for extcp
                                                        in ext_virt_link.ext_cps]
    virtual_link_resource_info_list = []
    def _get_network_resource(vl_node):
        # Resolve the network for a link: ext-managed entries win over
        # ext_virtual_links entries; unmatched links get an empty handle.
        resource_handle = objects.ResourceHandle()
        found = False
        for ext_mg_vl in instantiate_vnf_req.ext_managed_virtual_links:
            if ext_mg_vl.vnf_virtual_link_desc_id == vl_node:
                resource_handle.resource_id = ext_mg_vl.resource_id
                resource_handle.vim_connection_id = \
                    ext_mg_vl.vim_connection_id
                # TODO(tpatil): This cannot be set here.
                resource_handle.vim_level_resource_type = \
                    'OS::Neutron::Net'
                found = True
                break
        if not found:
            # check if it exists in the ext_virtual_links
            for ext_virt_link in instantiate_vnf_req.ext_virtual_links:
                if ext_virt_link.id == vl_node:
                    resource_handle.resource_id = ext_virt_link.resource_id
                    resource_handle.vim_connection_id = \
                        ext_virt_link.vim_connection_id
                    # TODO(tpatil): This cannot be set here.
                    resource_handle.vim_level_resource_type = \
                        'OS::Neutron::Net'
                    found = True
                    break
        return resource_handle
    def _get_vnf_link_port_info(cp):
        # Create a link port for *cp*; the resource handle is only
        # populated when some cp_config provides a link_port_id.
        vnf_link_port_info = objects.VnfLinkPortInfo()
        vnf_link_port_info.id = uuidutils.generate_uuid()
        resource_handle = objects.ResourceHandle()
        for ext_virt_link in instantiate_vnf_req.ext_virtual_links:
            for extcp in ext_virt_link.ext_cps:
                if extcp.cpd_id == cp:
                    for cpconfig in extcp.cp_config:
                        if cpconfig.link_port_id:
                            resource_handle.resource_id = \
                                cpconfig.link_port_id
                            resource_handle.vim_connection_id = \
                                ext_virt_link.vim_connection_id
                            # TODO(tpatil): This shouldn't be set here.
                            resource_handle.vim_level_resource_type = \
                                'OS::Neutron::Port'
                            break
        vnf_link_port_info.resource_handle = resource_handle
        return vnf_link_port_info
    for node, cp_list in virtual_link_nodes_with_cp.items():
        vnf_vl_resource_info = objects.VnfVirtualLinkResourceInfo()
        vnf_vl_resource_info.id = uuidutils.generate_uuid()
        vnf_vl_resource_info.vnf_virtual_link_desc_id = node
        vnf_vl_resource_info.network_resource = _get_network_resource(node)
        vnf_link_port_info_list = []
        for cp in cp_list:
            for vnfc_resource in vnfc_resource_info:
                for vnfc_cp in vnfc_resource.vnfc_cp_info:
                    if vnfc_cp.cpd_id == cp:
                        vnf_link_port_info = _get_vnf_link_port_info(cp)
                        vnf_link_port_info.cp_instance_id = vnfc_cp.id
                        # Identifier of the "vnfLinkPorts" structure in the
                        # "vnfVirtualLinkResourceInfo" structure.
                        vnfc_cp.vnf_link_port_id = vnf_link_port_info.id
                        vnf_link_port_info_list.append(vnf_link_port_info)
        vnf_vl_resource_info.vnf_link_ports = vnf_link_port_info_list
        virtual_link_resource_info_list.append(vnf_vl_resource_info)
    return virtual_link_resource_info_list
def _update_vnf_virtual_link_resource_info(change_ext_conn_req,
                                           inst_vnf_info):
    """Rebuild vnf_virtual_link_resource_info for a connectivity change.

    Entries whose descriptor id matches an ext_virtual_link in the
    request get a new network ResourceHandle with the requested
    resource_id (connection id / resource type are copied from the
    stored handle); all other entries are passed through unchanged.

    :param change_ext_conn_req: ChangeExtVnfConnectivityRequest
    :param inst_vnf_info: current InstantiatedVnfInfo (not mutated)
    :returns: new list of VnfVirtualLinkResourceInfo
    """
    def _update(change_ext_conn_req, vnf_vl_resource_info):
        # Return a replacement entry, or None when the request does not
        # touch this link.
        for ext_virtual_link in change_ext_conn_req.ext_virtual_links:
            if (ext_virtual_link.id ==
                    vnf_vl_resource_info.vnf_virtual_link_desc_id):
                res_handle = objects.ResourceHandle()
                res_handle.resource_id = ext_virtual_link.resource_id
                nw_res = vnf_vl_resource_info.network_resource
                res_handle.vim_connection_id = nw_res.vim_connection_id
                res_handle.vim_level_resource_type = \
                    nw_res.vim_level_resource_type
                new_vnf_vl_resource_info = \
                    objects.VnfVirtualLinkResourceInfo(
                        id=vnf_vl_resource_info.id,
                        vnf_virtual_link_desc_id=vnf_vl_resource_info.
                        vnf_virtual_link_desc_id,
                        network_resource=res_handle,
                        vnf_link_ports=vnf_vl_resource_info.vnf_link_ports)
                return new_vnf_vl_resource_info
        return None
    vnf_virtual_link_resource_list = []
    for vnf_vl_res_info in inst_vnf_info.vnf_virtual_link_resource_info:
        updated_vnf_vl_res_info = \
            _update(change_ext_conn_req, vnf_vl_res_info)
        if updated_vnf_vl_res_info:
            vnf_virtual_link_resource_list.append(updated_vnf_vl_res_info)
        else:
            vnf_virtual_link_resource_list.append(vnf_vl_res_info)
    return vnf_virtual_link_resource_list
def _build_vnf_cp_info(instantiate_vnf_req, cp_list):
    """Create a VnfcCpInfo object for every CP name in *cp_list*.

    Each CP gets a fresh uuid; when the request's ext_virtual_links
    reference the CP, its protocol info and external CP id are filled in
    from the matching ExtCp entry.

    :param instantiate_vnf_req: InstantiateVnfRequest
    :param cp_list: list of CP descriptor names (may be empty/None)
    :returns: list of objects.VnfcCpInfo
    """
    vnfc_cp_info_list = []
    if not cp_list:
        return vnfc_cp_info_list
    def _set_vnf_exp_cp_id_protocol_data(vnfc_cp_info):
        # NOTE: 'cp' is the enclosing loop's current CP name (closure);
        # this helper is only called from inside that loop.
        for ext_virt_link in instantiate_vnf_req.ext_virtual_links:
            for extcp in ext_virt_link.ext_cps:
                if extcp.cpd_id == cp:
                    vnfc_cp_info.cp_protocol_info = \
                        _set_cp_protocol_info(extcp)
                    # Only the first cp_config entry is used.
                    for cpconfig in extcp.cp_config:
                        vnfc_cp_info.vnf_ext_cp_id = cpconfig.link_port_id
                        break
    for cp in cp_list:
        vnfc_cp_info = objects.VnfcCpInfo()
        vnfc_cp_info.id = uuidutils.generate_uuid()
        vnfc_cp_info.cpd_id = cp
        _set_vnf_exp_cp_id_protocol_data(vnfc_cp_info)
        vnfc_cp_info_list.append(vnfc_cp_info)
    return vnfc_cp_info_list
def _build_virtual_storage_info(virtual_storages):
for storage_node in virtual_storages:
virtual_storage = objects.VirtualStorageResourceInfo()
virtual_storage.id = uuidutils.generate_uuid()
virtual_storage.virtual_storage_desc_id = storage_node
virtual_storage.storage_resource = objects.ResourceHandle()
yield virtual_storage
def _get_vnfc_resource_info(vnfd_dict, instantiate_vnf_req, vim_id):
    """Create VnfcResourceInfo (per VDU instance) and storage info lists.

    For every VDU in the VNFD, COUNT instances are created (COUNT comes
    from the instantiation level); each instance gets its own CP info
    and its own VirtualStorageResourceInfo objects.

    :param vnfd_dict: parsed VNFD dict
    :param instantiate_vnf_req: InstantiateVnfRequest
    :param vim_id: unused here; kept for interface parity with callers
    :returns: (vnfc_resource_info_list, virtual_storage_resource_info_list)
    """
    vdu_resources = _get_compute_nodes(vnfd_dict, instantiate_vnf_req)
    vnfc_resource_info_list = []
    virtual_storage_resource_info_list = []
    def _build_vnfc_resource_info(vdu, vdu_resource):
        # One VnfcResourceInfo per VDU instance; storage objects are
        # also accumulated into the outer list (closure).
        vnfc_resource_info = objects.VnfcResourceInfo()
        vnfc_resource_info.id = uuidutils.generate_uuid()
        vnfc_resource_info.vdu_id = vdu
        vnfc_resource_info.compute_resource = objects.ResourceHandle()
        vnfc_cp_info_list = _build_vnf_cp_info(instantiate_vnf_req,
                                               vdu_resource.get("CP"))
        vnfc_resource_info.vnfc_cp_info = vnfc_cp_info_list
        virtual_storages = vdu_resource.get("VIRTUAL_STORAGE")
        vdu_storages = []
        for storage in _build_virtual_storage_info(virtual_storages):
            vdu_storages.append(storage)
            virtual_storage_resource_info_list.append(storage)
        storage_resource_ids = [info.id for info in vdu_storages]
        vnfc_resource_info.storage_resource_ids = storage_resource_ids
        return vnfc_resource_info
    for vdu, vdu_resource in vdu_resources.items():
        count = vdu_resource.get('COUNT', 1)
        for num_instance in range(count):
            vnfc_resource_info = _build_vnfc_resource_info(vdu, vdu_resource)
            vnfc_resource_info_list.append(vnfc_resource_info)
    return vnfc_resource_info_list, virtual_storage_resource_info_list
def _set_ext_cp_info(instantiate_vnf_req, inst_vnf_info=None):
    """Build VnfExtCpInfo entries from the request's ext_virtual_links.

    For each ExtCp, protocol info, the ext link port id and the owning
    VNFC's CP id are resolved.

    :param instantiate_vnf_req: InstantiateVnfRequest
    :param inst_vnf_info: InstantiatedVnfInfo providing vnfc_resource_info
        for the associated-CP lookup (must not be None)
    :returns: list of objects.VnfExtCpInfo
    """
    ext_cp_info_list = []
    vnfc_info = []
    if inst_vnf_info.vnfc_resource_info:
        vnfc_info = inst_vnf_info.vnfc_resource_info
    if not instantiate_vnf_req.ext_virtual_links:
        return ext_cp_info_list
    for ext_virt_link in instantiate_vnf_req.ext_virtual_links:
        if not ext_virt_link.ext_cps:
            continue
        for ext_cp in ext_virt_link.ext_cps:
            ext_cp_info = objects.VnfExtCpInfo(
                id=uuidutils.generate_uuid(),
                cpd_id=ext_cp.cpd_id,
                cp_protocol_info=_set_cp_protocol_info(ext_cp),
                ext_link_port_id=_get_ext_link_port_id(ext_virt_link,
                    ext_cp.cpd_id),
                associated_vnfc_cp_id=_get_associated_vnfc_cp_id(vnfc_info,
                    ext_cp.cpd_id))
            ext_cp_info_list.append(ext_cp_info)
    return ext_cp_info_list
def _update_ext_cp_info(change_ext_conn_req, inst_vnf_info):
    """Rebuild ext_cp_info entries affected by a connectivity change.

    CPs whose cpd_id matches an ExtCp in the request get fresh protocol
    info; others pass through unchanged.
    NOTE(review): the rebuilt object does not carry ext_link_port_id —
    confirm whether dropping it here is intentional.

    :param change_ext_conn_req: ChangeExtVnfConnectivityRequest
    :param inst_vnf_info: current InstantiatedVnfInfo (not mutated)
    :returns: new list of VnfExtCpInfo
    """
    def _update(change_ext_conn_req, ext_cp_info):
        # Return a replacement entry, or None when the request does not
        # touch this CP.
        for ext_virt_link in change_ext_conn_req.ext_virtual_links:
            if not ext_virt_link.ext_cps:
                continue
            for ext_cp in ext_virt_link.ext_cps:
                if ext_cp.cpd_id == ext_cp_info.cpd_id:
                    new_ext_cp_info = objects.VnfExtCpInfo(
                        id=ext_cp_info.id,
                        cpd_id=ext_cp.cpd_id,
                        cp_protocol_info=_set_cp_protocol_info(ext_cp),
                        associated_vnfc_cp_id=ext_cp_info.
                        associated_vnfc_cp_id)
                    return new_ext_cp_info
        return None
    ext_cp_info_list = []
    for ext_cp_info in inst_vnf_info.ext_cp_info:
        updated_ext_cp_info = _update(change_ext_conn_req, ext_cp_info)
        if updated_ext_cp_info:
            ext_cp_info_list.append(updated_ext_cp_info)
        else:
            ext_cp_info_list.append(ext_cp_info)
    return ext_cp_info_list
def _get_ext_link_port_id(ext_virtual_link, cpd_id):
if not ext_virtual_link.ext_link_ports:
return
for ext_link in ext_virtual_link.ext_link_ports:
if ext_link.id == cpd_id:
return ext_link.id
def _get_associated_vnfc_cp_id(vnfc_info, cpd_id):
if not isinstance(vnfc_info, list):
return
for vnfc in vnfc_info:
if vnfc.vnfc_cp_info:
for cp_info in vnfc.vnfc_cp_info:
if cp_info.cpd_id == cpd_id:
return vnfc.id
def _build_ip_over_ethernet_address_info(cp_protocol_data):
    """Convert IpOverEthernetAddressData to IpOverEthernetAddressInfo.

    Returns None when the protocol data has no ip_over_ethernet; returns
    an info object without ip_addresses when no addresses are given.
    """
    if not cp_protocol_data.ip_over_ethernet:
        return
    ip_over_ethernet_add_info = objects.IpOverEthernetAddressInfo()
    ip_over_ethernet_add_info.mac_address = \
        cp_protocol_data.ip_over_ethernet.mac_address
    if not cp_protocol_data.ip_over_ethernet.ip_addresses:
        return ip_over_ethernet_add_info
    ip_address_list = []
    for ip_address in cp_protocol_data.ip_over_ethernet.ip_addresses:
        # is_dynamic reflects whether any dynamic addresses were requested.
        ip_address_info = objects.vnf_instantiated_info.IpAddress(
            type=ip_address.type,
            addresses=ip_address.fixed_addresses,
            is_dynamic=True if ip_address.num_dynamic_addresses else False,
            subnet_id=ip_address.subnet_id)
        ip_address_list.append(ip_address_info)
    ip_over_ethernet_add_info.ip_addresses = ip_address_list
    return ip_over_ethernet_add_info
def _build_cp_protocol_info(cp_protocol_data):
    """Convert one CpProtocolData entry into a CpProtocolInfo object."""
    return objects.CpProtocolInfo(
        layer_protocol=cp_protocol_data.layer_protocol,
        ip_over_ethernet=_build_ip_over_ethernet_address_info(
            cp_protocol_data))
def _set_cp_protocol_info(ext_cp):
"""Convert CpProtocolData to CpProtocolInfo"""
cp_protocol_info_list = []
if not ext_cp.cp_config:
return cp_protocol_info_list
for cp_config in ext_cp.cp_config:
for cp_protocol_data in cp_config.cp_protocol_data:
cp_protocol_info = _build_cp_protocol_info(cp_protocol_data)
cp_protocol_info_list.append(cp_protocol_info)
return cp_protocol_info_list
def _set_ext_virtual_link_info(instantiate_vnf_req, ext_cp_info):
    """Build ExtVirtualLinkInfo entries from the request.

    Each request link becomes an ExtVirtualLinkInfo with a ResourceHandle
    (resource id + vim connection) and its ext link ports resolved
    against *ext_cp_info*.

    :param instantiate_vnf_req: InstantiateVnfRequest
    :param ext_cp_info: list of VnfExtCpInfo used for cp_instance_id
    :returns: list of objects.ExtVirtualLinkInfo
    """
    ext_virtual_link_list = []
    if not instantiate_vnf_req.ext_virtual_links:
        return ext_virtual_link_list
    for ext_virtual_link in instantiate_vnf_req.ext_virtual_links:
        res_handle = objects.ResourceHandle()
        res_handle.resource_id = ext_virtual_link.resource_id
        res_handle.vim_connection_id = ext_virtual_link.vim_connection_id
        ext_virtual_link_info = objects.ExtVirtualLinkInfo(
            id=ext_virtual_link.id,
            resource_handle=res_handle,
            ext_link_ports=_set_ext_link_port(ext_virtual_link,
                ext_cp_info))
        ext_virtual_link_list.append(ext_virtual_link_info)
    return ext_virtual_link_list
def _update_ext_virtual_link_info(change_ext_conn_req, inst_vnf_info):
def _update(change_ext_conn_req, ext_virtual_link_info):
for ext_virtual_link in change_ext_conn_req.ext_virtual_links:
if ext_virtual_link.id == ext_virtual_link_info.id:
res_handle = objects.ResourceHandle()
res_handle.resource_id = ext_virtual_link.resource_id
new_ext_virtual_link_info = objects.ExtVirtualLinkInfo(
id=ext_virtual_link_info.id,
resource_handle=res_handle,
ext_link_ports=ext_virtual_link_info.ext_link_ports)
res_handle.vim_connection_id = \
ext_virtual_link_info.resource_handle.vim_connection_id
new_ext_virtual_link_info = objects.ExtVirtualLinkInfo(
id=ext_virtual_link_info.id,
resource_handle=res_handle,
ext_link_ports=ext_virtual_link_info.ext_link_ports)
return new_ext_virtual_link_info
return None
ext_virtual_link_list = []
for ext_virtual_link_info in inst_vnf_info.ext_virtual_link_info:
updated_ext_virtual_link_info = \
_update(change_ext_conn_req, ext_virtual_link_info)
if updated_ext_virtual_link_info:
ext_virtual_link_list.append(updated_ext_virtual_link_info)
else:
ext_virtual_link_list.append(ext_virtual_link_info)
return ext_virtual_link_list
def _set_ext_link_port(ext_virtual_links, ext_cp_info):
    """Build ExtLinkPortInfo entries for one external virtual link.

    Each link port gets a clone of its resource handle; when the link
    declares ext_cps, a cp_instance_id from *ext_cp_info* is attached.

    :param ext_virtual_links: a single ExtVirtualLink (naming is
        historical) whose ext_link_ports are converted
    :param ext_cp_info: list of VnfExtCpInfo
    :returns: list of objects.ExtLinkPortInfo
    """
    ext_link_port_list = []
    if not ext_virtual_links.ext_link_ports:
        return ext_link_port_list
    for ext_link_port in ext_virtual_links.ext_link_ports:
        resource_handle = ext_link_port.resource_handle.obj_clone()
        cp_instance_id = None
        if ext_virtual_links.ext_cps:
            # NOTE(review): this loop leaves cp_instance_id set to the
            # LAST entry of ext_cp_info for every port — looks like it
            # should match the port to a specific CP; confirm intent.
            for ext_cp in ext_cp_info:
                cp_instance_id = ext_cp.id
        ext_link_port_info = objects.ExtLinkPortInfo(id=ext_link_port.id,
            resource_handle=resource_handle, cp_instance_id=cp_instance_id)
        ext_link_port_list.append(ext_link_port_info)
    return ext_link_port_list
def _build_ext_managed_virtual_link_info(instantiate_vnf_req, inst_vnf_info):
    """Build ExtManagedVirtualLinkInfo from the request and built links.

    Each ext-managed link from the request is converted, and its
    vnf_link_ports are cloned from the matching (by descriptor id)
    vnf_virtual_link_resource_info entry already built on
    *inst_vnf_info*.

    :param instantiate_vnf_req: InstantiateVnfRequest
    :param inst_vnf_info: InstantiatedVnfInfo whose
        vnf_virtual_link_resource_info must already be populated
    :returns: list of objects.ExtManagedVirtualLinkInfo
    """
    def _network_resource(ext_managed_vl):
        resource_handle = objects.ResourceHandle(
            resource_id=ext_managed_vl.resource_id)
        # TODO(tpatil): Remove hard coding of resource type as
        # OS::Neutron::Net resource type is specific to OpenStack infra
        # driver. It could be different for other infra drivers like
        # Kubernetes.
        resource_handle.vim_level_resource_type = 'OS::Neutron::Net'
        resource_handle.vim_connection_id = \
            ext_managed_vl.vim_connection_id
        return resource_handle
    ext_managed_virtual_link_info = []
    ext_managed_virt_link_from_req = \
        instantiate_vnf_req.ext_managed_virtual_links
    for ext_managed_vl in ext_managed_virt_link_from_req:
        ext_managed_virt_info = objects.ExtManagedVirtualLinkInfo()
        ext_managed_virt_info.id = ext_managed_vl.id
        ext_managed_virt_info.vnf_virtual_link_desc_id =\
            ext_managed_vl.vnf_virtual_link_desc_id
        ext_managed_virt_info.network_resource =\
            _network_resource(ext_managed_vl)
        # Populate the vnf_link_ports from vnf_virtual_link_resource_info
        # of instantiated_vnf_info.
        for vnf_vl_res_info in inst_vnf_info.vnf_virtual_link_resource_info:
            if ext_managed_vl.vnf_virtual_link_desc_id ==\
                    vnf_vl_res_info.vnf_virtual_link_desc_id:
                vnf_link_ports = []
                for vnf_lp in vnf_vl_res_info.vnf_link_ports:
                    vnf_link_ports.append(vnf_lp.obj_clone())
                ext_managed_virt_info.vnf_link_ports = vnf_link_ports
        ext_managed_virtual_link_info.append(ext_managed_virt_info)
    return ext_managed_virtual_link_info
def _convert_desired_capacity(inst_level_id, vnfd_dict, vdu):
    """Compute the number of instances of *vdu* for an instantiation level.

    Parses the VNFD's scaling policies and evaluates
    initial_delta + delta * level for the scaling aspect covering *vdu*.
    Falls back to 1 when the VDU has no delta, no level can be resolved,
    or the level defines no aspects.

    :param inst_level_id: requested instantiation level id (may be None,
        in which case the policy's default level is used)
    :param vnfd_dict: parsed VNFD dict, fed to ToscaTemplate
    :param vdu: VDU node name
    :returns: int desired instance count (default 1)
    """
    aspect_delta_dict = {}
    aspect_vdu_dict = {}
    inst_level_dict = {}
    aspect_id_dict = {}
    vdu_delta_dict = {}
    aspect_max_level_dict = {}
    desired_capacity = 1
    tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
                                         yaml_dict_tpl=vnfd_dict)
    tosca_policies = tosca.topology_template.policies
    # _extract_policy_info fills the dicts above in place and returns the
    # default instantiation level id (if any).
    default_inst_level_id = toscautils._extract_policy_info(
        tosca_policies, inst_level_dict,
        aspect_delta_dict, aspect_id_dict,
        aspect_vdu_dict, vdu_delta_dict,
        aspect_max_level_dict)
    if vdu_delta_dict.get(vdu) is None:
        return desired_capacity
    if inst_level_id:
        instantiation_level = inst_level_id
    elif default_inst_level_id:
        instantiation_level = default_inst_level_id
    else:
        return desired_capacity
    al_dict = inst_level_dict.get(instantiation_level)
    if not al_dict:
        return desired_capacity
    for aspect_id, level_num in al_dict.items():
        delta_id = aspect_id_dict.get(aspect_id)
        if delta_id is not None:
            delta_num = \
                aspect_delta_dict.get(aspect_id).get(delta_id)
            vdus = aspect_vdu_dict.get(aspect_id)
            initial_delta = None
            if vdu in vdus:
                initial_delta = vdu_delta_dict.get(vdu)
            if initial_delta is not None:
                # capacity = initial instances + step size * level number
                desired_capacity = initial_delta + delta_num * level_num
    return desired_capacity
def _get_vnf_package_path(context, vnfd_id):
    """Return the on-disk CSAR directory of the package owning *vnfd_id*."""
    package_id = _get_vnf_package_id(context, vnfd_id)
    return os.path.join(CONF.vnf_package.vnf_package_csar_path, package_id)
def get_base_nest_hot_dict(context, flavour_id, vnfd_id):
    """Load the BaseHOT template and its nested templates for a flavour.

    Reads ``<package>/BaseHOT/<flavour_id>`` for yaml files and its
    ``nested`` subdirectory for nested templates.

    NOTE: when several yaml files exist in BaseHOT, the last one in
    os.listdir() order wins as the base template.

    :param context: request context used to resolve the package path
    :param flavour_id: flavour directory name under BaseHOT
    :param vnfd_id: VNFD id used to locate the package
    :returns: (base_hot_dict or None, dict of filename -> nested hot)
    """
    base_hot_path = os.path.join(_get_vnf_package_path(context, vnfd_id),
                                 'BaseHOT', flavour_id)
    nested_hot_path = os.path.join(base_hot_path, 'nested')
    ext = (".yaml", ".yml")
    base_hot_dict = None
    nested_hot_dict = {}
    if os.path.exists(base_hot_path):
        for file in os.listdir(base_hot_path):
            if file.endswith(ext):
                with open(os.path.join(base_hot_path, file)) as file_obj:
                    base_hot_dict = yaml.safe_load(file_obj)
        if os.path.exists(nested_hot_path):
            for file in os.listdir(nested_hot_path):
                if file.endswith(ext):
                    with open(os.path.join(nested_hot_path, file)) as file_obj:
                        nested_hot = yaml.safe_load(file_obj)
                        nested_hot_dict[file] = nested_hot
    LOG.debug("Loaded base hot: %s", base_hot_dict)
    LOG.debug("Loaded nested_hot_dict: %s", nested_hot_dict)
    return base_hot_dict, nested_hot_dict
def get_extract_policy_infos(tosca):
    """Collect scaling-policy information from a parsed TOSCA template.

    Runs ``toscautils._extract_policy_info()`` over the template's
    policies and bundles the populated lookup tables, together with the
    default instantiation level id, into a single dict.
    """
    inst_level_dict = {}
    aspect_delta_dict = {}
    aspect_id_dict = {}
    aspect_vdu_dict = {}
    vdu_delta_dict = {}
    aspect_max_level_dict = {}
    # The helper fills the dicts in place and returns the default level id.
    default_inst_level_id = toscautils._extract_policy_info(
        tosca.topology_template.policies, inst_level_dict,
        aspect_delta_dict, aspect_id_dict,
        aspect_vdu_dict, vdu_delta_dict,
        aspect_max_level_dict)
    return {
        'inst_level_dict': inst_level_dict,
        'aspect_delta_dict': aspect_delta_dict,
        'aspect_id_dict': aspect_id_dict,
        'aspect_vdu_dict': aspect_vdu_dict,
        'vdu_delta_dict': vdu_delta_dict,
        'aspect_max_level_dict': aspect_max_level_dict,
        'default_inst_level_id': default_inst_level_id,
    }
def get_scale_delta_num(extract_policy_infos, aspect_id):
    """Return the number of VDU instances added/removed per scale step.

    :param extract_policy_infos: dict produced by get_extract_policy_infos()
    :param aspect_id: scaling aspect identifier from the VNFD
    :returns: the delta (step size) for the aspect, or 1 when the policy
        information does not define one
    """
    default_delta = 1
    aspect_id_dict = extract_policy_infos.get('aspect_id_dict')
    if not aspect_id_dict:
        return default_delta
    delta_id = aspect_id_dict.get(aspect_id)
    if delta_id is None:
        return default_delta
    # Guard both lookups: the previous chained .get() calls raised
    # AttributeError when the aspect had no entry in aspect_delta_dict,
    # and could return None instead of a usable step size.
    aspect_deltas = (extract_policy_infos.get('aspect_delta_dict')
                     or {}).get(aspect_id) or {}
    delta_num = aspect_deltas.get(delta_id)
    return delta_num if delta_num is not None else default_delta
def get_default_scale_status(context, vnf_instance, vnfd_dict):
    """Build the ScaleInfo list for the VNFD's default instantiation level.

    Returns None when the VNFD carries no usable instantiation-level
    policy information.

    NOTE(review): the ``vnfd_dict`` argument is immediately replaced by a
    freshly loaded VNFD for the instance's flavour, so the value passed by
    the caller is effectively ignored -- kept for interface compatibility.
    """
    vnfd_dict = _get_vnfd_dict(
        context, vnf_instance.vnfd_id,
        vnf_instance.instantiated_vnf_info.flavour_id)
    tosca = tosca_template.ToscaTemplate(
        parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict)
    policy_infos = get_extract_policy_infos(tosca)
    inst_levels = policy_infos['inst_level_dict']
    if inst_levels is None:
        return None
    default_levels = inst_levels.get(policy_infos['default_inst_level_id'])
    if default_levels is None:
        return None
    return [objects.ScaleInfo(aspect_id=aspect, scale_level=level)
            for aspect, level in default_levels.items()]
def get_target_vdu_def_dict(extract_policy_infos, aspect_id, tosca):
    """Map the VDU names of a scaling aspect to their TOSCA node templates.

    Returns an empty dict when the aspect is unknown or no VDU mapping
    information was extracted from the policies.
    """
    aspect_vdu_dict = extract_policy_infos['aspect_vdu_dict']
    target_vdus = None
    if aspect_vdu_dict is not None:
        target_vdus = aspect_vdu_dict.get(aspect_id)
    if target_vdus is None:
        return {}
    vdu_def_dict = {}
    for node_tpl in tosca.topology_template.nodetemplates:
        vdu_def_dict.update(
            {name: value for name, value in node_tpl.templates.items()
             if name in target_vdus})
    return vdu_def_dict
def _get_changed_ext_connectivity(
        old_vnf_instance=None, new_vnf_instance=None):
    """Compute external-connectivity changes between two VNF instances.

    For every virtual link present in both instances, collect the link
    ports whose backing resource id changed and wrap them in
    ExtVirtualLinkInfo/ExtLinkPortInfo objects.  Returns an empty list
    when either instance is missing.
    """
    if not old_vnf_instance or not new_vnf_instance:
        return []

    def _changed_ports(old_ports, new_ports):
        # Ports are matched by id; a port counts as changed when its
        # handle now points at a different backing resource.
        changed = []
        for old_port in old_ports:
            for new_port in new_ports:
                if (old_port.id == new_port.id and
                        old_port.resource_handle.resource_id !=
                        new_port.resource_handle.resource_id):
                    changed.append(new_port)
        return changed

    old_vl_infos = (old_vnf_instance.instantiated_vnf_info.
                    vnf_virtual_link_resource_info)
    new_vl_infos = (new_vnf_instance.instantiated_vnf_info.
                    vnf_virtual_link_resource_info)
    changed_ext_connectivities = []
    for old_vl in old_vl_infos:
        for new_vl in new_vl_infos:
            if old_vl.id != new_vl.id:
                continue
            ext_link_ports = [
                objects.ExtLinkPortInfo(
                    id=port.id,
                    resource_handle=port.resource_handle,
                    cp_instance_id=port.cp_instance_id)
                for port in _changed_ports(old_vl.vnf_link_ports,
                                           new_vl.vnf_link_ports)]
            # Only links that actually lost/changed a port are reported.
            if ext_link_ports:
                changed_ext_connectivities.append(
                    objects.ExtVirtualLinkInfo(
                        id=new_vl.id,
                        resource_handle=new_vl.network_resource,
                        ext_link_ports=ext_link_ports))
    LOG.debug('changed_ext_connectivities: {}'.format(
        changed_ext_connectivities))
    return changed_ext_connectivities
def get_stack_param(context, vnf_dict, heal_vnf_request, inst_vnf_info):
    """Rebuild heat stack parameters for the VNFCs targeted by a heal.

    Re-derives the per-VDU parameters (flavor, image, ...) from the Base
    HOT and the VNFD, then overwrites the stored ``stack_param`` entries
    of the VNFCs being healed.  Returns an empty dict when the VNF has no
    stored stack parameters.
    """
    def _select_vnfc_resources():
        # With no explicit vnfc ids, every VNFC of the instance is healed.
        if not heal_vnf_request.vnfc_instance_id:
            return list(inst_vnf_info.vnfc_resource_info)
        return [res for res in inst_vnf_info.vnfc_resource_info
                if res.id in heal_vnf_request.vnfc_instance_id]

    def _latest_vdu_params(base_hot_dict, nested_hot_dict, vnfd_dict):
        # Recompute the 'nfv.VDU' parameter subtree from the templates.
        hot_dict = copy.deepcopy(nested_hot_dict)
        hot_dict['heat_template'] = base_hot_dict
        initial_params = (
            userdata_utils.create_initial_param_server_port_dict(hot_dict))
        # Only VDU-level parameters are of interest for healing.
        del initial_params['nfv']['CP']
        vdu_flavors = (
            userdata_utils.create_vdu_flavor_capability_name_dict(vnfd_dict))
        vdu_images = userdata_utils.create_sw_image_dict(vnfd_dict)
        final_params = userdata_utils.create_final_param_dict(
            initial_params, vdu_flavors, vdu_images, {})
        return final_params['nfv']['VDU']

    vnfc_resources = _select_vnfc_resources()
    base_hot_dict, nested_hot_dict = get_base_nest_hot_dict(
        context, inst_vnf_info.flavour_id, vnf_dict['vnfd_id'])
    vnfd_dict = yaml.safe_load(
        vnf_dict['vnfd']['attributes']['vnfd_' + inst_vnf_info.flavour_id])

    stack_param = {}
    if 'stack_param' in vnf_dict['attributes'].keys():
        stack_param = yaml.safe_load(vnf_dict['attributes']['stack_param'])
        updated_vdu_params = _latest_vdu_params(
            base_hot_dict, nested_hot_dict, vnfd_dict)
        for vnfc_resource in vnfc_resources:
            vdu_id = vnfc_resource.vdu_id
            if (updated_vdu_params.get(vdu_id) and
                    stack_param['nfv']['VDU'].get(vdu_id)):
                stack_param['nfv']['VDU'][vdu_id] = updated_vdu_params[vdu_id]
    return stack_param
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for NetApp volume driver
"""
import BaseHTTPServer
import httplib
import StringIO
from lxml import etree
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import netapp
LOG = logging.getLogger("cinder.volume.driver")
WSDL_HEADER = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<definitions xmlns="http://schemas.xmlsoap.org/wsdl/"
xmlns:na="http://www.netapp.com/management/v1"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="NetAppDfm"
targetNamespace="http://www.netapp.com/management/v1">"""
WSDL_TYPES = """<types>
<xsd:schema attributeFormDefault="unqualified" elementFormDefault="qualified"
targetNamespace="http://www.netapp.com/management/v1">
<xsd:element name="ApiProxy">
<xsd:complexType>
<xsd:all>
<xsd:element name="Request" type="na:Request"/>
<xsd:element name="Target" type="xsd:string"/>
<xsd:element minOccurs="0" name="Timeout" type="xsd:integer"/>
<xsd:element minOccurs="0" name="Username" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="ApiProxyResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Response" type="na:Response"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetEditBegin">
<xsd:complexType>
<xsd:all>
<xsd:element name="DatasetNameOrId" type="na:ObjNameOrId"/>
<xsd:element minOccurs="0" name="Force" type="xsd:boolean"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetEditBeginResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="EditLockId" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetEditCommit">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="AssumeConfirmation"
type="xsd:boolean"/>
<xsd:element name="EditLockId" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetEditCommitResult">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="IsProvisioningFailure"
type="xsd:boolean"/>
<xsd:element minOccurs="0" name="JobIds" type="na:ArrayOfJobInfo"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetEditRollback">
<xsd:complexType>
<xsd:all>
<xsd:element name="EditLockId" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetEditRollbackResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="DatasetListInfoIterEnd">
<xsd:complexType>
<xsd:all>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetListInfoIterEndResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="DatasetListInfoIterNext">
<xsd:complexType>
<xsd:all>
<xsd:element name="Maximum" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetListInfoIterNextResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Datasets" type="na:ArrayOfDatasetInfo"/>
<xsd:element name="Records" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetListInfoIterStart">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="ObjectNameOrId"
type="na:ObjNameOrId"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetListInfoIterStartResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Records" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetMemberListInfoIterEnd">
<xsd:complexType>
<xsd:all>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetMemberListInfoIterEndResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="DatasetMemberListInfoIterNext">
<xsd:complexType>
<xsd:all>
<xsd:element name="Maximum" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetMemberListInfoIterNextResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="DatasetMembers"
type="na:ArrayOfDatasetMemberInfo"/>
<xsd:element name="Records" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetMemberListInfoIterStart">
<xsd:complexType>
<xsd:all>
<xsd:element name="DatasetNameOrId" type="na:ObjNameOrId"/>
<xsd:element minOccurs="0" name="IncludeExportsInfo"
type="xsd:boolean"/>
<xsd:element minOccurs="0" name="IncludeIndirect"
type="xsd:boolean"/>
<xsd:element minOccurs="0" name="MemberType" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetMemberListInfoIterStartResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Records" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetProvisionMember">
<xsd:complexType>
<xsd:all>
<xsd:element name="EditLockId" type="xsd:integer"/>
<xsd:element name="ProvisionMemberRequestInfo"
type="na:ProvisionMemberRequestInfo"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetProvisionMemberResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="DatasetRemoveMember">
<xsd:complexType>
<xsd:all>
<xsd:element name="DatasetMemberParameters"
type="na:ArrayOfDatasetMemberParameter"/>
<xsd:element minOccurs="0" name="Destroy" type="xsd:boolean"/>
<xsd:element name="EditLockId" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DatasetRemoveMemberResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="DpJobProgressEventListIterEnd">
<xsd:complexType>
<xsd:all>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DpJobProgressEventListIterEndResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="DpJobProgressEventListIterNext">
<xsd:complexType>
<xsd:all>
<xsd:element name="Maximum" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DpJobProgressEventListIterNextResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="ProgressEvents"
type="na:ArrayOfDpJobProgressEventInfo"/>
<xsd:element name="Records" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DpJobProgressEventListIterStart">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="JobId" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DpJobProgressEventListIterStartResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Records" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DfmAbout">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="IncludeDirectorySizeInfo"
type="xsd:boolean"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="DfmAboutResult">
<xsd:complexType>
<xsd:all/>
</xsd:complexType>
</xsd:element>
<xsd:element name="HostListInfoIterEnd">
<xsd:complexType>
<xsd:all>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="HostListInfoIterEndResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="HostListInfoIterNext">
<xsd:complexType>
<xsd:all>
<xsd:element name="Maximum" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="HostListInfoIterNextResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Hosts" type="na:ArrayOfHostInfo"/>
<xsd:element name="Records" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="HostListInfoIterStart">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="ObjectNameOrId"
type="na:ObjNameOrId"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="HostListInfoIterStartResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Records" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="LunListInfoIterEnd">
<xsd:complexType>
<xsd:all>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="LunListInfoIterEndResult">
<xsd:complexType/>
</xsd:element>
<xsd:element name="LunListInfoIterNext">
<xsd:complexType>
<xsd:all>
<xsd:element name="Maximum" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="LunListInfoIterNextResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Luns" type="na:ArrayOfLunInfo"/>
<xsd:element name="Records" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="LunListInfoIterStart">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="ObjectNameOrId"
type="na:ObjNameOrId"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="LunListInfoIterStartResult">
<xsd:complexType>
<xsd:all>
<xsd:element name="Records" type="xsd:integer"/>
<xsd:element name="Tag" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="StorageServiceDatasetProvision">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="AssumeConfirmation"
type="xsd:boolean"/>
<xsd:element name="DatasetName" type="na:ObjName"/>
<xsd:element name="StorageServiceNameOrId" type="na:ObjNameOrId"/>
<xsd:element minOccurs="0" name="StorageSetDetails"
type="na:ArrayOfStorageSetInfo"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:element name="StorageServiceDatasetProvisionResult">
<xsd:complexType>
<xsd:all>
<xsd:element minOccurs="0" name="ConformanceAlerts"
type="na:ArrayOfConformanceAlert"/>
<xsd:element name="DatasetId" type="na:ObjId"/>
<xsd:element minOccurs="0" name="DryRunResults"
type="na:ArrayOfDryRunResult"/>
</xsd:all>
</xsd:complexType>
</xsd:element>
<xsd:complexType name="ArrayOfDatasetInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="DatasetInfo"
type="na:DatasetInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfDatasetMemberInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="DatasetMemberInfo"
type="na:DatasetMemberInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfDatasetMemberParameter">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="DatasetMemberParameter"
type="na:DatasetMemberParameter"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfDfmMetadataField">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="DfmMetadataField"
type="na:DfmMetadataField"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfDpJobProgressEventInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="DpJobProgressEventInfo"
type="na:DpJobProgressEventInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfHostInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="HostInfo" type="na:HostInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfJobInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="JobInfo" type="na:JobInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfLunInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="LunInfo" type="na:LunInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfStorageSetInfo">
<xsd:sequence>
<xsd:element maxOccurs="unbounded" name="StorageSetInfo"
type="na:StorageSetInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="DatasetExportInfo">
<xsd:all>
<xsd:element minOccurs="0" name="DatasetExportProtocol"
type="na:DatasetExportProtocol"/>
<xsd:element minOccurs="0" name="DatasetLunMappingInfo"
type="na:DatasetLunMappingInfo"/>
</xsd:all>
</xsd:complexType>
<xsd:simpleType name="DatasetExportProtocol">
<xsd:restriction base="xsd:string"/>
</xsd:simpleType>
<xsd:complexType name="DatasetInfo">
<xsd:all>
<xsd:element name="DatasetId" type="na:ObjId"/>
<xsd:element name="DatasetName" type="na:ObjName"/>
<xsd:element name="DatasetMetadata" type="na:ArrayOfDfmMetadataField"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="DatasetLunMappingInfo">
<xsd:all>
<xsd:element name="IgroupOsType" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="DatasetMemberInfo">
<xsd:all>
<xsd:element name="MemberId" type="na:ObjId"/>
<xsd:element name="MemberName" type="na:ObjName"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="DatasetMemberParameter">
<xsd:all>
<xsd:element name="ObjectNameOrId" type="na:ObjNameOrId"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="DfmMetadataField">
<xsd:all>
<xsd:element name="FieldName" type="xsd:string"/>
<xsd:element name="FieldValue" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="DpJobProgressEventInfo">
<xsd:all>
<xsd:element name="EventStatus" type="na:ObjStatus"/>
<xsd:element name="EventType" type="xsd:string"/>
<xsd:element minOccurs="0" name="ProgressLunInfo"
type="na:ProgressLunInfo"/>
</xsd:all>
</xsd:complexType>
<xsd:simpleType name="DpPolicyNodeName">
<xsd:restriction base="xsd:string"/>
</xsd:simpleType>
<xsd:simpleType name="HostId">
<xsd:restriction base="xsd:integer"/>
</xsd:simpleType>
<xsd:complexType name="HostInfo">
<xsd:all>
<xsd:element name="HostAddress" type="xsd:string"/>
<xsd:element name="HostId" type="na:HostId"/>
<xsd:element name="HostName" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="JobInfo">
<xsd:all>
<xsd:element name="JobId" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="LunInfo">
<xsd:all>
<xsd:element name="HostId" type="na:ObjId"/>
<xsd:element name="LunPath" type="na:ObjName"/>
</xsd:all>
</xsd:complexType>
<xsd:simpleType name="ObjId">
<xsd:restriction base="xsd:integer"/>
</xsd:simpleType>
<xsd:simpleType name="ObjName">
<xsd:restriction base="xsd:string"/>
</xsd:simpleType>
<xsd:simpleType name="ObjNameOrId">
<xsd:restriction base="xsd:string"/>
</xsd:simpleType>
<xsd:simpleType name="ObjStatus">
<xsd:restriction base="xsd:string"/>
</xsd:simpleType>
<xsd:complexType name="ProgressLunInfo">
<xsd:all>
<xsd:element name="LunPathId" type="na:ObjId"/>
<xsd:element name="LunName" type="na:ObjName"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="ProvisionMemberRequestInfo">
<xsd:all>
<xsd:element minOccurs="0" name="Description" type="xsd:string"/>
<xsd:element minOccurs="0" name="MaximumSnapshotSpace"
type="xsd:integer"/>
<xsd:element name="Name" type="xsd:string"/>
<xsd:element name="Size" type="xsd:integer"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="Request">
<xsd:all>
<xsd:element minOccurs="0" name="Args">
<xsd:complexType>
<xsd:sequence>
<xsd:any maxOccurs="unbounded" minOccurs="0"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="Name" type="xsd:string">
</xsd:element>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="Response">
<xsd:all>
<xsd:element minOccurs="0" name="Errno" type="xsd:integer"/>
<xsd:element minOccurs="0" name="Reason" type="xsd:string"/>
<xsd:element minOccurs="0" name="Results">
<xsd:complexType>
<xsd:sequence>
<xsd:any maxOccurs="unbounded" minOccurs="0"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="Status" type="xsd:string"/>
</xsd:all>
</xsd:complexType>
<xsd:complexType name="StorageSetInfo">
<xsd:all>
<xsd:element minOccurs="0" name="DatasetExportInfo"
type="na:DatasetExportInfo"/>
<xsd:element minOccurs="0" name="DpNodeName"
type="na:DpPolicyNodeName"/>
<xsd:element minOccurs="0" name="ServerNameOrId"
type="na:ObjNameOrId"/>
</xsd:all>
</xsd:complexType>
</xsd:schema></types>"""
WSDL_TRAILER = """<service name="DfmService">
<port binding="na:DfmBinding" name="DfmPort">
<soap:address location="https://HOST_NAME:8488/apis/soap/v1"/>
</port></service></definitions>"""
RESPONSE_PREFIX = """<?xml version="1.0" encoding="UTF-8"?>
<env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:na="http://www.netapp.com/management/v1"><env:Header/><env:Body>"""
RESPONSE_SUFFIX = """</env:Body></env:Envelope>"""
APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
'StorageServiceDatasetProvision']
iter_count = 0
iter_table = {}
class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler that fakes enough stuff to allow the driver to run.

    GET serves a generated WSDL document; POST dispatches SOAP calls to
    canned XML responses.  (The handler uses ``s`` instead of the
    conventional ``self``, following the original BaseHTTPServer example.)

    Bug fix: every error path previously referenced ``s.end_headers``
    without calling it, which is a no-op and leaves the header block
    unterminated; all occurrences now actually call ``end_headers()``.
    """
    def do_GET(s):
        """Respond to a GET request with the fake DFM WSDL."""
        if '/dfm.wsdl' != s.path:
            s.send_response(404)
            s.end_headers()
            return
        s.send_response(200)
        s.send_header("Content-Type", "application/wsdl+xml")
        s.end_headers()
        out = s.wfile
        out.write(WSDL_HEADER)
        out.write(WSDL_TYPES)
        # The message/portType/binding sections are identical in shape for
        # every API, so generate them from the APIS list.
        for api in APIS:
            out.write('<message name="%sRequest">' % api)
            out.write('<part element="na:%s" name="parameters"/>' % api)
            out.write('</message>')
            out.write('<message name="%sResponse">' % api)
            out.write('<part element="na:%sResult" name="results"/>' % api)
            out.write('</message>')
        out.write('<portType name="DfmInterface">')
        for api in APIS:
            out.write('<operation name="%s">' % api)
            out.write('<input message="na:%sRequest"/>' % api)
            out.write('<output message="na:%sResponse"/>' % api)
            out.write('</operation>')
        out.write('</portType>')
        out.write('<binding name="DfmBinding" type="na:DfmInterface">')
        out.write('<soap:binding style="document" ' +
                  'transport="http://schemas.xmlsoap.org/soap/http"/>')
        for api in APIS:
            out.write('<operation name="%s">' % api)
            out.write('<soap:operation soapAction="urn:%s"/>' % api)
            out.write('<input><soap:body use="literal"/></input>')
            out.write('<output><soap:body use="literal"/></output>')
            out.write('</operation>')
        out.write('</binding>')
        out.write(WSDL_TRAILER)

    def do_POST(s):
        """Respond to a POST request by faking the matching SOAP reply."""
        if '/apis/soap/v1' != s.path:
            s.send_response(404)
            s.end_headers()
            return
        request_xml = s.rfile.read(int(s.headers['Content-Length']))
        ntap_ns = 'http://www.netapp.com/management/v1'
        nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
                 'na': ntap_ns}
        root = etree.fromstring(request_xml)
        body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
        request = body.getchildren()[0]
        tag = request.tag
        if not tag.startswith('{' + ntap_ns + '}'):
            s.send_response(500)
            s.end_headers()
            return
        # Strip the '{namespace}' prefix to recover the plain API name.
        api = tag[(2 + len(ntap_ns)):]
        global iter_count
        global iter_table
        if 'DatasetListInfoIterStart' == api:
            iter_name = 'dataset_%s' % iter_count
            iter_count = iter_count + 1
            iter_table[iter_name] = 0
            body = """<na:DatasetListInfoIterStartResult>
                <na:Records>1</na:Records>
                <na:Tag>%s</na:Tag>
                </na:DatasetListInfoIterStartResult>""" % iter_name
        elif 'DatasetListInfoIterNext' == api:
            tags = body.xpath('na:DatasetListInfoIterNext/na:Tag',
                              namespaces=nsmap)
            iter_name = tags[0].text
            if iter_table[iter_name]:
                # Second page for this tag: report no more records.
                body = """<na:DatasetListInfoIterNextResult>
                    <na:Datasets></na:Datasets>
                    <na:Records>0</na:Records>
                    </na:DatasetListInfoIterNextResult>"""
            else:
                iter_table[iter_name] = 1
                body = """<na:DatasetListInfoIterNextResult>
                    <na:Datasets>
                    <na:DatasetInfo>
                    <na:DatasetId>0</na:DatasetId>
                    <na:DatasetMetadata>
                    <na:DfmMetadataField>
                    <na:FieldName>OpenStackProject</na:FieldName>
                    <na:FieldValue>testproj</na:FieldValue>
                    </na:DfmMetadataField>
                    <na:DfmMetadataField>
                    <na:FieldName>OpenStackVolType</na:FieldName>
                    <na:FieldValue></na:FieldValue>
                    </na:DfmMetadataField>
                    </na:DatasetMetadata>
                    <na:DatasetName>OpenStack_testproj</na:DatasetName>
                    </na:DatasetInfo>
                    </na:Datasets>
                    <na:Records>1</na:Records>
                    </na:DatasetListInfoIterNextResult>"""
        elif 'DatasetListInfoIterEnd' == api:
            body = """<na:DatasetListInfoIterEndResult/>"""
        elif 'DatasetEditBegin' == api:
            body = """<na:DatasetEditBeginResult>
                <na:EditLockId>0</na:EditLockId>
                </na:DatasetEditBeginResult>"""
        elif 'DatasetEditCommit' == api:
            body = """<na:DatasetEditCommitResult>
                <na:IsProvisioningFailure>false</na:IsProvisioningFailure>
                <na:JobIds>
                <na:JobInfo>
                <na:JobId>0</na:JobId>
                </na:JobInfo>
                </na:JobIds>
                </na:DatasetEditCommitResult>"""
        elif 'DatasetProvisionMember' == api:
            body = """<na:DatasetProvisionMemberResult/>"""
        elif 'DatasetRemoveMember' == api:
            body = """<na:DatasetRemoveMemberResult/>"""
        elif 'DfmAbout' == api:
            body = """<na:DfmAboutResult/>"""
        elif 'DpJobProgressEventListIterStart' == api:
            iter_name = 'dpjobprogress_%s' % iter_count
            iter_count = iter_count + 1
            iter_table[iter_name] = 0
            body = """<na:DpJobProgressEventListIterStartResult>
                <na:Records>2</na:Records>
                <na:Tag>%s</na:Tag>
                </na:DpJobProgressEventListIterStartResult>""" % iter_name
        elif 'DpJobProgressEventListIterNext' == api:
            tags = body.xpath('na:DpJobProgressEventListIterNext/na:Tag',
                              namespaces=nsmap)
            iter_name = tags[0].text
            if iter_table[iter_name]:
                body = """<na:DpJobProgressEventListIterNextResult/>"""
            else:
                iter_table[iter_name] = 1
                name = ('filer:/OpenStack_testproj/volume-00000001/'
                        'volume-00000001')
                body = """<na:DpJobProgressEventListIterNextResult>
                    <na:ProgressEvents>
                    <na:DpJobProgressEventInfo>
                    <na:EventStatus>normal</na:EventStatus>
                    <na:EventType>lun-create</na:EventType>
                    <na:ProgressLunInfo>
                    <na:LunPathId>0</na:LunPathId>
                    <na:LunName>%s</na:LunName>
                    </na:ProgressLunInfo>
                    </na:DpJobProgressEventInfo>
                    <na:DpJobProgressEventInfo>
                    <na:EventStatus>normal</na:EventStatus>
                    <na:EventType>job-end</na:EventType>
                    </na:DpJobProgressEventInfo>
                    </na:ProgressEvents>
                    <na:Records>2</na:Records>
                    </na:DpJobProgressEventListIterNextResult>""" % name
        elif 'DpJobProgressEventListIterEnd' == api:
            body = """<na:DpJobProgressEventListIterEndResult/>"""
        elif 'DatasetMemberListInfoIterStart' == api:
            iter_name = 'datasetmember_%s' % iter_count
            iter_count = iter_count + 1
            iter_table[iter_name] = 0
            body = """<na:DatasetMemberListInfoIterStartResult>
                <na:Records>1</na:Records>
                <na:Tag>%s</na:Tag>
                </na:DatasetMemberListInfoIterStartResult>""" % iter_name
        elif 'DatasetMemberListInfoIterNext' == api:
            tags = body.xpath('na:DatasetMemberListInfoIterNext/na:Tag',
                              namespaces=nsmap)
            iter_name = tags[0].text
            if iter_table[iter_name]:
                body = """<na:DatasetMemberListInfoIterNextResult>
                    <na:DatasetMembers></na:DatasetMembers>
                    <na:Records>0</na:Records>
                    </na:DatasetMemberListInfoIterNextResult>"""
            else:
                iter_table[iter_name] = 1
                name = ('filer:/OpenStack_testproj/volume-00000001/'
                        'volume-00000001')
                body = """<na:DatasetMemberListInfoIterNextResult>
                    <na:DatasetMembers>
                    <na:DatasetMemberInfo>
                    <na:MemberId>0</na:MemberId>
                    <na:MemberName>%s</na:MemberName>
                    </na:DatasetMemberInfo>
                    </na:DatasetMembers>
                    <na:Records>1</na:Records>
                    </na:DatasetMemberListInfoIterNextResult>""" % name
        elif 'DatasetMemberListInfoIterEnd' == api:
            body = """<na:DatasetMemberListInfoIterEndResult/>"""
        elif 'HostListInfoIterStart' == api:
            body = """<na:HostListInfoIterStartResult>
                <na:Records>1</na:Records>
                <na:Tag>host</na:Tag>
                </na:HostListInfoIterStartResult>"""
        elif 'HostListInfoIterNext' == api:
            body = """<na:HostListInfoIterNextResult>
                <na:Hosts>
                <na:HostInfo>
                <na:HostAddress>1.2.3.4</na:HostAddress>
                <na:HostId>0</na:HostId>
                <na:HostName>filer</na:HostName>
                </na:HostInfo>
                </na:Hosts>
                <na:Records>1</na:Records>
                </na:HostListInfoIterNextResult>"""
        elif 'HostListInfoIterEnd' == api:
            body = """<na:HostListInfoIterEndResult/>"""
        elif 'LunListInfoIterStart' == api:
            body = """<na:LunListInfoIterStartResult>
                <na:Records>1</na:Records>
                <na:Tag>lun</na:Tag>
                </na:LunListInfoIterStartResult>"""
        elif 'LunListInfoIterNext' == api:
            path = 'OpenStack_testproj/volume-00000001/volume-00000001'
            body = """<na:LunListInfoIterNextResult>
                <na:Luns>
                <na:LunInfo>
                <na:HostId>0</na:HostId>
                <na:LunPath>%s</na:LunPath>
                </na:LunInfo>
                </na:Luns>
                <na:Records>1</na:Records>
                </na:LunListInfoIterNextResult>""" % path
        elif 'LunListInfoIterEnd' == api:
            body = """<na:LunListInfoIterEndResult/>"""
        elif 'ApiProxy' == api:
            # ApiProxy tunnels filer ONTAPI calls; dispatch on the inner
            # request name.
            names = body.xpath('na:ApiProxy/na:Request/na:Name',
                               namespaces=nsmap)
            proxy = names[0].text
            if 'igroup-list-info' == proxy:
                igroup = 'openstack-iqn.1993-08.org.debian:01:23456789'
                initiator = 'iqn.1993-08.org.debian:01:23456789'
                proxy_body = """<initiator-groups>
                    <initiator-group-info>
                    <initiator-group-name>%s</initiator-group-name>
                    <initiator-group-type>iscsi</initiator-group-type>
                    <initiator-group-os-type>linux</initiator-group-os-type>
                    <initiators>
                    <initiator-info>
                    <initiator-name>%s</initiator-name>
                    </initiator-info>
                    </initiators>
                    </initiator-group-info>
                    </initiator-groups>""" % (igroup, initiator)
            elif 'igroup-create' == proxy:
                proxy_body = ''
            elif 'igroup-add' == proxy:
                proxy_body = ''
            elif 'lun-map-list-info' == proxy:
                proxy_body = '<initiator-groups/>'
            elif 'lun-map' == proxy:
                proxy_body = '<lun-id-assigned>0</lun-id-assigned>'
            elif 'lun-unmap' == proxy:
                proxy_body = ''
            elif 'iscsi-portal-list-info' == proxy:
                proxy_body = """<iscsi-portal-list-entries>
                    <iscsi-portal-list-entry-info>
                    <ip-address>1.2.3.4</ip-address>
                    <ip-port>3260</ip-port>
                    <tpgroup-tag>1000</tpgroup-tag>
                    </iscsi-portal-list-entry-info>
                    </iscsi-portal-list-entries>"""
            elif 'iscsi-node-get-name' == proxy:
                target = 'iqn.1992-08.com.netapp:sn.111111111'
                proxy_body = '<node-name>%s</node-name>' % target
            else:
                # Unknown proxy API
                s.send_response(500)
                s.end_headers()
                return
            api = api + ':' + proxy
            proxy_header = '<na:ApiProxyResult><na:Response><na:Results>'
            proxy_trailer = """</na:Results><na:Status>passed</na:Status>
                </na:Response></na:ApiProxyResult>"""
            body = proxy_header + proxy_body + proxy_trailer
        else:
            # Unknown API
            s.send_response(500)
            s.end_headers()
            return
        s.send_response(200)
        s.send_header("Content-Type", "text/xml; charset=utf-8")
        s.end_headers()
        s.wfile.write(RESPONSE_PREFIX)
        s.wfile.write(body)
        s.wfile.write(RESPONSE_SUFFIX)
class FakeHttplibSocket(object):
    """A fake socket implementation for httplib.HTTPResponse."""
    def __init__(self, value):
        # Reads are served from the canned value; writes accumulate
        # in a separate buffer.
        self._rbuffer = StringIO.StringIO(value)
        self._wbuffer = StringIO.StringIO('')
        original_close = self._wbuffer.close
        def capturing_close():
            # Snapshot everything written before the buffer is discarded,
            # so callers can read it back via ``self.result``.
            self.result = self._wbuffer.getvalue()
            original_close()
        self._wbuffer.close = capturing_close
    def makefile(self, mode, _other):
        """Return the internal buffer matching the requested mode."""
        if mode in ('r', 'rb'):
            return self._rbuffer
        if mode in ('w', 'wb'):
            return self._wbuffer
class FakeHTTPConnection(object):
    """A fake httplib.HTTPConnection for netapp tests
    Requests made via this connection actually get translated and routed into
    the fake Dfm handler above, we then turn the response into
    the httplib.HTTPResponse that the caller expects.
    """
    def __init__(self, host, timeout=None):
        # timeout is accepted for interface compatibility but ignored;
        # everything is served in-process.
        self.host = host
    def request(self, method, path, data=None, headers=None):
        # Serialize the call into a raw HTTP/1.1 request string, feed it
        # through the fake handler, and capture what the handler writes.
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
        for key, value in headers.iteritems():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data
        # NOTE(vish): normally the http transport normalizes from unicode
        sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
        # NOTE(vish): stop the server from trying to look up address from
        # the fake socket
        FakeDfmServerHandler.address_string = lambda x: '127.0.0.1'
        self.app = FakeDfmServerHandler(sock, '127.0.0.1:8088', None)
        # Replay the handler's captured output as the readable side of a
        # fresh fake socket for httplib to parse.
        self.sock = FakeHttplibSocket(sock.result)
        self.http_response = httplib.HTTPResponse(self.sock)
    def set_debuglevel(self, level):
        # Debug output is not supported by the fake connection.
        pass
    def getresponse(self):
        # begin() parses the status line and headers from the fake socket.
        self.http_response.begin()
        return self.http_response
    def getresponsebody(self):
        # Raw (unparsed) response text written by the fake handler.
        return self.sock.result
class NetAppDriverTestCase(test.TestCase):
    """Test case for NetAppISCSIDriver"""
    # Canned identifiers; the fake DFM server's responses are keyed to
    # these values (e.g. the igroup/initiator IQNs).
    STORAGE_SERVICE = 'Openstack Service'
    STORAGE_SERVICE_PREFIX = 'Openstack Service-'
    PROJECT_ID = 'testproj'
    VOLUME_NAME = 'volume-00000001'
    VOLUME_TYPE = ''
    VOLUME_SIZE = 2147483648L # 2 GB
    INITIATOR = 'iqn.1993-08.org.debian:01:23456789'
    def setUp(self):
        """Wire the driver up to the in-process fake DFM HTTP server."""
        super(NetAppDriverTestCase, self).setUp()
        driver = netapp.NetAppISCSIDriver()
        # Route all of the driver's HTTP traffic through the fake connection.
        self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection)
        driver._create_client(wsdl_url='http://localhost:8088/dfm.wsdl',
                              login='root', password='password',
                              hostname='localhost', port=8088, cache=False)
        driver._set_storage_service(self.STORAGE_SERVICE)
        driver._set_storage_service_prefix(self.STORAGE_SERVICE_PREFIX)
        driver._set_vfiler('')
        self.driver = driver
    def test_connect(self):
        # Setup check must succeed against the fake server.
        self.driver.check_for_setup_error()
    def test_create_destroy(self):
        # A LUN can be provisioned and then removed.
        self.driver._discover_luns()
        self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
                               self.VOLUME_TYPE, self.VOLUME_SIZE)
        self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID)
    def test_map_unmap(self):
        # A provisioned LUN can be exported, attached, and detached.
        self.driver._discover_luns()
        self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID,
                               self.VOLUME_TYPE, self.VOLUME_SIZE)
        volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID,
                  'id': 0, 'provider_auth': None}
        updates = self.driver._get_export(volume)
        self.assertTrue(updates['provider_location'])
        volume['provider_location'] = updates['provider_location']
        connector = {'initiator': self.INITIATOR}
        connection_info = self.driver.initialize_connection(volume, connector)
        self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
        properties = connection_info['data']
        self.driver.terminate_connection(volume, connector)
        self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID)
# --- Fixture data for the C-mode (cluster-mode) web service fake below. ---
# WSDL header: opens <definitions> and declares the namespaces used by the
# generated document served from FakeCMODEServerHandler.do_GET.
WSDL_HEADER_CMODE = """<?xml version="1.0" encoding="UTF-8"?>
<definitions xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:na="http://cloud.netapp.com/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns="http://schemas.xmlsoap.org/wsdl/"
targetNamespace="http://cloud.netapp.com/" name="CloudStorageService">
"""
# Schema <types> section describing the request/response element for each
# API in CMODE_APIS.
WSDL_TYPES_CMODE = """<types>
<xs:schema xmlns:na="http://cloud.netapp.com/"
xmlns:xs="http://www.w3.org/2001/XMLSchema" version="1.0"
targetNamespace="http://cloud.netapp.com/">
<xs:element name="ProvisionLun">
<xs:complexType>
<xs:all>
<xs:element name="Name" type="xs:string"/>
<xs:element name="Size" type="xsd:long"/>
<xs:element name="Metadata" type="na:Metadata" minOccurs="0"
maxOccurs="unbounded"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="ProvisionLunResult">
<xs:complexType>
<xs:all>
<xs:element name="Lun" type="na:Lun"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="DestroyLun">
<xs:complexType>
<xs:all>
<xs:element name="Handle" type="xsd:string"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="DestroyLunResult">
<xs:complexType>
<xs:all/>
</xs:complexType>
</xs:element>
<xs:element name="CloneLun">
<xs:complexType>
<xs:all>
<xs:element name="Handle" type="xsd:string"/>
<xs:element name="NewName" type="xsd:string"/>
<xs:element name="Metadata" type="na:Metadata" minOccurs="0"
maxOccurs="unbounded"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="CloneLunResult">
<xs:complexType>
<xs:all>
<xs:element name="Lun" type="na:Lun"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="MapLun">
<xs:complexType>
<xs:all>
<xs:element name="Handle" type="xsd:string"/>
<xs:element name="InitiatorType" type="xsd:string"/>
<xs:element name="InitiatorName" type="xsd:string"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="MapLunResult">
<xs:complexType>
<xs:all/>
</xs:complexType>
</xs:element>
<xs:element name="UnmapLun">
<xs:complexType>
<xs:all>
<xs:element name="Handle" type="xsd:string"/>
<xs:element name="InitiatorType" type="xsd:string"/>
<xs:element name="InitiatorName" type="xsd:string"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="UnmapLunResult">
<xs:complexType>
<xs:all/>
</xs:complexType>
</xs:element>
<xs:element name="ListLuns">
<xs:complexType>
<xs:all>
<xs:element name="NameFilter" type="xsd:string" minOccurs="0"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="ListLunsResult">
<xs:complexType>
<xs:all>
<xs:element name="Lun" type="na:Lun" minOccurs="0"
maxOccurs="unbounded"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="GetLunTargetDetails">
<xs:complexType>
<xs:all>
<xs:element name="Handle" type="xsd:string"/>
<xs:element name="InitiatorType" type="xsd:string"/>
<xs:element name="InitiatorName" type="xsd:string"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:element name="GetLunTargetDetailsResult">
<xs:complexType>
<xs:all>
<xs:element name="TargetDetails" type="na:TargetDetails"
minOccurs="0" maxOccurs="unbounded"/>
</xs:all>
</xs:complexType>
</xs:element>
<xs:complexType name="Metadata">
<xs:sequence>
<xs:element name="Key" type="xs:string"/>
<xs:element name="Value" type="xs:string"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="Lun">
<xs:sequence>
<xs:element name="Name" type="xs:string"/>
<xs:element name="Size" type="xs:long"/>
<xs:element name="Handle" type="xs:string"/>
<xs:element name="Metadata" type="na:Metadata" minOccurs="0"
maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="TargetDetails">
<xs:sequence>
<xs:element name="Address" type="xs:string"/>
<xs:element name="Port" type="xs:int"/>
<xs:element name="Portal" type="xs:int"/>
<xs:element name="Iqn" type="xs:string"/>
<xs:element name="LunNumber" type="xs:int"/>
</xs:sequence>
</xs:complexType>
</xs:schema></types>"""
# Closing <service> stanza plus </definitions>; do_GET generates the
# message/portType/binding sections in between.
WSDL_TRAILER_CMODE = """<service name="CloudStorageService">
<port name="CloudStoragePort" binding="na:CloudStorageBinding">
<soap:address location="http://hostname:8080/ws/ntapcloud"/>
</port>
</service>
</definitions>"""
# SOAP envelope wrapped around every canned response body by do_POST.
RESPONSE_PREFIX_CMODE = """<?xml version='1.0' encoding='UTF-8'?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Body>"""
RESPONSE_SUFFIX_CMODE = """</soapenv:Body></soapenv:Envelope>"""
# Operations advertised in the generated WSDL and accepted by do_POST.
CMODE_APIS = ['ProvisionLun', 'DestroyLun', 'CloneLun', 'MapLun', 'UnmapLun',
              'ListLuns', 'GetLunTargetDetails']
class FakeCMODEServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler that fakes enough stuff to allow the driver to run"""
    def do_GET(s):
        """Respond to a GET request by serving the generated fake WSDL."""
        if '/ntap_cloud.wsdl' != s.path:
            s.send_response(404)
            # Fix: end_headers must be CALLED; the bare attribute reference
            # was a no-op, leaving the header section unterminated.
            s.end_headers()
            return
        s.send_response(200)
        s.send_header("Content-Type", "application/wsdl+xml")
        s.end_headers()
        out = s.wfile
        out.write(WSDL_HEADER_CMODE)
        out.write(WSDL_TYPES_CMODE)
        # Generate the message/portType/binding boilerplate for each API.
        for api in CMODE_APIS:
            out.write('<message name="%sRequest">' % api)
            out.write('<part element="na:%s" name="req"/>' % api)
            out.write('</message>')
            out.write('<message name="%sResponse">' % api)
            out.write('<part element="na:%sResult" name="res"/>' % api)
            out.write('</message>')
        out.write('<portType name="CloudStorage">')
        for api in CMODE_APIS:
            out.write('<operation name="%s">' % api)
            out.write('<input message="na:%sRequest"/>' % api)
            out.write('<output message="na:%sResponse"/>' % api)
            out.write('</operation>')
        out.write('</portType>')
        out.write('<binding name="CloudStorageBinding" '
                  'type="na:CloudStorage">')
        out.write('<soap:binding style="document" ' +
                  'transport="http://schemas.xmlsoap.org/soap/http"/>')
        for api in CMODE_APIS:
            out.write('<operation name="%s">' % api)
            out.write('<soap:operation soapAction=""/>')
            out.write('<input><soap:body use="literal"/></input>')
            out.write('<output><soap:body use="literal"/></output>')
            out.write('</operation>')
        out.write('</binding>')
        out.write(WSDL_TRAILER_CMODE)
    def do_POST(s):
        """Respond to a POST request with a canned SOAP reply per API."""
        if '/ws/ntapcloud' != s.path:
            s.send_response(404)
            s.end_headers()  # fix: was a bare attribute reference (no-op)
            return
        request_xml = s.rfile.read(int(s.headers['Content-Length']))
        ntap_ns = 'http://cloud.netapp.com/'
        nsmap = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
                 'na': ntap_ns}
        root = etree.fromstring(request_xml)
        body = root.xpath('/soapenv:Envelope/soapenv:Body',
                          namespaces=nsmap)[0]
        request = body.getchildren()[0]
        tag = request.tag
        if not tag.startswith('{' + ntap_ns + '}'):
            s.send_response(500)
            s.end_headers()  # fix: was a bare attribute reference (no-op)
            return
        # Strip the '{namespace}' prefix (braces add 2 chars) -> API name.
        api = tag[(2 + len(ntap_ns)):]
        if 'ProvisionLun' == api:
            body = """<ns:ProvisionLunResult xmlns:ns=
            "http://cloud.netapp.com/">
            <Lun><Name>lun1</Name><Size>20</Size>
            <Handle>1d9c006c-a406-42f6-a23f-5ed7a6dc33e3</Handle>
            <Metadata><Key>OsType</Key>
            <Value>linux</Value></Metadata></Lun>
            </ns:ProvisionLunResult>"""
        elif 'DestroyLun' == api:
            body = """<ns:DestroyLunResult xmlns:ns="http://cloud.netapp.com/"
            />"""
        elif 'CloneLun' == api:
            body = """<ns:CloneLunResult xmlns:ns="http://cloud.netapp.com/">
            <Lun><Name>lun2</Name><Size>2</Size>
            <Handle>98ea1791d228453899d422b4611642c3</Handle>
            <Metadata><Key>OsType</Key>
            <Value>linux</Value></Metadata>
            </Lun></ns:CloneLunResult>"""
        elif 'MapLun' == api:
            # Fix: the element used prefix 'ns1' while only 'ns' was
            # declared, producing ill-formed XML.
            body = """<ns:MapLunResult xmlns:ns="http://cloud.netapp.com/"
            />"""
        elif 'UnmapLun' == api:
            # Fix: the request tag is 'UnmapLun' (see CMODE_APIS); the old
            # 'Unmap' comparison never matched, so unmaps returned 500.
            # Also fixed the undeclared 'ns1' prefix, as for MapLun.
            body = """<ns:UnmapLunResult xmlns:ns="http://cloud.netapp.com/"
            />"""
        elif 'ListLuns' == api:
            body = """<ns:ListLunsResult xmlns:ns="http://cloud.netapp.com/">
            <Lun>
            <Name>lun1</Name>
            <Size>20</Size>
            <Handle>asdjdnsd</Handle>
            </Lun>
            </ns:ListLunsResult>"""
        elif 'GetLunTargetDetails' == api:
            # NOTE(review): the child element is <TargetDetail> while the
            # WSDL schema declares "TargetDetails" -- confirm the SOAP
            # client tolerates the mismatch before changing it.
            body = """<ns:GetLunTargetDetailsResult
            xmlns:ns="http://cloud.netapp.com/">
            <TargetDetail>
            <Address>1.2.3.4</Address>
            <Port>3260</Port>
            <Portal>1000</Portal>
            <Iqn>iqn.199208.com.netapp:sn.123456789</Iqn>
            <LunNumber>0</LunNumber>
            </TargetDetail>
            </ns:GetLunTargetDetailsResult>"""
        else:
            # Unknown API
            s.send_response(500)
            s.end_headers()  # fix: was a bare attribute reference (no-op)
            return
        s.send_response(200)
        s.send_header("Content-Type", "text/xml; charset=utf-8")
        s.end_headers()
        s.wfile.write(RESPONSE_PREFIX_CMODE)
        s.wfile.write(body)
        s.wfile.write(RESPONSE_SUFFIX_CMODE)
class FakeCmodeHTTPConnection(object):
    """A fake httplib.HTTPConnection for netapp tests
    Requests made via this connection actually get translated and routed into
    the fake Dfm handler above, we then turn the response into
    the httplib.HTTPResponse that the caller expects.
    """
    def __init__(self, host, timeout=None):
        # timeout is accepted for interface compatibility but ignored;
        # everything is served in-process.
        self.host = host
    def request(self, method, path, data=None, headers=None):
        # Serialize the call into a raw HTTP/1.1 request string, feed it
        # through the fake C-mode handler, and capture its output.
        if not headers:
            headers = {}
        req_str = '%s %s HTTP/1.1\r\n' % (method, path)
        for key, value in headers.iteritems():
            req_str += "%s: %s\r\n" % (key, value)
        if data:
            req_str += '\r\n%s' % data
        # NOTE(vish): normally the http transport normalizes from unicode
        sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8"))
        # NOTE(vish): stop the server from trying to look up address from
        # the fake socket
        FakeCMODEServerHandler.address_string = lambda x: '127.0.0.1'
        self.app = FakeCMODEServerHandler(sock, '127.0.0.1:8080', None)
        # Replay the handler's captured output as the readable side of a
        # fresh fake socket for httplib to parse.
        self.sock = FakeHttplibSocket(sock.result)
        self.http_response = httplib.HTTPResponse(self.sock)
    def set_debuglevel(self, level):
        # Debug output is not supported by the fake connection.
        pass
    def getresponse(self):
        # begin() parses the status line and headers from the fake socket.
        self.http_response.begin()
        return self.http_response
    def getresponsebody(self):
        # Raw (unparsed) response text written by the fake handler.
        return self.sock.result
class NetAppCmodeISCSIDriverTestCase(test.TestCase):
    """Test case for NetAppISCSIDriver"""
    # Canned volume/snapshot dicts matching what the driver expects from
    # the volume manager; values line up with the fake server's responses.
    volume = {
        'name': 'lun1', 'size': 1, 'volume_name': 'lun1',
        'os_type': 'linux', 'provider_location': 'lun1',
        'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
        'display_name': None, 'display_description': 'lun1',
        'volume_type_id': None
    }
    snapshot = {
        'name': 'lun2', 'size': 1, 'volume_name': 'lun1',
        'volume_size': 1, 'project_id': 'project'
    }
    volume_sec = {
        'name': 'vol_snapshot', 'size': 1, 'volume_name': 'lun1',
        'os_type': 'linux', 'provider_location': 'lun1',
        'id': 'lun1', 'provider_auth': None, 'project_id': 'project',
        'display_name': None, 'display_description': 'lun1',
        'volume_type_id': None
    }
    def setUp(self):
        """Wire the C-mode driver up to the in-process fake HTTP server."""
        super(NetAppCmodeISCSIDriverTestCase, self).setUp()
        driver = netapp.NetAppCmodeISCSIDriver()
        # Route all of the driver's HTTP traffic through the fake connection.
        self.stubs.Set(httplib, 'HTTPConnection', FakeCmodeHTTPConnection)
        driver._create_client(wsdl_url='http://localhost:8080/ntap_cloud.wsdl',
                              login='root', password='password',
                              hostname='localhost', port=8080, cache=False)
        self.driver = driver
    def test_connect(self):
        # Setup check must succeed against the fake server.
        self.driver.check_for_setup_error()
    def test_create_destroy(self):
        # A volume can be created and then deleted.
        self.driver.create_volume(self.volume)
        self.driver.delete_volume(self.volume)
    def test_create_vol_snapshot_destroy(self):
        # Full snapshot round trip: snapshot, clone from it, clean up.
        self.driver.create_volume(self.volume)
        self.driver.create_snapshot(self.snapshot)
        self.driver.create_volume_from_snapshot(self.volume_sec, self.snapshot)
        self.driver.delete_snapshot(self.snapshot)
        self.driver.delete_volume(self.volume)
    def test_map_unmap(self):
        # A created volume can be exported, attached, and detached.
        self.driver.create_volume(self.volume)
        updates = self.driver.create_export(None, self.volume)
        self.assertTrue(updates['provider_location'])
        self.volume['provider_location'] = updates['provider_location']
        connector = {'initiator': 'init1'}
        connection_info = self.driver.initialize_connection(self.volume,
                                                            connector)
        self.assertEqual(connection_info['driver_volume_type'], 'iscsi')
        properties = connection_info['data']
        self.driver.terminate_connection(self.volume, connector)
        self.driver.delete_volume(self.volume)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import candidate_secondary_path
class candidate_secondary_paths(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path/p2p-primary-path/candidate-secondary-paths. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: The set of candidate secondary paths which may be used
    for this primary path. When secondary paths are specified
    in the list the path of the secondary LSP in use must be
    restricted to those path options referenced. The
    priority of the secondary paths is specified within the
    list. Higher priority values are less preferred - that is
    to say that a path with priority 0 is the most preferred
    path. In the case that the list is empty, any secondary
    path option may be utilised when the current primary path
    is in use.
    """
    # __slots__ restricts instances to the generated members only.
    __slots__ = ("_path_helper", "_extmethods", "__candidate_secondary_path")
    _yang_name = "candidate-secondary-paths"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        """Build the container; optionally copy members from one source object."""
        self._path_helper = False
        self._extmethods = False
        # Default value: an empty YANG list keyed by "secondary-path".
        self.__candidate_secondary_path = YANGDynClass(
            base=YANGListType(
                "secondary_path",
                candidate_secondary_path.candidate_secondary_path,
                yang_name="candidate-secondary-path",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="secondary-path",
                extensions=None,
            ),
            is_container="list",
            yang_name="candidate-secondary-path",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        """Return the path of this container within the YANG tree."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "lsps",
                "constrained-path",
                "tunnels",
                "tunnel",
                "p2p-tunnel-attributes",
                "p2p-primary-path",
                "p2p-primary-path",
                "candidate-secondary-paths",
            ]
    def _get_candidate_secondary_path(self):
        """
        Getter method for candidate_secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path (list)
        YANG Description: List of secondary paths which may be utilised when the
        current primary path is in use
        """
        return self.__candidate_secondary_path
    def _set_candidate_secondary_path(self, v, load=False):
        """
        Setter method for candidate_secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_candidate_secondary_path is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_candidate_secondary_path() directly.
        YANG Description: List of secondary paths which may be utilised when the
        current primary path is in use
        """
        # Coerce through the value's user type first, if it has one.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "secondary_path",
                    candidate_secondary_path.candidate_secondary_path,
                    yang_name="candidate-secondary-path",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="secondary-path",
                    extensions=None,
                ),
                is_container="list",
                yang_name="candidate-secondary-path",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """candidate_secondary_path must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("secondary_path",candidate_secondary_path.candidate_secondary_path, yang_name="candidate-secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='secondary-path', extensions=None), is_container='list', yang_name="candidate-secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )
        self.__candidate_secondary_path = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_candidate_secondary_path(self):
        """Reset candidate_secondary_path to its default (empty list)."""
        self.__candidate_secondary_path = YANGDynClass(
            base=YANGListType(
                "secondary_path",
                candidate_secondary_path.candidate_secondary_path,
                yang_name="candidate-secondary-path",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="secondary-path",
                extensions=None,
            ),
            is_container="list",
            yang_name="candidate-secondary-path",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
    candidate_secondary_path = __builtin__.property(
        _get_candidate_secondary_path, _set_candidate_secondary_path
    )
    _pyangbind_elements = OrderedDict(
        [("candidate_secondary_path", candidate_secondary_path)]
    )
from . import candidate_secondary_path
class candidate_secondary_paths(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path/p2p-primary-path/candidate-secondary-paths. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: The set of candidate secondary paths which may be used
    for this primary path. When secondary paths are specified
    in the list the path of the secondary LSP in use must be
    restricted to those path options referenced. The
    priority of the secondary paths is specified within the
    list. Higher priority values are less preferred - that is
    to say that a path with priority 0 is the most preferred
    path. In the case that the list is empty, any secondary
    path option may be utilised when the current primary path
    is in use.
    """
    # __slots__ restricts instances to the generated members only.
    __slots__ = ("_path_helper", "_extmethods", "__candidate_secondary_path")
    _yang_name = "candidate-secondary-paths"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        """Build the container; optionally copy members from one source object."""
        self._path_helper = False
        self._extmethods = False
        # Default value: an empty YANG list keyed by "secondary-path".
        self.__candidate_secondary_path = YANGDynClass(
            base=YANGListType(
                "secondary_path",
                candidate_secondary_path.candidate_secondary_path,
                yang_name="candidate-secondary-path",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="secondary-path",
                extensions=None,
            ),
            is_container="list",
            yang_name="candidate-secondary-path",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        """Return the path of this container within the YANG tree."""
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "mpls",
                "lsps",
                "constrained-path",
                "tunnels",
                "tunnel",
                "p2p-tunnel-attributes",
                "p2p-primary-path",
                "p2p-primary-path",
                "candidate-secondary-paths",
            ]
    def _get_candidate_secondary_path(self):
        """
        Getter method for candidate_secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path (list)
        YANG Description: List of secondary paths which may be utilised when the
        current primary path is in use
        """
        return self.__candidate_secondary_path
    def _set_candidate_secondary_path(self, v, load=False):
        """
        Setter method for candidate_secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_candidate_secondary_path is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_candidate_secondary_path() directly.
        YANG Description: List of secondary paths which may be utilised when the
        current primary path is in use
        """
        # Coerce through the value's user type first, if it has one.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGListType(
                    "secondary_path",
                    candidate_secondary_path.candidate_secondary_path,
                    yang_name="candidate-secondary-path",
                    parent=self,
                    is_container="list",
                    user_ordered=False,
                    path_helper=self._path_helper,
                    yang_keys="secondary-path",
                    extensions=None,
                ),
                is_container="list",
                yang_name="candidate-secondary-path",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="list",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """candidate_secondary_path must be of a type compatible with list""",
                    "defined-type": "list",
                    "generated-type": """YANGDynClass(base=YANGListType("secondary_path",candidate_secondary_path.candidate_secondary_path, yang_name="candidate-secondary-path", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='secondary-path', extensions=None), is_container='list', yang_name="candidate-secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=True)""",
                }
            )
        self.__candidate_secondary_path = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_candidate_secondary_path(self):
        """Reset candidate_secondary_path to its default (empty list)."""
        self.__candidate_secondary_path = YANGDynClass(
            base=YANGListType(
                "secondary_path",
                candidate_secondary_path.candidate_secondary_path,
                yang_name="candidate-secondary-path",
                parent=self,
                is_container="list",
                user_ordered=False,
                path_helper=self._path_helper,
                yang_keys="secondary-path",
                extensions=None,
            ),
            is_container="list",
            yang_name="candidate-secondary-path",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="list",
            is_config=True,
        )
    candidate_secondary_path = __builtin__.property(
        _get_candidate_secondary_path, _set_candidate_secondary_path
    )
    _pyangbind_elements = OrderedDict(
        [("candidate_secondary_path", candidate_secondary_path)]
    )
| |
"""runpy.py - locating and running Python code using the module namespace
Provides support for locating and running Python scripts using the Python
module namespace instead of the native filesystem.
This allows Python code to play nicely with non-filesystem based PEP 302
importers when locating support scripts as well as when importing modules.
"""
# Written by Nick Coghlan <ncoghlan at gmail.com>
# to implement PEP 338 (Executing Modules as Scripts)
import os
import sys
import importlib.machinery # importlib first so we can test #15386 via -m
import imp
import types
from pkgutil import read_code, get_loader, get_importer
__all__ = [
"run_module", "run_path",
]
class _TempModule(object):
"""Temporarily replace a module in sys.modules with an empty namespace"""
def __init__(self, mod_name):
self.mod_name = mod_name
self.module = imp.new_module(mod_name)
self._saved_module = []
def __enter__(self):
mod_name = self.mod_name
try:
self._saved_module.append(sys.modules[mod_name])
except KeyError:
pass
sys.modules[mod_name] = self.module
return self
def __exit__(self, *args):
if self._saved_module:
sys.modules[self.mod_name] = self._saved_module[0]
else:
del sys.modules[self.mod_name]
self._saved_module = []
class _ModifiedArgv0(object):
def __init__(self, value):
self.value = value
self._saved_value = self._sentinel = object()
def __enter__(self):
if self._saved_value is not self._sentinel:
raise RuntimeError("Already preserving saved value")
self._saved_value = sys.argv[0]
sys.argv[0] = self.value
def __exit__(self, *args):
self.value = self._sentinel
sys.argv[0] = self._saved_value
def _run_code(code, run_globals, init_globals=None,
mod_name=None, mod_fname=None,
mod_loader=None, pkg_name=None):
"""Helper to run code in nominated namespace"""
if init_globals is not None:
run_globals.update(init_globals)
run_globals.update(__name__ = mod_name,
__file__ = mod_fname,
__cached__ = None,
__doc__ = None,
__loader__ = mod_loader,
__package__ = pkg_name)
exec(code, run_globals)
return run_globals
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper to run code in new namespace with sys modified"""
    argv0_guard = _ModifiedArgv0(mod_fname)
    with _TempModule(mod_name) as temp_module, argv0_guard:
        namespace = temp_module.module.__dict__
        _run_code(code, namespace, init_globals,
                  mod_name, mod_fname, mod_loader, pkg_name)
    # Copy the globals of the temporary module, as they
    # may be cleared when the temporary module goes away
    return namespace.copy()
# This helper is needed due to a missing component in the PEP 302
# loader protocol (specifically, "get_filename" is non-standard)
# Since we can't introduce new features in maintenance releases,
# support was added to zipimporter under the name '_get_filename'
def _get_filename(loader, mod_name):
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return os.path.abspath(meth(mod_name))
return None
# Helper to get the loader, code and filename for a module
def _get_module_details(mod_name):
    """Locate mod_name and return its (name, loader, code, filename)."""
    loader = get_loader(mod_name)
    if loader is None:
        raise ImportError("No module named %s" % mod_name)
    if loader.is_package(mod_name):
        if mod_name == "__main__" or mod_name.endswith(".__main__"):
            raise ImportError("Cannot use package as __main__ module")
        # A package is executed through its __main__ submodule.
        submodule = mod_name + ".__main__"
        try:
            return _get_module_details(submodule)
        except ImportError as exc:
            msg = "%s; %r is a package and cannot be directly executed"
            raise ImportError(msg % (exc, mod_name))
    code = loader.get_code(mod_name)
    if code is None:
        raise ImportError("No code object available for %s" % mod_name)
    return mod_name, loader, code, _get_filename(loader, mod_name)
# XXX ncoghlan: Should this be documented and made public?
# (Current thoughts: don't repeat the mistake that lead to its
# creation when run_module() no longer met the needs of
# mainmodule.c, but couldn't be changed because it was public)
def _run_module_as_main(mod_name, alter_argv=True):
    """Runs the designated module in the __main__ namespace
    Note that the executed module will have full access to the
    __main__ namespace. If this is not desirable, the run_module()
    function should be used to run the module code in a fresh namespace.
    At the very least, these variables in __main__ will be overwritten:
        __name__
        __file__
        __cached__
        __loader__
        __package__
    """
    try:
        if alter_argv or mod_name != "__main__": # i.e. -m switch
            mod_name, loader, code, fname = _get_module_details(mod_name)
        else: # i.e. directory or zipfile execution
            mod_name, loader, code, fname = _get_main_module_details()
    except ImportError as exc:
        # Try to provide a good error message
        # for directories, zip files and the -m switch
        if alter_argv:
            # For -m switch, just display the exception
            info = str(exc)
        else:
            # For directories/zipfiles, let the user
            # know what the code was looking for
            info = "can't find '__main__' module in %r" % sys.argv[0]
        msg = "%s: %s" % (sys.executable, info)
        sys.exit(msg)
    pkg_name = mod_name.rpartition('.')[0]
    # Run directly inside the interpreter's existing __main__ namespace.
    main_globals = sys.modules["__main__"].__dict__
    if alter_argv:
        sys.argv[0] = fname
    return _run_code(code, main_globals, None,
                     "__main__", fname, loader, pkg_name)
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it

    Returns the resulting top level namespace dictionary
    """
    mod_name, loader, code, fname = _get_module_details(mod_name)
    run_name = mod_name if run_name is None else run_name
    pkg_name = mod_name.rpartition('.')[0]
    if not alter_sys:
        # Leave the sys module alone
        return _run_code(code, {}, init_globals, run_name,
                         fname, loader, pkg_name)
    return _run_module_code(code, init_globals, run_name,
                            fname, loader, pkg_name)
def _get_main_module_details():
    """Look up __main__ with the existing __main__ temporarily removed.

    Moving the current __main__ aside keeps its preexisting __loader__
    entry from interfering with the lookup, and yields a nicer error
    message when a zipfile or directory lacks __main__.py.
    """
    main_name = "__main__"
    saved_main = sys.modules.pop(main_name)
    try:
        return _get_module_details(main_name)
    except ImportError as exc:
        if main_name not in str(exc):
            raise
        raise ImportError("can't find %r module in %r" %
                          (main_name, sys.path[0])) from exc
    finally:
        # always restore the real __main__
        sys.modules[main_name] = saved_main
def _get_code_from_file(run_name, fname):
    """Load (code, loader) from *fname*, trying compiled bytecode first."""
    with open(fname, "rb") as f:
        code = read_code(f)
    if code is not None:
        # Valid bytecode file
        loader = importlib.machinery.SourcelessFileLoader(run_name, fname)
        return code, loader
    # Not a bytecode file, so compile it as normal source code
    with open(fname, "rb") as f:
        code = compile(f.read(), fname, 'exec')
    loader = importlib.machinery.SourceFileLoader(run_name, fname)
    return code, loader
def run_path(path_name, init_globals=None, run_name=None):
    """Execute code located at the specified filesystem location
    Returns the resulting top level namespace dictionary
    The file path may refer directly to a Python script (i.e.
    one that could be directly executed with execfile) or else
    it may refer to a zipfile or directory containing a top
    level __main__.py script.
    """
    if run_name is None:
        run_name = "<run_path>"
    pkg_name = run_name.rpartition(".")[0]
    importer = get_importer(path_name)
    # NullImporter (legacy imp API) means no importer handles this path
    if isinstance(importer, (type(None), imp.NullImporter)):
        # Not a valid sys.path entry, so run the code directly
        # execfile() doesn't help as we want to allow compiled files
        code, mod_loader = _get_code_from_file(run_name, path_name)
        return _run_module_code(code, init_globals, run_name, path_name,
                                mod_loader, pkg_name)
    else:
        # Importer is defined for path, so add it to
        # the start of sys.path
        sys.path.insert(0, path_name)
        try:
            # Here's where things are a little different from the run_module
            # case. There, we only had to replace the module in sys while the
            # code was running and doing so was somewhat optional. Here, we
            # have no choice and we have to remove it even while we read the
            # code. If we don't do this, a __loader__ attribute in the
            # existing __main__ module may prevent location of the new module.
            mod_name, loader, code, fname = _get_main_module_details()
            with _TempModule(run_name) as temp_module, \
                 _ModifiedArgv0(path_name):
                mod_globals = temp_module.module.__dict__
                # copy() because the temp module may be cleared on exit
                return _run_code(code, mod_globals, init_globals,
                                 run_name, fname, loader, pkg_name).copy()
        finally:
            try:
                sys.path.remove(path_name)
            except ValueError:
                pass
if __name__ == "__main__":
    # Run the module specified as the next command line argument
    # (makes ``python runpy.py <mod>`` behave like ``python -m <mod>``)
    if len(sys.argv) < 2:
        print("No module specified for execution", file=sys.stderr)
    else:
        del sys.argv[0] # Make the requested module sys.argv[0]
        _run_module_as_main(sys.argv[0])
| |
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
import yaml
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib2 import URLError
import rospkg.distro
import rosdep2.sources_list
# Canonical rosdep base.yaml on ros/rosdistro master; used below as a
# known-good download URL in test_download_rosdep_data.
GITHUB_BASE_URL = 'https://raw.github.com/ros/rosdistro/master/rosdep/base.yaml'
def get_test_dir():
    """Absolute path of the ``sources.list.d`` fixture dir next to this file."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'sources.list.d'))
def test_get_sources_list_dir():
    """Tripwire: the default sources.list dir must be computable."""
    default_dir = rosdep2.sources_list.get_sources_list_dir()
    assert default_dir
def test_get_sources_cache_dir():
    """Tripwire: the default sources cache dir must be computable."""
    cache_dir = rosdep2.sources_list.get_sources_cache_dir()
    assert cache_dir
def test_parse_sources_data():
    # Import tripwire only.  NOTE(review): a second test_parse_sources_data
    # further down this file shadows this definition, so test runners only
    # collect that one.
    from rosdep2.sources_list import parse_sources_data
    assert parse_sources_data is not None
def test_url_constants():
    """Each published URL constant must be downloadable."""
    from rosdep2.sources_list import DEFAULT_SOURCES_LIST_URL
    for url_name, url in [('DEFAULT_SOURCES_LIST_URL', DEFAULT_SOURCES_LIST_URL)]:
        try:
            f = urlopen(url)
            f.read()
            f.close()
        except Exception as e:
            # BUG FIX: was a bare ``except:`` which also caught SystemExit/
            # KeyboardInterrupt and hid the underlying error; report it.
            assert False, "URL [%s][%s] failed to download: %s" % (url_name, url, e)
def test_download_default_sources_list():
    """Default sources list downloads; a bad URL surfaces as URLError."""
    from rosdep2.sources_list import download_default_sources_list
    data = download_default_sources_list()
    assert 'http' in data, data  # sanity check, all sources files have urls
    # a non-existent host/path must raise rather than "succeed"
    try:
        download_default_sources_list(url='http://bad.ros.org/foo.yaml')
        # typo fix: message previously read "valdiated"
        assert False, "should not have succeeded/validated"
    except URLError:
        pass
def test_CachedDataSource():
    """CachedDataSource equality, accessors, and string forms."""
    from rosdep2.sources_list import CachedDataSource, DataSource, TYPE_GBPDISTRO, TYPE_YAML
    type_ = TYPE_GBPDISTRO
    url = 'http://fake.willowgarage.com/foo'
    tags = ['tag1']
    rosdep_data = {'key': {}}
    origin = '/tmp/bar'
    cds = CachedDataSource(type_, url, tags, rosdep_data, origin=origin)
    # equal only to an identically-constructed instance
    twin = CachedDataSource(type_, url, tags, rosdep_data, origin=origin)
    assert cds == twin
    # varying any single field breaks equality
    assert cds != CachedDataSource(type_, url, tags, rosdep_data, origin=None)
    assert cds != CachedDataSource(type_, url, tags, {}, origin=origin)
    assert cds != CachedDataSource(TYPE_YAML, url, tags, rosdep_data, origin=origin)
    assert cds != CachedDataSource(type_, 'http://ros.org/foo.yaml', tags, rosdep_data, origin=origin)
    # never equal to the plain DataSource type, in either direction
    plain = DataSource(type_, url, tags, origin=origin)
    assert cds != plain
    assert plain != cds
    # accessor passthrough
    assert cds.type == type_
    assert cds.url == url
    assert cds.origin == origin
    assert cds.rosdep_data == rosdep_data
    # str()/repr() mention every interesting field
    for rendered in (str(cds), repr(cds)):
        for fragment in (type_, url, tags[0], 'key'):
            assert fragment in rendered
def test_DataSource():
    """DataSource construction, equality, rendering and validation."""
    from rosdep2.sources_list import DataSource
    data_source = DataSource('yaml', 'http://fake/url', ['tag1', 'tag2'])
    assert data_source == rosdep2.sources_list.DataSource('yaml', 'http://fake/url', ['tag1', 'tag2'])
    assert 'yaml' == data_source.type
    assert 'http://fake/url' == data_source.url
    assert ['tag1', 'tag2'] == data_source.tags
    assert 'yaml http://fake/url tag1 tag2' == str(data_source)
    # an origin changes both equality and the string form
    data_source_foo = DataSource('yaml', 'http://fake/url', ['tag1', 'tag2'], origin='foo')
    assert data_source_foo != data_source
    assert data_source_foo.origin == 'foo'
    assert '[foo]:\nyaml http://fake/url tag1 tag2' == str(data_source_foo), str(data_source_foo)
    assert repr(data_source)
    # each invalid construction must raise ValueError
    bad_args = [
        ('yaml', 'http://fake/url', 'tag1'),          # tags not a list
        ('yaml', 'non url', ['tag1']),                # malformed URL
        ('bad', 'http://fake/url', ['tag1']),         # unknown type
        ('yaml', 'http://host.no.path/', ['tag1']),   # URL without a path
    ]
    for type_, url, tags in bad_args:
        try:
            rosdep2.sources_list.DataSource(type_, url, tags, origin='foo')
            assert False, "should have raised"
        except ValueError:
            pass
def test_parse_sources_file():
    """parse_sources_file() tags each parsed source with its file origin."""
    from rosdep2.sources_list import parse_sources_file
    from rosdep2 import InvalidData
    for f in ['20-default.list', '30-nonexistent.list']:
        path = os.path.join(get_test_dir(), f)
        sources = parse_sources_file(path)
        assert sources[0].type == 'yaml'
        assert sources[0].origin == path, sources[0].origin
    # a missing file must raise InvalidData
    try:
        sources = parse_sources_file('bad')
        # BUG FIX: previously there was no assert here, so the test passed
        # silently even if parse_sources_file failed to raise.
        assert False, "should have raised"
    except InvalidData:
        pass
def test_parse_sources_list():
    """parse_sources_list() over the fixture dir yields files in name order."""
    from rosdep2.sources_list import parse_sources_list
    # (removed unused ``InvalidData`` import and unused ``path`` local)
    # test with non-existent dir, should return with empty list as
    # directory is not required to exist.
    assert [] == parse_sources_list(sources_list_dir='/not/a/real/path')
    # test with real dir
    sources_list = parse_sources_list(sources_list_dir=get_test_dir())
    # at time test was written, at least two sources files
    assert len(sources_list) > 1
    # make sure files got loaded in intended order
    assert sources_list[0].origin.endswith('20-default.list')
    assert sources_list[1].origin.endswith('20-default.list')
    assert sources_list[2].origin.endswith('30-nonexistent.list')
    # tripwire -- we don't know what the actual return value is, but
    # should not error on a correctly configured test system.
    parse_sources_list()
def test_write_cache_file():
    """write_cache_file() stores a pickle at the hash-derived filename."""
    from rosdep2.sources_list import write_cache_file, compute_filename_hash, PICKLE_CACHE_EXT
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    tempdir = tempfile.mkdtemp()
    filepath = write_cache_file(tempdir, 'foo', {'data': 1}) + PICKLE_CACHE_EXT
    expected_path = os.path.join(tempdir, compute_filename_hash('foo')) + PICKLE_CACHE_EXT
    # the returned path and the recomputed one must name the same file
    assert os.path.samefile(filepath, expected_path)
    # the pickled payload round-trips
    with open(filepath, 'rb') as cache_file:
        assert {'data': 1} == pickle.loads(cache_file.read())
def test_update_sources_list():
    """update_sources_list() caches good sources and soft-fails bad ones."""
    from rosdep2.sources_list import update_sources_list, compute_filename_hash, PICKLE_CACHE_EXT
    # (removed unused ``InvalidData`` import and unused ``hash2`` local)
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    try:
        from urllib.request import pathname2url
    except ImportError:
        from urllib import pathname2url
    # point rosdistro at the local test index
    sources_list_dir = get_test_dir()
    index_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'rosdistro', 'index.yaml'))
    os.environ['ROSDISTRO_INDEX_URL'] = 'file://' + pathname2url(index_path)
    # use a subdirectory of a fresh temp dir to make sure rosdep creates the
    # necessary substructure
    tempdir = os.path.join(tempfile.mkdtemp(), 'newdir')
    errors = []
    def error_handler(loc, e):
        # collect soft failures for inspection below
        errors.append((loc, e))
    retval = update_sources_list(sources_list_dir=sources_list_dir,
                                 sources_cache_dir=tempdir, error_handler=error_handler)
    assert retval
    assert len(retval) == 2, retval
    # one of our sources is intentionally bad, this should be a softfail
    assert len(errors) == 1, errors
    assert errors[0][0].url == BADHOSTNAME_URL
    source0, path0 = retval[0]
    assert source0.origin.endswith('20-default.list'), source0
    # cache files live at hash-derived paths
    filepath = os.path.join(tempdir, compute_filename_hash(GITHUB_URL))
    assert filepath == path0, "%s vs %s"%(filepath, path0)
    with open(filepath+PICKLE_CACHE_EXT, 'rb') as f:
        data = pickle.loads(f.read())
        assert 'cmake' in data
    # verify that cache index exists. contract specifies that even
    # failed downloads are specified in the index, just in case old
    # download data is present.
    with open(os.path.join(tempdir, 'index'), 'r') as f:
        index = f.read().strip()
    expected = "#autogenerated by rosdep, do not edit. use 'rosdep update' instead\n"\
               "yaml %s \n"\
               "yaml %s python\n"\
               "yaml %s ubuntu"%(GITHUB_URL, GITHUB_PYTHON_URL, BADHOSTNAME_URL)
    assert expected == index, "\n[%s]\nvs\n[%s]"%(expected, index)
def test_load_cached_sources_list():
    """load_cached_sources_list() round-trips data written by update_sources_list()."""
    from rosdep2.sources_list import load_cached_sources_list, update_sources_list
    tempdir = tempfile.mkdtemp()
    # test behavior on empty cache
    assert [] == load_cached_sources_list(sources_cache_dir=tempdir)
    # pull in cache data
    sources_list_dir = get_test_dir()
    retval = update_sources_list(sources_list_dir=sources_list_dir,
                                 sources_cache_dir=tempdir, error_handler=None)
    assert retval
    # now test with cached data
    retval = load_cached_sources_list(sources_cache_dir=tempdir)
    # BUG FIX: the old failure message referenced source0/1/2 before they
    # were assigned, so a length mismatch raised NameError instead of the
    # intended AssertionError.
    assert len(retval) == 3, 'len(%s) != 3' % (retval,)
    source0 = retval[0]
    source1 = retval[1]
    source2 = retval[2]
    # this should be the 'default' source
    assert 'python' in source1.rosdep_data
    assert not source0.tags
    # this should be the 'non-existent' source
    assert source2.rosdep_data == {}
    assert source2.tags == ['ubuntu']
def test_DataSourceMatcher():
    """A matcher matches a source only if it covers all of the source's tags."""
    make_source = rosdep2.sources_list.DataSource
    make_matcher = rosdep2.sources_list.DataSourceMatcher
    empty_data_source = make_source('yaml', 'http://fake/url', [])
    assert empty_data_source == make_source('yaml', 'http://fake/url', [])
    data_source = make_source('yaml', 'http://fake/url', ['tag1', 'tag2'])
    partial_data_source = make_source('yaml', 'http://fake/url', ['tag1'])
    # a matcher carrying the same tags matches full, partial, and empty sources
    matcher = make_matcher(['tag1', 'tag2'])
    assert matcher.matches(data_source)
    assert matcher.matches(partial_data_source)
    assert matcher.matches(empty_data_source)
    # one tag off: the two-tag source no longer matches, empty still does
    matcher = make_matcher(['tag1', 'tag3'])
    assert not matcher.matches(data_source)
    assert matcher.matches(empty_data_source)
    # a matcher with fewer tags than the source cannot match it
    matcher = make_matcher(['tag1'])
    assert not matcher.matches(data_source)
def test_download_rosdep_data():
    """download_rosdep_data() returns parsed YAML and wraps failures."""
    from rosdep2.sources_list import download_rosdep_data
    from rosdep2 import DownloadFailure
    url = GITHUB_BASE_URL
    data = download_rosdep_data(url)
    assert 'boost' in data  # sanity check
    # try with a bad URL (removed the unused ``as e`` bindings below)
    try:
        data = download_rosdep_data('http://badhost.willowgarage.com/rosdep.yaml')
        assert False, "should have raised"
    except DownloadFailure:
        pass
    # try to trigger both non-dict clause and YAMLError clause
    for url in [
            'https://code.ros.org/svn/release/trunk/distros/',
            'https://code.ros.org/svn/release/trunk/distros/manifest.xml',
    ]:
        try:
            data = download_rosdep_data(url)
            assert False, "should have raised"
        except DownloadFailure:
            pass
# Fixture URLs used by the tests in this file.
BADHOSTNAME_URL = 'https://badhostname.willowgarage.com/rosdep.yaml'
GITHUB_URL = 'https://github.com/ros/rosdistro/raw/master/rosdep/base.yaml'
GITHUB_PYTHON_URL = 'https://github.com/ros/rosdistro/raw/master/rosdep/python.yaml'
GITHUB_FUERTE_URL = 'https://raw.github.com/ros-infrastructure/rosdep_rules/master/rosdep_fuerte.yaml'
# Sample sources.list rows.  The "bad" variants are rejected by
# parse_sources_data: type must be lower-case 'yaml', the URL must parse,
# and a row needs at least a type and a URL.
EXAMPLE_SOURCES_DATA_BAD_TYPE = "YAML %s"%(GITHUB_URL)
EXAMPLE_SOURCES_DATA_BAD_URL = "yaml not-a-url tag1 tag2"
EXAMPLE_SOURCES_DATA_BAD_LEN = "yaml"
EXAMPLE_SOURCES_DATA_NO_TAGS = "yaml %s"%(GITHUB_URL)
EXAMPLE_SOURCES_DATA = "yaml %s fuerte ubuntu"%(GITHUB_URL)
# Multiline sample: comments and blank lines must be skipped by the parser.
EXAMPLE_SOURCES_DATA_MULTILINE = """
# this is a comment, above and below are empty lines
yaml %s
yaml %s fuerte ubuntu
"""%(GITHUB_URL, GITHUB_FUERTE_URL)
def test_parse_sources_data():
    """parse_sources_data() parses single/multi-line sources, rejects bad rows."""
    from rosdep2.sources_list import parse_sources_data, TYPE_YAML, InvalidData
    # single entry with tags and an explicit origin
    retval = parse_sources_data(EXAMPLE_SOURCES_DATA, origin='foo')
    assert len(retval) == 1
    entry = retval[0]
    assert entry.type == TYPE_YAML, entry.type
    assert entry.url == GITHUB_URL
    assert entry.tags == ['fuerte', 'ubuntu']
    assert entry.origin == 'foo'
    # tag-less entry gets the default '<string>' origin
    retval = parse_sources_data(EXAMPLE_SOURCES_DATA_NO_TAGS)
    assert len(retval) == 1
    entry = retval[0]
    assert entry.type == TYPE_YAML
    assert entry.url == GITHUB_URL
    assert entry.tags == []
    assert entry.origin == '<string>'
    # multiline data: comments and blank lines skipped, order preserved
    retval = parse_sources_data(EXAMPLE_SOURCES_DATA_MULTILINE)
    assert len(retval) == 2
    first, second = retval
    assert first.type == TYPE_YAML
    assert first.url == GITHUB_URL
    assert first.tags == []
    assert second.type == TYPE_YAML
    assert second.url == GITHUB_FUERTE_URL
    assert second.tags == ['fuerte', 'ubuntu']
    # every malformed row must raise InvalidData
    for bad in [EXAMPLE_SOURCES_DATA_BAD_URL,
                EXAMPLE_SOURCES_DATA_BAD_TYPE,
                EXAMPLE_SOURCES_DATA_BAD_LEN]:
        try:
            parse_sources_data(bad)
            assert False, "should have raised: %s"%(bad)
        except InvalidData:
            pass
def test_DataSourceMatcher_create_default():
    """create_default() matches on the detected distro/OS, or an override."""
    distro_name = rospkg.distro.current_distro_codename()
    os_name, os_version, os_codename = rospkg.os_detect.OsDetect().detect_os()
    matcher = rosdep2.sources_list.DataSourceMatcher.create_default()
    make_source = rosdep2.sources_list.DataSource
    # matches full distro + os tag set
    os_data_source = make_source('yaml', 'http://fake/url', [distro_name, os_name, os_codename])
    assert matcher.matches(os_data_source)
    # matches against current os
    os_data_source = make_source('yaml', 'http://fake/url', [os_name, os_codename])
    assert matcher.matches(os_data_source)
    # matches against current distro
    distro_data_source = make_source('yaml', 'http://fake/url', [distro_name])
    assert matcher.matches(distro_data_source)
    # with an os override, only the override names match
    matcher = rosdep2.sources_list.DataSourceMatcher.create_default(os_override=('fubuntu', 'flucid'))
    assert not matcher.matches(os_data_source)
    for tags in (['fubuntu'], ['flucid'], ['flucid', 'fubuntu']):
        assert matcher.matches(make_source('yaml', 'http://fake/url', tags))
    assert not matcher.matches(make_source('yaml', 'http://fake/url', ['kubuntu', 'lucid']))
def test_SourcesListLoader_create_default():
    """Exercise SourcesListLoader.create_default() and its loader API."""
    from rosdep2.sources_list import update_sources_list, SourcesListLoader, DataSourceMatcher
    # create temp dir for holding sources cache and pull in cache data
    tempdir = tempfile.mkdtemp()
    assert update_sources_list(sources_list_dir=get_test_dir(),
                               sources_cache_dir=tempdir, error_handler=None)
    # now test with cached data
    matcher = rosdep2.sources_list.DataSourceMatcher(['ubuntu', 'lucid'])
    loader = SourcesListLoader.create_default(matcher, sources_cache_dir=tempdir)
    assert loader.sources
    sources0 = loader.sources
    assert not any([s for s in loader.sources if not matcher.matches(s)])
    # creating again with the same matcher yields the same sources
    loader = SourcesListLoader.create_default(matcher, sources_cache_dir=tempdir)
    assert sources0 == loader.sources
    # a different matcher should filter down to python-only sources
    matcher2 = rosdep2.sources_list.DataSourceMatcher(['python'])
    loader2 = SourcesListLoader.create_default(matcher2, sources_cache_dir=tempdir)
    assert loader2.sources
    assert sources0 != loader2.sources
    assert not any([s for s in loader2.sources if not matcher2.matches(s)])
    # test API: per-resource queries always raise ResourceNotFound
    try:
        loader.get_rosdeps('foo')
    except rospkg.ResourceNotFound:
        pass
    try:
        loader.get_view_key('foo')
    except rospkg.ResourceNotFound:
        pass
    assert [] == loader.get_loadable_resources()
    all_sources = [x.url for x in loader.sources]
    assert all_sources == loader.get_loadable_views()
    # test get_source early to make sure model matches expected
    try:
        loader.get_source('foo')
        assert False, "should have raised"
    except rospkg.ResourceNotFound:
        pass
    s = loader.get_source(GITHUB_URL)
    assert s.url == GITHUB_URL
    # get_view_dependencies: the loader doesn't know view names, so an
    # unknown name assumes everything; actual views depend on nothing
    assert all_sources == loader.get_view_dependencies('foo')
    assert [] == loader.get_view_dependencies(GITHUB_URL)
    # load_view
    from rosdep2.model import RosdepDatabase
    for verbose in [True, False]:
        rosdep_db = RosdepDatabase()
        loader.load_view(GITHUB_URL, rosdep_db, verbose=verbose)
        assert rosdep_db.is_loaded(GITHUB_URL)
        assert [] == rosdep_db.get_view_dependencies(GITHUB_URL)
        entry = rosdep_db.get_view_data(GITHUB_URL)
        assert 'cmake' in entry.rosdep_data
        assert GITHUB_URL == entry.origin
    # coverage: repeat load on the last db, should noop
    loader.load_view(GITHUB_URL, rosdep_db)
| |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setup module for the GRPC Python package."""
from distutils import cygwinccompiler
from distutils import extension as _extension
from distutils import util
import os
import os.path
import pkg_resources
import platform
import re
import shlex
import shutil
import sys
import sysconfig
import setuptools
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'
PY3 = sys.version_info.major == 3
# Layout of the in-repo Python sources and vendored third-party headers.
PYTHON_STEM = os.path.join('src', 'python', 'grpcio')
CORE_INCLUDE = ('include', '.',)
BORINGSSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),)
ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),)
CARES_INCLUDE = (
    os.path.join('third_party', 'cares'),
    os.path.join('third_party', 'cares', 'cares'),)
# c-ares ships per-platform generated config headers.
if 'linux' in sys.platform:
  CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_linux'),)
if 'darwin' in sys.platform:
  CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_darwin'),)
README = os.path.join(PYTHON_STEM, 'README.rst')
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath(PYTHON_STEM))
# Break import-style to ensure we can actually find our in-repo dependencies.
import _spawn_patch
import commands
import grpc_core_dependencies
import grpc_version
_spawn_patch.monkeypatch_spawn()
LICENSE = 'Apache License 2.0'
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: Apache Software License',
]
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support. Even if this
# is set to false, if the script detects that the generated `.c` file isn't
# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = os.environ.get(
    'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False)
# Environment variable specifying whether or not there's interest in setting up
# documentation building.
ENABLE_DOCUMENTATION_BUILD = os.environ.get(
    'GRPC_PYTHON_ENABLE_DOCUMENTATION_BUILD', False)
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
if EXTRA_ENV_COMPILE_ARGS is None:
  EXTRA_ENV_COMPILE_ARGS = ''
  if 'win32' in sys.platform and sys.version_info < (3, 5):
    EXTRA_ENV_COMPILE_ARGS += ' -std=c++11'
    # We use define flags here and don't directly add to DEFINE_MACROS below to
    # ensure that the expert user/builder has a way of turning it off (via the
    # envvars) without adding yet more GRPC-specific envvars.
    # See https://sourceforge.net/p/mingw-w64/bugs/363/
    if '32' in platform.architecture()[0]:
      EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
    else:
      EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
  elif 'win32' in sys.platform:
    EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
  elif "linux" in sys.platform:
    EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions'
  elif "darwin" in sys.platform:
    EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv -fno-exceptions'
if EXTRA_ENV_LINK_ARGS is None:
  EXTRA_ENV_LINK_ARGS = ''
  if "linux" in sys.platform or "darwin" in sys.platform:
    EXTRA_ENV_LINK_ARGS += ' -lpthread'
  elif "win32" in sys.platform and sys.version_info < (3, 5):
    msvcr = cygwinccompiler.get_msvcr()[0]
    # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
    # influence on the linkage outcome on MinGW for non-C++ programs.
    EXTRA_ENV_LINK_ARGS += (
        ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
        '-static'.format(msvcr=msvcr))
  if "linux" in sys.platform:
    EXTRA_ENV_LINK_ARGS += ' -Wl,-wrap,memcpy -static-libgcc'
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
CYTHON_HELPER_C_FILES = ()
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform:
  CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
EXTENSION_INCLUDE_DIRECTORIES = (
    (PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE +
    CARES_INCLUDE)
EXTENSION_LIBRARIES = ()
if "linux" in sys.platform:
  EXTENSION_LIBRARIES += ('rt',)
if not "win32" in sys.platform:
  EXTENSION_LIBRARIES += ('m',)
if "win32" in sys.platform:
  EXTENSION_LIBRARIES += ('advapi32', 'ws2_32',)
DEFINE_MACROS = (
    ('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600),
    ('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),)
if "win32" in sys.platform:
  # TODO(zyc): Re-enable c-ares on x64 and x86 windows after fixing the
  # ares_library_init compilation issue
  DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),
                    ('GRPC_ARES', 0),)
  if '64bit' in platform.architecture()[0]:
    DEFINE_MACROS += (('MS_WIN64', 1),)
  elif sys.version_info >= (3, 5):
    # For some reason, this is needed to get access to inet_pton/inet_ntop
    # on msvc, but only for 32 bits
    DEFINE_MACROS += (('NTDDI_VERSION', 0x06000000),)
else:
  DEFINE_MACROS += (('HAVE_CONFIG_H', 1),)
LDFLAGS = tuple(EXTRA_LINK_ARGS)
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
if "linux" in sys.platform or "darwin" in sys.platform:
  pymodinit_type = 'PyObject*' if PY3 else 'void'
  pymodinit = '__attribute__((visibility ("default"))) {}'.format(pymodinit_type)
  DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)
# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python3 was built with.
# For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
if 'darwin' in sys.platform and PY3:
  mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
  if mac_target and (pkg_resources.parse_version(mac_target) <
                     pkg_resources.parse_version('10.7.0')):
    os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.7'
    os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
        r'macosx-[0-9]+\.[0-9]+-(.+)',
        r'macosx-10.7-\1',
        util.get_platform())
def cython_extensions_and_necessity():
  """Build the distutils Extension list for the Cython modules.

  Returns a pair ``(extensions, need_cython)`` where ``need_cython``
  reports whether Cython must run (requested explicitly via
  GRPC_PYTHON_BUILD_WITH_CYTHON, or because generated .c files are
  missing/stale per commands.check_and_update_cythonization).
  """
  module_files = [
      os.path.join(PYTHON_STEM, name.replace('.', '/') + '.pyx')
      for name in CYTHON_EXTENSION_MODULE_NAMES
  ]
  config = os.environ.get('CONFIG', 'opt')
  prefix = 'libs/' + config + '/'
  if "darwin" in sys.platform:
    # On macOS link against prebuilt static core libraries instead of
    # compiling the core C sources into the extension.
    extra_objects = [prefix + 'libares.a',
                     prefix + 'libboringssl.a',
                     prefix + 'libgpr.a',
                     prefix + 'libgrpc.a']
    core_c_files = []
  else:
    core_c_files = list(CORE_C_FILES)
    extra_objects = []
  extensions = []
  for module_name, module_file in zip(list(CYTHON_EXTENSION_MODULE_NAMES), module_files):
    extensions.append(
        _extension.Extension(
            name=module_name,
            sources=[module_file] + list(CYTHON_HELPER_C_FILES) + core_c_files,
            include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES),
            libraries=list(EXTENSION_LIBRARIES),
            define_macros=list(DEFINE_MACROS),
            extra_objects=extra_objects,
            extra_compile_args=list(CFLAGS),
            extra_link_args=list(LDFLAGS),
        ))
  need_cython = BUILD_WITH_CYTHON
  if not BUILD_WITH_CYTHON:
    need_cython = need_cython or not commands.check_and_update_cythonization(extensions)
  return commands.try_cythonize(extensions, linetracing=ENABLE_CYTHON_TRACING, mandatory=BUILD_WITH_CYTHON), need_cython
CYTHON_EXTENSION_MODULES, need_cython = cython_extensions_and_necessity()
PACKAGE_DIRECTORIES = {
    '': PYTHON_STEM,
}
INSTALL_REQUIRES = (
    'six>=1.5.2',
    # TODO(atash): eventually split the grpcio package into a metapackage
    # depending on protobuf and the runtime component (independent of protobuf)
    'protobuf>=3.3.0',
)
# Python 2 backport packages for concurrent.futures and enum.
if not PY3:
  INSTALL_REQUIRES += ('futures>=2.2.0', 'enum34>=1.0.4')
SETUP_REQUIRES = INSTALL_REQUIRES + (
    'sphinx>=1.3',
    'sphinx_rtd_theme>=0.1.8',
    'six>=1.10',
) if ENABLE_DOCUMENTATION_BUILD else ()
try:
  import Cython
except ImportError:
  if BUILD_WITH_CYTHON:
    # Cython was requested but is absent: warn and keep going so that
    # non-build commands still work.
    sys.stderr.write(
        "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, "
        "but do not have Cython installed. We won't stop you from using "
        "other commands, but the extension files will fail to build.\n")
  elif need_cython:
    sys.stderr.write(
        'We could not find Cython. Setup may take 10-20 minutes.\n')
    SETUP_REQUIRES += ('cython>=0.23',)
COMMAND_CLASS = {
    'doc': commands.SphinxDocumentation,
    'build_project_metadata': commands.BuildProjectMetadata,
    'build_py': commands.BuildPy,
    'build_ext': commands.BuildExt,
    'gather': commands.Gather,
}
# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, 'grpc', '_cython', '_credentials')
try:
  os.mkdir(credentials_dir)
except OSError:
  pass
shutil.copyfile(os.path.join('etc', 'roots.pem'),
                os.path.join(credentials_dir, 'roots.pem'))
PACKAGE_DATA = {
    # Binaries that may or may not be present in the final installation, but are
    # mentioned here for completeness.
    'grpc._cython': [
        '_credentials/roots.pem',
        '_windows/grpc_c.32.python',
        '_windows/grpc_c.64.python',
    ],
}
PACKAGES = setuptools.find_packages(PYTHON_STEM)
setuptools.setup(
    name='grpcio',
    version=grpc_version.VERSION,
    description='HTTP/2-based RPC framework',
    author='The gRPC Authors',
    author_email='grpc-io@googlegroups.com',
    url='https://grpc.io',
    license=LICENSE,
    classifiers=CLASSIFIERS,
    long_description=open(README).read(),
    ext_modules=CYTHON_EXTENSION_MODULES,
    packages=list(PACKAGES),
    package_dir=PACKAGE_DIRECTORIES,
    package_data=PACKAGE_DATA,
    install_requires=INSTALL_REQUIRES,
    setup_requires=SETUP_REQUIRES,
    cmdclass=COMMAND_CLASS,
)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from googleapiclient.discovery import build
import fnmatch
import json
import os
import glob
import shutil
import esprima
import argparse
import re
def fileSearchReplace(filepath, findStr, replaceStr):
    """Replace every occurrence of findStr with replaceStr inside a file.

    The file is rewritten in place and truncated to the new length.
    """
    with open(filepath, "r+", encoding="utf-8") as handle:
        updated = handle.read().replace(findStr, replaceStr)
        handle.seek(0)
        handle.write(updated)
        handle.truncate()
def parseAsDict(filepath):
    """Naively parse a JavaScript object source file into a dict.

    Matches ``key: value`` lines with a regex — this is not a real JS
    parser, so nested objects or values containing ':' are not handled.
    Used for the locale-specific string collections.

    Args:
        filepath: path to the JS source file (UTF-8).

    Returns:
        dict mapping raw key text to the unquoted value text.
    """
    r = re.compile(r'\n\s*([^:]*):\s*([^\n]*)', re.UNICODE)
    with open(filepath, "r", encoding="utf-8") as f:
        rawText = f.read()
    matches = r.findall(rawText)
    # convert to a dictionary
    result = {}
    for (kAttr, kValue) in matches:
        # get rid of surrounding whitespace
        kValue = kValue.strip()
        # get rid of comma at end if exists
        if kValue and kValue[-1] == ',':
            kValue = kValue[:-1].strip()
        # skip entries with no value at all; previously an empty value
        # crashed on the kValue[-1] / slicing below with an IndexError
        if not kValue:
            continue
        # get rid of quote marks because the regex was a bit crap
        kValue = kValue[1:-1]
        # put in dictionary
        result[kAttr] = kValue
    return result
def saveDictAsJsObject(filepath, dictObj):
    """Write a dict out as a ``module.exports = {...}`` JavaScript object.

    Used for saving locale-specific string collections; values are emitted
    as single-quoted string literals.
    """
    lines = ["module.exports = {\n"]
    for key, value in dictObj.items():
        # emit each entry as  key:'value',
        lines.append("%s:'%s',\n" % (key, value))
    lines.append("}")
    with open(filepath, "w", encoding="utf-8") as out:
        out.write("".join(lines))
class Translator(object):
    """Wrapper for the Google Translation API. Must provide your own API Key."""

    def __init__(self, apiKey):
        # Maximum number of strings sent to the API in one request.
        self.GOOGLE_CHUNKSIZE = 64
        self.apiKey = apiKey
        self.service = build('translate', 'v2', developerKey=apiKey)

    def encodeForTranslation(self, strValue):
        """Protect printf-style placeholders and newlines with XML-ish tags
        so the translator passes them through untouched."""
        strValue = strValue.replace('%s', '<s-placeholder/>')
        strValue = strValue.replace('%d', '<d-placeholder/>')
        strValue = strValue.replace('%f', '<f-placeholder/>')
        strValue = strValue.replace('%i', '<i-placeholder/>')
        strValue = strValue.replace('%o', '<o-placeholder/>')
        strValue = strValue.replace('\n', '<nl-placeholder/>')
        return strValue

    def decodeFromTranslation(self, strValue):
        """Undo encodeForTranslation on a translated string.

        The API returns HTML-format text, so a literal apostrophe comes back
        as the ``&#39;`` entity; convert it to an escaped quote suitable for
        re-insertion into a single-quoted JS string literal.
        """
        strValue = strValue.replace('<s-placeholder/>', '%s')
        strValue = strValue.replace('<d-placeholder/>', '%d')
        strValue = strValue.replace('<f-placeholder/>', '%f')
        strValue = strValue.replace('<i-placeholder/>', '%i')
        strValue = strValue.replace('<o-placeholder/>', '%o')
        strValue = strValue.replace('<nl-placeholder/>', '\n')
        # was a raw apostrophe in the source, which broke the file's syntax
        strValue = strValue.replace('&#39;', '\\\'')
        return strValue

    def translate(self, chinesePhrase, newLanguage='en'):
        """Translate a single phrase (returns a one-element list)."""
        return self.translateList([chinesePhrase], newLanguage)

    def translateSmallList(self, chinesePhraseList, newLanguage='en'):
        """Translate at most GOOGLE_CHUNKSIZE phrases in one API request."""
        # encode the strings
        chinesePhraseList = [self.encodeForTranslation(x) for x in chinesePhraseList]
        cloudresult = self.service.translations().list(
            source='zh', target=newLanguage, q=chinesePhraseList).execute()
        # parse the result into a simple list
        result = []
        for t in cloudresult['translations']:
            result.append(self.decodeFromTranslation(t['translatedText']))
        return result

    def translateList(self, chinesePhraseList, newLanguage='en'):
        """Translate an arbitrarily long list by chunking API requests."""
        translatedValues = []
        for x in range(0, len(chinesePhraseList) // self.GOOGLE_CHUNKSIZE):
            chunk = list(chinesePhraseList[x * self.GOOGLE_CHUNKSIZE:(x + 1) * self.GOOGLE_CHUNKSIZE])
            translatedValues += self.translateSmallList(chunk, newLanguage)
        # the remainder; skip the API call when the input length was an
        # exact multiple of the chunk size (an empty query is pointless)
        remainder = list(chinesePhraseList[len(translatedValues):])
        if remainder:
            translatedValues += self.translateSmallList(remainder, newLanguage)
        return translatedValues
class TranslationCache(object):
    """Cache of translation results so we don't repeatedly ask Google the
    same questions.

    Backed by a JSON file on disk; new entries are kept in memory until
    save() is called.
    """

    def __init__(self, filepath):
        self.filepath = filepath  # where the cache is persisted
        self.items = {}           # source phrase -> translated phrase
        if os.path.exists(filepath):
            with open(filepath, 'r') as f:
                self.items = json.load(f)

    def get(self, key):
        """Return the cached translation for key, or None when absent."""
        # dict.get replaces the previous bare `except:`, which could mask
        # unrelated errors (e.g. an unhashable key).
        return self.items.get(key)

    def put(self, key, value):
        """Record a translation in memory (not persisted until save())."""
        self.items[key] = value

    def save(self):
        """Persist the in-memory cache to the JSON file."""
        with open(self.filepath, 'w') as f:
            json.dump(self.items, f)
class SourceRegion(object):
    """Represent a character range [start, end) in the source code."""

    def __init__(self, token, rawCode):
        self.value = token.value   # token text as reported by the parser
        self.range = token.range   # [start, end) offsets into rawCode
        self.rawCode = rawCode     # full source text the range indexes into

    def trimLeft(self, amount):
        """Drop `amount` characters from the left edge of the region."""
        self.value = self.value[amount:]
        self.range[0] += amount

    def trimRight(self, amount):
        """Drop `amount` characters from the right edge of the region."""
        self.value = self.value[:-amount]
        self.range[1] -= amount

    def trim(self, amount):
        """ Trim amount from both sides """
        self.trimLeft(amount)
        self.trimRight(amount)

    def stripSuffix(self, suffix):
        """Trim suffix off the right edge, if present."""
        if self.value[-len(suffix):] == suffix:
            self.trimRight(len(suffix))

    def stripPrefix(self, prefix):
        """Trim prefix off the left edge, if present."""
        if self.value[:len(prefix)] == prefix:
            self.trimLeft(len(prefix))

    def strip(self, prefix, suffix):
        """Strip a prefix and a suffix in one call."""
        self.stripPrefix(prefix)
        self.stripSuffix(suffix)

    def rawValue(self):
        """Return the region's text from the raw source, decoding any
        backslash escapes (e.g. \\uXXXX sequences) it contains."""
        return self.rawCode[self.range[0]:self.range[1]].encode("utf-8").decode("unicode-escape")

    def __repr__(self):
        # __repr__ must return a string; the previous version returned a
        # tuple, which made the repr() builtin raise TypeError.
        return str((self.rawValue(), self.range))

    def __str__(self):
        return self.__repr__()
class ParsedSourceFile(object):
    """ Wrapper for esprima ES parsing library """

    def __init__(self, filepath):
        # Keep the raw source around: SourceRegion ranges index into it.
        with open(filepath, "r", encoding="utf-8") as f:
            self.rawCode = f.read()
        self.ast = esprima.parseScript(self.rawCode, range=True, tokens=True)
        self.stringLiterals = self.getStringLiterals()
        # cumulative length delta from replacements applied so far
        self.transform = 0

    def getStringLiterals(self):
        """ Return a list of all string literal values
        We include the newer ` Template ` strings too
        """
        result = []
        for t in self.ast.tokens:
            if t.type=="String":
                r = SourceRegion(t, self.rawCode)
                # drop the surrounding quotes and any padding spaces
                r.strip("'", "'")
                r.strip('"', '"')
                r.strip(' ', ' ')
                result.append(r)
            elif t.type=="Template":
                r = SourceRegion(t, self.rawCode)
                r.strip('`', '`')
                r.stripSuffix("${") # ignore placeholder characters
                r.stripSuffix("}")
                r.stripPrefix("}")
                r.strip(' ', ' ')
                result.append(r)
        return result

    def replaceValue(self, sourceRegion, value):
        # NOTE(review): all ranges are shifted by the single cumulative
        # 'transform' delta, so replacements are positioned correctly only
        # when applied in ascending source order — confirm callers do this.
        beforeCode = self.rawCode[:sourceRegion.range[0]+self.transform]
        afterCode = self.rawCode[sourceRegion.range[1]+self.transform:]
        self.rawCode = beforeCode+value+afterCode
        # did we shrink or grow the whole code string ?
        self.transform += len(value)-(sourceRegion.range[1]-sourceRegion.range[0])

    def save(self, filepath):
        """Write the (possibly modified) source back out."""
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(self.rawCode)
def containsChineseCharacters(strToSearch):
    """Naive detection of Chinese letters in a string.

    Matches either literal characters in the CJK range \\u4e00-\\u9fff or
    textual ``\\uXXXX`` escape sequences whose first hex digit is 4-9.
    """
    pattern = re.compile(r'[\u4e00-\u9fff]+|\\[uU][4-9][0-9a-fA-F]{3}', re.UNICODE)
    return pattern.search(strToSearch) is not None
def chineseRatio(strToEvaluate):
    """ Return the approximate ratio of Chinese characters in the string """
    if strToEvaluate == '':
        return 0
    pattern = re.compile(r'[\u4e00-\u9fff]+|\\[uU][4-9][0-9a-fA-F]{3}', re.UNICODE)
    # delete everything Chinese-looking; the shrinkage is the Chinese count
    stripped = pattern.sub('', strToEvaluate)
    removed = len(strToEvaluate) - len(stripped)
    return removed / len(strToEvaluate)
def listJsFilesWithChinese(rootpath):
    """ Build up a list of interesting files that we will process later. Does not recurse. """
    print("Enumerating code files with Chinese strings ...")
    matches = []
    for filepath in glob.glob(os.path.join(rootpath, '*.js')):
        with open(filepath, "r", encoding="utf-8") as source:
            contents = source.read()
        if containsChineseCharacters(contents):
            matches.append(filepath)
    print("Found "+str(len(matches)))
    return matches
def translateDictionary(translationCache, translator, zhDict, newLanguage='en', threshold=0.3):
    """Return a copy of zhDict with mostly-Chinese string values translated.

    Recurses into nested dicts; non-string and non-Chinese values are copied
    through unchanged. Cached translations are reused; fresh results are
    written back to the cache and persisted before returning.

    NOTE(review): entries whose key or value is blank are skipped entirely
    and therefore dropped from the returned dict — confirm this is intended.
    """
    translatedDict = {}
    toTranslate = []  # (key, chinese_value) pairs that need an API call
    for (key, value) in zhDict.items():
        if isinstance(value, str) and chineseRatio(value)>threshold:
            if key.strip()=='' or value.strip()=='':
                continue
            # make sure it is mostly chinese
            if translationCache.get(zhDict[key])!=None:
                translatedDict[key] = translationCache.get(zhDict[key])
            else:
                # We will ask google to translate
                toTranslate.append((key, zhDict[key]))
        elif isinstance(value, dict):
            # recurse into nested objects
            translatedDict[key] = translateDictionary(translationCache, translator, value, newLanguage, threshold)
        else:
            translatedDict[key] = value
    # Do we need to ask Google to translate some things ?
    if toTranslate!=[]:
        translated = translator.translateList([x[1] for x in toTranslate], newLanguage)
        if len(translated)!=len(toTranslate):
            raise Exception("translateList should return same size list as input!")
        # add to the dictionary
        for x in range(0, len(toTranslate)):
            translatedDict[toTranslate[x][0]] = translated[x]
            # add to the cache
            translationCache.put(toTranslate[x][1], translated[x])
        translationCache.save()
    return translatedDict
def changeHardcodedLocale(rootpath, newLanguage='en'):
    """Fix the hardcoded locale.

    The developers started thinking about locales but hardcoded 'zh' as the
    current one without actually translating any strings; point the default
    at the new language instead.
    """
    localeIndex = os.path.join(rootpath, 'js/common/locales/index.js')
    fileSearchReplace(localeIndex,
                      "const defaultLocales = 'zh'",
                      "const defaultLocales = '" + newLanguage + "'")
def changeMonacoLanguage(rootpath, newLanguage='en'):
    """Change the interface language for the Monaco Editor. Blank is English by default"""
    if newLanguage.lower() == 'en':
        newLanguage = ''  # Monaco treats an empty locale as English
    for page in ('html/editor.html', 'html/editor-dev.html'):
        fileSearchReplace(os.path.join(rootpath, page), 'zh-cn', newLanguage)
def translatePackageJson(translationCache, translator, rootpath, newLanguage='en'):
    """Translate the Chinese values inside the main package.json in place."""
    filepath = os.path.join(rootpath, 'package.json')
    with open(filepath, 'r', encoding="utf-8") as source:
        packageDict = json.load(source)
    translated = translateDictionary(translationCache, translator, packageDict, newLanguage)
    with open(filepath, 'w', encoding="utf-8") as target:
        json.dump(translated, target)
def generateLocaleStrings(translationCache, translator, rootpath, newLanguage='en'):
    """ Generate the missing locale-specific string collection. """
    localeDir = os.path.join(rootpath, 'js/common/locales/' + newLanguage)
    if not os.path.exists(localeDir):
        os.makedirs(localeDir)
    # translate every entry of the Chinese master dictionary (threshold 0)
    zhDict = parseAsDict(os.path.join(rootpath, 'js/common/locales/zh/index.js'))
    translatedDict = translateDictionary(translationCache, translator, zhDict, newLanguage, 0)
    # save the new dictionary as a JS object
    saveDictAsJsObject(localeDir + '/index.js', translatedDict)
def translateFile(translationCache, translator, filepath, newLanguage='en', threshold=0.3):
    """Translate the mostly-Chinese string literals inside one JS file.

    Cached translations are reused; the rest are sent to the translator in
    one batched request. The file is rewritten in place.

    Args:
        translationCache: TranslationCache avoiding duplicate API requests.
        translator: Translator performing the actual API calls.
        filepath: path of the JS file to process.
        newLanguage: target language code.
        threshold: minimum chineseRatio for a literal to qualify.

    Raises:
        Exception: when the translator returns the wrong number of results.
    """
    parsedFile = ParsedSourceFile(filepath)
    # Pair each qualifying literal with its translation (cache or API).
    replacements = []          # (literal, translated_text)
    literalsToTranslate = []   # cache misses, translated in one batch below
    for literal in parsedFile.stringLiterals:
        if chineseRatio(literal.rawValue()) > threshold:
            cached = translationCache.get(literal.rawValue())
            if cached is not None:
                replacements.append((literal, cached))
            else:
                literalsToTranslate.append(literal)
    if literalsToTranslate:
        translations = translator.translateList(
            [x.rawValue() for x in literalsToTranslate], newLanguage)
        if len(translations) != len(literalsToTranslate):
            # was: raise("...") — raising a plain string is itself a TypeError
            raise Exception("Didn't get expected number of translations")
        for literal, translated in zip(literalsToTranslate, translations):
            # save the translation to the cache
            translationCache.put(literal.rawValue(), translated)
            replacements.append((literal, translated))
        translationCache.save()
    # ParsedSourceFile.replaceValue tracks one cumulative offset, so edits
    # must be applied in ascending source order; interleaving cache hits and
    # fresh translations out of order would corrupt the rewritten file.
    for literal, translated in sorted(replacements, key=lambda item: item[0].range[0]):
        parsedFile.replaceValue(literal, translated)
    # save changes to file
    parsedFile.save(filepath)
def main(args):
    """Drive the full translation pass over an unpacked package.nw tree."""
    nwdir = args.nwdir
    targetLanguage = 'en'
    translator = Translator(args.key)
    translationCache = TranslationCache('translations.json')
    # top-level metadata and locale plumbing first
    translatePackageJson(translationCache, translator, nwdir, targetLanguage)
    changeHardcodedLocale(nwdir, targetLanguage)
    changeMonacoLanguage(nwdir, targetLanguage)
    generateLocaleStrings(translationCache, translator, nwdir, targetLanguage)
    # fix the hardcoded Chinese strings in the js/ directory
    for filepath in listJsFilesWithChinese(os.path.join(nwdir, 'js/')):
        translateFile(translationCache, translator, filepath, targetLanguage)
    # the editor extension too
    translateFile(translationCache, translator,
                  os.path.join(nwdir, 'js/extensions/editor/index.js'), targetLanguage)
    for filepath in listJsFilesWithChinese(os.path.join(nwdir, 'js/extensions/editor/assets/api')):
        translateFile(translationCache, translator, filepath, targetLanguage)
if __name__ == '__main__':
    # CLI entry point; both options are expected in practice (argparse does
    # not mark them required, so missing ones arrive as None).
    parser = argparse.ArgumentParser()
    parser.add_argument("--nwdir", help="Path to the 'package.nw' directory.")
    parser.add_argument("--key", help="Google Cloud API Key for use with the translation services")
    args = parser.parse_args()
    main(args)
| |
# -*- coding: utf-8 -*-
"""
Single VsOne Chip Match Interface
For VsMany Interaction
Interaction for looking at matches between a single query and database annotation
Main development file
CommandLine:
python -m ibeis.viz.interact.interact_matches --test-show_coverage --show
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import numpy as np
import plottool_ibeis as pt
import six
from plottool_ibeis import interact_helpers as ih
from ibeis import viz
from ibeis.algo.hots import scoring
from ibeis.algo.hots import hstypes
from ibeis.viz import viz_helpers as vh
from ibeis.viz import viz_hough
from ibeis.viz import viz_chip
from plottool_ibeis import interact_matches
from ibeis.viz.interact.interact_chip import ishow_chip
(print, rrr, profile) = ut.inject2(__name__, '[interact_matches]')
def testdata_match_interact(**kwargs):
    """
    Build a started MatchInteraction from the default test database.
    CommandLine:
        python -m ibeis.viz.interact.interact_matches --test-testdata_match_interact --show --db PZ_MTEST --qaid 3
    Example:
        >>> # VIZ_DOCTEST
        >>> from ibeis.viz.interact.interact_matches import * # NOQA
        >>> import plottool_ibeis as pt
        >>> kwargs = {}
        >>> mx = ut.get_argval('--mx', type_=int, default=None)
        >>> self = testdata_match_interact(mx=mx, **kwargs)
        >>> pt.show_if_requested()
    """
    import ibeis
    # run the default query pipeline against the test database
    qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1', t=['default:Knorm=3'])
    ibs = qreq_.ibs
    cm = qreq_.execute()[0]
    cm.sortself()
    aid2 = None
    # dodraw=False: the caller decides when to draw
    self = MatchInteraction(ibs, cm, aid2, mode=1, dodraw=False, qreq_=qreq_, **kwargs)
    self.start()
    return self
# TODO inherit from AbstractInteraction
@six.add_metaclass(ut.ReloadingMetaclass)
class MatchInteraction(interact_matches.MatchInteraction2):
    """
    Plots a chip result and sets up callbacks for interaction.
    SeeAlso:
        plottool_ibeis.interact_matches.MatchInteraction2
    CommandLine:
        python -m ibeis.viz.interact.interact_matches --test-testdata_match_interact --show --db PZ_MTEST --qaid 3
    """

    def __init__(self, ibs, cm, aid2=None, fnum=None,
                 qreq_=None, figtitle='Match Interaction',
                 **kwargs):
        # Unpack the query/database annotation pair and their features from
        # the ChipMatch, then hand everything to the plottool base class.
        #print('[ibs] MatchInteraction.__init__')
        self.ibs = ibs
        self.cm = cm
        self.qreq_ = qreq_
        # Unpack Args
        if aid2 is None:
            # no explicit database aid: take the top-ranked match
            index = 0
            # FIXME: no sortself
            cm.sortself()
            self.rank = index
        else:
            index = cm.daid2_idx.get(aid2, None)
            # TODO: rank?
            self.rank = None
        if index is not None:
            self.qaid = self.cm.qaid
            self.daid = self.cm.daid_list[index]
            fm = self.cm.fm_list[index]
            fk = self.cm.fk_list[index]
            fsv = self.cm.fsv_list[index]
            if self.cm.fs_list is None:
                fs_list = self.cm.get_fsv_prod_list()
            else:
                fs_list = self.cm.fs_list
            fs = None if fs_list is None else fs_list[index]
            H1 = None if self.cm.H_list is None else cm.H_list[index]
            self.score = None if self.cm.score_list is None else self.cm.score_list[index]
        else:
            # aid2 given but not among the matches: fall back to empty
            # feature arrays so the interaction can still render.
            self.qaid = self.cm.qaid
            self.daid = aid2
            fm = np.empty((0, 2), dtype=hstypes.FM_DTYPE)
            fk = np.empty(0, dtype=hstypes.FK_DTYPE)
            fsv = np.empty((0, 2), dtype=hstypes.FS_DTYPE)
            fs = np.empty(0, dtype=hstypes.FS_DTYPE)
            H1 = None
            self.score = None
        # Read properties
        self.query_config2_ = (None if self.qreq_ is None else
                               self.qreq_.extern_query_config2)
        self.data_config2_ = (None if self.qreq_ is None else
                              self.qreq_.extern_data_config2)
        rchip1 = vh.get_chips(ibs, [self.qaid], config2_=self.query_config2_)[0]
        rchip2 = vh.get_chips(ibs, [self.daid], config2_=self.data_config2_)[0]
        kpts1 = ibs.get_annot_kpts([self.qaid], config2_=self.query_config2_)[0]
        kpts2 = ibs.get_annot_kpts([self.daid], config2_=self.data_config2_)[0]
        vecs1 = ibs.get_annot_vecs([self.qaid], config2_=self.query_config2_)[0]
        vecs2 = ibs.get_annot_vecs([self.daid], config2_=self.data_config2_)[0]
        self.figtitle = figtitle
        self.kwargs = kwargs
        self.fnum2 = pt.next_fnum()
        super(MatchInteraction, self).__init__(rchip1, rchip2, kpts1, kpts2,
                                               fm, fs, fsv, vecs1, vecs2, H1,
                                               H2=None, fk=fk, fnum=fnum,
                                               **kwargs)

    #def plot(self, fnum, pnum):
    def chipmatch_view(self, fnum=None, pnum=(1, 1, 1), verbose=None, **kwargs_):
        """
        just visualizes the matches using some type of lines
        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-chipmatch_view --show
        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import * # NOQA
            >>> self = testdata_match_interact()
            >>> self.chipmatch_view()
            >>> pt.show_if_requested()
        """
        if fnum is None:
            fnum = self.fnum
        if verbose is None:
            verbose = ut.VERBOSE
        ibs = self.ibs
        aid = self.daid
        qaid = self.qaid
        figtitle = self.figtitle
        # drawing mode draw: with/without lines/feats
        mode = kwargs_.get('mode', self.mode)
        draw_ell = mode >= 1
        draw_lines = mode == 2
        #self.mode = (self.mode + 1) % 3
        pt.figure(fnum=fnum, docla=True, doclf=True)
        # explicit kwargs_ take precedence over the stored defaults
        show_matches_kw = self.kwargs.copy()
        show_matches_kw.update(
            dict(fnum=fnum, pnum=pnum, draw_lines=draw_lines,
                 draw_ell=draw_ell, colorbar_=True, vert=self.vert))
        show_matches_kw.update(kwargs_)
        if self.warp_homog:
            show_matches_kw['H1'] = self.H1
        #show_matches_kw['score'] = self.score
        show_matches_kw['rawscore'] = self.score
        show_matches_kw['aid2_raw_rank'] = self.rank
        tup = viz.viz_matches.show_matches2(ibs, self.qaid, self.daid,
                                            self.fm, self.fs,
                                            qreq_=self.qreq_,
                                            **show_matches_kw)
        ax, xywh1, xywh2 = tup
        self.xywh2 = xywh2
        pt.set_figtitle(figtitle + ' ' + vh.get_vsstr(qaid, aid))

    def sv_view(self, dodraw=True):
        """ spatial verification view
        """
        #fnum = viz.FNUMS['special']
        aid = self.daid
        fnum = pt.next_fnum()
        fig = pt.figure(fnum=fnum, docla=True, doclf=True)
        ih.disconnect_callback(fig, 'button_press_event')
        viz.viz_sver.show_sver(self.ibs, self.qaid, aid2=aid, fnum=fnum)
        if dodraw:
            #self.draw()
            pt.draw()

    def show_coverage(self, dodraw=True):
        """
        Show the coverage mask used for scoring this match.
        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-show_coverage --show
            python -m ibeis.viz.interact.interact_matches --test-show_coverage
        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import * # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> self.show_coverage(dodraw=False)
            >>> pt.show_if_requested()
        """
        masks_list = scoring.get_masks(self.qreq_, self.cm)
        scoring.show_coverage_mask(self.qreq_, self.cm, masks_list)
        if dodraw:
            #self.draw()
            pt.draw()

    def show_each_chip(self):
        # Show query and database chips in separate figures, no keypoints.
        viz_chip.show_chip(self.ibs, self.qaid, fnum=pt.next_fnum(), nokpts=True)
        viz_chip.show_chip(self.ibs, self.daid, fnum=pt.next_fnum(), nokpts=True)
        pt.draw()
        #self.draw()

    def show_each_fgweight_chip(self):
        # Show both chips weighted by their foreground weights.
        viz_chip.show_chip(self.ibs, self.qaid, fnum=pt.next_fnum(),
                           weight_label='fg_weights')
        viz_chip.show_chip(self.ibs, self.daid, fnum=pt.next_fnum(),
                           weight_label='fg_weights')
        #self.draw()
        pt.draw()

    def show_each_dstncvs_chip(self, dodraw=True):
        """
        Show both chips weighted by keypoint distinctiveness.
        CommandLine:
            python -m ibeis.viz.interact.interact_matches --test-show_each_dstncvs_chip --show
        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.viz.interact.interact_matches import * # NOQA
            >>> self = testdata_match_interact(mx=1)
            >>> self.show_each_dstncvs_chip(dodraw=False)
            >>> pt.show_if_requested()
        """
        dstncvs1, dstncvs2 = scoring.get_kpts_distinctiveness(self.ibs,
                                                              [self.qaid,
                                                               self.daid])
        print('dstncvs1_stats = ' + ut.get_stats_str(dstncvs1))
        print('dstncvs2_stats = ' + ut.get_stats_str(dstncvs2))
        weight_label = 'dstncvs'
        showkw = dict(weight_label=weight_label, ell=False, pts=True)
        viz_chip.show_chip(self.ibs, self.qaid, weights=dstncvs1,
                           fnum=pt.next_fnum(), **showkw)
        viz_chip.show_chip(self.ibs, self.daid, weights=dstncvs2,
                           fnum=pt.next_fnum(), **showkw)
        if dodraw:
            #self.draw()
            pt.draw()

    def show_each_probchip(self):
        # Show the probability chip for each annotation.
        viz_hough.show_probability_chip(self.ibs, self.qaid, fnum=pt.next_fnum())
        viz_hough.show_probability_chip(self.ibs, self.daid, fnum=pt.next_fnum())
        pt.draw()
        #self.draw()

    def dev_reload(self):
        # Developer helper: reload this class (rrr comes from
        # ut.ReloadingMetaclass) and rewire the figure callbacks.
        ih.disconnect_callback(self.fig, 'button_press_event')
        self.rrr()
        self.set_callbacks()

    def dev_embed(self):
        # Developer helper: drop into an interactive shell.
        ut.embed()

    def toggle_samefig(self):
        # Flip same-figure mode and redraw the current selection, if any.
        self.same_fig = not self.same_fig
        if self.mx is not None:
            self.select_ith_match(self.mx)
        self.draw()

    def query_last_feature(self):
        # Visualize nearest descriptors of the last selected feature and
        # hook clicks on the new figure back into this interaction.
        ibs = self.ibs
        qaid = self.qaid
        viz.show_nearest_descriptors(ibs, qaid, self.last_fx, pt.next_fnum(),
                                     qreq_=self.qreq_, draw_chip=True)
        fig3 = pt.gcf()
        ih.connect_callback(fig3, 'button_press_event', self.on_click)
        pt.draw()

    def get_popup_options(self):
        # Build the entries for the right-click context menu.
        from ibeis.gui import inspect_gui
        options = []
        ax = pt.gca()  # HACK
        from plottool_ibeis import plot_helpers as ph
        viztype = ph.get_plotdat(ax, 'viztype', '')
        is_match_type = viztype in ['matches', 'multi_match']
        if is_match_type:
            options += inspect_gui.get_aidpair_context_menu_options(
                self.ibs, self.qaid, self.daid, self.cm,
                qreq_=self.qreq_,
                #update_callback=self.show_page,
                #backend_callback=None, aid_list=aid_list)
            )
        options += [
            #('Toggle same_fig', self.toggle_samefig),
            #('Toggle vert', self.toggle_vert),
            ('query last feature', self.query_last_feature),
            ('show each chip', self.show_each_chip),
            ('show each distinctiveness chip', self.show_each_dstncvs_chip),
            ('show each foreground weight chip', self.show_each_fgweight_chip),
            ('show each probchip', self.show_each_probchip),
            ('show coverage', self.show_coverage),
            #('show each probchip', self.query_last_feature),
        ]
        #options.append(('name_interaction', self.name_interaction))
        #if self.H1 is not None:
        #    options.append(('Toggle homog', self.toggle_homog))
        if ut.is_developer():
            options.append(('dev_reload', self.dev_reload))
            options.append(('dev_embed', self.dev_embed))
        #options.append(('cancel', lambda: print('cancel')))
        options += super(MatchInteraction, self).get_popup_options()
        return options
        #self.show_popup_menu(options, event)

    # Callback
    def on_click_inside(self, event, ax):
        # Dispatch clicks by the axes' viztype: right clicks and unknown
        # viztypes go to the base class; ctrl-click shows spatial
        # verification; clicks on patch views open feature inspectors.
        from plottool_ibeis import plot_helpers as ph
        ibs = self.ibs
        viztype = ph.get_plotdat(ax, 'viztype', '')
        is_match_type = viztype in ['matches', 'multi_match']
        key = '' if event.key is None else event.key
        print('key=%r ' % key)
        ctrl_down = key.find('control') == 0
        # Click in match axes
        if event.button == 3:
            return super(MatchInteraction, self).on_click_inside(event, ax)
        if is_match_type and ctrl_down:
            # Ctrl-Click
            print('.. control click')
            return self.sv_view()
        elif viztype in ['warped', 'unwarped']:
            print('clicked at patch')
            ut.print_dict(ph.get_plotdat_dict(ax))
            hs_aid = {
                'aid1': self.qaid,
                'aid2': self.daid,
            }[vh.get_ibsdat(ax, 'aid', None)]
            hs_fx = vh.get_ibsdat(ax, 'fx', None)
            print('hs_fx = %r' % (hs_fx,))
            print('hs_aid = %r' % (hs_aid,))
            if hs_aid is not None and viztype == 'unwarped':
                ishow_chip(ibs, hs_aid, fx=hs_fx, fnum=pt.next_fnum())
            elif hs_aid is not None and viztype == 'warped':
                viz.show_keypoint_gradient_orientations(ibs, hs_aid, hs_fx,
                                                        fnum=pt.next_fnum())
        else:
            return super(MatchInteraction, self).on_click_inside(event, ax)
        self.draw()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.viz.interact.interact_matches
        python -m ibeis.viz.interact.interact_matches --allexamples
        python -m ibeis.viz.interact.interact_matches --allexamples --noface --nosrc
    """
    # Run this module's doctests when invoked directly.
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
test_models.py
http://docs.djangoproject.com/en/dev/topics/testing/
.. moduleauthor:: Russell Sim <russell.sim@monash.edu>
"""
from django.test import TestCase
class ExperimentFormTestCase(TestCase):
def setUp(self):
from django.contrib.auth.models import User
user = 'tardis_user1'
pwd = 'secret'
email = ''
self.user = User.objects.create_user(user, email, pwd)
    def _data_to_post(self, data=None):
        """Build a Django QueryDict from (key, value) pairs.

        When *data* is None a default, fully-populated experiment post
        (two datasets with attached datafiles) is used.
        """
        from django.http import QueryDict
        data = data or [('authors', 'russell, steve'),
                        ('created_by', self.user.pk),
                        ('description', 'desc.....'),
                        ('institution_name', 'some university'),
                        ('title', 'test experiment'),
                        ('url', 'http://www.test.com'),
                        ('dataset-MAX_NUM_FORMS', ''),
                        ('dataset-INITIAL_FORMS', '0'),
                        ('dataset-TOTAL_FORMS', '2'),
                        ('dataset-0-datafile-MAX_NUM_FORMS', ''),
                        ('dataset-0-datafile-INITIAL_FORMS', '0'),
                        ('dataset-0-datafile-TOTAL_FORMS', '1'),
                        ('dataset-0-id', ''),
                        ('dataset-0-description', 'first one'),
                        ('dataset-0-datafile-0-filename', 'file/another.py'),
                        ('dataset-1-description', 'second'),
                        ('dataset-1-datafile-MAX_NUM_FORMS', ''),
                        ('dataset-1-datafile-INITIAL_FORMS', '0'),
                        ('dataset-1-datafile-TOTAL_FORMS', '2'),
                        ('dataset-1-datafile-0-id', ''),
                        ('dataset-1-datafile-0-filename', 'second_ds/file.py'),
                        ('dataset-1-datafile-1-id', ''),
                        ('dataset-1-datafile-1-filename', 'second_ds/file1.py'),
                        ]
        # NOTE(review): values are joined into a raw query string without
        # URL-encoding, so they must not contain '&' or '=' characters.
        data = QueryDict('&'.join(['%s=%s' % (k, v) for k, v in data]))
        return data
def _create_experiment(self, data=None):
from tardis.tardis_portal import models, forms
from os.path import basename
from django.contrib.auth.models import User
data = self._data_to_post(data)
exp = models.Experiment(title=data['title'],
institution_name=data['institution_name'],
description=data['description'],
created_by=User.objects.get(id=data['created_by']),
)
exp.save()
for i, a in enumerate(data['authors'].split(', ')):
ae = models.Author_Experiment(experiment=exp,
author=a,
order=i)
ae.save()
ds_desc = {'first one': ['file/another.py'],
'second': ['second_ds/file.py', 'second_ds/file1.py']}
for d, df in ds_desc.items():
dataset = models.Dataset(description=d,
experiment=exp)
dataset.save()
for f in df:
d = models.Dataset_File(url='file://' + f,
dataset=dataset,
filename=basename(f))
d.save()
return exp
    def test_form_printing(self):
        """A form bound with the default post data renders exactly this
        HTML table (field order and attribute formatting must match)."""
        from tardis.tardis_portal import forms
        example_post = self._data_to_post()
        f = forms.ExperimentForm(example_post)
        as_table = """<tr><th><label for="url">Url:</label></th><td><input type="text" name="url" value="http://www.test.com" id="url" /></td></tr>
<tr><th><label for="title">Title:</label></th><td><input id="title" type="text" name="title" value="test experiment" maxlength="400" /></td></tr>
<tr><th><label for="institution_name">Institution name:</label></th><td><input id="institution_name" type="text" name="institution_name" value="some university" maxlength="400" /></td></tr>
<tr><th><label for="description">Description:</label></th><td><textarea id="description" rows="10" cols="40" name="description">desc.....</textarea></td></tr>
<tr><th><label for="start_time">Start time:</label></th><td><input type="text" name="start_time" id="start_time" /></td></tr>
<tr><th><label for="end_time">End time:</label></th><td><input type="text" name="end_time" id="end_time" /></td></tr>
<tr><th><label for="public">Public:</label></th><td><input type="checkbox" name="public" id="public" /></td></tr>
<tr><th><label for="authors">Authors:</label></th><td><input type="text" name="authors" value="russell, steve" id="authors" /></td></tr>"""
        self.assertEqual(f.as_table(), as_table)
    def test_form_parsing(self):
        """Posting a full experiment form creates the experiment, authors,
        datasets and datafiles described by the post data."""
        from os.path import basename
        from tardis.tardis_portal import forms, models
        example_post = [('title', 'test experiment'),
                        ('url', 'http://www.test.com'),
                        ('institution_name', 'some university'),
                        ('description', 'desc.....'),
                        ('authors', 'russell, steve'),
                        ('dataset-MAX_NUM_FORMS', ''),
                        ('dataset-INITIAL_FORMS', '0'),
                        ('dataset-TOTAL_FORMS', '2'),
                        ('dataset-0-datafile-MAX_NUM_FORMS', ''),
                        ('dataset-0-datafile-INITIAL_FORMS', '0'),
                        ('dataset-0-datafile-TOTAL_FORMS', '2'),
                        ('dataset-0-description', 'first one'),
                        ('dataset-0-id', ''),
                        ('dataset-0-datafile-0-id', ''),
                        ('dataset-0-datafile-0-filename', 'location.py'),
                        ('dataset-0-datafile-0-protocol', ''),
                        ('dataset-0-datafile-0-url', 'file/location.py'),
                        ('dataset-0-datafile-1-id', ''),
                        ('dataset-0-datafile-1-filename', 'another.py'),
                        ('dataset-0-datafile-1-protocol', ''),
                        ('dataset-0-datafile-1-url', 'file/another.py'),
                        ('dataset-1-id', ''),
                        ('dataset-1-description', 'second'),
                        ('dataset-1-datafile-MAX_NUM_FORMS', ''),
                        ('dataset-1-datafile-INITIAL_FORMS', '0'),
                        ('dataset-1-datafile-TOTAL_FORMS', '1'),
                        ('dataset-1-datafile-0-id', ''),
                        ('dataset-1-datafile-0-filename', 'file.py'),
                        ('dataset-1-datafile-0-protocol', ''),
                        ('dataset-1-datafile-0-url', 'second_ds/file.py'),
                        ]
        example_post = self._data_to_post(example_post)
        f = forms.ExperimentForm(example_post)
        # test validity of form data
        self.assertTrue(f.is_valid(), repr(f.errors))
        # save form
        # NOTE(review): save(commit=False) apparently returns a mapping with
        # an 'experiment' entry (custom form API) — created_by is patched in
        # afterwards and only the m2m data is explicitly saved here.
        exp = f.save(commit=False)
        exp['experiment'].created_by = self.user
        exp.save_m2m()
        # retrieve model from database
        e = models.Experiment.objects.get(pk=exp['experiment'].pk)
        self.assertEqual(e.title, example_post['title'])
        self.assertEqual(e.institution_name, example_post['institution_name'])
        self.assertEqual(e.description, example_post['description'])
        # test there are 2 authors
        self.assertEqual(len(e.author_experiment_set.all()), 2)
        # check we can get one of the authors back
        self.assertEqual(e.author_experiment_set.get(author='steve').author,
                         'steve')
        # check both datasets have been saved
        ds = models.Dataset.objects.filter(experiment=exp['experiment'].pk)
        self.assertEqual(len(ds), 2)
        # check that all the files exist in the database
        check_files = {'first one': ['file/location.py', 'file/another.py'],
                       'second': ['second_ds/file.py']}
        for d in ds:
            files = models.Dataset_File.objects.filter(dataset=d.pk)
            v_files = [basename(f) for f in check_files[d.description]]
            v_urls = check_files[d.description]
            for f in files:
                self.assertTrue(f.filename in v_files,
                                "%s not in %s" % (f.filename, v_files))
                self.assertTrue(f.url in v_urls,
                                "%s not in %s" % (f.url, v_urls))
    def test_initial_form(self):
        """An unbound form renders with empty fields in this exact layout."""
        from tardis.tardis_portal import forms
        as_table = """<tr><th><label for="url">Url:</label></th><td><input type="text" name="url" id="url" /></td></tr>
<tr><th><label for="title">Title:</label></th><td><input id="title" type="text" name="title" maxlength="400" /></td></tr>
<tr><th><label for="institution_name">Institution name:</label></th><td><input id="institution_name" type="text" name="institution_name" maxlength="400" /></td></tr>
<tr><th><label for="description">Description:</label></th><td><textarea id="description" rows="10" cols="40" name="description"></textarea></td></tr>
<tr><th><label for="start_time">Start time:</label></th><td><input type="text" name="start_time" id="start_time" /></td></tr>
<tr><th><label for="end_time">End time:</label></th><td><input type="text" name="end_time" id="end_time" /></td></tr>
<tr><th><label for="public">Public:</label></th><td><input type="checkbox" name="public" id="public" /></td></tr>
<tr><th><label for="authors">Authors:</label></th><td><input type="text" name="authors" id="authors" /></td></tr>"""
        f = forms.ExperimentForm()
        self.assertEqual(f.as_table(), as_table)
        #TODO needs to be extended to cover printing initial datasets
def test_validation(self):
from tardis.tardis_portal import forms
# test empty form
f = forms.ExperimentForm()
self.assertTrue(f.is_valid())
# test blank post data
post = self._data_to_post([('authors', ''),
('created_by', ''),
('description', ''),
('institution_name', ''),
('title', ''),
('url', ''),
('dataset-MAX_NUM_FORMS', ''),
('dataset-INITIAL_FORMS', '0'),
('dataset-TOTAL_FORMS', '1'),
('dataset-0-datafile-MAX_NUM_FORMS', ''),
('dataset-0-datafile-INITIAL_FORMS', '0'),
('dataset-0-datafile-TOTAL_FORMS', '1'),
('dataset-0-id', ''),
('dataset-0-description', ''),
])
f = forms.ExperimentForm(data=post)
self.assertFalse(f.is_valid())
# test a valid form
example_post = self._data_to_post()
f = forms.ExperimentForm(example_post)
self.assertTrue(f.is_valid())
# test a valid instance of a form
exp = self._create_experiment()
f = forms.ExperimentForm(instance=exp)
self.assertTrue(f.is_valid())
# test a valid instance with unmodified post
#f = forms.ExperimentForm(instance=exp, data=example_post)
#self.assertFalse(f.is_valid())
    def test_instance(self):
        """Bind a form to an existing experiment and check that the rendered
        widgets carry the instance's titles, descriptions, authors and files.

        Matching is done by substring probes against the rendered HTML, so
        the expected fragments must appear verbatim in the widget output.
        """
        from tardis.tardis_portal import forms
        exp = self._create_experiment()
        f = forms.ExperimentForm(instance=exp)
        # Printf templates for an <input value="..."> and a filled <textarea>.
        value = "value=\"%s\""
        text_area = ">%s</textarea>"
        self.assertTrue(value % 'test experiment' in
                        str(f['title']), str(f['title']))
        self.assertTrue(value % 'some university' in
                        str(f['institution_name']))
        for ds, df in f.get_datasets():
            # Dataset forms are identified by their generated field names.
            if 'dataset_description[0]' in str(ds['description']):
                self.assertTrue(text_area % "first one" in
                                str(ds['description']))
                for file in df:
                    self.assertTrue(value % "another.py" in
                                    str(file['filename']))
            if 'dataset_description[1]' in str(ds['description']):
                self.assertTrue(text_area % "second" in
                                str(ds['description']))
                # Every file of the second dataset must be one of the two
                # known names; anything else fails the test.
                for file in df:
                    if value % "file.py" in str(file['filename']):
                        continue
                    if value % "file1.py" in str(file['filename']):
                        continue
                    self.assertTrue(False, "Not all files present")
        self.assertTrue(value % "russell, steve" in str(f['authors']),
                        str(f['authors']))
    def test_render(self):
        """Render the bound form through a Django template (the way a view
        would) and spot-check that experiment, dataset and datafile values
        appear exactly once each in the output."""
        from tardis.tardis_portal import forms
        from django.template import Template, Context
        exp = self._create_experiment()
        f = forms.ExperimentForm(instance=exp)
        # Template mirrors the nested structure: experiment fields, then per
        # dataset its fields and the nested datafile formset.
        template = """<form action="" method="post">
{% for field in form %}
<div class="fieldWrapper">
{{ field.errors }}
{{ field.label_tag }}: {{ field }}
</div>
{% endfor %}
{{ form.datasets.management_form }}
{% for dataset_form, file_forms in form.get_datasets %}
{% for field in dataset_form %}
<div class="fieldWrapper">
{{ field.errors }}
{{ field.label_tag }}: {{ field }}
</div>
{% endfor %}
{{ file_forms.management_form }}
{% for file_form in file_forms.forms %}
{% for field in file_form %}
<div class="fieldWrapper">
{{ field.errors }}
{{ field.label_tag }}: {{ field }}
</div>
{% endfor %}
{% endfor %}
{% endfor %}
<p><input type="submit" value="Submit" /></p>
</form>
"""
        t = Template(template)
        output = t.render(Context({'form': f}))
        # Printf probes for rendered widget fragments.
        value = "value=\"%s\""
        span = ">%s</span>"
        text_area = ">%s</textarea>"
        # test experiment fields
        self.assertTrue(value % "test experiment" in output)
        self.assertTrue(value % "some university" in output)
        self.assertTrue(text_area % "desc....." in output)
        self.assertTrue(text_area % "second" in output, output)
        self.assertTrue(span % "file1.py" in output)
        self.assertTrue(value % "file://second_ds/file.py" in output)
        # Each datafile url and dataset description must render exactly once.
        self.assertEqual(output.count('0-datafile-0-url" value'), 1)
        self.assertEqual(output.count('0-datafile-1-url" value'), 1)
        self.assertEqual(output.count('1-datafile-0-url" value'), 1)
        self.assertEqual(output.count('description">first one</text'), 1)
        self.assertEqual(output.count('description">second</text'), 1)
    def test_initial_data(self):
        """Build a form from ``initial=`` data (rather than a bound instance)
        and check the experiment-level fields render the initial values."""
        from tardis.tardis_portal import forms
        from django.forms.models import model_to_dict
        exp = self._create_experiment()
        initial = model_to_dict(exp)
        # Dataset descriptions are supplied via the generated field names.
        for i, ds in enumerate(exp.dataset_set.all()):
            initial['dataset_description[' + str(i) + ']'] = ds.description
        f = forms.ExperimentForm(initial=initial)
        value = "value=\"%s\""
        text_area = ">%s</textarea>"
        self.assertTrue(value % 'test experiment' in str(f['title']))
        self.assertTrue(value % 'some university' in
                        str(f['institution_name']))
        # TODO the rest of this test is disabled because it's too complex
        return
        for ds, df in f.get_datasets():
            self.assertTrue(text_area % "first one" in
                            str(ds['description']))
        # TODO Currently broken, not sure if initial will be used without the
        # data argument
        self.assertTrue(text_area % "second" in
                        str(f['dataset_description[1]']))
        self.assertTrue(value % "russell, steve" in str(f['authors']))
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tempfile
import shutil
import unit_test_utils
import os
from pyspark.mllib.linalg import *
from pyspark.sql.types import *
from pyspark.sql.functions import array, struct
from pysparkling.ml import *
from h2o.estimators import H2OGradientBoostingEstimator
from pyspark.ml.feature import VectorAssembler
from ai.h2o.sparkling.ml.models.H2OMOJOModel import H2OMOJOModel
@pytest.fixture(scope="module")
def gbmModel(prostateDataset):
    """Module-scoped fixture: a tiny, seeded GBM fitted on the prostate data."""
    estimator = H2OGBM(ntrees=2, seed=42, distribution="bernoulli", labelCol="capsule")
    return estimator.fit(prostateDataset)
def testDomainColumns(gbmModel):
    """Only the categorical label column exposes domain values."""
    domainValues = gbmModel.getDomainValues()
    # Numeric predictors have no domain.
    for column in ("DPROS", "DCAPS", "VOL", "AGE", "PSA", "RACE", "ID"):
        assert domainValues[column] is None
    # The binomial label has exactly the two class levels.
    assert domainValues["capsule"] == ["0", "1"]
def testTrainingParams(gbmModel):
    """Training parameters are surfaced as strings and the map is complete."""
    params = gbmModel.getTrainingParams()
    expected = {"seed": "42", "distribution": "bernoulli", "ntrees": "2"}
    for name, value in expected.items():
        assert params[name] == value
    assert len(params) == 44
def testModelCategory(gbmModel):
    """A bernoulli-distribution GBM reports the Binomial model category."""
    assert gbmModel.getModelCategory() == "Binomial"
def testTrainingMetrics(gbmModel):
    """Training metrics are present and have the expected number of entries."""
    metrics = gbmModel.getTrainingMetrics()
    assert metrics is not None
    # BUG FIX: the original used `len(metrics) is 10`, an identity comparison
    # that only worked by accident of CPython's small-int caching. Value
    # equality is the correct (and portable) check.
    assert len(metrics) == 10
def testFeatureTypes(gbmModel):
    """All predictors are Numeric; the label is Enum; nothing else is present."""
    types = gbmModel.getFeatureTypes()
    numericColumns = ("DPROS", "GLEASON", "DCAPS", "VOL", "AGE", "PSA", "RACE", "ID")
    for column in numericColumns:
        assert types[column] == "Numeric"
    assert types["capsule"] == "Enum"
    assert len(types) == 9
def testScoringHistory(gbmModel):
    """The scoring history data frame is non-empty in both dimensions."""
    historyFrame = gbmModel.getScoringHistory()
    assert historyFrame.count() > 0
    assert len(historyFrame.columns) > 0
def testFeatureImportances(gbmModel):
    """Feature importances list exactly the training feature columns."""
    featureImportancesDF = gbmModel.getFeatureImportances()
    # BUG FIX: the original compared `collect().sort() == getFeaturesCols().sort()`.
    # list.sort() mutates in place and returns None, so that assert compared
    # None == None and could never fail. Compare sorted name lists instead.
    importanceVariables = sorted(
        row["Variable"] for row in featureImportancesDF.select("Variable").collect())
    assert importanceVariables == sorted(gbmModel.getFeaturesCols())
    assert len(featureImportancesDF.columns) == 4
def testFeatureImportancesAndScoringHistoryAreSameAfterSerde(gbmModel):
    """Scoring history and feature importances survive a save/load round trip."""
    scoringHistoryBefore = gbmModel.getScoringHistory()
    featureImportancesBefore = gbmModel.getFeatureImportances()
    # Persist the model, then reload it from the same location.
    filePath = "file://" + os.path.abspath("build/scoringHistoryAndFeatureImportancesSerde")
    gbmModel.write().overwrite().save(filePath)
    reloadedModel = H2OMOJOModel.load(filePath)
    unit_test_utils.assert_data_frames_are_identical(
        scoringHistoryBefore, reloadedModel.getScoringHistory())
    unit_test_utils.assert_data_frames_are_identical(
        featureImportancesBefore, reloadedModel.getFeatureImportances())
def testGetCurrentMetrics(gbmModel):
    """Without a validation frame or CV, current metrics equal training metrics.

    BUG FIX: the original was named ``getCurrentMetrics`` and took no
    arguments, so pytest never collected it and ``gbmModel`` was an undefined
    name inside the body — the test was silently dead code.
    """
    metrics = gbmModel.getCurrentMetrics()
    assert metrics == gbmModel.getTrainingMetrics()
@pytest.fixture(scope="module")
def prostateDatasetWithDoubles(prostateDataset):
    """Module-scoped fixture: the prostate dataset with the label cast to
    string and the integer predictors cast to double (PSA/VOL kept as-is)."""
    return prostateDataset.select(
        prostateDataset.CAPSULE.cast("string").alias("CAPSULE"),
        prostateDataset.AGE.cast("double").alias("AGE"),
        prostateDataset.RACE.cast("double").alias("RACE"),
        prostateDataset.DPROS.cast("double").alias("DPROS"),
        prostateDataset.DCAPS.cast("double").alias("DCAPS"),
        prostateDataset.PSA,
        prostateDataset.VOL,
        prostateDataset.GLEASON.cast("double").alias("GLEASON"))
def trainAndTestH2OPythonGbm(hc, dataset):
    """Train a GBM via the plain H2O-3 API, wrap its MOJO as a Sparkling Water
    model, score *dataset* and return the prediction columns."""
    frame = hc.asH2OFrame(dataset)
    estimator = H2OGradientBoostingEstimator(seed=42)
    estimator.train(y="CAPSULE", training_frame=frame)
    # The MOJO is exported to a throwaway directory that is always cleaned up.
    exportDir = tempfile.mkdtemp(prefix="")
    try:
        mojoPath = estimator.download_mojo(exportDir)
        model = H2OMOJOModel.createFromMojo("file://" + mojoPath)
        scored = model.transform(dataset)
        return scored.select(
            "prediction",
            "detailed_prediction.probabilities.0",
            "detailed_prediction.probabilities.1")
    finally:
        shutil.rmtree(exportDir)
def compareH2OPythonGbmOnTwoDatasets(hc, reference, tested):
    """Assert that scoring *tested* produces the same frame as *reference*."""
    expected = trainAndTestH2OPythonGbm(hc, reference)
    actual = trainAndTestH2OPythonGbm(hc, tested)
    unit_test_utils.assert_data_frames_are_identical(expected, actual)
def testMojoTrainedWithH2OAPISupportsArrays(hc, prostateDatasetWithDoubles):
    """MOJO scoring must accept all predictors packed into one array column
    and produce the same predictions as the flat-column layout."""
    arrayDataset = prostateDatasetWithDoubles.select(
        prostateDatasetWithDoubles.CAPSULE,
        array(
            prostateDatasetWithDoubles.AGE,
            prostateDatasetWithDoubles.RACE,
            prostateDatasetWithDoubles.DPROS,
            prostateDatasetWithDoubles.DCAPS,
            prostateDatasetWithDoubles.PSA,
            prostateDatasetWithDoubles.VOL,
            prostateDatasetWithDoubles.GLEASON).alias("features"))
    compareH2OPythonGbmOnTwoDatasets(hc, prostateDatasetWithDoubles, arrayDataset)
def testMojoTrainedWithH2OAPISupportsVectors(hc, prostateDatasetWithDoubles):
    """MOJO scoring must accept predictors assembled into a Spark ML vector."""
    featureColumns = ["AGE", "RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"]
    assembler = VectorAssembler(inputCols=featureColumns, outputCol="features")
    vectorDataset = assembler.transform(prostateDatasetWithDoubles) \
        .select("CAPSULE", "features")
    compareH2OPythonGbmOnTwoDatasets(hc, prostateDatasetWithDoubles, vectorDataset)
def testMojoTrainedWithH2OAPISupportsStructs(hc, prostateDatasetWithDoubles):
    """MOJO scoring must accept predictors hidden inside nested struct columns
    (struct "a" contains struct "b") and match the flat-column predictions."""
    arrayDataset = prostateDatasetWithDoubles.select(
        prostateDatasetWithDoubles.CAPSULE,
        prostateDatasetWithDoubles.AGE,
        struct(
            prostateDatasetWithDoubles.RACE,
            struct(
                prostateDatasetWithDoubles.DPROS,
                prostateDatasetWithDoubles.DCAPS,
                prostateDatasetWithDoubles.PSA).alias("b"),
            prostateDatasetWithDoubles.VOL).alias("a"),
        prostateDatasetWithDoubles.GLEASON)
    compareH2OPythonGbmOnTwoDatasets(hc, prostateDatasetWithDoubles, arrayDataset)
def testMojoModelCouldBeSavedAndLoaded(gbmModel, prostateDataset):
    """Predictions must be identical after a model save/load round trip."""
    path = "file://" + os.path.abspath("build/testMojoModelCouldBeSavedAndLoaded")
    gbmModel.write().overwrite().save(path)
    reloadedModel = H2OMOJOModel.load(path)

    def score(model):
        # detailed_prediction has a nested schema; drop it for the comparison.
        return model.transform(prostateDataset).drop("detailed_prediction")

    unit_test_utils.assert_data_frames_are_identical(score(gbmModel), score(reloadedModel))
def testGetCrossValidationSummary():
    """A 3-fold CV MOJO exposes a per-fold metrics summary table."""
    mojoPath = os.path.abspath("../ml/src/test/resources/gbm_cv.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    summary = mojo.getCrossValidationMetricsSummary()
    expectedColumns = ["metric", "mean", "sd", "cv_1_valid", "cv_2_valid", "cv_3_valid"]
    assert summary.columns == expectedColumns
    assert summary.count() > 0
def testCrossValidationModelsAreAvailableAfterSavingAndLoading(prostateDataset):
    """Train with kept CV models, save/load, and check the fold models are
    still present, scoreable and carry their own metrics."""
    path = "file://" + os.path.abspath("build/testCrossValidationModelsAreAvialableAfterSavingAndLoading")
    nfolds = 3
    gbm = H2OGBM(ntrees=2, seed=42, distribution="bernoulli", labelCol="capsule",
                 nfolds=nfolds, keepCrossValidationModels=True)
    model = gbm.fit(prostateDataset)
    model.write().overwrite().save(path)
    loadedModel = H2OMOJOModel.load(path)
    cvModels = loadedModel.getCrossValidationModels()
    assert len(cvModels) == nfolds
    # A fold model must score with the same schema and row count as the parent.
    result = loadedModel.transform(prostateDataset)
    cvResult = cvModels[0].transform(prostateDataset)
    assert cvResult.schema == result.schema
    assert cvResult.count() == result.count()
    # Fold models have training/validation metrics but no nested CV metrics.
    assert 0 < cvModels[0].getTrainingMetrics()['AUC'] < 1
    assert 0 < cvModels[0].getValidationMetrics()['AUC'] < 1
    assert cvModels[0].getCrossValidationMetrics() == {}
    # The reloaded fold model must match the in-memory one.
    assert cvModels[0].getModelDetails() == model.getCrossValidationModels()[0].getModelDetails()
def testCrossValidationModelsAreNoneIfKeepCrossValidationModelsIsFalse(prostateDataset):
    """Fold models are discarded when keepCrossValidationModels=False."""
    estimator = H2OGBM(ntrees=2, seed=42, distribution="bernoulli", labelCol="capsule",
                       nfolds=3, keepCrossValidationModels=False)
    fitted = estimator.fit(prostateDataset)
    assert fitted.getCrossValidationModels() is None
def testMetricObjects(prostateDataset):
    """The typed metric objects must agree with the plain metric maps, and
    the binomial-specific tables must be populated."""
    gbm = H2OGBM(ntrees=2, seed=42, distribution="bernoulli", labelCol="capsule",
                 nfolds=3, keepCrossValidationModels=False)
    model = gbm.fit(prostateDataset)

    def compareMetricValues(metricsObject, metricsMap):
        # Every scalar metric in the map must be readable from the object via
        # its generated get<Name>() accessor with the same value.
        for metric in metricsMap:
            metricValue = metricsMap[metric]
            objectValue = getattr(metricsObject, "get" + metric)()
            assert(metricValue == objectValue)
        # Binomial table metrics must be present and non-empty.
        assert metricsObject.getConfusionMatrix().count() > 0
        assert len(metricsObject.getConfusionMatrix().columns) > 0
        assert metricsObject.getGainsLiftTable().count() > 0
        assert len(metricsObject.getGainsLiftTable().columns) > 0
        assert metricsObject.getMaxCriteriaAndMetricScores().count() > 0
        assert len(metricsObject.getMaxCriteriaAndMetricScores().columns) > 0
        assert metricsObject.getThresholdsAndMetricScores().count() > 0
        assert len(metricsObject.getThresholdsAndMetricScores().columns) > 0

    compareMetricValues(model.getTrainingMetricsObject(), model.getTrainingMetrics())
    compareMetricValues(model.getCrossValidationMetricsObject(), model.getCrossValidationMetrics())
    compareMetricValues(model.getCurrentMetricsObject(), model.getCurrentMetrics())
    # No validation frame was supplied, so validation metrics are absent.
    assert model.getValidationMetricsObject() is None
    assert model.getValidationMetrics() == {}
def testGetStartTime():
    """The stored MOJO exposes its training start timestamp (epoch millis)."""
    mojoPath = os.path.abspath("../ml/src/test/resources/multi_model_iris.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    assert mojo.getStartTime() == 1631392711317
def testGetEndTime():
    """The stored MOJO exposes its training end timestamp (epoch millis)."""
    mojoPath = os.path.abspath("../ml/src/test/resources/multi_model_iris.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    assert mojo.getEndTime() == 1631392711360
def testGetRunTime():
    """The stored MOJO exposes its training duration in milliseconds."""
    mojoPath = os.path.abspath("../ml/src/test/resources/multi_model_iris.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    assert mojo.getRunTime() == 43
def testGetDefaultThreshold():
    """The binomial MOJO carries its stored classification threshold."""
    mojoPath = os.path.abspath("../ml/src/test/resources/binom_model_prostate.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    assert mojo.getDefaultThreshold() == 0.40858428648438255
def testGetCrossValidationModelsScoringHistory():
    """Each of the three CV fold models exposes its own scoring history."""
    mojoPath = os.path.abspath("../ml/src/test/resources/gbm_cv.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    histories = mojo.getCrossValidationModelsScoringHistory()
    assert len(histories) == 3
    for historyFrame in histories:
        assert len(historyFrame.columns) == 16
        assert historyFrame.count() == 3
def testGetCrossValidationModelsScoringHistoryWhenDataIsMissing():
    """A MOJO trained without CV yields an empty scoring-history list."""
    mojoPath = os.path.abspath("../ml/src/test/resources/deep_learning_prostate.mojo")
    mojo = H2OMOJOModel.createFromMojo("file://" + mojoPath)
    assert len(mojo.getCrossValidationModelsScoringHistory()) == 0
def testGetModelSummary():
    """The deep-learning MOJO's model summary is a 4-layer table whose rows
    match the stored per-layer statistics exactly."""
    mojo = H2OMOJOModel.createFromMojo(
        "file://" + os.path.abspath("../ml/src/test/resources/deep_learning_prostate.mojo"))
    summary = mojo.getModelSummary()
    assert summary.count() == 4
    assert summary.columns == ["Layer", "Units", "Type", "Dropout", "L1", "L2", "Mean Rate", "Rate RMS", "Momentum",
                               "Mean Weight", "Weight RMS", "Mean Bias", "Bias RMS"]
    collected_summary = summary.collect()
    # Input layer carries no weight statistics.
    assert collected_summary[0].asDict() == {'Layer': 1, 'Units': 8, 'Type': 'Input', 'Dropout': 0.0, 'L1': None,
                                             'L2': None, 'Mean Rate': None, 'Rate RMS': None, 'Momentum': None,
                                             'Mean Weight': None, 'Weight RMS': None, 'Mean Bias': None,
                                             'Bias RMS': None}
    # Two hidden rectifier layers with full statistics.
    assert collected_summary[1].asDict() == {'Layer': 2, 'Units': 200, 'Type': 'Rectifier', 'Dropout': 0.0, 'L1': 0.0,
                                             'L2': 0.0, 'Mean Rate': 0.006225864375919627,
                                             'Rate RMS': 0.0030197836458683014, 'Momentum': 0.0,
                                             'Mean Weight': 0.0020895117304439736, 'Weight RMS': 0.09643048048019409,
                                             'Mean Bias': 0.42625558799512825, 'Bias RMS': 0.049144044518470764}
    assert collected_summary[2].asDict() == {'Layer': 3, 'Units': 200, 'Type': 'Rectifier', 'Dropout': 0.0, 'L1': 0.0,
                                             'L2': 0.0, 'Mean Rate': 0.04241905607206281,
                                             'Rate RMS': 0.09206506609916687, 'Momentum': 0.0,
                                             'Mean Weight': -0.008243563556700311, 'Weight RMS': 0.06984925270080566,
                                             'Mean Bias': 0.9844640783479953, 'Bias RMS': 0.008990883827209473}
    # Linear output layer (no dropout).
    assert collected_summary[3].asDict() == {'Layer': 4, 'Units': 1, 'Type': 'Linear', 'Dropout': None, 'L1': 0.0,
                                             'L2': 0.0, 'Mean Rate': 0.0006254940157668898,
                                             'Rate RMS': 0.0009573120623826981, 'Momentum': 0.0,
                                             'Mean Weight': 0.0009763148391539289, 'Weight RMS': 0.06601589918136597,
                                             'Mean Bias': 0.002604305485232783, 'Bias RMS': 1.0971281125650402e-154}
| |
# nls/model.py
# This module define core abstractions that maps to problem and model definition.
# (c) Daniel Bershatsky, 2016
# See LICENSE for details
from __future__ import absolute_import, print_function
from pprint import pprint
from time import time
from types import FunctionType
from datetime import datetime
from numpy import array, exp, sqrt, arange, ones, zeros, meshgrid, mgrid, pi, linspace, angle, gradient
from scipy.integrate import simps
from scipy.io import loadmat, savemat
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation, cm
from matplotlib.pyplot import figure, plot, show, title, xlabel, ylabel, subplot, legend, xlim, ylim, contourf, hold, colorbar
from .animation import *
from .native import *
from .pumping import *
from .version import *
class Problem(object):
    """Entry point in any computation. It implements design pattern `Factory` that is used to construct objects of
    type `Model`.
    """
    def model(self, *args, **kwargs):
        """Construct a model object (`Model1D` or `Model2D`) from keyword arguments.

        Priority of arguments: arguments passed in `kwargs` have the highest priority, the 'params' key in `kwargs`
        has lower priority than `kwargs`, and dictionary arguments in `args` have the lowest priority. Other
        arguments are ignored.
        Argument list:
            model    - set model type, default value 'default';
            dx       - default value '1.0e-1';
            dt       - default value '1.0e-3';
            t0       - default value '0.0';
            u0       - default value '1.0e-1';
            order    - default value '5';
            pumping  - default value ``;
            !original_params - default value `{}`;
            !dimless_params  - default value `{}`;

        Raises `Exception` when the requested model type is unknown.
        """
        if 'filename' in kwargs:
            return self.modelFromFile(kwargs['filename'])
        if 'params' in kwargs:
            # The popped value was never used in the original implementation
            # either; only the removal from `kwargs` matters downstream.
            kwargs.pop('params')
        kwargs.setdefault('model', 'default')
        kwargs.setdefault('original_params', {})
        # Default dimensional parameters of the condensate/reservoir system.
        original_defaults = {
            'R': 0.0242057488654,
            'gamma': 0.0242057488654,
            'g': 0.00162178517398,
            'tilde_g': 0.0169440242057,
            'gamma_R': 0.242057488654,
        }
        for name, value in original_defaults.items():
            kwargs['original_params'].setdefault(name, value)
        if kwargs.get('model') in ('1d', 'default', str(Model1D)):
            return self.fabricateModel1D(*args, **kwargs)
        elif kwargs.get('model') in ('2d', str(Model2D)):
            return self.fabricateModel2D(*args, **kwargs)
        else:
            raise Exception('Unknown model passed!')
    def modelFromFile(self, filename):
        """Restore a model from a mat-file path or an open file-like object.

        The stored 'model' variable selects the concrete model type; the rest
        of the state is loaded by `AbstractModel.restore`.
        """
        def modelFromFileLikeObject(source):
            mat = loadmat(source)
            if 'model' in mat:
                return self.model(model=mat['model'][0]).restore(source)
        # NOTE(review): `file` is the Python 2 built-in type; this module is
        # Python 2 era throughout (see `matplotlib.pyplot.hold` usage).
        if isinstance(filename, file):
            return modelFromFileLikeObject(filename)
        else:
            # NOTE(review): the path (not the handle) is deliberately kept as
            # the argument here, as in the original code: `restore` calls
            # loadmat a second time, which only works on a re-openable path.
            with open(filename) as f:
                return modelFromFileLikeObject(filename)
    def fabricateModel1D(self, *args, **kwargs):
        """Build a `Model1D`, filling defaults for any missing argument."""
        kwargs.setdefault('dx', 1.0e-1)
        kwargs.setdefault('dt', 1.0e-3)
        kwargs.setdefault('t0', 0.0e+0)
        kwargs.setdefault('u0', 1.0e-1)
        kwargs.setdefault('order', 5)
        kwargs.setdefault('num_nodes', 1000)
        kwargs.setdefault('num_iters', 100000)
        # Construct the default pumping lazily so a user-supplied profile
        # avoids building an unused GaussianPumping instance.
        if 'pumping' not in kwargs:
            kwargs['pumping'] = GaussianPumping()
        # A scalar u0 becomes a constant grid; a callable is sampled on it.
        if type(kwargs['u0']) in (int, float, complex):
            kwargs['u0'] = kwargs['u0'] * ones(kwargs['num_nodes'])
        elif isinstance(kwargs['u0'], FunctionType):
            grid = linspace(0.0, kwargs['dx'] * kwargs['num_nodes'], kwargs['num_nodes'])
            kwargs['u0'] = kwargs['u0'](grid)
        return Model1D(**kwargs)
    def fabricateModel2D(self, *args, **kwargs):
        """Build a `Model2D`, filling defaults for any missing argument."""
        kwargs.setdefault('dx', 1.0e-1)
        kwargs.setdefault('dt', 1.0e-3)
        kwargs.setdefault('t0', 0.0e+0)
        kwargs.setdefault('u0', 1.0e-1)
        kwargs.setdefault('order', 3)
        kwargs.setdefault('num_nodes', 40)
        kwargs.setdefault('num_iters', 1000)
        if 'pumping' not in kwargs:
            kwargs['pumping'] = GaussianPumping()
        # A scalar u0 becomes a constant 2D grid.
        if type(kwargs['u0']) in (int, float, complex):
            kwargs['u0'] = kwargs['u0'] * ones((kwargs['num_nodes'], kwargs['num_nodes']))
        return Model2D(**kwargs)
class AbstractModel(object):
    """Base type for objects which constructed with `Problem` class. Child object of this class implements computation
    and other related routines. This class defines common routines of initialization, solving, and model storage.
    """
    def __init__(self, *args, **kwargs):
        # Discretization, approximation order and iteration budget.
        self.dt = kwargs['dt']
        self.dx = kwargs['dx']
        self.order = kwargs['order']
        self.num_nodes = kwargs['num_nodes']
        self.num_iters = kwargs['num_iters']
        self.pumping = kwargs['pumping']  # callable pumping profile p(x[, y])
        self.init_sol = kwargs['u0']  # initial condition sampled on the grid
        self.originals = kwargs['original_params']  # dimensional parameters
        self.coeffs = zeros(23)  # dimensionless equation coefficients
        self.verbose = bool(kwargs.get('verbose'))
        self.solver = None  # assigned by concrete subclasses (Solver1D/2D)
        # NOTE(review): named `hbar` but 6.61e-34 is close to Planck's h;
        # the reduced constant is 1.055e-34 -- confirm the intended value.
        hbar = 6.61e-34
        m_e = 9.1e-31
        m_0 = 1.0e-5 * m_e
        # Characteristic scales: amplitude, time, length, reservoir density.
        phi0 = sqrt(self.originals['gamma'] / (2.0 * self.originals['g']))
        t0 = phi0
        x0 = sqrt(hbar * t0 / (2 * m_0))
        n0 = 2.0 / (self.originals['R'] * t0)
        # NLS equation coeficients
        self.coeffs[0] = 1.0 # \partial_t
        self.coeffs[1] = 1.0 # \nabla^2
        self.coeffs[2] = 1.0 #
        self.coeffs[3] = 1.0 # linear damping
        self.coeffs[4] = 1.0 # self.originals['g'] * phi0 ** 3 # nonlinearity
        self.coeffs[5] = 4.0 * self.originals['tilde_g'] / self.originals['R'] #* phi0 * n0 # interaction to reservoir
        # Reservoir equation coefficients
        self.coeffs[10] = 0.0 # \parital_t
        self.coeffs[11] = 1.0 / (n0 * self.originals['gamma_R']) # pumping coefficient
        self.coeffs[12] = 1.0 # damping
        self.coeffs[13] = self.originals['R'] * phi0 ** 2 / self.originals['gamma_R'] # interaction term
        self.coeffs[14] = 0.0 # diffusive term
    def __repr__(self):
        """Pretty-print the model configuration followed by the coefficients."""
        from pprint import pformat
        return pformat({
            'dt': self.dt,
            'dx': self.dx,
            'order': self.order,
            'num_nodes': self.num_nodes,
            'num_iters': self.num_iters,
            'pumping': self.pumping,
            'originals': self.originals,
        }) + '\n' + str(self.coeffs)
    def getApproximationOrder(self):
        """Return the finite-difference approximation order."""
        return self.order
    def getCharacteristicScale(self, scale):
        """Return the characteristic scale named by `scale` ('x', 't', 'n' or
        'phi'), or None for an unknown name. Mirrors the constants in
        __init__ (see the `hbar` NOTE there)."""
        hbar = 6.61e-34
        m_e = 9.1e-31
        m_0 = 1.0e-5 * m_e
        phi0 = sqrt(self.originals['gamma'] / (2.0 * self.originals['g']))
        t0 = phi0
        x0 = sqrt(hbar * t0 / (2 * m_0))
        n0 = 2.0 / (self.originals['R'] * t0)
        scales = {
            'x': x0,
            't': t0,
            'n': n0,
            'phi': phi0,
        }
        return scales[scale] if scale in scales else None
    def getChemicalPotential(self, solution):
        """Call solver in order to calculate chemical potential.
        """
        if isinstance(solution, Solution):
            solution = solution.getSolution()
        self.mu = self.solver.chemicalPotential(solution)
        return self.mu
    def getCoefficients(self):
        """Return the dimensionless coefficient vector (see __init__)."""
        return self.coeffs
    def getInitialSolution(self):
        """Return the initial condition array."""
        return self.init_sol
    def getNumberOfIterations(self):
        """Return the configured number of time iterations."""
        return self.num_iters
    def getNumberOfNodes(self):
        """Return the number of grid nodes per dimension."""
        return self.num_nodes
    def getParticleNumber(self, method='simps'):
        # NOTE(review): `self.solution` is not assigned anywhere in this
        # class -- presumably set by the solver before this is called;
        # confirm. The `method` argument is currently unused.
        return simps((self.solution.conj() * self.solution).real, dx=self.dx) # TODO: polar coords
    def getPumping(self):
        """Evaluate the pumping profile on the model grid.

        1D models use the grid [0, num_nodes*dx]; 2D models use a square grid
        centred on the origin.
        """
        if len(self.init_sol.shape) == 1:
            right = self.num_nodes * self.dx
            left = 0.0
            x = linspace(left, right, self.num_nodes)
            grid = meshgrid(x)
            return self.pumping(*grid)
        else:
            right = self.num_nodes * self.dx / 2
            left = -right
            x = linspace(left, right, self.num_nodes)
            grid = meshgrid(x, x)
            return self.pumping(*grid)
    def getSpatialStep(self):
        """Return the spatial grid step dx."""
        return self.dx
    def getSolver(self):
        """Return the solver instance attached by the concrete subclass."""
        return self.solver
    def getTimeStep(self):
        """Return the time step dt."""
        return self.dt
    def setNumberOfIterations(self, num_iters):
        self.num_iters = num_iters
    def setPumping(self, pumping):
        self.pumping = pumping
    def setInitialSolution(self, solution):
        self.init_sol = solution
    def solve(self, num_iters=None):
        """Call solver that is aggregated certain child objects.
        """
        return self.solver(num_iters)
    def store(self, filename=None, label=None, desc=None, date=None):
        """Store object to mat-file. TODO: determine format specification

        Defaults the filename to the ISO timestamp. `label` and `desc` are
        optional free-text annotations.
        """
        date = date if date else datetime.now()
        date = date.replace(microsecond=0).isoformat()
        filename = filename if filename else date + '.mat'
        matfile = {
            'model': str(type(self)),
            'date': date,
            'dim': len(self.init_sol.shape),
            'dimlesses': self.coeffs,
            'init_solution': self.init_sol,
            'num_iters': self.num_iters,
            'num_nodes': self.num_nodes,
            'order': self.order,
            'originals': self.originals,
            'pumping': self.getPumping(),
            'spatial_step': self.dx,
            'time_step': self.dt,
        }
        if desc:
            matfile['desc'] = desc
        if label:
            matfile['label'] = label
        savemat(filename, matfile)
    def restore(self, filename):
        """Restore object from mat-file. TODO: determine format specification

        Unwraps the extra array dimensions that scipy's loadmat introduces
        around scalars, 1D arrays and the `originals` struct.
        """
        matfile = loadmat(filename)
        matfile['originals'] = matfile['originals'][0, 0]
        if matfile['dim'] == 1:
            matfile['init_solution'] = matfile['init_solution'][0, :]
            matfile['pumping'] = matfile['pumping'][0, :]
        self.coeffs = matfile['dimlesses'][0, :]
        self.init_sol = matfile['init_solution']
        self.num_nodes = matfile['num_nodes'][0, 0]
        self.num_iters = matfile['num_iters'][0, 0]
        # The sampled pumping grid is wrapped back into a callable profile.
        self.pumping = GridPumping(matfile['pumping'])
        self.dx = matfile['spatial_step'][0, 0]
        self.dt = matfile['time_step'][0, 0]
        # `originals` is stored as a structured array; rebuild the dict.
        types = matfile['originals'].dtype
        values = matfile['originals']
        self.originals = dict(zip(types.names, (value[0, 0] for value in values)))
        if 'desc' in matfile:
            self.desc = str(matfile['desc'][0])
        if 'label' in matfile:
            self.label = str(matfile['label'][0])
        return self
class Model1D(AbstractModel):
    """Default model: the NLS equation with a reservoir in the axially
    symmetric (one-dimensional radial) case.
    """
    def __init__(self, *args, **kwargs):
        super(Model1D, self).__init__(*args, **kwargs)
        self.solver = Solver1D(self)
class Model2D(AbstractModel):
    """Model that is NLS equation with reservoir on two dimensional grid.
    """
    def __init__(self, *args, **kwargs):
        super(Model2D, self).__init__(*args, **kwargs)
        self.solver = Solver2D(self)
class Solution(object):
"""Object that represents solution of a given model. Also it contains all model parameters and has ability to store
and to load solution.
TODO: improve design.
"""
def __init__(self, model, solution=None, verbose=False):
self.elapsed_time = 0.0
self.model = model
self.solution = solution
self.verbose = verbose
def getDampingIntegral(self):
"""Calculate integral of damping terms of hamiltonian using rectangular method.
"""
reservoir = self.getReservoir()
density = self.getDensity()
length = self.model.getSpatialStep()
if self.solution.ndim == 1:
nodes = self.model.getNumberOfNodes()
radius = linspace(0, nodes * self.model.getSpatialStep(), nodes)
integral = 2 * pi * sum((reservoir - 1.0) * density * radius * length)
elif self.solution.ndim == 2:
area = length ** 2
integral = sum(sum((reservoir - 1.0) * density * area))
return integral
def getDensity(self):
return (self.solution.conj() * self.solution).real
def getElapsedTime(self):
return self.elapsed_time
def getModel(self):
return self.model
def getReservoir(self):
p = self.model.getPumping() # pumping profile
u = self.getDensity() # density profile
n = self.model.coeffs[11] * p / (self.model.coeffs[12] + self.model.coeffs[13] * u)
return n
def getSolution(self):
return self.solution
def setElapsedTime(self, seconds):
self.elapsed_time = seconds
def setSolution(self, solution):
self.solution = solution
def visualize(self, *args, **kwargs):
if len(self.model.init_sol.shape) == 1:
self.visualize1d(*args, **kwargs)
else:
self.visualize2d(*args, **kwargs)
def visualize1d(self, *args, **kwargs):
x = arange(0.0, self.model.dx * self.model.num_nodes, self.model.dx)
p = self.model.pumping(x) # pumping profile
u = (self.solution.conj() * self.solution).real # density profile
n = self.model.coeffs[11] * p / (self.model.coeffs[12] + self.model.coeffs[13] * u)
def rect_plot(subplot_number, value, label, name, labelx, labely, xmax=20):
subplot(2, 3, subplot_number)
hold(False)
plot(x, value, label=label)
xlim((0, xmax))
legend(loc='best')
title(name)
xlabel(labelx)
ylabel(labely)
rect_plot(1, p, 'pumping', 'Pumping profile.', 'r', 'p')
rect_plot(2, u, 'density', 'Density distribution of BEC.', 'r', 'u')
rect_plot(3, n, 'reservoir', 'Density distribution of reservoir.', 'r', 'n')
def polar_plot(subplot_number, value, xmax=20):
hold(False)
subplot(2, 3, subplot_number, polar=True)
theta = arange(0, 2 * 3.14 + 0.1, 0.1)
contourf(theta, x, array([value for _ in theta]).T)
ylim((0, xmax))
polar_plot(4, p)
polar_plot(5, u)
polar_plot(6, n)
def visualize2d(self, *args, **kwargs):
right = self.model.num_nodes * self.model.dx / 2
left = -right
x = linspace(left, right, self.model.num_nodes)
gx, gy = meshgrid(x, x)
p = self.model.getPumping()
u = (self.solution.conj() * self.solution).real # density profile
n = self.model.coeffs[11] * p / (self.model.coeffs[12] + self.model.coeffs[13] * u)
fig = kwargs['figure'] if 'figure' in kwargs else figure()
def surface_plot(subplot_number, value, label, name, labels):
ax = fig.add_subplot(130 + subplot_number, projection='3d')
ax.plot_surface(gx, gy, value, label=label)
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
ax.set_title(name)
def contour_plot(subplot_number, value, label, name, labels):
levels = linspace(0.0, value.max() + 1.0e-3, 11)
extent = (gx[0, 0], gx[-1, -1], gy[0, 0], gy[-1, -1])
ax = fig.add_subplot(130 + subplot_number, aspect='equal')
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_title(name)
cp = ax.contourf(gx, gy, value, levels, cmap=cm.get_cmap('Accent'), extent=extent)
colorbar(cp, orientation='horizontal')
def stream_plot(subplot_number, value, label, name, labels):
"""Plot stream of complex function.
:param: value tuple Pair of absolute value and its angle.
"""
jx, jy = value[0] * gradient(value[1])
ax = fig.add_subplot(120 + subplot_number, aspect='equal')
ax.streamplot(gx, gy, jx, jy, color=value[0])
ax.set_xlim(gx[0, 0], gx[-1, -1])
ax.set_ylim(gy[0, 0], gy[-1, -1])
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_title(name)
def density_plot(subplot_number, value, label, name, labels):
extent = (gx[0, 0], gx[-1, -1], gy[0, 0], gy[-1, -1])
ax = fig.add_subplot(120 + subplot_number, aspect='equal')
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_title(name)
ax.imshow(value[0], extent=extent)
ax.contour(gx, gy, value[1].real, [0.0], colors='red', extent=extent)
ax.contour(gx, gy, value[1].imag, [0.0], colors='blue', extent=extent)
if 'stream' in kwargs and kwargs['stream']:
stream_plot(1, (u, angle(self.solution)), 'phase gradient', 'Condensate streams', ('x', 'y'))
density_plot(2, (u, self.solution), 'density', 'Density distribution of BEC.', ('x', 'y'))
else:
helper_plot = contour_plot if 'contour' in kwargs and kwargs['contour'] else surface_plot
helper_plot(1, p, 'pumping', 'Pumping profile.', ('x', 'y', 'p'))
helper_plot(2, u, 'density', 'Density distribution of BEC.', ('x', 'y', 'u'))
helper_plot(3, n, 'reservoir', 'Density distribution of reservoir.', ('x', 'y', 'n'))
if kwargs.get('filename'):
fig.savefig(kwargs['filename'])
def show(self):
    """Display the computed solution figure.

    Delegates to the module-level ``show()`` brought in by a star import —
    presumably matplotlib's ``pyplot.show``; TODO confirm against the
    file's import block (not visible here).
    """
    show()
def store(self, filename=None, label=None, desc=None, date=None):
    """Store object to mat-file. TODO: determine format specification

    :param filename: target path or an already-open writable file-like
        object; defaults to ``<ISO timestamp>.mat`` in the current directory.
    :param label: passed through to ``self.model.store``.
    :param desc: passed through to ``self.model.store``.
    :param date: timestamp recorded in the file name; defaults to now.
    """
    date = datetime.now() if date is None else date
    filename = filename if filename else date.replace(microsecond=0).isoformat() + '.mat'

    def store_with_file_like_object(file_like):
        content = {
            'elapsed_time': self.elapsed_time,
            'solution': self.solution,
            'version': version(),
        }
        # Model writes its own section first, then we append ours.
        self.model.store(file_like, label, desc, date)
        savemat(file_like, content, appendmat=True)

    # BUGFIX: the original tested `isinstance(filename, file)`; the `file`
    # builtin exists only in Python 2 and raises NameError on Python 3.
    # Duck-typing on `.write` accepts any file-like object on both versions.
    if hasattr(filename, 'write'):
        store_with_file_like_object(filename)
    else:
        with open(filename, 'wb') as f:
            store_with_file_like_object(f)
def restore(self, filename):
    """Restore object from mat-file. TODO: determine format specification

    Loads ``elapsed_time`` and ``solution`` from *filename* and returns
    ``self`` for call chaining. One-dimensional solutions are flattened
    back to a single row.
    """
    data = loadmat(filename)
    # A 1-D problem was stored as a 2-D row matrix; take the first row.
    if data['dim'] == 1:
        data['solution'] = data['solution'][0, :]
    self.elapsed_time = data['elapsed_time'][0, 0]
    self.solution = data['solution']
    return self
def report(self):
    """Print a one-line run summary: wall time, iterations, grid size."""
    template = 'Elapsed in {0} seconds with {1} iteration on {2} grid nodes.'
    summary = template.format(self.elapsed_time,
                              self.model.getNumberOfIterations(),
                              self.model.getNumberOfNodes())
    print(summary)
from .solver import * # cyclic import fix
| |
import sys
import petsc4py
petsc4py.init(sys.argv)
# from scipy.io import savemat, loadmat
# from src.ref_solution import *
# import warnings
# from memory_profiler import profile
# from time import time
from src.myio import *
from src.objComposite import *
from src.StokesFlowMethod import *
from src.geo import *
from src import stokes_flow as sf
# from src import slender_body as slb
from codeStore.helix_common import *
def get_problem_kwargs(**main_kwargs):
    """Assemble the full kwargs dict for a helicoid run.

    Starts from the generic solver options, registers the file handle with
    PETSc, then layers helicoid, helix, force-free and caller-supplied
    options on top (later sources win on key collisions).
    """
    problem_kwargs = get_solver_kwargs()
    OptDB = PETSc.Options()
    fileHandle = OptDB.getString('f', 'obj_helicoid')
    OptDB.setValue('f', fileHandle)
    problem_kwargs['fileHandle'] = fileHandle
    for extra in (get_obj_helicoid_kwargs(), get_helix_kwargs(),
                  get_forcefree_kwargs(), main_kwargs):
        problem_kwargs.update(extra)
    return problem_kwargs
def print_case_info(**problem_kwargs):
    """Print the solver, force-free, helix and helicoid option summaries."""
    handle = problem_kwargs['fileHandle']
    print_solver_info(**problem_kwargs)
    print_forcefree_info(**problem_kwargs)
    print_helix_info(handle, **problem_kwargs)
    print_obj_helicoid_info(**problem_kwargs)
    return True
def do_solve_base_flow(basei, problem, obj_comp, uw_Base_list, sumFT_Base_list):
    """Solve the problem for base flow index *basei* and record the result.

    :param basei: index of the base (strain-rate) flow to impose.
    :param problem: solver problem object; mutated (`set_basei`, solve).
    :param obj_comp: force-free composite whose reference velocity and total
        force are read after the solve.
    :param uw_Base_list: accumulator; the 6-component reference velocity
        (u, w) of this solve is appended.
    :param sumFT_Base_list: accumulator; the total force of this solve is
        appended.
    :return: the two (mutated) accumulator lists, for chaining.
    """
    problem.set_basei(basei)
    problem.create_F_U()
    problem.solve()
    PETSc.Sys.Print('---> basei %d' % basei)
    PETSc.Sys.Print(obj_comp.get_total_force())
    ref_U = obj_comp.get_ref_U()
    # First three components are translational, last three rotational.
    PETSc.Sys.Print('ref_u: %f %f %f' % (ref_U[0], ref_U[1], ref_U[2]))
    PETSc.Sys.Print('ref_w: %f %f %f' % (ref_U[3], ref_U[4], ref_U[5]))
    uw_Base_list.append(obj_comp.get_ref_U())
    sumFT_Base_list.append(obj_comp.get_total_force())
    return uw_Base_list, sumFT_Base_list
def create_helicoid_hlx_comp(**problem_kwargs):
    """Build a force-free composite of helix tails arranged as a helicoid.

    :param problem_kwargs: must contain ``matrix_method`` plus everything
        ``create_ecoli_tail`` / ``obj2helicoid_list_v3`` need.
    :return: ``sf.ForceFreeComposite`` centred at the origin, norm +x.
    """
    # BUGFIX: the original read the module-level global `matrix_method`,
    # which is only bound when this file runs as a script (NameError when
    # imported). Read it from problem_kwargs, as the main_* entry points do.
    matrix_method = problem_kwargs['matrix_method']
    tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
    tobj = sf.obj_dic[matrix_method]()
    tobj.combine(tail_obj_list)
    tobj.set_name('helicoid_hlx')
    helicoid_list = obj2helicoid_list_v3(tobj, **problem_kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((1, 0, 0)),
                                          name='helicoid_comp')
    for part in helicoid_list:
        helicoid_comp.add_obj(obj=part, rel_U=np.zeros(6))
    return helicoid_comp
def create_helicoid_hlx_selfRotate(**problem_kwargs):
    """Build the self-rotating helicoid composite of helix tails.

    Same construction as ``create_helicoid_hlx_comp`` but uses the
    self-rotate object layout and a +z composite norm.
    """
    # BUGFIX: read `matrix_method` from problem_kwargs instead of a module
    # global that only exists when the file runs as a script (NameError
    # when imported as a module).
    matrix_method = problem_kwargs['matrix_method']
    tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
    tobj = sf.obj_dic[matrix_method]()
    tobj.combine(tail_obj_list)
    tobj.set_name('helicoid_hlx_selfRotate')
    helicoid_list = obj2helicoid_list_selfRotate(tobj, **problem_kwargs)
    helicoid_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
                                          name='helicoid_comp')
    for part in helicoid_list:
        helicoid_comp.add_obj(obj=part, rel_U=np.zeros(6))
    return helicoid_comp
def main_resistanceMatrix_hlx_old(**main_kwargs):
    """Legacy resistance-matrix run: solve six unit rigid-body motions.

    Builds the helicoid-of-helices geometry, then solves the Stokes problem
    once per unit translation/rotation along each axis, printing the total
    force of each solve. Kept for comparison with the newer
    ``main_resistanceMatrix_hlx`` (which uses ``AtBtCt_full``).
    """
    main_kwargs['zoom_factor'] = 1
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    fileHandle = problem_kwargs['fileHandle']
    pickProblem = problem_kwargs['pickProblem']
    print_case_info(**problem_kwargs)
    # Create helicoid geometry from helix tails.
    tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
    tobj = sf.StokesFlowObj()
    tobj.combine(tail_obj_list)
    tobj.set_name('helicoid_hlx')
    helicoid_comp = obj2helicoid_comp(tobj, **problem_kwargs)
    helicoid_obj_list = helicoid_comp.get_obj_list()
    helicoid_center = helicoid_comp.get_center()
    # Assemble the Stokes problem once; the matrix is reused for all solves.
    problem = sf.StokesFlowProblem(**problem_kwargs)
    for tobj in helicoid_obj_list:
        problem.add_obj(tobj)
    if pickProblem:
        problem.pickmyself('%s_tran' % fileHandle, ifcheck=True)
    problem.print_info()
    problem.create_matrix()
    # The original repeated the same solve block six times; drive it from a
    # table instead. Order preserved: (z tran, z rot, y tran, y rot,
    # x tran, x rot). Each entry: (unit rigid velocity, pickle suffix, label).
    cases = (
        ((0, 0, 1, 0, 0, 0), '_tran', 'translation'),
        ((0, 0, 0, 0, 0, 1), '_rota', 'rotation'),
        ((0, 1, 0, 0, 0, 0), '_tran', 'translation'),
        ((0, 0, 0, 0, 1, 0), '_rota', 'rotation'),
        ((1, 0, 0, 0, 0, 0), '_tran', 'translation'),
        ((0, 0, 0, 1, 0, 0), '_rota', 'rotation'),
    )
    for rigid_U, suffix, label in cases:
        for tobj in helicoid_obj_list:
            tobj.set_rigid_velocity(np.array(rigid_U), center=helicoid_center)
        problem.create_F_U()
        problem.solve()
        if problem_kwargs['pickProblem']:
            problem.pickmyself(fileHandle + suffix, pick_M=False, mat_destroy=False)
        total_force = problem.get_total_force()
        PETSc.Sys.Print('%s total_force' % label, total_force)
    return True
def main_resistanceMatrix_hlx(**main_kwargs):
    """Compute the full resistance matrix of the helicoid-of-helices.

    Builds the composite geometry, assembles the Stokes problem and lets
    ``AtBtCt_full`` perform the unit-velocity solves about the composite
    center.
    """
    main_kwargs['zoom_factor'] = 1
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    # NOTE: the original also bound problem_kwargs['matrix_method'] to an
    # unused local; removed.
    fileHandle = problem_kwargs['fileHandle']
    pickProblem = problem_kwargs['pickProblem']
    print_case_info(**problem_kwargs)
    helicoid_comp = create_helicoid_hlx_comp(**problem_kwargs)
    helicoid_obj_list = helicoid_comp.get_obj_list()
    helicoid_center = helicoid_comp.get_center()
    # Assemble and solve.
    problem = sf.StokesFlowProblem(**problem_kwargs)
    for tobj in helicoid_obj_list:
        problem.add_obj(tobj)
    if pickProblem:
        problem.pickmyself('%s_tran' % fileHandle, ifcheck=True)
    problem.print_info()
    problem.create_matrix()
    AtBtCt_full(problem, save_vtk=False, pick_M=False,
                center=helicoid_center, save_name=fileHandle)
    return True
def main_resistanceMatrix_part(**main_kwargs):
    """Resistance matrix of a single helix tail (one helicoid part).

    Solves about the ``center`` given in the problem kwargs and prints the
    traces of the A, B1 and B2 sub-matrices.
    """
    OptDB = PETSc.Options()
    main_kwargs['zoom_factor'] = 1
    save_vtk = OptDB.getBool('save_vtk', False)
    main_kwargs['save_vtk'] = save_vtk
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    fileHandle = problem_kwargs['fileHandle']
    pickProblem = problem_kwargs['pickProblem']
    center = problem_kwargs['center']
    print_case_info(**problem_kwargs)
    tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
    # Assemble and solve for the tail objects alone.
    problem = sf.StokesFlowProblem(**problem_kwargs)
    for part in tail_obj_list:
        problem.add_obj(part)
    if pickProblem:
        problem.pickmyself('%s_tran' % fileHandle, ifcheck=True)
    problem.print_info()
    problem.create_matrix()
    At, Bt1, Bt2, Ct = AtBtCt_pickInfo(problem=problem, pick_M=False, save_vtk=save_vtk,
                                       center=center, print_each=False,
                                       save_name=fileHandle)
    PETSc.Sys.Print('Tr(A)=%f, Tr(B1)=%f, Tr(B2)=%f, ' %
                    (np.trace(At), np.trace(Bt1), np.trace(Bt2)))
    return True
def main_resistanceMatrix_selfRotate(**main_kwargs):
    """Resistance matrix of the self-rotating helicoid composite.

    Requires a ``_selfRotate`` matrix method; the composite center, norm
    and copy count are forwarded to the specialised problem class.
    """
    main_kwargs['zoom_factor'] = 1
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    matrix_method = problem_kwargs['matrix_method']
    fileHandle = problem_kwargs['fileHandle']
    print_case_info(**problem_kwargs)
    comp = create_helicoid_hlx_selfRotate(**problem_kwargs)
    obj_list = comp.get_obj_list()
    center = comp.get_center()
    # The self-rotate problem needs the composite geometry descriptors.
    problem_kwargs['problem_center'] = center
    problem_kwargs['problem_norm'] = comp.get_norm()
    problem_kwargs['problem_n_copy'] = problem_kwargs['helicoid_ndsk_each']
    problem = sf.problem_dic[matrix_method](**problem_kwargs)
    for part in obj_list:
        problem.add_obj(part)
    problem.print_info()
    problem.create_matrix()
    AtBtCt_selfRotate(problem, save_vtk=False, pick_M=False,
                      center=center, save_name=fileHandle)
    return True
def main_fun_E(**main_kwargs):
    """Solve the helicoid in the five independent base strain-rate flows.

    Collects the force-free reference velocity and total force of each
    passive base-flow solve.
    """
    OptDB = PETSc.Options()
    fileHandle = OptDB.getString('f', 'helicoid_strain_rate')
    OptDB.setValue('f', fileHandle)
    main_kwargs['fileHandle'] = fileHandle
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    problem_kwargs['basei'] = 1
    problem_kwargs['zoom_factor'] = 1
    if not problem_kwargs['restart']:
        print_case_info(**problem_kwargs)
        helicoid_comp = create_helicoid_hlx_comp(namehandle='helicoid', **problem_kwargs)
        problem = sf.StrainRateBaseForceFreeProblem(**problem_kwargs)
        problem.add_obj(helicoid_comp)
        problem.print_info()
        problem.create_matrix()
        uw_Base_list = []
        sumFT_Base_list = []
        # Passive cases: one solve per base strain-rate flow index.
        for basei in range(1, 6):
            uw_Base_list, sumFT_Base_list = do_solve_base_flow(
                basei, problem, helicoid_comp, uw_Base_list, sumFT_Base_list)
    return True
if __name__ == '__main__':
    # Dispatch: each main_* entry point is enabled by a PETSc boolean
    # option of the same name, e.g. ``-main_fun_E 1``.
    OptDB = PETSc.Options()
    # pythonmpi helicoid.py -sm lg_rs -legendre_m 3 -legendre_k 2 -epsilon 3 -ffweight 2 -main_fun_noIter 1 -vortexStrength 1 -helicoid_r1 1 -helicoid_r2 0.3 -helicoid_ds 0.03
    # if OptDB.getBool('main_fun_noIter', False):
    #     OptDB.setValue('main_fun', False)
    #     main_fun_noIter()
    # 'sm' selects the matrix method; the selfRotate entry point requires
    # (and the others forbid) a '_selfRotate' method variant.
    matrix_method = OptDB.getString('sm', 'pf')
    if OptDB.getBool('main_fun_E', False):
        assert '_selfRotate' not in matrix_method
        OptDB.setValue('main_fun', False)
        main_fun_E()
    matrix_method = OptDB.getString('sm', 'pf')
    if OptDB.getBool('main_resistanceMatrix_hlx', False):
        assert '_selfRotate' not in matrix_method
        OptDB.setValue('main_fun', False)
        main_resistanceMatrix_hlx()
    if OptDB.getBool('main_resistanceMatrix_part', False):
        assert '_selfRotate' not in matrix_method
        OptDB.setValue('main_fun', False)
        main_resistanceMatrix_part()
    if OptDB.getBool('main_resistanceMatrix_selfRotate', False):
        assert '_selfRotate' in matrix_method
        OptDB.setValue('main_fun', False)
        main_resistanceMatrix_selfRotate()
    # if OptDB.getBool('main_fun', True):
    #     main_fun()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.ml.classification import (
BinaryLogisticRegressionSummary,
BinaryRandomForestClassificationSummary,
FMClassifier,
FMClassificationSummary,
LinearSVC,
LinearSVCSummary,
LogisticRegression,
LogisticRegressionSummary,
MultilayerPerceptronClassifier,
MultilayerPerceptronClassificationSummary,
RandomForestClassificationSummary,
RandomForestClassifier,
)
from pyspark.ml.clustering import BisectingKMeans, GaussianMixture, KMeans
from pyspark.ml.linalg import Vectors
from pyspark.ml.regression import GeneralizedLinearRegression, LinearRegression
from pyspark.sql import DataFrame
from pyspark.testing.mlutils import SparkSessionTestCase
class TrainingSummaryTest(SparkSessionTestCase):
def test_linear_regression_summary(self):
    """LinearRegression training summary exposes the expected metrics."""
    df = self.spark.createDataFrame(
        [(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))],
        ["label", "weight", "features"],
    )
    estimator = LinearRegression(
        maxIter=5, regParam=0.0, solver="normal", weightCol="weight", fitIntercept=False
    )
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    summary = model.summary
    # API sanity: every summary attribute is callable and well-typed.
    self.assertEqual(summary.totalIterations, 0)
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.featuresCol, "features")
    history = summary.objectiveHistory
    self.assertTrue(isinstance(history, list) and isinstance(history[0], float))
    self.assertAlmostEqual(summary.explainedVariance, 0.25, 2)
    self.assertAlmostEqual(summary.meanAbsoluteError, 0.0)
    self.assertAlmostEqual(summary.meanSquaredError, 0.0)
    self.assertAlmostEqual(summary.rootMeanSquaredError, 0.0)
    self.assertAlmostEqual(summary.r2, 1.0, 2)
    self.assertAlmostEqual(summary.r2adj, 1.0, 2)
    self.assertTrue(isinstance(summary.residuals, DataFrame))
    self.assertEqual(summary.numInstances, 2)
    self.assertEqual(summary.degreesOfFreedom, 1)
    dev_residuals = summary.devianceResiduals
    self.assertTrue(isinstance(dev_residuals, list) and isinstance(dev_residuals[0], float))
    coef_std_err = summary.coefficientStandardErrors
    self.assertTrue(isinstance(coef_std_err, list) and isinstance(coef_std_err[0], float))
    t_values = summary.tValues
    self.assertTrue(isinstance(t_values, list) and isinstance(t_values[0], float))
    p_values = summary.pValues
    self.assertTrue(isinstance(p_values, list) and isinstance(p_values[0], float))
    # evaluate() on the training data must yield a summary with the same
    # values; one check suffices (the Scala side runs the full comparison).
    evaluated = model.evaluate(df)
    self.assertAlmostEqual(evaluated.explainedVariance, summary.explainedVariance)
def test_glr_summary(self):
    """GeneralizedLinearRegression training summary exposes GLR metrics."""
    from pyspark.ml.linalg import Vectors

    df = self.spark.createDataFrame(
        [(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))],
        ["label", "weight", "features"],
    )
    estimator = GeneralizedLinearRegression(
        family="gaussian", link="identity", weightCol="weight", fitIntercept=False
    )
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    summary = model.summary
    # API sanity: attribute types and values on the training summary.
    self.assertEqual(summary.numIterations, 1)  # this should default to a single iteration of WLS
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertEqual(summary.numInstances, 2)
    self.assertTrue(isinstance(summary.residuals(), DataFrame))
    self.assertTrue(isinstance(summary.residuals("pearson"), DataFrame))
    coef_std_err = summary.coefficientStandardErrors
    self.assertTrue(isinstance(coef_std_err, list) and isinstance(coef_std_err[0], float))
    t_values = summary.tValues
    self.assertTrue(isinstance(t_values, list) and isinstance(t_values[0], float))
    p_values = summary.pValues
    self.assertTrue(isinstance(p_values, list) and isinstance(p_values[0], float))
    self.assertEqual(summary.degreesOfFreedom, 1)
    self.assertEqual(summary.residualDegreeOfFreedom, 1)
    self.assertEqual(summary.residualDegreeOfFreedomNull, 2)
    self.assertEqual(summary.rank, 1)
    self.assertTrue(isinstance(summary.solver, str))
    self.assertTrue(isinstance(summary.aic, float))
    self.assertTrue(isinstance(summary.deviance, float))
    self.assertTrue(isinstance(summary.nullDeviance, float))
    self.assertTrue(isinstance(summary.dispersion, float))
    # evaluate() on the training data must yield an equivalent summary.
    evaluated = model.evaluate(df)
    self.assertAlmostEqual(evaluated.deviance, summary.deviance)
def test_binary_logistic_regression_summary(self):
    """Binary LogisticRegression training summary exposes binary metrics."""
    df = self.spark.createDataFrame(
        [(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))],
        ["label", "weight", "features"],
    )
    estimator = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    summary = model.summary
    # API sanity: attribute types and values on the training summary.
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.probabilityCol, "probability")
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.featuresCol, "features")
    self.assertEqual(summary.predictionCol, "prediction")
    history = summary.objectiveHistory
    self.assertTrue(isinstance(history, list) and isinstance(history[0], float))
    self.assertGreater(summary.totalIterations, 0)
    self.assertTrue(isinstance(summary.labels, list))
    self.assertTrue(isinstance(summary.truePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.falsePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.precisionByLabel, list))
    self.assertTrue(isinstance(summary.recallByLabel, list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(), list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(1.0), list))
    self.assertTrue(isinstance(summary.roc, DataFrame))
    self.assertAlmostEqual(summary.areaUnderROC, 1.0, 2)
    self.assertTrue(isinstance(summary.pr, DataFrame))
    self.assertTrue(isinstance(summary.fMeasureByThreshold, DataFrame))
    self.assertTrue(isinstance(summary.precisionByThreshold, DataFrame))
    self.assertTrue(isinstance(summary.recallByThreshold, DataFrame))
    self.assertAlmostEqual(summary.accuracy, 1.0, 2)
    self.assertAlmostEqual(summary.weightedTruePositiveRate, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFalsePositiveRate, 0.0, 2)
    self.assertAlmostEqual(summary.weightedRecall, 1.0, 2)
    self.assertAlmostEqual(summary.weightedPrecision, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(), 1.0, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(1.0), 1.0, 2)
    # evaluate() on the training data returns an equivalent binary summary.
    evaluated = model.evaluate(df)
    self.assertTrue(isinstance(evaluated, BinaryLogisticRegressionSummary))
    self.assertAlmostEqual(evaluated.areaUnderROC, summary.areaUnderROC)
def test_multiclass_logistic_regression_summary(self):
    """Multiclass LogisticRegression summary: no binary-only metrics."""
    df = self.spark.createDataFrame(
        [
            (1.0, 2.0, Vectors.dense(1.0)),
            (0.0, 2.0, Vectors.sparse(1, [], [])),
            (2.0, 2.0, Vectors.dense(2.0)),
            (2.0, 2.0, Vectors.dense(1.9)),
        ],
        ["label", "weight", "features"],
    )
    estimator = LogisticRegression(maxIter=5, regParam=0.01, weightCol="weight", fitIntercept=False)
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    summary = model.summary
    # API sanity: attribute types and values on the training summary.
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.probabilityCol, "probability")
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.featuresCol, "features")
    self.assertEqual(summary.predictionCol, "prediction")
    history = summary.objectiveHistory
    self.assertTrue(isinstance(history, list) and isinstance(history[0], float))
    self.assertGreater(summary.totalIterations, 0)
    self.assertTrue(isinstance(summary.labels, list))
    self.assertTrue(isinstance(summary.truePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.falsePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.precisionByLabel, list))
    self.assertTrue(isinstance(summary.recallByLabel, list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(), list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(1.0), list))
    self.assertAlmostEqual(summary.accuracy, 0.75, 2)
    self.assertAlmostEqual(summary.weightedTruePositiveRate, 0.75, 2)
    self.assertAlmostEqual(summary.weightedFalsePositiveRate, 0.25, 2)
    self.assertAlmostEqual(summary.weightedRecall, 0.75, 2)
    self.assertAlmostEqual(summary.weightedPrecision, 0.583, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(), 0.65, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(1.0), 0.65, 2)
    # evaluate() must return a multiclass (not binary) summary with the
    # same values; one check suffices, Scala runs the full comparison.
    evaluated = model.evaluate(df)
    self.assertTrue(isinstance(evaluated, LogisticRegressionSummary))
    self.assertFalse(isinstance(evaluated, BinaryLogisticRegressionSummary))
    self.assertAlmostEqual(evaluated.accuracy, summary.accuracy)
def test_linear_svc_summary(self):
    """LinearSVC training summary exposes the expected binary metrics.

    Fix: removed a leftover debug ``print(s.weightedTruePositiveRate)``
    that polluted test output.
    """
    df = self.spark.createDataFrame(
        [(1.0, 2.0, Vectors.dense(1.0, 1.0, 1.0)), (0.0, 2.0, Vectors.dense(1.0, 2.0, 3.0))],
        ["label", "weight", "features"],
    )
    svc = LinearSVC(maxIter=5, weightCol="weight")
    model = svc.fit(df)
    self.assertTrue(model.hasSummary)
    s = model.summary()
    # test that api is callable and returns expected types
    self.assertTrue(isinstance(s.predictions, DataFrame))
    self.assertEqual(s.scoreCol, "rawPrediction")
    self.assertEqual(s.labelCol, "label")
    self.assertEqual(s.predictionCol, "prediction")
    objHist = s.objectiveHistory
    self.assertTrue(isinstance(objHist, list) and isinstance(objHist[0], float))
    self.assertGreater(s.totalIterations, 0)
    self.assertTrue(isinstance(s.labels, list))
    self.assertTrue(isinstance(s.truePositiveRateByLabel, list))
    self.assertTrue(isinstance(s.falsePositiveRateByLabel, list))
    self.assertTrue(isinstance(s.precisionByLabel, list))
    self.assertTrue(isinstance(s.recallByLabel, list))
    self.assertTrue(isinstance(s.fMeasureByLabel(), list))
    self.assertTrue(isinstance(s.fMeasureByLabel(1.0), list))
    self.assertTrue(isinstance(s.roc, DataFrame))
    self.assertAlmostEqual(s.areaUnderROC, 1.0, 2)
    self.assertTrue(isinstance(s.pr, DataFrame))
    self.assertTrue(isinstance(s.fMeasureByThreshold, DataFrame))
    self.assertTrue(isinstance(s.precisionByThreshold, DataFrame))
    self.assertTrue(isinstance(s.recallByThreshold, DataFrame))
    self.assertAlmostEqual(s.weightedTruePositiveRate, 1.0, 2)
    self.assertAlmostEqual(s.weightedFalsePositiveRate, 0.0, 2)
    self.assertAlmostEqual(s.weightedRecall, 1.0, 2)
    self.assertAlmostEqual(s.weightedPrecision, 1.0, 2)
    self.assertAlmostEqual(s.weightedFMeasure(), 1.0, 2)
    self.assertAlmostEqual(s.weightedFMeasure(1.0), 1.0, 2)
    # test evaluation (with training dataset) produces a summary with same values
    # one check is enough to verify a summary is returned, Scala version runs full test
    sameSummary = model.evaluate(df)
    self.assertTrue(isinstance(sameSummary, LinearSVCSummary))
    self.assertAlmostEqual(sameSummary.areaUnderROC, s.areaUnderROC)
def test_binary_randomforest_classification_summary(self):
    """Binary RandomForestClassifier summary exposes binary metrics."""
    df = self.spark.createDataFrame(
        [(1.0, 2.0, Vectors.dense(1.0)), (0.0, 2.0, Vectors.sparse(1, [], []))],
        ["label", "weight", "features"],
    )
    estimator = RandomForestClassifier(weightCol="weight")
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    summary = model.summary
    # API sanity: attribute types and values on the training summary.
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertEqual(summary.totalIterations, 0)
    self.assertTrue(isinstance(summary.labels, list))
    self.assertTrue(isinstance(summary.truePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.falsePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.precisionByLabel, list))
    self.assertTrue(isinstance(summary.recallByLabel, list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(), list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(1.0), list))
    self.assertTrue(isinstance(summary.roc, DataFrame))
    self.assertAlmostEqual(summary.areaUnderROC, 1.0, 2)
    self.assertTrue(isinstance(summary.pr, DataFrame))
    self.assertTrue(isinstance(summary.fMeasureByThreshold, DataFrame))
    self.assertTrue(isinstance(summary.precisionByThreshold, DataFrame))
    self.assertTrue(isinstance(summary.recallByThreshold, DataFrame))
    self.assertAlmostEqual(summary.accuracy, 1.0, 2)
    self.assertAlmostEqual(summary.weightedTruePositiveRate, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFalsePositiveRate, 0.0, 2)
    self.assertAlmostEqual(summary.weightedRecall, 1.0, 2)
    self.assertAlmostEqual(summary.weightedPrecision, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(), 1.0, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(1.0), 1.0, 2)
    # evaluate() on the training data returns an equivalent binary summary.
    evaluated = model.evaluate(df)
    self.assertTrue(isinstance(evaluated, BinaryRandomForestClassificationSummary))
    self.assertAlmostEqual(evaluated.areaUnderROC, summary.areaUnderROC)
def test_multiclass_randomforest_classification_summary(self):
    """Multiclass RandomForestClassifier summary: no binary-only metrics."""
    df = self.spark.createDataFrame(
        [
            (1.0, 2.0, Vectors.dense(1.0)),
            (0.0, 2.0, Vectors.sparse(1, [], [])),
            (2.0, 2.0, Vectors.dense(2.0)),
            (2.0, 2.0, Vectors.dense(1.9)),
        ],
        ["label", "weight", "features"],
    )
    estimator = RandomForestClassifier(weightCol="weight")
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    summary = model.summary
    # API sanity: attribute types and values on the training summary.
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertEqual(summary.totalIterations, 0)
    self.assertTrue(isinstance(summary.labels, list))
    self.assertTrue(isinstance(summary.truePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.falsePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.precisionByLabel, list))
    self.assertTrue(isinstance(summary.recallByLabel, list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(), list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(1.0), list))
    self.assertAlmostEqual(summary.accuracy, 1.0, 2)
    self.assertAlmostEqual(summary.weightedTruePositiveRate, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFalsePositiveRate, 0.0, 2)
    self.assertAlmostEqual(summary.weightedRecall, 1.0, 2)
    self.assertAlmostEqual(summary.weightedPrecision, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(), 1.0, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(1.0), 1.0, 2)
    # evaluate() must return a multiclass (not binary) summary with the
    # same values; one check suffices, Scala runs the full comparison.
    evaluated = model.evaluate(df)
    self.assertTrue(isinstance(evaluated, RandomForestClassificationSummary))
    self.assertFalse(isinstance(evaluated, BinaryRandomForestClassificationSummary))
    self.assertAlmostEqual(evaluated.accuracy, summary.accuracy)
def test_fm_classification_summary(self):
    """FMClassifier training summary exposes the expected binary metrics."""
    df = self.spark.createDataFrame(
        [
            (1.0, Vectors.dense(2.0)),
            (0.0, Vectors.dense(2.0)),
            (0.0, Vectors.dense(6.0)),
            (1.0, Vectors.dense(3.0)),
        ],
        ["label", "features"],
    )
    estimator = FMClassifier(maxIter=5)
    model = estimator.fit(df)
    self.assertTrue(model.hasSummary)
    # NOTE: FMClassificationModel exposes summary as a method, not a property.
    summary = model.summary()
    # API sanity: attribute types and values on the training summary.
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.scoreCol, "probability")
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.predictionCol, "prediction")
    history = summary.objectiveHistory
    self.assertTrue(isinstance(history, list) and isinstance(history[0], float))
    self.assertGreater(summary.totalIterations, 0)
    self.assertTrue(isinstance(summary.labels, list))
    self.assertTrue(isinstance(summary.truePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.falsePositiveRateByLabel, list))
    self.assertTrue(isinstance(summary.precisionByLabel, list))
    self.assertTrue(isinstance(summary.recallByLabel, list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(), list))
    self.assertTrue(isinstance(summary.fMeasureByLabel(1.0), list))
    self.assertTrue(isinstance(summary.roc, DataFrame))
    self.assertAlmostEqual(summary.areaUnderROC, 0.625, 2)
    self.assertTrue(isinstance(summary.pr, DataFrame))
    self.assertTrue(isinstance(summary.fMeasureByThreshold, DataFrame))
    self.assertTrue(isinstance(summary.precisionByThreshold, DataFrame))
    self.assertTrue(isinstance(summary.recallByThreshold, DataFrame))
    self.assertAlmostEqual(summary.weightedTruePositiveRate, 0.75, 2)
    self.assertAlmostEqual(summary.weightedFalsePositiveRate, 0.25, 2)
    self.assertAlmostEqual(summary.weightedRecall, 0.75, 2)
    self.assertAlmostEqual(summary.weightedPrecision, 0.8333333333333333, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(), 0.7333333333333334, 2)
    self.assertAlmostEqual(summary.weightedFMeasure(1.0), 0.7333333333333334, 2)
    # evaluate() on the training data returns an equivalent summary.
    evaluated = model.evaluate(df)
    self.assertTrue(isinstance(evaluated, FMClassificationSummary))
    self.assertAlmostEqual(evaluated.areaUnderROC, summary.areaUnderROC)
def test_mlp_classification_summary(self):
    """MultilayerPerceptronClassifier training summary exposes the documented API."""
    frame = self.spark.createDataFrame(
        [
            (0.0, Vectors.dense([0.0, 0.0])),
            (1.0, Vectors.dense([0.0, 1.0])),
            (1.0, Vectors.dense([1.0, 0.0])),
            (0.0, Vectors.dense([1.0, 1.0])),
        ],
        ["label", "features"],
    )
    estimator = MultilayerPerceptronClassifier(layers=[2, 2, 2], seed=123)
    fitted = estimator.fit(frame)
    self.assertTrue(fitted.hasSummary)
    summary = fitted.summary()
    # test that api is callable and returns expected types
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.labelCol, "label")
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertGreater(summary.totalIterations, 0)
    # per-label metrics all come back as plain python lists
    for per_label in (
        summary.labels,
        summary.truePositiveRateByLabel,
        summary.falsePositiveRateByLabel,
        summary.precisionByLabel,
        summary.recallByLabel,
        summary.fMeasureByLabel(),
        summary.fMeasureByLabel(1.0),
    ):
        self.assertTrue(isinstance(per_label, list))
    # this tiny XOR-ish dataset is fit perfectly, so every weighted metric is 1
    for weighted_metric in (
        summary.accuracy,
        summary.weightedTruePositiveRate,
        summary.weightedRecall,
        summary.weightedPrecision,
        summary.weightedFMeasure(),
        summary.weightedFMeasure(1.0),
    ):
        self.assertAlmostEqual(weighted_metric, 1.0, 2)
    self.assertAlmostEqual(summary.weightedFalsePositiveRate, 0.0, 2)
    # test evaluation (with training dataset) produces a summary with same values
    # one check is enough to verify a summary is returned, Scala version runs full test
    reevaluated = fitted.evaluate(frame)
    self.assertTrue(isinstance(reevaluated, MultilayerPerceptronClassificationSummary))
    self.assertAlmostEqual(reevaluated.accuracy, summary.accuracy)
def test_gaussian_mixture_summary(self):
    """GaussianMixture training summary reports clustering columns and sizes."""
    rows = [
        (Vectors.dense(1.0),),
        (Vectors.dense(5.0),),
        (Vectors.dense(10.0),),
        (Vectors.sparse(1, [], []),),
    ]
    frame = self.spark.createDataFrame(rows, ["features"])
    fitted = GaussianMixture(k=2).fit(frame)
    self.assertTrue(fitted.hasSummary)
    summary = fitted.summary
    # summary surfaces the prediction/probability data frames and column names
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.probabilityCol, "probability")
    self.assertTrue(isinstance(summary.probability, DataFrame))
    self.assertEqual(summary.featuresCol, "features")
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertTrue(isinstance(summary.cluster, DataFrame))
    # k=2 clustering of this data converges in 3 iterations
    self.assertEqual(len(summary.clusterSizes), 2)
    self.assertEqual(summary.k, 2)
    self.assertEqual(summary.numIter, 3)
def test_bisecting_kmeans_summary(self):
    """BisectingKMeans training summary reports clustering columns and sizes."""
    rows = [
        (Vectors.dense(1.0),),
        (Vectors.dense(5.0),),
        (Vectors.dense(10.0),),
        (Vectors.sparse(1, [], []),),
    ]
    frame = self.spark.createDataFrame(rows, ["features"])
    fitted = BisectingKMeans(k=2).fit(frame)
    self.assertTrue(fitted.hasSummary)
    summary = fitted.summary
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.featuresCol, "features")
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertTrue(isinstance(summary.cluster, DataFrame))
    self.assertEqual(len(summary.clusterSizes), 2)
    self.assertEqual(summary.k, 2)
    # bisecting k-means runs its default maximum of 20 iterations here
    self.assertEqual(summary.numIter, 20)
def test_kmeans_summary(self):
    """KMeans training summary reports clustering columns and sizes."""
    rows = [
        (Vectors.dense([0.0, 0.0]),),
        (Vectors.dense([1.0, 1.0]),),
        (Vectors.dense([9.0, 8.0]),),
        (Vectors.dense([8.0, 9.0]),),
    ]
    frame = self.spark.createDataFrame(rows, ["features"])
    fitted = KMeans(k=2, seed=1).fit(frame)
    self.assertTrue(fitted.hasSummary)
    summary = fitted.summary
    self.assertTrue(isinstance(summary.predictions, DataFrame))
    self.assertEqual(summary.featuresCol, "features")
    self.assertEqual(summary.predictionCol, "prediction")
    self.assertTrue(isinstance(summary.cluster, DataFrame))
    self.assertEqual(len(summary.clusterSizes), 2)
    self.assertEqual(summary.k, 2)
    # the two well-separated pairs converge after a single iteration
    self.assertEqual(summary.numIter, 1)
if __name__ == "__main__":
    # Re-export the test cases so unittest discovery finds them at module level.
    from pyspark.ml.tests.test_training_summary import *  # noqa: F401

    try:
        # Prefer the XML runner so CI can collect JUnit-style reports.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to the default text runner when xmlrunner is unavailable.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
# Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import sys
import requests
import traceback
import json
import os
from dispel4py.registry import utils
# Default registry endpoint and workspace, used when none is configured
# (the endpoint can be overridden via the VERCEREGISTRY_HOST env var).
DEF_URL = 'http://escience8.inf.ed.ac.uk:8080/VerceRegistry/rest/'
DEF_WORKSPACE = 1
# Sub-package suffixes under which implementations and generic definitions
# are stored in the registry's package hierarchy.
PKG_IMPLEMENTATION = ".__impl"
PKG_GENERICDEF = ".__gendef"
# HTTP header that carries the registry authentication token.
AUTH_HEADER = 'X-Auth-Token'
class VerceRegistry(object):
    '''
    Dispel4Py's interface to the VERCE Registry. Dispel4Py could work without a registry or through
    connecting to alternative registries of python and dispel4py components. In this instance this
    makes use of the VERCE Registry's REST API.

    Instances also act as a PEP 302 import hook (see find_module/load_module),
    so registered components can be imported like ordinary python modules once
    the instance has been appended to sys.meta_path.
    '''

    # Connection defaults; normally overridden per instance by __init__
    # and initRegistry().
    registry_url = DEF_URL
    workspace = DEF_WORKSPACE
    user = None
    # Cache mapping a component's full name to its fetched source code.
    registered_entities = {}
    token = None

    def __init__(self, wspc_id=DEF_WORKSPACE):
        # this imports the requests module before anything else
        # so we don't get a loop when importing
        requests.get('http://github.com')
        # change the registry URL according to the environment var, if set
        if 'VERCEREGISTRY_HOST' in os.environ:
            self.registry_url = os.environ['VERCEREGISTRY_HOST']
        self.workspace = wspc_id
        # print 'Initialised VerceRegistry object for ' + self.registry_url

    def set_workspace(self, wspc_id):
        '''Selects the registry workspace to operate on.'''
        self.workspace = wspc_id

    def login(self, user, password):
        '''
        Authenticates against the registry and stores the access token on
        this instance. Raises NotAuthorisedException when the server refuses
        the credentials or returns no token.
        '''
        url = self.registry_url + 'login?username=%s&password=%s' % (user, password)
        response = requests.post(url)
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError:
            if response.status_code == requests.codes.forbidden:
                raise NotAuthorisedException()
            else:
                raise
        try:
            self.token = response.json()['access_token']
            self.user = user
        except:
            # response body did not contain an access token
            raise NotAuthorisedException()

    def find_module(self, fullname, path=None):
        '''
        PEP 302 finder: returns self when 'fullname' names a registered
        package or component in the current workspace, otherwise None so the
        import machinery falls through to the next importer.
        '''
        try:
            url = self.registry_url + "workspace/%s?packagesByPrefix=%s&exists=true" % (self.workspace, fullname)
            response = requests.get(url, headers=getHeaders(self.token))
        except:
            # network failure or missing token - defer to other importers
            return None
        if response.status_code != requests.codes.ok:
            return None
        if response.json()[0]:
            return self
        else:
            # maybe it's an object?
            code = self.get_code(fullname)
            if code is None:
                return None
            else:
                # print "found code for " + fullname
                self.registered_entities[fullname] = code
                return self

    def clone(self, wspc_id, clone_name):
        ''' Clone the given Workspace(wspc_id) into a new one named clone_name. '''
        try:
            url = self.registry_url + 'workspace/%s?cloneTo=%s' % (wspc_id, clone_name)
            data = { 'workspace': { 'id': wspc_id } }
            # print url
            # print json.dumps(data)
            response = requests.post(url, headers=getHeaders(self.token), data=json.dumps(data))
            # print response.json()
        except Exception as err:
            # NOTE(review): any failure terminates the whole process here,
            # unlike the other methods which raise - confirm this is intended.
            print traceback.format_exc()
            sys.stderr.write("An error occurred:\n%s\n" % err)
            sys.exit(-1)

    def load_module(self, fullname):
        '''
        PEP 302 loader: creates (or returns the cached) module object for
        'fullname'; when source was cached by find_module it is compiled
        into the new module's namespace.
        '''
        # print "load_module " + fullname
        if fullname in sys.modules:
            return sys.modules[fullname]
        mod = imp.new_module(fullname)
        mod.__loader__ = self
        sys.modules[fullname] = mod
        if fullname in self.registered_entities:
            code = self.registered_entities[fullname]
            print "compiling code for module " + fullname
            exec code in mod.__dict__
        mod.__file__ = "[%r]" % fullname
        # mark as a package so submodule imports can be attempted
        mod.__path__ = []
        return mod

    def get_code(self, fullname):
        '''
        Retrieves and returns the source code of the dispel4py component identified by 'fullname'.
        'fullname' is in the form package.name. Returns None when the
        component has no retrievable implementation.
        '''
        impl_id = self.getImplementationId(fullname)
        if impl_id:
            response = requests.get(self.registry_url + "implementation/%s" % impl_id, headers=getHeaders(self.token))
            if response.status_code == requests.codes.ok:
                return response.json()["code"]

    def update_code(self, fullname, code):
        '''
        Updates/replaces the source code of the given dispel4py component identified by 'fullname'
        with the contents of 'code'. Returns the implementation id; raises on
        any server-side failure.
        '''
        impl_id = self.getImplementationId(fullname)
        impl = {}
        impl["user"] = { 'username' : self.user }
        # NOTE(review): the register_* methods use the key "workspaceId";
        # this one uses "workspace" - verify against the REST API schema.
        impl["workspace"] = self.workspace
        impl["code"] = code
        data = { 'implementation' : impl }
        url = self.registry_url + "implementation/%s" % impl_id
        response = requests.put(url, headers=getHeaders(self.token), data=json.dumps(data))
        if response.status_code != requests.codes.ok:
            raise Exception("Implementation update failed")
        response_json = response.json()
        if "errors" in response_json:
            print "Error: %s" % response_json["errors"]
            raise Exception("Implementation update failed")
        return response_json["id"]

    def getImplementationId(self, fullname):
        '''
        Looks up the id of the first implementation of component 'fullname'
        in the current workspace; returns None when it cannot be resolved.
        '''
        pkg, simpleName = split_name(fullname)
        url = self.registry_url + "workspace/%s/%s/%s?deep=true" % (self.workspace, pkg, simpleName)
        try:
            response = requests.get(url, headers=getHeaders(self.token))
        except:
            return None
        if response.status_code != requests.codes.ok:
            return None
        # NOTE(review): this local deliberately-or-not shadows the imported
        # json module; safe here only because the module isn't used below.
        json = response.json()
        if "implementations" in json:
            impl_id = json["implementations"][0]["id"]
            return impl_id

    def register_gendef(self, pkg, simpleName):
        '''
        Registers a generic definition for 'simpleName' under the gendef
        sub-package of 'pkg' and returns its id.
        '''
        gendef = {}
        gendef["user"] = { 'username' : self.user }
        gendef["workspaceId"] = self.workspace
        gendef["pckg"] = pkg + PKG_GENERICDEF
        gendef["name"] = simpleName
        data = {}
        data["gendef"] = gendef;
        response = requests.post(self.registry_url + "gendef/", data=json.dumps(data), headers=getHeaders(self.token))
        try:
            response.raise_for_status()
        except:
            print response.text
            raise RegistrationFailed, "Registration of generic definition failed", sys.exc_info()[2]
        return response.json().get("id")

    def register_implementation(self, sigId, pkg, simpleName, path):
        '''
        Registers the file at 'path' as the implementation of the signature
        'sigId', stored under the implementation sub-package of 'pkg'.
        Returns the new implementation's id.
        '''
        with open(path, "r") as src:
            code = src.read()
        impl = {}
        impl["user"] = { 'username' : self.user }
        impl["workspaceId"] = self.workspace
        impl["pckg"] = pkg + PKG_IMPLEMENTATION
        impl["name"] = simpleName
        impl["genericSigId"] = sigId
        impl["code"] = code
        data = {}
        data["implementation"] = impl
        response = requests.post(self.registry_url + "implementation/", data=json.dumps(data), headers=getHeaders(self.token))
        try:
            response.raise_for_status()
        except:
            raise RegistrationFailed, "Registration of implementation failed", sys.exc_info()[2]
        response_json = response.json()
        if "errors" in response_json:
            print "Error: %s" % response_json["errors"]
            raise Exception("Registration of implementation failed")
        return response_json["id"]

    def register_function(self, fullname, functionName, path):
        '''
        Registers a dispel4py/python function with the VERCE Registry. The function is registered under
        'fullname' and it is identified by 'functionName'. 'path' is the path to a file containing
        the source code of the function to be registered.
        '''
        pkg, simpleName = split_name(fullname)
        # load the code
        funImpl = utils.loadSource(simpleName, path, functionName)
        funAnn = utils.extractAnnotations(funImpl)
        # build the function signature
        function = {}
        function["user"] = { 'username' : self.user }
        function["workspaceId"] = self.workspace
        function["pckg"] = pkg
        function["name"] = simpleName
        function["parameters"]=[]
        for param in funAnn['params']:
            function["parameters"].append(param['type'] + " " + param['name'])
        function["returnType"]=funAnn['return']
        data = {}
        data["function"] = function
        # print "Registering function " + simpleName + " in " + pkg
        genDefId = self.register_gendef(pkg, simpleName)
        # print "Registered generic definition: id = %s" % genDefId
        # register function signature
        function["genericDefId"] = genDefId
        try:
            response = requests.post(self.registry_url + "function/", data=json.dumps(data), headers=getHeaders(self.token))
            try:
                response.raise_for_status()
            except:
                # roll back the generic definition if the signature failed
                requests.delete(self.registry_url + "gendef/%s" % genDefId)
                raise RegistrationFailed, "Registration of function signature failed", sys.exc_info()[2]
            functionId = response.json()["id"]
            # print "Registered function signature: id = %s" % functionId
            implId = self.register_implementation(functionId, pkg, simpleName, path)
            # print "Registered implementation: id = %s" % implId
        except:
            # delete everything that was registered if anything went wrong
            requests.delete(self.registry_url + "gendef/%s" % genDefId)
            raise

    def register_pe(self, fullname, className, path):
        '''
        Registers a dispel4py processing element (PE) with the VERCE Registry. The PE is registered under
        'fullname' and it is identified by 'className'. 'path' is the path to a file containing
        the source code of the PE to be registered.
        '''
        pkg, simpleName = split_name(fullname)
        # load the code
        peImpl = utils.loadSource(simpleName, path, className)()
        # prepare the PE signature
        peSig = {}
        peSig["user"] = { 'username' : self.user }
        peSig["workspaceId"] = self.workspace
        peSig["pckg"] = pkg
        peSig["name"] = simpleName
        # kind 0 = input connection, kind 1 = output connection
        connections = []
        for conx in peImpl.inputconnections.values():
            connection = {}
            connection["name"] = conx['name']
            connection["kind"] = 0
            connection["modifiers"] = []
            connections.append(connection)
        for conx in peImpl.outputconnections.values():
            connection = {}
            connection["name"] = conx['name']
            connection["kind"] = 1
            connection["modifiers"] = []
            connections.append(connection)
        peSig["connections"] = connections
        data = {}
        data["pesig"] = peSig
        # Register generic signature
        # print "Registering PE " + simpleName + " in " + pkg
        genDefId = self.register_gendef(pkg, simpleName)
        # print "Registered generic definition: id = %s" % genDefId
        try:
            # Register PE signature
            peSig["genericDefId"] = genDefId
            response = requests.post(self.registry_url + "pe/", data=json.dumps(data), headers=getHeaders(self.token))
            try:
                response.raise_for_status()
            except:
                requests.delete(self.registry_url + "gendef/%s" % genDefId)
                raise RegistrationFailed, "Registration of PE signature failed", sys.exc_info()[2]
            peId = response.json()["id"]
            # print "Registered PE signature: id = %s" % peId
            # Register implementation
            implId = self.register_implementation(peId, pkg, simpleName, path)
            # print "Registered implementation: id = %s" % implId
        except:
            # delete everything that was registered if anything went wrong
            requests.delete(self.registry_url + "gendef/%s" % genDefId)
            raise

    def list(self, pkg):
        '''
        Lists the contents of package 'pkg'. Returns a list of
        {'name': ..., 'type': ...} dicts; raises UnknownPackageException
        when the package does not exist.
        '''
        url = self.registry_url + "workspace/%s/%s" % (self.workspace, pkg)
        response = requests.get(url, headers=getHeaders(self.token))
        result = []
        if response.status_code == requests.codes.ok:
            response_json = response.json()
            for obj in response_json:
                desc = { 'name' : obj['name'], 'type' : obj['class'] }
                result.append(desc)
        elif response.status_code == requests.codes.not_found: # not found
            raise UnknownPackageException(pkg)
        return result

    def listPackages(self, pkg):
        '''
        Lists the packages contained within package 'pkg'. Raises
        UnknownPackageException when the package does not exist.
        '''
        url = self.registry_url + "workspace/%s?packagesByPrefix=%s" % (self.workspace, pkg)
        response = requests.get(url, headers=getHeaders(self.token))
        # print json.dumps(response.json(), sort_keys=True, indent=4)
        result = []
        if response.status_code == requests.codes.ok:
            result = response.json()
        elif response.status_code == requests.codes.not_found: # not found
            raise UnknownPackageException(pkg)
        return result

    def delete(self, fullname):
        '''
        Deletes the component 'fullname' by removing its generic definition
        (which cascades on the server side). Prints the outcome.
        '''
        pkg, simpleName = split_name(fullname)
        # assume that the gen def is defined in subpackage PKG_GENERICDEF
        url = self.registry_url + "workspace/%s/%s%s/%s" % (self.workspace, pkg, PKG_GENERICDEF, simpleName)
        response = requests.get(url, headers=getHeaders(self.token))
        if response.status_code == requests.codes.ok:
            genDefId = response.json()["id"]
            response = requests.delete(self.registry_url + "gendef/%s" % genDefId, headers=getHeaders(self.token))
            if response.status_code == requests.codes.ok:
                print "Deleted " + fullname
            else:
                print "Failed to delete %s" % fullname
                print response.text
        else:
            print "Cannot find " + fullname

    def createWorkspace(self, name):
        '''
        Creates a new workspace named 'name' owned by the current user.
        Raises NotAuthorisedException or RegistrationFailed on error.
        '''
        url = self.registry_url + 'workspace'
        data = { 'workspace': { 'name' : name, 'owner' : self.user } }
        response = requests.post(url, data=json.dumps(data), headers=getHeaders(self.token))
        if response.status_code == requests.codes.forbidden:
            raise NotAuthorisedException()
        if response.status_code != requests.codes.ok:
            raise RegistrationFailed()

    def listWorkspaces(self):
        '''
        Returns the raw HTTP response listing all workspaces visible to the
        current user. Raises NotAuthorisedException or RegistrationFailed.
        '''
        url = self.registry_url + 'workspace'
        response = requests.get(url, headers=getHeaders(self.token))
        if response.status_code == requests.codes.forbidden:
            raise NotAuthorisedException()
        if response.status_code != requests.codes.ok:
            raise RegistrationFailed()
        return response
##############################################################################
# Utility and static methods:
##############################################################################
def remove_registry_from_meta_path():
    '''
    Removes any installed registry importer from sys.meta_path.

    Uses isinstance() to match how currentRegistry() looks registries up;
    the previous exact-type test (type(i) != VerceRegistry) left subclass
    instances on the meta path even though currentRegistry() would find them.
    '''
    sys.meta_path = [finder for finder in sys.meta_path
                     if not isinstance(finder, VerceRegistry)]
def currentRegistry():
    '''
    Returns the currently used registry, or None when none is installed
    on sys.meta_path.
    '''
    registries = (finder for finder in sys.meta_path
                  if isinstance(finder, VerceRegistry))
    return next(registries, None)
def initRegistry(username=None, password=None, url=DEF_URL, workspace=DEF_WORKSPACE, token=None):
    '''
    Initialises the registry. This method must be called before any 'import'
    statements that should resolve against registry contents.
    '''
    # drop any previously installed registry importer first
    remove_registry_from_meta_path()
    registry = VerceRegistry()
    registry.registry_url = url
    registry.workspace = workspace
    registry.user = username
    if token:
        # reuse an existing token: probe the server once to validate it
        registry.token = token
        response = requests.get(url + 'dummy', headers=getHeaders(token))
        if response.status_code == requests.codes.forbidden:
            raise NotAuthorisedException()
        response.raise_for_status()
    else:
        # no token supplied - perform a full username/password login
        registry.login(username, password)
    sys.meta_path.append(registry)
    return registry
def split_name(fullname):
    '''
    Splits a dotted component name into (package, simple name).
    A name without a dot yields an empty package string.
    '''
    pkg, _, simpleName = fullname.rpartition('.')
    return pkg, simpleName
def getHeaders(token):
    '''
    Returns the HTTP auth headers for the given token; raises
    NotAuthorisedException when no token is available.
    '''
    if not token:
        raise NotAuthorisedException()
    return { AUTH_HEADER : token }
class NotAuthorisedException(Exception):
    '''Raised when the registry rejects the user's credentials or token.'''
    pass
class UnknownPackageException(Exception):
    '''Raised when a requested registry package does not exist.'''
    pass
class RegistrationFailed(Exception):
    '''Raised when registering or creating an entity in the registry fails.'''
    pass
import os
def createResources(resources_dir, registry):
    '''
    Caches source code imported from the registry.

    :param resources_dir: directory for caching the source code
    :param registry: the dispel4py registry, may be None.
    '''
    if not registry:
        return
    # dict.items() behaves the same as the former iteritems() here and is
    # valid under both Python 2 and Python 3.
    for mod, code in registry.registered_entities.items():
        store_resource(resources_dir, mod, code)
def store_resource(resources_dir, mod, code):
'''
Stores the source of the given python module to a file.
:param resources_dir: directory to store the source
:param mod: module name
:param code: source code
'''
try:
pkg = mod.replace(".", "/")
path = "%s/%s.py" % (resources_dir, pkg)
dir = os.path.dirname(path)
if not os.path.exists(dir):
os.makedirs(dir)
fullpath = resources_dir + '/'
for part in pkg.split('/')[:-1]:
fullpath += part + '/'
open(fullpath + "__init__.py", 'w').close()
with open(path, "w") as code_file:
code_file.write(code)
# print "Wrote source code to %s" % path
except AttributeError:
print "Warning: Could not find source code for module " + mod
except Exception as exc:
print exc
print "Warning: Could not store source code for module " + mod
| |
# -*- coding: utf-8 -*-
__author__ = 'lightrevan'
import unittest
import tempfile
import os
import re
from log_parser.file_parsers import *
from log_parser.row_parsers import *
from log_parser_tests.mocks_and_stubs import *
class SingleLineFileParserTest(unittest.TestCase):
    """SingleLineFileParser must yield only the rows carrying a match."""

    def setUp(self):
        """Build a parser over a fixed stream with matches at rows 5, 8 and 16."""
        rows = [('1 a', {'timestamp': 1, 'match': None}),
                ('2 b', {'timestamp': 2, 'match': None}),
                ('3 c', {'timestamp': 3, 'match': None}),
                ('4 d', {'timestamp': 4, 'match': None}),
                ('5 abcd', {'timestamp': 5, 'match': 'abcd'}),
                ('6 e', {'timestamp': 6, 'match': None}),
                ('7 f', {'timestamp': 7, 'match': None}),
                ('8 abcd', {'timestamp': 8, 'match': 'abcd'}),
                ('9 a', {'timestamp': 9, 'match': None}),
                ('10 a', {'timestamp': 10, 'match': None}),
                ('11 a', {'timestamp': 11, 'match': None}),
                ('12 a', {'timestamp': 12, 'match': None}),
                ('13 a', {'timestamp': 13, 'match': None}),
                ('14 a', {'timestamp': 14, 'match': None}),
                ('15 a', {'timestamp': 15, 'match': None}),
                ('16 abcd', {'timestamp': 16, 'match': 'abcd'}),
                ('17 a', {'timestamp': 17, 'match': None})]
        self.tested = SingleLineFileParser(RowGetterStub(rows))

    def test_parsing(self):
        """Only the three 'abcd' rows come back, in stream order."""
        expected = [(5, '5 abcd'),
                    (8, '8 abcd'),
                    (16, '16 abcd')]
        observed = [(params['timestamp'], row) for row, params in self.tested]
        self.assertEqual(observed, expected)
class ContextFileParserTest(unittest.TestCase):
    """SimpleContextFileParser: a match plus N surrounding rows are emitted.

    The fixture has matches at timestamps 5, 8 and 16; each test varies the
    context size and checks which neighbouring rows are included.
    """

    def setUp(self):
        # 18 rows; matches at 5, 8 and 16, with trailing rows after the last match
        data = [('1 a', {'timestamp': 1, 'match': None}),
                ('2 b', {'timestamp': 2, 'match': None}),
                ('3 c', {'timestamp': 3, 'match': None}),
                ('4 d', {'timestamp': 4, 'match': None}),
                ('5 abcd', {'timestamp': 5, 'match': 'abcd'}),
                ('6 e', {'timestamp': 6, 'match': None}),
                ('7 f', {'timestamp': 7, 'match': None}),
                ('8 abcd', {'timestamp': 8, 'match': 'abcd'}),
                ('9 a', {'timestamp': 9, 'match': None}),
                ('10 a', {'timestamp': 10, 'match': None}),
                ('11 a', {'timestamp': 11, 'match': None}),
                ('12 a', {'timestamp': 12, 'match': None}),
                ('13 a', {'timestamp': 13, 'match': None}),
                ('14 a', {'timestamp': 14, 'match': None}),
                ('15 a', {'timestamp': 15, 'match': None}),
                ('16 abcd', {'timestamp': 16, 'match': 'abcd'}),
                ('17 a', {'timestamp': 17, 'match': None}),
                ('18 a', {'timestamp': 18, 'match': None})]
        self.tested = SimpleContextFileParser(RowGetterStub(data), context_size=3)

    def test_parsing_c1(self):
        """Context size 1: one row either side of each match."""
        self.tested.set_context_size(1)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(4, '4 d'),
                            (5, '5 abcd'),
                            (6, '6 e'),
                            (7, '7 f'),
                            (8, '8 abcd'),
                            (9, '9 a'),
                            (15, '15 a'),
                            (16, '16 abcd'),
                            (17, '17 a')]
        self.assertEqual(results, required_results)

    def test_parsing_c2(self):
        """Context size 2: two rows either side; contexts around 5 and 8 merge."""
        self.tested.set_context_size(2)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(3, '3 c'),
                            (4, '4 d'),
                            (5, '5 abcd'),
                            (6, '6 e'),
                            (7, '7 f'),
                            (8, '8 abcd'),
                            (9, '9 a'),
                            (10, '10 a'),
                            (14, '14 a'),
                            (15, '15 a'),
                            (16, '16 abcd'),
                            (17, '17 a'),
                            (18, '18 a')]
        self.assertEqual(results, required_results)

    def test_parsing_c3(self):
        """Default context size 3 (set in setUp): widest window, still two groups."""
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(2, '2 b'),
                            (3, '3 c'),
                            (4, '4 d'),
                            (5, '5 abcd'),
                            (6, '6 e'),
                            (7, '7 f'),
                            (8, '8 abcd'),
                            (9, '9 a'),
                            (10, '10 a'),
                            (11, '11 a'),
                            (13, '13 a'),
                            (14, '14 a'),
                            (15, '15 a'),
                            (16, '16 abcd'),
                            (17, '17 a'),
                            (18, '18 a')]
        self.assertEqual(results, required_results)
class ContextFileParserTestShortFile(unittest.TestCase):
    """SimpleContextFileParser when the file ends right at the last match.

    Same fixture as ContextFileParserTest but truncated after row 16, so no
    trailing context is available past the final match.
    """

    def setUp(self):
        # 16 rows; matches at 5, 8 and 16 - the stream ends on a match
        data = [('1 a', {'timestamp': 1, 'match': None}),
                ('2 b', {'timestamp': 2, 'match': None}),
                ('3 c', {'timestamp': 3, 'match': None}),
                ('4 d', {'timestamp': 4, 'match': None}),
                ('5 abcd', {'timestamp': 5, 'match': 'abcd'}),
                ('6 e', {'timestamp': 6, 'match': None}),
                ('7 f', {'timestamp': 7, 'match': None}),
                ('8 abcd', {'timestamp': 8, 'match': 'abcd'}),
                ('9 a', {'timestamp': 9, 'match': None}),
                ('10 a', {'timestamp': 10, 'match': None}),
                ('11 a', {'timestamp': 11, 'match': None}),
                ('12 a', {'timestamp': 12, 'match': None}),
                ('13 a', {'timestamp': 13, 'match': None}),
                ('14 a', {'timestamp': 14, 'match': None}),
                ('15 a', {'timestamp': 15, 'match': None}),
                ('16 abcd', {'timestamp': 16, 'match': 'abcd'})]
        self.tested = SimpleContextFileParser(RowGetterStub(data), context_size=3)

    def test_parsing_c1(self):
        """Context size 1: no rows exist after the final match."""
        self.tested.set_context_size(1)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(4, '4 d'),
                            (5, '5 abcd'),
                            (6, '6 e'),
                            (7, '7 f'),
                            (8, '8 abcd'),
                            (9, '9 a'),
                            (15, '15 a'),
                            (16, '16 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c2(self):
        """Context size 2: output stops cleanly at end-of-stream."""
        self.tested.set_context_size(2)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(3, '3 c'),
                            (4, '4 d'),
                            (5, '5 abcd'),
                            (6, '6 e'),
                            (7, '7 f'),
                            (8, '8 abcd'),
                            (9, '9 a'),
                            (10, '10 a'),
                            (14, '14 a'),
                            (15, '15 a'),
                            (16, '16 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c3(self):
        """Default context size 3 (set in setUp)."""
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(2, '2 b'),
                            (3, '3 c'),
                            (4, '4 d'),
                            (5, '5 abcd'),
                            (6, '6 e'),
                            (7, '7 f'),
                            (8, '8 abcd'),
                            (9, '9 a'),
                            (10, '10 a'),
                            (11, '11 a'),
                            (13, '13 a'),
                            (14, '14 a'),
                            (15, '15 a'),
                            (16, '16 abcd')]
        self.assertEqual(results, required_results)
class ThreadCommonBufferParserTest(unittest.TestCase):
    """ThreadContextCommonBufferFileParser: context is counted per thread.

    Rows are tagged T1/T2/T3; matches occur on T1 (ts 5) and T2 (ts 11).
    Only same-thread rows should appear as context around each match.
    """

    def setUp(self):
        data = [('1 T1 a', {'timestamp': 1, 'match': None, 'thread': 'T1'}),
                ('2 T1 b', {'timestamp': 2, 'match': None, 'thread': 'T1'}),
                ('3 T2 c', {'timestamp': 3, 'match': None, 'thread': 'T2'}),
                ('4 T2 d', {'timestamp': 4, 'match': None, 'thread': 'T2'}),
                ('5 T1 abcd', {'timestamp': 5, 'match': 'abcd', 'thread': 'T1'}),
                ('6 T1 e', {'timestamp': 6, 'match': None, 'thread': 'T1'}),
                ('7 T2 f', {'timestamp': 7, 'match': None, 'thread': 'T2'}),
                ('8 T2 a', {'timestamp': 8, 'match': None, 'thread': 'T2'}),
                ('9 T2 a', {'timestamp': 9, 'match': None, 'thread': 'T2'}),
                ('10 T1 a', {'timestamp': 10, 'match': None, 'thread': 'T1'}),
                ('11 T2 abcd', {'timestamp': 11, 'match': 'abcd', 'thread': 'T2'})]
        self.tested = ThreadContextCommonBufferFileParser(RowGetterStub(data), context_size=3)

    def test_parsing_c3(self):
        """Context size 3 over the shared buffer: nearest same-thread rows only."""
        self.tested.set_context_size(3)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(2, '2 T1 b'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c4(self):
        """Context size 4 pulls in one more row per thread."""
        self.tested.set_context_size(4)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (7, '7 T2 f'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c5(self):
        """Context size 5 includes the trailing T1 row as well."""
        self.tested.set_context_size(5)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (7, '7 T2 f'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)
class ThreadCommonBufferParserTestThickMatches(unittest.TestCase):
    """ThreadContextCommonBufferFileParser with closely spaced matches.

    T1 carries two matches only two rows apart (ts 5 and 7), so their
    contexts overlap; a third match arrives on T2 (ts 11).
    """

    def setUp(self):
        data = [('1 T1 a', {'timestamp': 1, 'match': None, 'thread': 'T1'}),
                ('2 T1 b', {'timestamp': 2, 'match': None, 'thread': 'T1'}),
                ('3 T1 c', {'timestamp': 3, 'match': None, 'thread': 'T1'}),
                ('4 T1 d', {'timestamp': 4, 'match': None, 'thread': 'T1'}),
                ('5 T1 abcd', {'timestamp': 5, 'match': 'abcd', 'thread': 'T1'}),
                ('6 T1 e', {'timestamp': 6, 'match': None, 'thread': 'T1'}),
                ('7 T1 abcd', {'timestamp': 7, 'match': 'abcd', 'thread': 'T1'}),
                ('8 T2 a', {'timestamp': 8, 'match': None, 'thread': 'T2'}),
                ('9 T2 a', {'timestamp': 9, 'match': None, 'thread': 'T2'}),
                ('10 T1 a', {'timestamp': 10, 'match': None, 'thread': 'T1'}),
                ('11 T2 abcd', {'timestamp': 11, 'match': 'abcd', 'thread': 'T2'})]
        self.tested = ThreadContextCommonBufferFileParser(RowGetterStub(data), context_size=3)

    def test_parsing_c3(self):
        """Context size 3: the overlapping contexts cover nearly every row."""
        self.tested.set_context_size(3)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(2, '2 T1 b'),
                            (3, '3 T1 c'),
                            (4, '4 T1 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (7, '7 T1 abcd'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c4(self):
        """Context size 4: every row in the fixture is emitted."""
        self.tested.set_context_size(4)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (3, '3 T1 c'),
                            (4, '4 T1 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (7, '7 T1 abcd'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c5(self):
        """Context size 5: identical to c4 - the fixture is already exhausted."""
        self.tested.set_context_size(5)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (3, '3 T1 c'),
                            (4, '4 T1 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (7, '7 T1 abcd'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)
class SingleThreadParserTest(unittest.TestCase):
    """SingleThreadContextFileParser: only the matching row's thread is kept.

    Matches occur on T1 (ts 5, 12) and T2 (ts 11); the expected output below
    contains T1 rows only, so presumably the parser locks onto the first
    matching thread - confirm against SingleThreadContextFileParser's docs.
    """

    def setUp(self):
        data = [('1 T1 a', {'timestamp': 1, 'match': None, 'thread': 'T1'}),
                ('2 T1 b', {'timestamp': 2, 'match': None, 'thread': 'T1'}),
                ('4 T2 d', {'timestamp': 4, 'match': None, 'thread': 'T2'}),
                ('5 T1 abcd', {'timestamp': 5, 'match': 'abcd', 'thread': 'T1'}),
                ('6 T1 e', {'timestamp': 6, 'match': None, 'thread': 'T1'}),
                ('7 T2 f', {'timestamp': 7, 'match': None, 'thread': 'T2'}),
                ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
                ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
                ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
                ('8 T2 a', {'timestamp': 8, 'match': None, 'thread': 'T2'}),
                ('9 T2 a', {'timestamp': 9, 'match': None, 'thread': 'T2'}),
                ('10 T1 a', {'timestamp': 10, 'match': None, 'thread': 'T1'}),
                ('11 T2 abcd', {'timestamp': 11, 'match': 'abcd', 'thread': 'T2'}),
                ('12 T1 abcd', {'timestamp': 12, 'match': 'abcd', 'thread': 'T1'}),
                ('13 T1 a', {'timestamp': 13, 'match': None, 'thread': 'T1'}),
                ('14 T1 a', {'timestamp': 14, 'match': None, 'thread': 'T1'})]
        self.tested = SingleThreadContextFileParser(RowGetterStub(data), context_size=3)

    def test_parsing_c1(self):
        """Context size 1: single-row T1 context around each T1 match."""
        self.tested.set_context_size(1)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (10, '10 T1 a'),
                            (12, '12 T1 abcd'),
                            (13, '13 T1 a')]
        self.assertEqual(results, required_results)

    def test_parsing_c2(self):
        """Context size 2 widens the T1 window."""
        self.tested.set_context_size(2)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(2, '2 T1 b'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (10, '10 T1 a'),
                            (12, '12 T1 abcd'),
                            (13, '13 T1 a'),
                            (14, '14 T1 a')]
        self.assertEqual(results, required_results)

    def test_parsing_c3(self):
        """Context size 3 covers all T1 rows in the fixture."""
        self.tested.set_context_size(3)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (5, '5 T1 abcd'),
                            (6, '6 T1 e'),
                            (10, '10 T1 a'),
                            (12, '12 T1 abcd'),
                            (13, '13 T1 a'),
                            (14, '14 T1 a')]
        self.assertEqual(results, required_results)
class MultiThreadParserTest(unittest.TestCase):
    """MultiThreadContextFileParser: per-thread context for every matching thread.

    Matches occur on T1 (ts 5) and T2 (ts 6 and 11); T3 never matches, and
    none of its rows appear in any expected output.
    """

    def setUp(self):
        data = [('1 T1 a', {'timestamp': 1, 'match': None, 'thread': 'T1'}),
                ('2 T1 b', {'timestamp': 2, 'match': None, 'thread': 'T1'}),
                ('3 T2 c', {'timestamp': 3, 'match': None, 'thread': 'T2'}),
                ('4 T2 d', {'timestamp': 4, 'match': None, 'thread': 'T2'}),
                ('5 T1 abcd', {'timestamp': 5, 'match': 'abcd', 'thread': 'T1'}),
                ('6 T2 abcd', {'timestamp': 6, 'match': 'abcd', 'thread': 'T2'}),
                ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
                ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
                ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
                ('7 T2 f', {'timestamp': 7, 'match': None, 'thread': 'T2'}),
                ('8 T2 a', {'timestamp': 8, 'match': None, 'thread': 'T2'}),
                ('9 T2 a', {'timestamp': 9, 'match': None, 'thread': 'T2'}),
                ('10 T1 a', {'timestamp': 10, 'match': None, 'thread': 'T1'}),
                ('11 T2 abcd', {'timestamp': 11, 'match': 'abcd', 'thread': 'T2'})]
        self.tested = MultiThreadContextFileParser(RowGetterStub(data), context_size=3)

    def test_parsing_c1(self):
        """Context size 1: one same-thread row either side of each match.

        Note the (10, ...) T1 row is emitted before the (9, ...) T2 row -
        output is grouped per thread, not globally time-ordered.
        """
        self.tested.set_context_size(1)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(2, '2 T1 b'),
                            (4, '4 T2 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T2 abcd'),
                            (7, '7 T2 f'),
                            (10, '10 T1 a'),
                            (9, '9 T2 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c2(self):
        """Context size 2: wider per-thread windows, still no T3 rows."""
        self.tested.set_context_size(2)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (3, '3 T2 c'),
                            (4, '4 T2 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T2 abcd'),
                            (7, '7 T2 f'),
                            (8, '8 T2 a'),
                            (10, '10 T1 a'),
                            (9, '9 T2 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c3(self):
        """Context size 3: all T1/T2 rows, now in global order."""
        self.tested.set_context_size(3)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (3, '3 T2 c'),
                            (4, '4 T2 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T2 abcd'),
                            (7, '7 T2 f'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c4(self):
        """Context size 4: identical to c3 - the T1/T2 rows are exhausted."""
        self.tested.set_context_size(4)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (3, '3 T2 c'),
                            (4, '4 T2 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T2 abcd'),
                            (7, '7 T2 f'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)

    def test_parsing_c5(self):
        """Context size 5: identical to c3/c4."""
        self.tested.set_context_size(5)
        results = [(row_params['timestamp'], row) for row, row_params in self.tested]
        required_results = [(1, '1 T1 a'),
                            (2, '2 T1 b'),
                            (3, '3 T2 c'),
                            (4, '4 T2 d'),
                            (5, '5 T1 abcd'),
                            (6, '6 T2 abcd'),
                            (7, '7 T2 f'),
                            (8, '8 T2 a'),
                            (9, '9 T2 a'),
                            (10, '10 T1 a'),
                            (11, '11 T2 abcd')]
        self.assertEqual(results, required_results)
class MultiThreadBlobParserTest(unittest.TestCase):
    """Tests for MultiThreadBlobbingContextFileParser on a fixed row set."""
    # Expected (timestamp, row) sequence when the context window is wide
    # enough to keep every non-T3 row; shared by the c3/c4/c5 cases.
    _FULL_EXPECTED = [(1, '1 T1 a'), (2, '2 T1 b'), (5, '5 T1 abcd'),
                      (3, '3 T2 c'), (4, '4 T2 d'), (6, '6 T2 abcd'),
                      (7, '7 T2 f'), (8, '8 T2 a'), (9, '9 T2 a'),
                      (10, '10 T1 a'), (11, '11 T2 abcd')]
    def setUp(self):
        rows = [
            ('1 T1 a', {'timestamp': 1, 'match': None, 'thread': 'T1'}),
            ('2 T1 b', {'timestamp': 2, 'match': None, 'thread': 'T1'}),
            ('3 T2 c', {'timestamp': 3, 'match': None, 'thread': 'T2'}),
            ('4 T2 d', {'timestamp': 4, 'match': None, 'thread': 'T2'}),
            ('5 T1 abcd', {'timestamp': 5, 'match': 'abcd', 'thread': 'T1'}),
            ('6 T2 abcd', {'timestamp': 6, 'match': 'abcd', 'thread': 'T2'}),
            ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
            ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
            ('7 T3 a', {'timestamp': 7, 'match': None, 'thread': 'T3'}),
            ('7 T2 f', {'timestamp': 7, 'match': None, 'thread': 'T2'}),
            ('8 T2 a', {'timestamp': 8, 'match': None, 'thread': 'T2'}),
            ('9 T2 a', {'timestamp': 9, 'match': None, 'thread': 'T2'}),
            ('10 T1 a', {'timestamp': 10, 'match': None, 'thread': 'T1'}),
            ('11 T2 abcd', {'timestamp': 11, 'match': 'abcd', 'thread': 'T2'}),
        ]
        self.tested = MultiThreadBlobbingContextFileParser(
            RowGetterStub(rows), context_size=3)
    def _check(self, context_size, expected):
        """Parse with the given context size and compare (timestamp, row) pairs."""
        self.tested.set_context_size(context_size)
        actual = [(params['timestamp'], row) for row, params in self.tested]
        self.assertEqual(actual, expected)
    def test_parsing_c1(self):
        self._check(1, [(2, '2 T1 b'),
                        (5, '5 T1 abcd'),
                        (4, '4 T2 d'),
                        (6, '6 T2 abcd'),
                        (7, '7 T2 f'),
                        (10, '10 T1 a'),
                        (9, '9 T2 a'),
                        (11, '11 T2 abcd')])
    def test_parsing_c2(self):
        self._check(2, [(1, '1 T1 a'),
                        (2, '2 T1 b'),
                        (5, '5 T1 abcd'),
                        (3, '3 T2 c'),
                        (4, '4 T2 d'),
                        (6, '6 T2 abcd'),
                        (7, '7 T2 f'),
                        (8, '8 T2 a'),
                        (10, '10 T1 a'),
                        (9, '9 T2 a'),
                        (11, '11 T2 abcd')])
    def test_parsing_c3(self):
        self._check(3, self._FULL_EXPECTED)
    def test_parsing_c4(self):
        self._check(4, self._FULL_EXPECTED)
    def test_parsing_c5(self):
        self._check(5, self._FULL_EXPECTED)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
"""The tests for the Demo Media player platform."""
import socket
import unittest
import soco.snapshot
from unittest import mock
import soco
from homeassistant.bootstrap import setup_component
from homeassistant.components.media_player import sonos, DOMAIN
from homeassistant.components.media_player.sonos import CONF_INTERFACE_ADDR, \
CONF_ADVERTISE_ADDR
from homeassistant.const import CONF_HOSTS, CONF_PLATFORM
from tests.common import get_test_home_assistant
ENTITY_ID = 'media_player.kitchen'
class socoDiscoverMock():
    """Mock class for the soco.discover method."""

    @staticmethod
    def discover(interface_addr):
        """Return a set of SoCoMock objects representing found speakers.

        Declared as a staticmethod because ``socoDiscoverMock.discover`` is
        installed in place of the module-level ``soco.discover`` function and
        is therefore called without an instance.  (The original declared it as
        a plain method without ``self`` and documented the return as a tuple,
        though a set is returned.)
        """
        return {SoCoMock('192.0.2.1')}
class AvTransportMock():
    """Mock of the avTransport service exposed by a soco.SoCo object."""

    def GetMediaInfo(self, _):
        """Return media details for an idle speaker (empty URI and metadata)."""
        media_info = {
            'CurrentURI': '',
            'CurrentURIMetaData': ''
        }
        return media_info
class SoCoMock():
    """Stand-in for a ``soco.SoCo`` speaker that returns canned data."""

    def __init__(self, ip):
        """Record the speaker address and attach a mocked transport."""
        self.ip_address = ip
        self.is_visible = True
        self.avTransport = AvTransportMock()

    def get_speaker_info(self, force):
        """Return canned data points about the speaker."""
        return dict(serial_number='B8-E9-37-BO-OC-BA:2',
                    software_version='32.11-30071',
                    uid='RINCON_B8E937BOOCBA02500',
                    zone_icon='x-rincon-roomicon:kitchen',
                    mac_address='B8:E9:37:BO:OC:BA',
                    zone_name='Kitchen',
                    hardware_version='1.8.1.2-1')

    def get_current_transport_info(self):
        """Return a stopped transport state."""
        return dict(current_transport_speed='1',
                    current_transport_state='STOPPED',
                    current_transport_status='OK')

    def get_current_track_info(self):
        """Return empty current-track information."""
        return dict(album='',
                    uri='',
                    title='',
                    artist='',
                    duration='0:00:00',
                    album_art='',
                    position='0:00:00',
                    playlist_position='0',
                    metadata='')

    def is_coordinator(self):
        """Pretend this speaker coordinates its group."""
        return True

    def partymode(self):
        """No-op stand-in for joining all speakers in the network."""
        return

    def unjoin(self):
        """No-op stand-in for leaving the speaker group."""
        return

    def set_sleep_timer(self, sleep_time_seconds):
        """No-op stand-in for setting the sleep timer."""
        return

    def clear_sleep_timer(self):
        """No-op stand-in for clearing the sleep timer."""
        return

    def uid(self):
        """Return a fixed player uid."""
        return "RINCON_XXXXXXXXXXXXXXXXX"
def fake_add_device(devices, update_before_add=False):
    """Fake Home Assistant ``add_devices`` callback: optionally update devices.

    Mirrors the platform callback signature ``add_devices(devices,
    update_before_add)``.  The original misspelled the keyword as
    ``update_befor_add``, which would raise TypeError if the platform invoked
    the callback by keyword, as Home Assistant does.
    """
    if update_before_add:
        for speaker in devices:
            speaker.update()
class TestSonosMediaPlayer(unittest.TestCase):
    """Test the media_player module.

    All tests patch ``socket.create_connection`` to raise so no real network
    I/O can occur, and patch ``soco.SoCo`` with :class:`SoCoMock` so the sonos
    platform operates on canned speakers.  State accumulates in the
    module-global ``sonos.DEVICES`` list, which is reset in ``tearDown``.
    """
    # pylint: disable=invalid-name
    def setUp(self):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        def monkey_available(self):
            """Make a monkey available."""
            return True
        # Monkey patches
        # NOTE(review): forces SonosDevice.available to report True for every
        # device so mocked speakers are not filtered out; the original
        # attribute is saved and restored in tearDown.
        self.real_available = sonos.SonosDevice.available
        sonos.SonosDevice.available = monkey_available
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
        # Monkey patches
        sonos.SonosDevice.available = self.real_available
        # Clear the module-global registry so tests stay independent.
        sonos.DEVICES = []
        self.hass.stop()
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_discovery(self, *args):
        """Test a single device using the autodiscovery provided by HASS."""
        # The trailing '192.0.2.1' is the discovery_info host for the speaker.
        sonos.setup_platform(self.hass, {}, fake_add_device, '192.0.2.1')
        self.assertEqual(len(sonos.DEVICES), 1)
        self.assertEqual(sonos.DEVICES[0].name, 'Kitchen')
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch('soco.discover')
    def test_ensure_setup_config_interface_addr(self, discover_mock, *args):
        """Test a interface address config'd by the HASS config file."""
        discover_mock.return_value = {SoCoMock('192.0.2.1')}
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_INTERFACE_ADDR: '192.0.1.1',
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        self.assertEqual(len(sonos.DEVICES), 1)
        self.assertEqual(discover_mock.call_count, 1)
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch('soco.discover')
    def test_ensure_setup_config_advertise_addr(self, discover_mock,
                                                *args):
        """Test a advertise address config'd by the HASS config file."""
        discover_mock.return_value = {SoCoMock('192.0.2.1')}
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_ADVERTISE_ADDR: '192.0.1.1',
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        self.assertEqual(len(sonos.DEVICES), 1)
        self.assertEqual(discover_mock.call_count, 1)
        # The advertise address must be forwarded into soco's global config.
        self.assertEqual(soco.config.EVENT_ADVERTISE_IP, '192.0.1.1')
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_config_hosts_string_single(self, *args):
        """Test a single address config'd by the HASS config file."""
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_HOSTS: ['192.0.2.1'],
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        self.assertEqual(len(sonos.DEVICES), 1)
        self.assertEqual(sonos.DEVICES[0].name, 'Kitchen')
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_config_hosts_string_multiple(self, *args):
        """Test multiple address string config'd by the HASS config file."""
        # A single comma-separated string must be split into two hosts.
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_HOSTS: ['192.0.2.1,192.168.2.2'],
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        self.assertEqual(len(sonos.DEVICES), 2)
        self.assertEqual(sonos.DEVICES[0].name, 'Kitchen')
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_config_hosts_list(self, *args):
        """Test a multiple address list config'd by the HASS config file."""
        config = {
            DOMAIN: {
                CONF_PLATFORM: 'sonos',
                CONF_HOSTS: ['192.0.2.1', '192.168.2.2'],
            }
        }
        assert setup_component(self.hass, DOMAIN, config)
        self.assertEqual(len(sonos.DEVICES), 2)
        self.assertEqual(sonos.DEVICES[0].name, 'Kitchen')
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch.object(soco, 'discover', new=socoDiscoverMock.discover)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    def test_ensure_setup_sonos_discovery(self, *args):
        """Test a single device using the autodiscovery provided by Sonos."""
        sonos.setup_platform(self.hass, {}, fake_add_device)
        self.assertEqual(len(sonos.DEVICES), 1)
        self.assertEqual(sonos.DEVICES[0].name, 'Kitchen')
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(SoCoMock, 'partymode')
    def test_sonos_group_players(self, partymodeMock, *args):
        """Ensuring soco methods called for sonos_group_players service."""
        sonos.setup_platform(self.hass, {}, fake_add_device, '192.0.2.1')
        device = sonos.DEVICES[-1]
        partymodeMock.return_value = True
        device.group_players()
        self.assertEqual(partymodeMock.call_count, 1)
        self.assertEqual(partymodeMock.call_args, mock.call())
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(SoCoMock, 'unjoin')
    def test_sonos_unjoin(self, unjoinMock, *args):
        """Ensuring soco methods called for sonos_unjoin service."""
        sonos.setup_platform(self.hass, {}, fake_add_device, '192.0.2.1')
        device = sonos.DEVICES[-1]
        unjoinMock.return_value = True
        device.unjoin()
        self.assertEqual(unjoinMock.call_count, 1)
        self.assertEqual(unjoinMock.call_args, mock.call())
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(SoCoMock, 'set_sleep_timer')
    def test_sonos_set_sleep_timer(self, set_sleep_timerMock, *args):
        """Ensuring soco methods called for sonos_set_sleep_timer service."""
        sonos.setup_platform(self.hass, {}, fake_add_device, '192.0.2.1')
        device = sonos.DEVICES[-1]
        device.set_sleep_timer(30)
        set_sleep_timerMock.assert_called_once_with(30)
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(SoCoMock, 'set_sleep_timer')
    def test_sonos_clear_sleep_timer(self, set_sleep_timerMock, *args):
        """Ensuring soco methods called for sonos_clear_sleep_timer service."""
        # Clearing is implemented as setting the timer to None.
        sonos.setup_platform(self.hass, {}, mock.MagicMock(), '192.0.2.1')
        device = sonos.DEVICES[-1]
        device.set_sleep_timer(None)
        set_sleep_timerMock.assert_called_once_with(None)
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(soco.snapshot.Snapshot, 'snapshot')
    def test_sonos_snapshot(self, snapshotMock, *args):
        """Ensuring soco methods called for sonos_snapshot service."""
        sonos.setup_platform(self.hass, {}, fake_add_device, '192.0.2.1')
        device = sonos.DEVICES[-1]
        snapshotMock.return_value = True
        device.snapshot()
        self.assertEqual(snapshotMock.call_count, 1)
        self.assertEqual(snapshotMock.call_args, mock.call())
    @mock.patch('soco.SoCo', new=SoCoMock)
    @mock.patch('socket.create_connection', side_effect=socket.error())
    @mock.patch.object(soco.snapshot.Snapshot, 'restore')
    def test_sonos_restore(self, restoreMock, *args):
        """Ensuring soco methods called for sonos_restor service."""
        sonos.setup_platform(self.hass, {}, fake_add_device, '192.0.2.1')
        device = sonos.DEVICES[-1]
        restoreMock.return_value = True
        device.restore()
        self.assertEqual(restoreMock.call_count, 1)
        # restore is expected to be invoked with fade=True (positional).
        self.assertEqual(restoreMock.call_args, mock.call(True))
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import changelog
from .base import ChangelogsTransport, DEFAULT_CLIENT_INFO
from .grpc import ChangelogsGrpcTransport
class ChangelogsGrpcAsyncIOTransport(ChangelogsTransport):
    """gRPC AsyncIO backend transport for Changelogs.

    Service for managing
    [Changelogs][google.cloud.dialogflow.cx.v3beta1.Changelog].

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Class-level annotation/default; each instance re-creates its own
    # ``_stubs`` dict in __init__, so the class-level dict is never shared.
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            # No channel given: resolve which SSL credentials to use, first
            # honoring the deprecated api_mtls_endpoint/client_cert_source
            # pair, then the newer client_cert_source_for_mtls callback.
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.

        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def list_changelogs(
        self,
    ) -> Callable[
        [changelog.ListChangelogsRequest], Awaitable[changelog.ListChangelogsResponse]
    ]:
        r"""Return a callable for the list changelogs method over gRPC.

        Returns the list of Changelogs.

        Returns:
            Callable[[~.ListChangelogsRequest],
                    Awaitable[~.ListChangelogsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_changelogs" not in self._stubs:
            self._stubs["list_changelogs"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Changelogs/ListChangelogs",
                request_serializer=changelog.ListChangelogsRequest.serialize,
                response_deserializer=changelog.ListChangelogsResponse.deserialize,
            )
        return self._stubs["list_changelogs"]
    @property
    def get_changelog(
        self,
    ) -> Callable[[changelog.GetChangelogRequest], Awaitable[changelog.Changelog]]:
        r"""Return a callable for the get changelog method over gRPC.

        Retrieves the specified Changelog.

        Returns:
            Callable[[~.GetChangelogRequest],
                    Awaitable[~.Changelog]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_changelog" not in self._stubs:
            self._stubs["get_changelog"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Changelogs/GetChangelog",
                request_serializer=changelog.GetChangelogRequest.serialize,
                response_deserializer=changelog.Changelog.deserialize,
            )
        return self._stubs["get_changelog"]
    def close(self):
        """Release transport resources by closing the underlying gRPC channel."""
        return self.grpc_channel.close()
# Explicit public API of this module.
__all__ = ("ChangelogsGrpcAsyncIOTransport",)
| |
#!/usr/bin/env python
# coding:utf-8
import errno
import time
import re
import socket
import ssl
import httplib
import OpenSSL
NetWorkIOError = (socket.error, ssl.SSLError, OpenSSL.SSL.Error, OSError)
from connect_manager import https_manager
from gae_handler import return_fail_message
from google_ip import google_ip
from config import config
from xlog import getLogger
xlog = getLogger("gae_proxy")
#"GAE", "Google Frontend", "GSE", "GFE/2.0",
google_server_types = ["ClientMapServer"]
def send_header(wfile, keyword, value):
    """Write one HTTP response header line (CRLF-terminated) to wfile.

    Header names are title-cased; Set-Cookie values folded into one line are
    split back into one header per cookie; unquoted Content-Disposition
    filenames are quoted; Alternate-Protocol headers are dropped entirely.
    """
    keyword = keyword.title()
    if keyword == "Alternate-Protocol":
        # Never forwarded to the client.
        return
    if keyword == 'Set-Cookie':
        # Undo header folding: emit one Set-Cookie line per cookie.
        for cookie in re.split(r', (?=[^ =]+(?:=|$))', value):
            wfile.write("%s: %s\r\n" % (keyword, cookie))
        return
    if keyword == 'Content-Disposition' and '"' not in value:
        value = re.sub(r'filename=([^"\']+)', 'filename="\\1"', value)
    wfile.write("%s: %s\r\n" % (keyword, value))
def fetch(method, host, path, headers, payload, bufsize=8192):
    """Send one HTTPS request on a pooled connection and return the response.

    Returns the httplib.HTTPResponse with the originating ``ssl_sock``
    attached as ``response.ssl_sock``, or None when no connection is
    available or the request fails.
    NOTE(review): ``bufsize`` is currently unused.
    """
    # Build the raw request head by hand from the method/path and headers.
    request_data = '%s %s HTTP/1.1\r\n' % (method, path)
    request_data += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items())
    request_data += '\r\n'
    ssl_sock = https_manager.get_ssl_connection(host)
    if not ssl_sock:
        # No usable pooled connection; caller treats None as failure.
        return
    try:
        ssl_sock.send(request_data.encode())
        payload_len = len(payload)
        start = 0
        while start < payload_len:
            # send() may accept fewer bytes than requested, so advance by
            # its return value; body is pushed in chunks of at most 65535.
            send_size = min(payload_len - start, 65535)
            sended = ssl_sock.send(payload[start:start+send_size])
            start += sended
        response = httplib.HTTPResponse(ssl_sock, buffering=True)
        # Remember which socket produced this response so callers can reuse
        # it or report it on failure.
        response.ssl_sock = ssl_sock
        # Use a generous timeout only while reading the status/headers,
        # then restore the socket's original timeout.
        orig_timeout = ssl_sock.gettimeout()
        ssl_sock.settimeout(90)
        response.begin()
        ssl_sock.settimeout(orig_timeout)
    except httplib.BadStatusLine as e:
        xlog.warn("direct_handler.fetch bad status line:%r", e)
        google_ip.report_connect_closed(ssl_sock.ip, "request_fail")
        response = None
    except Exception as e:
        xlog.warn("direct_handler.fetch:%r", e)
        google_ip.report_connect_closed(ssl_sock.ip, "request_fail")
        response = None
    return response
def handler(method, host, url, headers, body, wfile):
    """Proxy one request directly to Google and relay the response to wfile.

    Retries fetch() for up to 30 seconds until a response from a
    Google-looking server is obtained, then streams the body back either
    chunked (Transfer-Encoding) or by Content-Length/Content-Range.
    NOTE(review): this module is Python 2 (``except X, e:`` syntax, ``e[0]``
    indexing, ``httplib``).
    """
    time_request = time.time()
    if "Connection" in headers and headers["Connection"] == "close":
        # Connection management is handled here, not by the origin.
        del headers["Connection"]
    errors = []
    response = None
    while True:
        # Give up after 30 seconds of retries.
        if time.time() - time_request > 30:
            return return_fail_message(wfile)
        try:
            response = fetch(method, host, url, headers, body)
            if response:
                if response.status > 400:
                    # Heuristic: on errors, require a Google-ish Server header
                    # (contains G/g or is whitelisted) before trusting the IP.
                    server_type = response.getheader('Server', "")
                    if "G" not in server_type and "g" not in server_type and server_type not in google_server_types:
                        xlog.warn("IP:%s host:%s not support GAE, server type:%s status:%d", response.ssl_sock.ip, host, server_type, response.status)
                        google_ip.report_connect_fail(response.ssl_sock.ip)
                        response.close()
                        continue
                break
        except OpenSSL.SSL.SysCallError as e:
            errors.append(e)
            xlog.warn("direct_handler.handler err:%r %s/%s", e, host, url)
        except Exception as e:
            errors.append(e)
            xlog.exception('direct_handler.handler %r %s %s , retry...', e, host, url)
    try:
        # Even if the browser connection dies we keep draining the response
        # so the upstream socket can be reused; send_to_browser tracks that.
        send_to_browser = True
        try:
            response_headers = dict((k.title(), v) for k, v in response.getheaders())
            wfile.write("HTTP/1.1 %d %s\r\n" % (response.status, response.reason))
            for key, value in response.getheaders():
                send_header(wfile, key, value)
            wfile.write("\r\n")
        except Exception as e:
            send_to_browser = False
            wait_time = time.time()-time_request
            xlog.warn("direct_handler.handler send response fail. t:%d e:%r %s%s", wait_time, e, host, url)
        if method == 'HEAD' or response.status in (204, 304):
            # No body expected; recycle the connection immediately.
            xlog.info("DIRECT t:%d %d %s %s", (time.time()-time_request)*1000, response.status, host, url)
            https_manager.save_ssl_connection_for_reuse(response.ssl_sock, host)
            response.close()
            return
        if 'Transfer-Encoding' in response_headers:
            # Chunked path: re-chunk the data for the browser ourselves.
            length = 0
            while True:
                try:
                    data = response.read(8192)
                except httplib.IncompleteRead, e:
                    # Python 2 httplib: use the partial data that did arrive.
                    data = e.partial
                except Exception as e:
                    google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
                    xlog.warn("direct_handler.handler send Transfer-Encoding t:%d e:%r %s/%s", time.time()-time_request, e, host, url)
                    response.close()
                    return
                if send_to_browser:
                    try:
                        if not data:
                            # Terminating zero-length chunk.
                            wfile.write('0\r\n\r\n')
                            break
                        length += len(data)
                        wfile.write('%x\r\n' % len(data))
                        wfile.write(data)
                        wfile.write('\r\n')
                    except Exception as e:
                        send_to_browser = False
                        xlog.warn("direct_handler.handler send Transfer-Encoding t:%d e:%r %s/%s", time.time()-time_request, e, host, url)
                else:
                    if not data:
                        break
            https_manager.save_ssl_connection_for_reuse(response.ssl_sock, host)
            response.close()
            # NOTE(review): "chucked" in the log message is a long-standing
            # typo for "chunked" (kept byte-identical here).
            xlog.info("DIRECT chucked t:%d s:%d %d %s %s", (time.time()-time_request)*1000, length, response.status, host, url)
            return
        # Length-delimited path: honor Content-Range when present.
        content_length = int(response.getheader('Content-Length', 0))
        content_range = response.getheader('Content-Range', '')
        if content_range:
            start, end, length = tuple(int(x) for x in re.search(r'bytes (\d+)-(\d+)/(\d+)', content_range).group(1, 2, 3))
        else:
            start, end, length = 0, content_length-1, content_length
        time_last_read = time.time()
        while True:
            if start > end:
                # Whole range delivered; recycle the upstream connection.
                https_manager.save_ssl_connection_for_reuse(response.ssl_sock, host, call_time=time_request)
                xlog.info("DIRECT t:%d s:%d %d %s %s", (time.time()-time_request)*1000, length, response.status, host, url)
                return
            data = response.read(config.AUTORANGE_BUFSIZE)
            if not data:
                if time.time() - time_last_read > 20:
                    # No data for 20s: treat the upstream as dead.
                    google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
                    response.close()
                    xlog.warn("read timeout t:%d len:%d left:%d %s %s", (time.time()-time_request)*1000, length, (end-start), host, url)
                    return
                else:
                    time.sleep(0.1)
                    continue
            time_last_read = time.time()
            data_len = len(data)
            start += data_len
            if send_to_browser:
                try:
                    ret = wfile.write(data)
                    if ret == ssl.SSL_ERROR_WANT_WRITE or ret == ssl.SSL_ERROR_WANT_READ:
                        # Retry once on a non-blocking SSL "want" result.
                        xlog.debug("send to browser wfile.write ret:%d", ret)
                        ret = wfile.write(data)
                except Exception as e_b:
                    # NOTE(review): both branches below log the identical
                    # message; the distinction is currently dead code.
                    if e_b[0] in (errno.ECONNABORTED, errno.EPIPE, errno.ECONNRESET) or 'bad write retry' in repr(e_b):
                        xlog.warn('direct_handler send to browser return %r %s %r', e_b, host, url)
                    else:
                        xlog.warn('direct_handler send to browser return %r %s %r', e_b, host, url)
                    send_to_browser = False
    except NetWorkIOError as e:
        google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
        time_except = time.time()
        time_cost = time_except - time_request
        if e[0] in (errno.ECONNABORTED, errno.EPIPE) or 'bad write retry' in repr(e):
            xlog.exception("direct_handler err:%r %s %s time:%d", e, host, url, time_cost)
        else:
            xlog.exception("direct_handler except:%r %s %s", e, host, url)
    except Exception as e:
        google_ip.report_connect_closed(response.ssl_sock.ip, "receive fail")
        xlog.exception("direct_handler except:%r %s %s", e, host, url)
| |
import itertools
from sympy import (Add, Pow, Symbol, exp, sqrt, symbols, sympify, cse,
Matrix, S, cos, sin, Eq, Function, Tuple, RootOf,
IndexedBase, Idx, Piecewise, O)
from sympy.simplify.cse_opts import sub_pre, sub_post
from sympy.functions.special.hyper import meijerg
from sympy.simplify import cse_main, cse_opts
from sympy.utilities.pytest import XFAIL, raises
from sympy.matrices import (eye, SparseMatrix, MutableDenseMatrix,
MutableSparseMatrix, ImmutableDenseMatrix, ImmutableSparseMatrix)
from sympy.matrices.expressions import MatrixSymbol
from sympy.core.compatibility import range
# Shared symbols used by the CSE tests below; x0..x12 match the names
# cse() generates for its substitution symbols.
w, x, y, z = symbols('w,x,y,z')
x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12 = symbols('x:13')
def test_numbered_symbols():
    """numbered_symbols yields Symbol('<prefix><n>') for n = 0, 1, 2, ..."""
    gen = cse_main.numbered_symbols(prefix='y')
    assert list(itertools.islice(gen, 0, 10)) == [
        Symbol('y%s' % i) for i in range(0, 10)]
    gen = cse_main.numbered_symbols(prefix='y')
    assert list(itertools.islice(gen, 10, 20)) == [
        Symbol('y%s' % i) for i in range(10, 20)]
    # Default prefix is 'x'.
    gen = cse_main.numbered_symbols()
    assert list(itertools.islice(gen, 0, 10)) == [
        Symbol('x%s' % i) for i in range(0, 10)]
# Dummy "optimization" functions for testing.
def opt1(expr):
    """Toy pre/post hook used below: add the global symbol y."""
    return expr + y
def opt2(expr):
    """Toy pre/post hook used below: multiply by the global symbol z."""
    return expr*z
def test_preprocess_for_cse():
    """preprocess_for_cse applies each pair's *first* (pre) hook, in order."""
    pre = cse_main.preprocess_for_cse
    assert pre(x, [(opt1, None)]) == x + y
    assert pre(x, [(None, opt1)]) == x
    assert pre(x, [(None, None)]) == x
    assert pre(x, [(opt1, opt2)]) == x + y
    assert pre(x, [(opt1, None), (opt2, None)]) == (x + y)*z
def test_postprocess_for_cse():
    """postprocess_for_cse applies only the *second* element of each
    (pre, post) optimization pair, in reverse order."""
    assert cse_main.postprocess_for_cse(x, [(opt1, None)]) == x
    assert cse_main.postprocess_for_cse(x, [(None, opt1)]) == x + y
    assert cse_main.postprocess_for_cse(x, [(None, None)]) == x
    assert cse_main.postprocess_for_cse(x, [(opt1, opt2)]) == x*z
    # Note the reverse order of application.
    assert cse_main.postprocess_for_cse(
        x, [(None, opt1), (None, opt2)]) == x*z + y
def test_cse_single():
    """A repeated subexpression in a single expression is pulled out once."""
    expr = (x + y)**2 + sqrt(x + y)
    replacements, reduced = cse([expr])
    assert replacements == [(x0, x + y)]
    assert reduced == [sqrt(x0) + x0**2]
def test_cse_single2():
    """cse accepts a bare expression (or Matrix) as well as a list."""
    expr = (x + y)**2 + sqrt(x + y)
    replacements, reduced = cse(expr)
    assert replacements == [(x0, x + y)]
    assert reduced == [sqrt(x0) + x0**2]
    # A Matrix input comes back as a Matrix.
    replacements, reduced = cse(Matrix([[1]]))
    assert isinstance(reduced[0], Matrix)
def test_cse_not_possible():
    """When nothing is shared, cse returns no replacements and the input
    unchanged (including meijerg args that differ; issue 6329)."""
    # No substitution possible.
    e = Add(x, y)
    substs, reduced = cse([e])
    assert substs == []
    assert reduced == [x + y]
    # issue 6329
    eq = (meijerg((1, 2), (y, 4), (5,), [], x) +
          meijerg((1, 3), (y, 4), (5,), [], x))
    assert cse(eq) == ([], [eq])
def test_nested_substitution():
    """A shared subexpression nested under other operations is found."""
    expr = (w*x + y)**2 + sqrt(w*x + y)
    replacements, reduced = cse([expr])
    assert replacements == [(x0, w*x + y)]
    assert reduced == [sqrt(x0) + x0**2]
def test_subtraction_opt():
    """sub_pre/sub_post canonicalize signs so that (x - y)*(z - y) and its
    negation share one replacement; see also issue 4077."""
    # Make sure subtraction is optimized.
    e = (x - y)*(z - y) + exp((x - y)*(z - y))
    substs, reduced = cse(
        [e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
    assert substs == [(x0, (x - y)*(y - z))]
    assert reduced == [-x0 + exp(-x0)]
    e = -(x - y)*(z - y) + exp(-(x - y)*(z - y))
    substs, reduced = cse(
        [e], optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)])
    assert substs == [(x0, (x - y)*(y - z))]
    assert reduced == [x0 + exp(x0)]
    # issue 4077
    n = -1 + 1/x
    e = n/x/(-n)**2 - 1/n/x
    assert cse(e, optimizations=[(cse_opts.sub_pre, cse_opts.sub_post)]) == \
        ([], [0])
def test_multiple_expressions():
    """cse over a list shares subexpressions across all expressions, and
    the replacements do not depend on the order of the input list."""
    e1 = (x + y)*z
    e2 = (x + y)*w
    substs, reduced = cse([e1, e2])
    assert substs == [(x0, x + y)]
    assert reduced == [x0*z, x0*w]
    l = [w*x*y + z, w*y]
    substs, reduced = cse(l)
    rsubsts, _ = cse(reversed(l))
    assert substs == rsubsts
    assert reduced == [z + x*x0, x0]
    l = [w*x*y, w*x*y + z, w*y]
    substs, reduced = cse(l)
    rsubsts, _ = cse(reversed(l))
    assert substs == rsubsts
    assert reduced == [x1, x1 + z, x0]
    l = [(x - z)*(y - z), x - z, y - z]
    substs, reduced = cse(l)
    rsubsts, _ = cse(reversed(l))
    assert substs == [(x0, -z), (x1, x + x0), (x2, x0 + y)]
    assert rsubsts == [(x0, -z), (x1, x0 + y), (x2, x + x0)]
    assert reduced == [x1*x2, x1, x2]
    l = [w*y + w + x + y + z, w*x*y]
    assert cse(l) == ([(x0, w*y)], [w + x + x0 + y + z, x*x0])
    assert cse([x + y, x + y + z]) == ([(x0, x + y)], [x0, z + x0])
    assert cse([x + y, x + z]) == ([], [x + y, x + z])
    assert cse([x*y, z + x*y, x*y*z + 3]) == \
        ([(x0, x*y)], [x0, z + x0, 3 + x0*z])
@XFAIL  # CSE of non-commutative Mul terms is disabled
def test_non_commutative_cse():
    """Expected behavior *if* non-commutative CSE were enabled (XFAIL)."""
    A, B, C = symbols('A B C', commutative=False)
    l = [A*B*C, A*C]
    assert cse(l) == ([], l)
    l = [A*B*C, A*B]
    assert cse(l) == ([(x0, A*B)], [x0*C, x0])
# Test if CSE of non-commutative Mul terms is disabled
def test_bypass_non_commutatives():
    """cse must leave non-commutative products completely untouched."""
    A, B, C = symbols('A B C', commutative=False)
    for exprs in ([A*B*C, A*C], [A*B*C, A*B], [B*C, A*B*C]):
        assert cse(exprs) == ([], exprs)
@XFAIL  # CSE fails when replacing non-commutative sub-expressions
def test_non_commutative_order():
    """A replacement for a non-commutative subexpression would itself need
    to be non-commutative (XFAIL)."""
    A, B, C = symbols('A B C', commutative=False)
    x0 = symbols('x0', commutative=False)
    l = [B+C, A*(B+C)]
    assert cse(l) == ([(x0, B+C)], [x0, A*x0])
@XFAIL
def test_powers():
    """cse does not yet factor x*y out of x*y**2 (XFAIL)."""
    assert cse(x*y**2 + x*y) == ([(x0, x*y)], [x0*y + x0])
def test_issue_4498():
    """'basic' optimizations combine terms over a common denominator."""
    assert cse(w/(x - y) + z/(y - x), optimizations='basic') == \
        ([], [(w - z)/(x - y)])
def test_issue_4020():
    """'basic' optimizations still allow x**2 to be extracted as a CSE."""
    assert cse(x**5 + x**4 + x**3 + x**2, optimizations='basic') \
        == ([(x0, x**2)], [x0*(x**3 + x + x0 + 1)])
def test_issue_4203():
    """x**x is shared between the numerator and the denominator."""
    assert cse(sin(x**x)/x**x) == ([(x0, x**x)], [sin(x0)/x0])
def test_issue_6263():
    """An equation that simplifies to True passes through cse unchanged."""
    e = Eq(x*(-x + 1) + x*(x - 1), 0)
    assert cse(e, optimizations='basic') == ([], [True])
def test_dont_cse_tuples():
    """The point tuples of Subs are never replaced wholesale; only genuine
    subexpressions inside them (like x + y) are."""
    from sympy import Subs
    f = Function("f")
    g = Function("g")
    name_val, (expr,) = cse(
        Subs(f(x, y), (x, y), (0, 1))
        + Subs(g(x, y), (x, y), (0, 1)))
    assert name_val == []
    assert expr == (Subs(f(x, y), (x, y), (0, 1))
        + Subs(g(x, y), (x, y), (0, 1)))
    name_val, (expr,) = cse(
        Subs(f(x, y), (x, y), (0, x + y))
        + Subs(g(x, y), (x, y), (0, x + y)))
    assert name_val == [(x0, x + y)]
    assert expr == Subs(f(x, y), (x, y), (0, x0)) + \
        Subs(g(x, y), (x, y), (0, x0))
def test_pow_invpow():
    """A power and its reciprocal (x**2 and 1/x**2) share one replacement;
    only when the reciprocal appears alone is x**(-2) itself the CSE."""
    assert cse(1/x**2 + x**2) == \
        ([(x0, x**2)], [x0 + 1/x0])
    assert cse(x**2 + (1 + 1/x**2)/x**2) == \
        ([(x0, x**2), (x1, 1/x0)], [x0 + x1*(x1 + 1)])
    assert cse(1/x**2 + (1 + 1/x**2)*x**2) == \
        ([(x0, x**2), (x1, 1/x0)], [x0*(x1 + 1) + x1])
    assert cse(cos(1/x**2) + sin(1/x**2)) == \
        ([(x0, x**(-2))], [sin(x0) + cos(x0)])
    assert cse(cos(x**2) + sin(x**2)) == \
        ([(x0, x**2)], [sin(x0) + cos(x0)])
    assert cse(y/(2 + x**2) + z/x**2/y) == \
        ([(x0, x**2)], [y/(x0 + 2) + z/(x0*y)])
    assert cse(exp(x**2) + x**2*cos(1/x**2)) == \
        ([(x0, x**2)], [x0*cos(1/x0) + exp(x0)])
    assert cse((1 + 1/x**2)/x**2) == \
        ([(x0, x**(-2))], [x0*(x0 + 1)])
    assert cse(x**(2*y) + x**(-2*y)) == \
        ([(x0, x**(2*y))], [x0 + 1/x0])
def test_postprocess():
    """cse_separate moves symbol-defining equalities (here x = z + 1) from
    the expressions into the replacement list."""
    eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
    assert cse([eq, Eq(x, z + 1), z - 2, (z + 1)*(x + 1)],
        postprocess=cse_main.cse_separate) == \
        [[(x1, y + 1), (x2, z + 1), (x, x2), (x0, x + 1)],
        [x0 + exp(x0/x1) + cos(x1), z - 2, x0*x2]]
def test_issue_4499():
    """cse should produce a compact replacement set for this hypergeometric
    helper tuple (previously it generated 16 constants)."""
    # previously, this gave 16 constants
    from sympy.abc import a, b
    B = Function('B')
    G = Function('G')
    t = Tuple(*
        (a, a + S(1)/2, 2*a, b, 2*a - b + 1, (sqrt(z)/2)**(-2*a + 1)*B(2*a -
        b, sqrt(z))*B(b - 1, sqrt(z))*G(b)*G(2*a - b + 1),
        sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b,
        sqrt(z))*G(b)*G(2*a - b + 1), sqrt(z)*(sqrt(z)/2)**(-2*a + 1)*B(b - 1,
        sqrt(z))*B(2*a - b + 1, sqrt(z))*G(b)*G(2*a - b + 1),
        (sqrt(z)/2)**(-2*a + 1)*B(b, sqrt(z))*B(2*a - b + 1,
        sqrt(z))*G(b)*G(2*a - b + 1), 1, 0, S(1)/2, z/2, -b + 1, -2*a + b,
        -2*a))
    c = cse(t)
    ans = (
        [(x0, 2*a), (x1, -b), (x2, x1 + 1), (x3, x0 + x2), (x4, sqrt(z)), (x5,
        B(x0 + x1, x4)), (x6, G(b)), (x7, G(x3)), (x8, -x0), (x9,
        (x4/2)**(x8 + 1)), (x10, x6*x7*x9*B(b - 1, x4)), (x11, x6*x7*x9*B(b,
        x4)), (x12, B(x3, x4))], [(a, a + S(1)/2, x0, b, x3, x10*x5,
        x11*x4*x5, x10*x12*x4, x11*x12, 1, 0, S(1)/2, z/2, x2, b + x8, x8)])
    assert ans == c
def test_issue_6169():
    """A RootOf is returned untouched, and sub_pre/sub_post factor the
    negated sum correctly."""
    r = RootOf(x**6 - 4*x**5 - 2, 1)
    assert cse(r) == ([], [r])
    # and a check that the right thing is done with the new
    # mechanism
    assert sub_post(sub_pre((-x - y)*z - x - y)) == -z*(x + y) - x - y
def test_cse_Indexed():
    """cse finds common subexpressions inside Indexed expressions."""
    n = 5
    y = IndexedBase('y', shape=(n,))
    x = IndexedBase('x', shape=(n,))
    Dy = IndexedBase('Dy', shape=(n - 1,))
    i = Idx('i', n - 1)
    # Both share the denominator x[i+1] - x[i], so at least one
    # replacement must be produced.
    finite_diff = (y[i + 1] - y[i])/(x[i + 1] - x[i])
    inv_step = 1/(x[i + 1] - x[i])
    replacements, reduced_exprs = cse([finite_diff, inv_step])
    assert len(replacements) > 0
def test_cse_MatrixSymbol():
    """A bare MatrixSymbol (fixed or symbolic shape) is returned unchanged."""
    # MatrixSymbols have non-Basic args, so make sure that works
    A = MatrixSymbol("A", 3, 3)
    assert cse(A) == ([], [A])
    n = symbols('n', integer=True)
    B = MatrixSymbol("B", n, n)
    assert cse(B) == ([], [B])
def test_cse_MatrixExpr():
    """Matrix expressions with shared factors yield replacements."""
    from sympy import MatrixSymbol
    A = MatrixSymbol('A', 3, 3)
    y = MatrixSymbol('y', 3, 1)
    expr1 = (A.T*A).I * A * y
    expr2 = (A.T*A) * A * y
    replacements, reduced_exprs = cse([expr1, expr2])
    assert len(replacements) > 0
    replacements, reduced_exprs = cse([expr1 + expr2, expr1])
    assert replacements
    replacements, reduced_exprs = cse([A**2, A + A**2])
    assert replacements
def test_Piecewise():
    """cse descends into Piecewise branches; -z and x*y are shared."""
    f = Piecewise((-z + x*y, Eq(y, 0)), (-z - x*y, True))
    ans = cse(f)
    actual_ans = ([(x0, -z), (x1, x*y)], [Piecewise((x0+x1, Eq(y, 0)), (x0 - x1, True))])
    assert ans == actual_ans
def test_ignore_order_terms():
    """O(...) terms are never pulled out as common subexpressions."""
    eq = exp(x).series(x,0,3) + sin(y+x**3) - 1
    assert cse(eq) == ([], [sin(x**3 + y) + x + x**2/2 + O(x**3)])
def test_name_conflict():
    """Auto-generated replacement symbols must avoid names already in use."""
    exprs = [cos(x0 + y) + (x0 + y), cos(x2 + x3) + (x2 + x3), x0 + x2]
    substs, reduced = cse(exprs)
    # Undoing the substitutions must recover the originals exactly.
    assert [e.subs(reversed(substs)) for e in reduced] == exprs
def test_name_conflict_cust_symbols():
    """User-supplied replacement symbols also skip names already in use."""
    z1 = x0 + y
    z2 = x2 + x3
    l = [cos(z1) + z1, cos(z2) + z2, x0 + x2]
    substs, reduced = cse(l, symbols("x:10"))
    assert [e.subs(reversed(substs)) for e in reduced] == l
def test_symbols_exhausted_error():
    """cse raises ValueError when the supplied symbols run out.

    The captured exception info was bound but never used, so the
    ``as excinfo`` binding is dropped.
    """
    expr = cos(x + y) + x + y + cos(w + y) + sin(w + y)
    # Three replacement symbols are not enough for this expression.
    with raises(ValueError):
        cse(expr, symbols=[x, y, z])
def test_issue_7840():
    """Numeric evaluation via cse replacements must agree with plain subs
    on nested Piecewise expressions, and a deeply nested Piecewise with no
    shared parts must pass through untouched."""
    # daveknippers' example
    C393 = sympify( \
        'Piecewise((C391 - 1.65, C390 < 0.5), (Piecewise((C391 - 1.65, \
        C391 > 2.35), (C392, True)), True))'
    )
    C391 = sympify( \
        'Piecewise((2.05*C390**(-1.03), C390 < 0.5), (2.5*C390**(-0.625), True))'
    )
    C393 = C393.subs('C391',C391)
    # simple substitution
    sub = {}
    sub['C390'] = 0.703451854
    sub['C392'] = 1.01417794
    ss_answer = C393.subs(sub)
    # cse
    substitutions,new_eqn = cse(C393)
    for pair in substitutions:
        sub[pair[0].name] = pair[1].subs(sub)
    cse_answer = new_eqn[0].subs(sub)
    # both methods should be the same
    assert ss_answer == cse_answer
    # GitRay's example
    expr = sympify(
        "Piecewise((Symbol('ON'), Equality(Symbol('mode'), Symbol('ON'))), \
        (Piecewise((Piecewise((Symbol('OFF'), StrictLessThan(Symbol('x'), \
        Symbol('threshold'))), (Symbol('ON'), S.true)), Equality(Symbol('mode'), \
        Symbol('AUTO'))), (Symbol('OFF'), S.true)), S.true))"
    )
    substitutions, new_eqn = cse(expr)
    # this Piecewise should be exactly the same
    assert new_eqn[0] == expr
    # there should not be any replacements
    assert len(substitutions) < 1
def test_issue_8891():
    """Matrix results keep their class (mutable/immutable, dense/sparse)."""
    for cls in (MutableDenseMatrix, MutableSparseMatrix,
            ImmutableDenseMatrix, ImmutableSparseMatrix):
        m = cls(2, 2, [x + y, 0, 0, 0])
        res = cse([x + y, m])
        ans = ([(x0, x + y)], [x0, cls([[x0, 0], [0, 0]])])
        assert res == ans
        assert isinstance(res[1][-1], cls)
| |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class Change(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> declared OpenAPI type (drives to_dict()).
    openapi_types = {
        'id': 'int',
        'created': 'datetime',
        'user_id': 'int',
        'application_id': 'int',
        'entity': 'str',
        'old': 'object',
        'new': 'object'
    }
    # python attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'id',
        'created': 'created',
        'user_id': 'userId',
        'application_id': 'applicationId',
        'entity': 'entity',
        'old': 'old',
        'new': 'new'
    }
    def __init__(self, id=None, created=None, user_id=None, application_id=None, entity=None, old=None, new=None, local_vars_configuration=None):  # noqa: E501
        """Change - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._id = None
        self._created = None
        self._user_id = None
        self._application_id = None
        self._entity = None
        self._old = None
        self._new = None
        self.discriminator = None
        # id/created/user_id/entity are assigned unconditionally; their
        # setters reject None when client_side_validation is enabled.
        self.id = id
        self.created = created
        self.user_id = user_id
        if application_id is not None:  # optional field
            self.application_id = application_id
        self.entity = entity
        if old is not None:  # optional field
            self.old = old
        if new is not None:  # optional field
            self.new = new
    @property
    def id(self):
        """Gets the id of this Change.  # noqa: E501
        Unique ID for this entity.  # noqa: E501
        :return: The id of this Change.  # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Change.
        Unique ID for this entity.  # noqa: E501
        :param id: The id of this Change.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and id is None:  # noqa: E501
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = id
    @property
    def created(self):
        """Gets the created of this Change.  # noqa: E501
        The exact moment this entity was created.  # noqa: E501
        :return: The created of this Change.  # noqa: E501
        :rtype: datetime
        """
        return self._created
    @created.setter
    def created(self, created):
        """Sets the created of this Change.
        The exact moment this entity was created.  # noqa: E501
        :param created: The created of this Change.  # noqa: E501
        :type: datetime
        """
        if self.local_vars_configuration.client_side_validation and created is None:  # noqa: E501
            raise ValueError("Invalid value for `created`, must not be `None`")  # noqa: E501
        self._created = created
    @property
    def user_id(self):
        """Gets the user_id of this Change.  # noqa: E501
        The ID of the account that owns this entity.  # noqa: E501
        :return: The user_id of this Change.  # noqa: E501
        :rtype: int
        """
        return self._user_id
    @user_id.setter
    def user_id(self, user_id):
        """Sets the user_id of this Change.
        The ID of the account that owns this entity.  # noqa: E501
        :param user_id: The user_id of this Change.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and user_id is None:  # noqa: E501
            raise ValueError("Invalid value for `user_id`, must not be `None`")  # noqa: E501
        self._user_id = user_id
    @property
    def application_id(self):
        """Gets the application_id of this Change.  # noqa: E501
        ID of application associated with change  # noqa: E501
        :return: The application_id of this Change.  # noqa: E501
        :rtype: int
        """
        return self._application_id
    @application_id.setter
    def application_id(self, application_id):
        """Sets the application_id of this Change.
        ID of application associated with change  # noqa: E501
        :param application_id: The application_id of this Change.  # noqa: E501
        :type: int
        """
        # Optional field: no None validation here, unlike the required fields.
        self._application_id = application_id
    @property
    def entity(self):
        """Gets the entity of this Change.  # noqa: E501
        API endpoint on which the change was initiated.  # noqa: E501
        :return: The entity of this Change.  # noqa: E501
        :rtype: str
        """
        return self._entity
    @entity.setter
    def entity(self, entity):
        """Sets the entity of this Change.
        API endpoint on which the change was initiated.  # noqa: E501
        :param entity: The entity of this Change.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and entity is None:  # noqa: E501
            raise ValueError("Invalid value for `entity`, must not be `None`")  # noqa: E501
        self._entity = entity
    @property
    def old(self):
        """Gets the old of this Change.  # noqa: E501
        Resource before the change occurred.  # noqa: E501
        :return: The old of this Change.  # noqa: E501
        :rtype: object
        """
        return self._old
    @old.setter
    def old(self, old):
        """Sets the old of this Change.
        Resource before the change occurred.  # noqa: E501
        :param old: The old of this Change.  # noqa: E501
        :type: object
        """
        self._old = old
    @property
    def new(self):
        """Gets the new of this Change.  # noqa: E501
        Resource after the change occurred.  # noqa: E501
        :return: The new of this Change.  # noqa: E501
        :rtype: object
        """
        return self._new
    @new.setter
    def new(self, new):
        """Sets the new of this Change.
        Resource after the change occurred.  # noqa: E501
        :param new: The new of this Change.  # noqa: E501
        :type: object
        """
        self._new = new
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize any model instances in the list.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model instances stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Change):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Change):
            return True
        return self.to_dict() != other.to_dict()
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from measurements import smoothness
import page_sets
import page_sets.key_silk_cases
from telemetry import benchmark
class _Smoothness(perf_benchmark.PerfBenchmark):
  """Base class for smoothness-based benchmarks."""
  # Certain smoothness pages do not perform gesture scrolling, in turn yielding
  # an empty first_gesture_scroll_update_latency result. Such empty results
  # should be ignored, allowing aggregate metrics for that page set.
  _PAGES_WITHOUT_SCROLL_GESTURE_BLACKLIST = [
      'http://mobile-news.sandbox.google.com/news/pt0']
  # Measurement class shared by all subclasses unless they override `test`.
  test = smoothness.Smoothness
  @classmethod
  def Name(cls):
    return 'smoothness'
  @classmethod
  def ValueCanBeAddedPredicate(cls, value, is_first_result):
    """Drop empty scroll-latency values for pages known not to scroll."""
    del is_first_result  # unused
    if (value.name == 'first_gesture_scroll_update_latency' and
        value.page.url in cls._PAGES_WITHOUT_SCROLL_GESTURE_BLACKLIST and
        value.values is None):
      return False
    return True
@benchmark.Disabled('reference') # crbug.com/547833
class SmoothnessTop25(_Smoothness):
"""Measures rendering statistics while scrolling down the top 25 web pages.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
page_set = page_sets.Top25SmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.top_25_smooth'
class SmoothnessToughFiltersCases(_Smoothness):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of SVG and CSS Filter Effects.
"""
page_set = page_sets.ToughFiltersCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_filters_cases'
class SmoothnessToughPathRenderingCases(_Smoothness):
"""Tests a selection of pages with SVG and 2D Canvas paths.
Measures frame rate and a variety of other statistics. """
page_set = page_sets.ToughPathRenderingCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_path_rendering_cases'
@benchmark.Disabled('android') # crbug.com/526901
class SmoothnessToughCanvasCases(_Smoothness):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of the 2D Canvas API.
"""
page_set = page_sets.ToughCanvasCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_canvas_cases'
@benchmark.Disabled('android')  # crbug.com/373812
class SmoothnessToughWebGLCases(_Smoothness):
  """Measures smoothness metrics on the tough WebGL cases page set."""
  page_set = page_sets.ToughWebglCasesPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.tough_webgl_cases'
@benchmark.Enabled('android')
class SmoothnessMaps(perf_benchmark.PerfBenchmark):
  """Measures smoothness on the Maps page set.
  NOTE(review): subclasses PerfBenchmark directly (not _Smoothness), so it
  does not inherit the scroll-latency value filter or the default `test`.
  """
  page_set = page_sets.MapsPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.maps'
@benchmark.Disabled('android')
class SmoothnessKeyDesktopMoveCases(_Smoothness):
  """Measures smoothness metrics for the key desktop move cases page set."""
  page_set = page_sets.KeyDesktopMoveCasesPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.key_desktop_move_cases'
@benchmark.Enabled('android')
class SmoothnessKeyMobileSites(_Smoothness):
"""Measures rendering statistics while scrolling down the key mobile sites.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
page_set = page_sets.KeyMobileSitesSmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.key_mobile_sites_smooth'
@benchmark.Disabled('mac') # crbug.com/563615
class SmoothnessToughAnimationCases(_Smoothness):
test = smoothness.SmoothnessWithRestart
page_set = page_sets.ToughAnimationCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_animation_cases'
@benchmark.Enabled('android')
class SmoothnessKeySilkCases(_Smoothness):
  """Measures rendering statistics for the key silk cases without GPU
  rasterization.
  """
  page_set = page_sets.KeySilkCasesPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.key_silk_cases'
  def CreateStorySet(self, options):
    """Builds the default story set minus the known-noisy Page26 story."""
    stories = super(SmoothnessKeySilkCases, self).CreateStorySet(options)
    # Page26 (befamous) is too noisy to be useful; crbug.com/461127
    to_remove = [story for story in stories
                 if isinstance(story, page_sets.key_silk_cases.Page26)]
    for story in to_remove:
      stories.RemoveStory(story)
    return stories
@benchmark.Enabled('android', 'mac')
@benchmark.Disabled('reference') # crbug.com/547833
class SmoothnessGpuRasterizationTop25(_Smoothness):
"""Measures rendering statistics for the top 25 with GPU rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.Top25SmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.top_25_smooth'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationKeyMobileSites(_Smoothness):
"""Measures rendering statistics for the key mobile sites with GPU
rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.key_mobile_sites_smooth'
class SmoothnessGpuRasterizationToughPathRenderingCases(_Smoothness):
"""Tests a selection of pages with SVG and 2D canvas paths with GPU
rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.ToughPathRenderingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_path_rendering_cases'
class SmoothnessGpuRasterizationFiltersCases(_Smoothness):
"""Tests a selection of pages with SVG and CSS filter effects with GPU
rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.ToughFiltersCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_filters_cases'
@benchmark.Enabled('android')
class SmoothnessSyncScrollKeyMobileSites(_Smoothness):
"""Measures rendering statistics for the key mobile sites with synchronous
(main thread) scrolling.
"""
tag = 'sync_scroll'
page_set = page_sets.KeyMobileSitesSmoothPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSyncScrolling(options)
@classmethod
def Name(cls):
return 'smoothness.sync_scroll.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class SmoothnessSimpleMobilePages(_Smoothness):
"""Measures rendering statistics for simple mobile sites page set.
"""
page_set = page_sets.SimpleMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.simple_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessFlingSimpleMobilePages(_Smoothness):
"""Measures rendering statistics for flinging a simple mobile sites page set.
"""
page_set = page_sets.SimpleMobileSitesFlingPageSet
def SetExtraBrowserOptions(self, options):
# As the fling parameters cannot be analytically determined to not
# overscroll, disable overscrolling explicitly. Overscroll behavior is
# orthogonal to fling performance, and its activation is only more noise.
options.AppendExtraBrowserArgs('--disable-overscroll-edge-effect')
@classmethod
def Name(cls):
return 'smoothness.fling.simple_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases.
"""
page_set = page_sets.AndroidToughPinchZoomCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_pinch_zoom_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/564008
@benchmark.Enabled('chromeos', 'mac')
class SmoothnessDesktopToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases. Uses lower zoom levels customized for desktop limits.
"""
page_set = page_sets.DesktopToughPinchZoomCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.desktop_tough_pinch_zoom_cases'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases with GPU rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.AndroidToughPinchZoomCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.tough_pinch_zoom_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/564008
@benchmark.Enabled('chromeos', 'mac')
class SmoothnessGpuRasterizationDesktopToughPinchZoomCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming in the tough pinch zoom
cases with GPU rasterization. Uses lower zoom levels customized for desktop
limits.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.DesktopToughPinchZoomCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.desktop_tough_pinch_zoom_cases'
@benchmark.Enabled('android', 'chromeos')
class SmoothnessToughScrollingWhileZoomedInCases(_Smoothness):
"""Measures rendering statistics for pinch-zooming then diagonal scrolling"""
page_set = page_sets.ToughScrollingWhileZoomedInCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_while_zoomed_in_cases'
@benchmark.Enabled('android')
class SmoothnessPolymer(_Smoothness):
"""Measures rendering statistics for Polymer cases.
"""
page_set = page_sets.PolymerPageSet
@classmethod
def Name(cls):
return 'smoothness.polymer'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationPolymer(_Smoothness):
"""Measures rendering statistics for the Polymer cases with GPU rasterization.
"""
tag = 'gpu_rasterization'
page_set = page_sets.PolymerPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.polymer'
@benchmark.Disabled('reference')  # crbug.com/549429
class SmoothnessToughScrollingCases(_Smoothness):
  """Measures pixel approximation/checkerboarding while scrolling the
  tough scrolling cases (all other metrics are filtered out)."""
  page_set = page_sets.ToughScrollingCasesPageSet
  @classmethod
  def ValueCanBeAddedPredicate(cls, value, is_first_result):
    del is_first_result  # unused
    # Only keep 'mean_pixels_approximated' and 'mean_pixels_checkerboarded'
    # metrics. (crbug.com/529331)
    return value.name in ('mean_pixels_approximated',
                          'mean_pixels_checkerboarded')
  @classmethod
  def Name(cls):
    return 'smoothness.tough_scrolling_cases'
# Quote style normalized ('mac' was the only double-quoted platform string
# in the file); string value and behavior are unchanged.
@benchmark.Enabled('android', 'mac')
class SmoothnessGpuRasterizationToughScrollingCases(_Smoothness):
  """Measures rendering statistics for the tough scrolling cases with GPU
  rasterization.
  """
  tag = 'gpu_rasterization'
  test = smoothness.Smoothness
  page_set = page_sets.ToughScrollingCasesPageSet
  def SetExtraBrowserOptions(self, options):
    silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
  @classmethod
  def Name(cls):
    return 'smoothness.gpu_rasterization.tough_scrolling_cases'
@benchmark.Disabled('android')  # http://crbug.com/531593
class SmoothnessToughImageDecodeCases(_Smoothness):
  """Measures smoothness metrics on the tough image decode cases page set."""
  page_set = page_sets.ToughImageDecodeCasesPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.tough_image_decode_cases'
class SmoothnessImageDecodingCases(_Smoothness):
"""Measures decoding statistics for jpeg images.
"""
page_set = page_sets.ImageDecodingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
options.AppendExtraBrowserArgs('--disable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.image_decoding_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/563974
@benchmark.Disabled('android') # http://crbug.com/513699
class SmoothnessGpuImageDecodingCases(_Smoothness):
"""Measures decoding statistics for jpeg images with GPU rasterization.
"""
tag = 'gpu_rasterization_and_decoding'
page_set = page_sets.ImageDecodingCasesPageSet
def SetExtraBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
# TODO(sugoi): Remove the following line once M41 goes stable
options.AppendExtraBrowserArgs('--enable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization_and_decoding.image_decoding_cases'
@benchmark.Enabled('android')
class SmoothnessPathologicalMobileSites(_Smoothness):
"""Measures task execution statistics while scrolling pathological sites.
"""
page_set = page_sets.PathologicalMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.pathological_mobile_sites'
class SmoothnessToughAnimatedImageCases(_Smoothness):
  """Measures smoothness metrics on the tough animated image cases page set."""
  page_set = page_sets.ToughAnimatedImageCasesPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.tough_animated_image_cases'
@benchmark.Disabled('reference')  # http://crbug.com/499489
class SmoothnessToughTextureUploadCases(_Smoothness):
  """Measures smoothness metrics on the tough texture upload cases page set."""
  page_set = page_sets.ToughTextureUploadCasesPageSet
  @classmethod
  def Name(cls):
    return 'smoothness.tough_texture_upload_cases'
@benchmark.Disabled('reference') # http://crbug.com/496684
class SmoothnessToughAdCases(_Smoothness):
"""Measures rendering statistics while displaying advertisements."""
page_set = page_sets.ToughAdCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_ad_cases'
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/555089
# http://crbug.com/496684 (reference)
# http://crbug.com/522619 (mac/win)
@benchmark.Disabled('reference', 'win', 'mac')
class SmoothnessScrollingToughAdCases(_Smoothness):
"""Measures rendering statistics while scrolling advertisements."""
page_set = page_sets.ScrollingToughAdCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.scrolling_tough_ad_cases'
# http://crbug.com/496684 (reference)
# http://crbug.com/522619 (mac/win)
@benchmark.Disabled('reference', 'win', 'mac')
class SmoothnessBidirectionallyScrollingToughAdCases(_Smoothness):
"""Measures rendering statistics while scrolling advertisements."""
page_set = page_sets.BidirectionallyScrollingToughAdCasesPageSet
def SetExtraBrowserOptions(self, options):
# Don't accidentally reload the page while scrolling.
options.AppendExtraBrowserArgs('--disable-pull-to-refresh-effect')
@classmethod
def Name(cls):
return 'smoothness.bidirectionally_scrolling_tough_ad_cases'
@benchmark.Disabled('reference') # http://crbug.com/496684
class SmoothnessToughWebGLAdCases(_Smoothness):
"""Measures rendering statistics while scrolling advertisements."""
page_set = page_sets.ToughWebglAdCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_webgl_ad_cases'
| |
from sympy import (EmptySet, FiniteSet, S, Symbol, Interval, exp, erf, sqrt,
symbols, simplify, Eq, cos, And, Tuple, Or, Dict, sympify, binomial,
factor, cancel)
from sympy.stats import (DiscreteUniform, Die, Bernoulli, Coin, Binomial,
Hypergeometric, P, E, variance, covariance, skewness, sample, density,
given, independent, dependent, where, FiniteRV, pspace, cdf,
correlation, moment, cmoment, smoment)
from sympy.utilities.pytest import raises, slow
from sympy.abc import p
oo = S.Infinity
def BayesTest(A, B):
    """Check both forms of Bayes' theorem for events A and B."""
    joint = P(And(A, B))
    prior_b = P(B)
    # Conditional probability defined as joint over marginal.
    assert P(A, B) == joint / prior_b
    # Bayes' rule: P(A|B) == P(B|A) * P(A) / P(B).
    assert P(A, B) == P(B, A) * P(A) / prior_b
def test_discreteuniform():
    """DiscreteUniform: symbolic moments, pmf, cdf tails, and Die equivalence."""
    # Symbolic
    a, b, c = symbols('a b c')
    X = DiscreteUniform('X', [a, b, c])
    assert E(X) == (a + b + c)/3
    assert simplify(variance(X)
                    - ((a**2 + b**2 + c**2)/3 - (a/3 + b/3 + c/3)**2)) == 0
    assert P(Eq(X, a)) == P(Eq(X, b)) == P(Eq(X, c)) == S('1/3')
    Y = DiscreteUniform('Y', range(-5, 5))
    # Numeric
    assert E(Y) == S('-1/2')
    assert variance(Y) == S('33/4')
    # Check both cumulative tails at every support point.
    for x in range(-5, 5):
        assert P(Eq(Y, x)) == S('1/10')
        assert P(Y <= x) == S(x + 6)/10
        assert P(Y >= x) == S(5 - x)/10
    # A fair die is a discrete uniform over 1..6.
    assert dict(density(Die('D', 6)).items()) == \
        dict(density(DiscreteUniform('U', range(1, 7))).items())
def test_dice():
    """Fair dice: moments, (co)variance, conditioning, and joint densities."""
    # TODO: Make iid method!
    X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
    a, b = symbols('a b')
    assert E(X) == 3 + S.Half
    assert variance(X) == S(35)/12
    assert E(X + Y) == 7
    assert E(X + X) == 7
    # Linearity of expectation.
    assert E(a*X + b) == a*E(X) + b
    assert variance(X + Y) == variance(X) + variance(Y) == cmoment(X + Y, 2)
    # X + X is 2X, not two independent draws, hence 4*Var(X).
    assert variance(X + X) == 4 * variance(X) == cmoment(X + X, 2)
    assert cmoment(X, 0) == 1
    assert cmoment(4*X, 3) == 64*cmoment(X, 3)
    assert covariance(X, Y) == S.Zero
    assert covariance(X, X + Y) == variance(X)
    assert density(Eq(cos(X*S.Pi), 1))[True] == S.Half
    assert correlation(X, Y) == 0
    assert correlation(X, Y) == correlation(Y, X)
    assert smoment(X + Y, 3) == skewness(X + Y)
    assert smoment(X, 0) == 1
    assert P(X > 3) == S.Half
    assert P(2*X > 6) == S.Half
    assert P(X > Y) == S(5)/12
    assert P(Eq(X, Y)) == P(Eq(X, 1))
    # Conditional expectations (and their moment() equivalents).
    assert E(X, X > 3) == 5 == moment(X, 1, 0, X > 3)
    assert E(X, Y > 3) == E(X) == moment(X, 1, 0, Y > 3)
    assert E(X + Y, Eq(X, Y)) == E(2*X)
    assert moment(X, 0) == 1
    assert moment(5*X, 2) == 25*moment(X, 2)
    assert P(X > 3, X > 3) == S.One
    assert P(X > Y, Eq(Y, 6)) == S.Zero
    assert P(Eq(X + Y, 12)) == S.One/36
    assert P(Eq(X + Y, 12), Eq(X, 6)) == S.One/6
    assert density(X + Y) == density(Y + Z) != density(X + X)
    d = density(2*X + Y**Z)
    assert d[S(22)] == S.One/108 and d[S(4100)] == S.One/216 and S(3130) not in d
    assert pspace(X).domain.as_boolean() == Or(
        *[Eq(X.symbol, i) for i in [1, 2, 3, 4, 5, 6]])
    assert where(X > 3).set == FiniteSet(4, 5, 6)
def test_given():
    """Conditioning a fair die on X > 5 forces the outcome 6."""
    X = Die('X', 6)
    assert density(X, X > 5) == {S(6): S(1)}
    assert where(X > 2, X > 5).as_boolean() == Eq(X.symbol, 6)
    # Sampling from the conditioned distribution can only yield 6.
    assert sample(X, X > 5) == 6
def test_domains():
    """Conditional domains from where()/pspace() for one and two dice."""
    X, Y = Die('x', 6), Die('y', 6)
    x, y = X.symbol, Y.symbol
    # Domains
    d = where(X > Y)
    assert d.condition == (x > y)
    d = where(And(X > Y, Y > 3))
    assert d.as_boolean() == Or(And(Eq(x, 5), Eq(y, 4)), And(Eq(x, 6),
        Eq(y, 5)), And(Eq(x, 6), Eq(y, 4)))
    assert len(d.elements) == 3
    assert len(pspace(X + Y).domain.elements) == 36
    Z = Die('x', 4)
    raises(ValueError, lambda: P(X > Z)) # Two domains with same internal symbol
    # BUG FIX: this line was a bare comparison expression with no effect;
    # it must be asserted to actually test the joint domain's set.
    assert pspace(X + Y).domain.set == FiniteSet(1, 2, 3, 4, 5, 6)**2
    assert where(X > 3).set == FiniteSet(4, 5, 6)
    assert X.pspace.domain.dict == FiniteSet(
        Dict({X.symbol: i}) for i in range(1, 7))
    assert where(X > Y).dict == FiniteSet(Dict({X.symbol: i, Y.symbol: j})
        for i in range(1, 7) for j in range(1, 7) if i > j)
def test_dice_bayes():
    """Bayes' theorem (via BayesTest) holds for assorted dice events."""
    X, Y, Z = Die('X', 6), Die('Y', 6), Die('Z', 6)
    BayesTest(X > 3, X + Y < 5)
    BayesTest(Eq(X - Y, Z), Z > Y)
    # Nested events: {X > 3} is a subset of {X > 2}.
    BayesTest(X > 3, X > 2)
def test_bernoulli():
    """Bernoulli RV: density, mean, variance, and linearity properties."""
    p, a, b = symbols('p a b')
    X = Bernoulli('B', p, a, b)
    assert E(X) == a*p + b*(-p + 1)
    assert density(X)[a] == p
    assert density(X)[b] == 1 - p
    X = Bernoulli('B', p, 1, 0)
    assert E(X) == p
    assert simplify(variance(X)) == p*(1 - p)
    # BUG FIX: the next two comparisons previously lacked ``assert`` and
    # so tested nothing.  Compare via simplify() of the difference so the
    # check does not depend on the structural form of either side.
    assert simplify(E(a*X + b) - (a*E(X) + b)) == 0
    assert simplify(variance(a*X + b) - a**2 * variance(X)) == 0
def test_cdf():
    """The cdf of a fair die accumulates in sixths up to 1."""
    D = Die('D', 6)
    o = S.One
    assert cdf(
        D) == sympify({1: o/6, 2: o/3, 3: o/2, 4: 2*o/3, 5: 5*o/6, 6: o})
def test_coins():
    """Coins: joint density, a biased coin, and non-comparable outcomes."""
    C, D = Coin('C'), Coin('D')
    H, T = symbols('H, T')
    assert P(Eq(C, D)) == S.Half
    assert density(Tuple(C, D)) == {(H, H): S.One/4, (H, T): S.One/4,
            (T, H): S.One/4, (T, T): S.One/4}
    assert dict(density(C).items()) == {H: S.Half, T: S.Half}
    # Biased coin: probability of heads is 1/10.
    F = Coin('F', S.One/10)
    assert P(Eq(F, H)) == S(1)/10
    d = pspace(C).domain
    assert d.as_boolean() == Or(Eq(C.symbol, H), Eq(C.symbol, T))
    raises(ValueError, lambda: P(C > D)) # Can't intelligently compare H to T
def test_binomial_numeric():
    """Binomial(n, p) moments and pmf for small numeric n and p."""
    nvals = range(5)
    pvals = [0, S(1)/4, S.Half, S(3)/4, 1]
    for n in nvals:
        for p in pvals:
            X = Binomial('X', n, p)
            assert Eq(E(X), n*p)
            assert Eq(variance(X), n*p*(1 - p))
            # Skewness is undefined for degenerate cases (n == 0 or p in {0, 1}).
            if n > 0 and 0 < p < 1:
                assert Eq(skewness(X), (1 - 2*p)/sqrt(n*p*(1 - p)))
            for k in range(n + 1):
                assert Eq(P(Eq(X, k)), binomial(n, k)*p**k*(1 - p)**(n - k))
@slow
def test_binomial_symbolic():
    """Binomial with symbolic p: moments and custom succ/fail values."""
    n = 10 # Because we're using for loops, can't do symbolic n
    p = symbols('p', positive=True)
    X = Binomial('X', n, p)
    assert simplify(E(X)) == n*p == simplify(moment(X, 1))
    assert simplify(variance(X)) == n*p*(1 - p) == simplify(cmoment(X, 2))
    assert cancel((skewness(X) - (1-2*p)/sqrt(n*p*(1-p)))) == 0
    # Test ability to change success/failure winnings
    H, T = symbols('H T')
    Y = Binomial('Y', n, p, succ=H, fail=T)
    assert simplify(E(Y) - (n*(H*p + T*(1 - p)))) == 0
def test_hypergeometric_numeric():
    """Hypergeometric(N, m, n): normalization, mean, variance, skewness."""
    for N in range(1, 5):
        for m in range(0, N + 1):
            for n in range(1, N + 1):
                X = Hypergeometric('X', N, m, n)
                # Rebind the loop ints as sympy Integers for exact arithmetic.
                N, m, n = map(sympify, (N, m, n))
                assert sum(density(X).values()) == 1
                assert E(X) == n * m / N
                if N > 1:
                    assert variance(X) == n*(m/N)*(N - m)/N*(N - n)/(N - 1)
                # Only test for skewness when defined
                if N > 2 and 0 < m < N and n < N:
                    assert Eq(skewness(X), simplify((N - 2*m)*sqrt(N - 1)*(N - 2*n)
                        / (sqrt(n*m*(N - m)*(N - n))*(N - 2))))
def test_FiniteRV():
    """FiniteRV built from an explicit density dict behaves as expected."""
    F = FiniteRV('F', {1: S.Half, 2: S.One/4, 3: S.One/4})
    assert dict(density(F).items()) == {S(1): S.Half, S(2): S.One/4, S(3): S.One/4}
    assert P(F >= 2) == S.Half
    assert pspace(F).domain.as_boolean() == Or(
        *[Eq(F.symbol, i) for i in [1, 2, 3]])
def test_density_call():
    """density() results support call, indexing, and membership tests."""
    # ``p`` is the symbol imported from sympy.abc at module top.
    x = Bernoulli('x', p)
    d = density(x)
    assert d(0) == 1 - p
    assert d(S.Zero) == 1 - p
    # Values outside the support have zero probability but are not keys.
    assert d(5) == 0
    assert 0 in d
    assert 5 not in d
    assert d(S(0)) == d[S(0)]
| |
import salt.modules.win_powercfg as powercfg
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, call, patch
from tests.support.unit import TestCase
class PowerCfgTestCase(TestCase, LoaderModuleMockMixin):
    """
    Validate the powercfg state
    """
    def setup_loader_modules(self):
        # The module consults __grains__["osrelease"]; Windows 7 uses a
        # different query form (see test_windows_7 below).
        return {powercfg: {"__grains__": {"osrelease": 8}}}
    # Canned ``powercfg /q`` output shared by all tests.  AC index 0x708 is
    # 1800 seconds and DC index 0x384 is 900 seconds; the module reports
    # these as 30 and 15 (presumably minutes) in the assertions below.
    query_output = """Subgroup GUID: 238c9fa8-0aad-41ed-83f4-97be242c8f20 (Hibernate)
    GUID Alias: SUB_SLEEP
    Power Setting GUID: 29f6c1db-86da-48c5-9fdb-f2b67b1f44da (Hibernate after)
    GUID Alias: HIBERNATEIDLE
    Minimum Possible Setting: 0x00000000
    Maximum Possible Setting: 0xffffffff
    Possible Settings increment: 0x00000001
    Possible Settings units: Seconds
    Current AC Power Setting Index: 0x00000708
    Current DC Power Setting Index: 0x00000384"""
    def test_set_monitor_timeout(self):
        """
        Test to make sure we can set the monitor timeout value
        """
        mock = MagicMock(return_value=0)
        # side_effect takes precedence over return_value: first call yields
        # the active scheme line, second the canned query output.
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        mock_retcode = MagicMock(return_value=0)
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            with patch.dict(powercfg.__salt__, {"cmd.retcode": mock_retcode}):
                powercfg.set_monitor_timeout(0, "dc")
                mock.assert_called_once_with(
                    "powercfg /getactivescheme", python_shell=False
                )
                mock_retcode.assert_called_once_with(
                    "powercfg /setdcvalueindex 381b4222-f694-41f0-9685-ff5bb260df2e"
                    " SUB_VIDEO VIDEOIDLE 0",
                    python_shell=False,
                )
    def test_set_disk_timeout(self):
        """
        Test to make sure we can set the disk timeout value
        """
        mock = MagicMock()
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        mock_retcode = MagicMock(return_value=0)
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            with patch.dict(powercfg.__salt__, {"cmd.retcode": mock_retcode}):
                powercfg.set_disk_timeout(0, "dc")
                mock.assert_called_once_with(
                    "powercfg /getactivescheme", python_shell=False
                )
                mock_retcode.assert_called_once_with(
                    "powercfg /setdcvalueindex 381b4222-f694-41f0-9685-ff5bb260df2e"
                    " SUB_DISK DISKIDLE 0",
                    python_shell=False,
                )
    def test_set_standby_timeout(self):
        """
        Test to make sure we can set the standby timeout value
        """
        mock = MagicMock(return_value=0)
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        mock_retcode = MagicMock(return_value=0)
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            with patch.dict(powercfg.__salt__, {"cmd.retcode": mock_retcode}):
                powercfg.set_standby_timeout(0, "dc")
                mock.assert_called_once_with(
                    "powercfg /getactivescheme", python_shell=False
                )
                mock_retcode.assert_called_once_with(
                    "powercfg /setdcvalueindex 381b4222-f694-41f0-9685-ff5bb260df2e"
                    " SUB_SLEEP STANDBYIDLE 0",
                    python_shell=False,
                )
    def test_set_hibernate_timeout(self):
        """
        Test to make sure we can set the hibernate timeout value
        """
        mock = MagicMock(return_value=0)
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        mock_retcode = MagicMock(return_value=0)
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            with patch.dict(powercfg.__salt__, {"cmd.retcode": mock_retcode}):
                powercfg.set_hibernate_timeout(0, "dc")
                mock.assert_called_once_with(
                    "powercfg /getactivescheme", python_shell=False
                )
                mock_retcode.assert_called_once_with(
                    "powercfg /setdcvalueindex 381b4222-f694-41f0-9685-ff5bb260df2e"
                    " SUB_SLEEP HIBERNATEIDLE 0",
                    python_shell=False,
                )
    def test_get_monitor_timeout(self):
        """
        Test to make sure we can get the monitor timeout value
        """
        mock = MagicMock()
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            ret = powercfg.get_monitor_timeout()
            calls = [
                call("powercfg /getactivescheme", python_shell=False),
                call(
                    "powercfg /q 381b4222-f694-41f0-9685-ff5bb260df2e SUB_VIDEO"
                    " VIDEOIDLE",
                    python_shell=False,
                ),
            ]
            mock.assert_has_calls(calls)
            self.assertEqual({"ac": 30, "dc": 15}, ret)
    def test_get_disk_timeout(self):
        """
        Test to make sure we can get the disk timeout value
        """
        mock = MagicMock()
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            ret = powercfg.get_disk_timeout()
            calls = [
                call("powercfg /getactivescheme", python_shell=False),
                call(
                    "powercfg /q 381b4222-f694-41f0-9685-ff5bb260df2e SUB_DISK"
                    " DISKIDLE",
                    python_shell=False,
                ),
            ]
            mock.assert_has_calls(calls)
            self.assertEqual({"ac": 30, "dc": 15}, ret)
    def test_get_standby_timeout(self):
        """
        Test to make sure we can get the standby timeout value
        """
        mock = MagicMock()
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            ret = powercfg.get_standby_timeout()
            calls = [
                call("powercfg /getactivescheme", python_shell=False),
                call(
                    "powercfg /q 381b4222-f694-41f0-9685-ff5bb260df2e SUB_SLEEP"
                    " STANDBYIDLE",
                    python_shell=False,
                ),
            ]
            mock.assert_has_calls(calls)
            self.assertEqual({"ac": 30, "dc": 15}, ret)
    def test_get_hibernate_timeout(self):
        """
        Test to make sure we can get the hibernate timeout value
        """
        mock = MagicMock()
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            ret = powercfg.get_hibernate_timeout()
            calls = [
                call("powercfg /getactivescheme", python_shell=False),
                call(
                    "powercfg /q 381b4222-f694-41f0-9685-ff5bb260df2e SUB_SLEEP"
                    " HIBERNATEIDLE",
                    python_shell=False,
                ),
            ]
            mock.assert_has_calls(calls)
            self.assertEqual({"ac": 30, "dc": 15}, ret)
    def test_windows_7(self):
        """
        Test to make sure we can get the hibernate timeout value on windows 7
        """
        mock = MagicMock()
        mock.side_effect = [
            "Power Scheme GUID: 381b4222-f694-41f0-9685-ff5bb260df2e (Balanced)",
            self.query_output,
        ]
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            # On Windows 7 the module queries the whole SUB_SLEEP subgroup
            # instead of the HIBERNATEIDLE setting directly.
            with patch.dict(powercfg.__grains__, {"osrelease": "7"}):
                ret = powercfg.get_hibernate_timeout()
                calls = [
                    call("powercfg /getactivescheme", python_shell=False),
                    call(
                        "powercfg /q 381b4222-f694-41f0-9685-ff5bb260df2e SUB_SLEEP",
                        python_shell=False,
                    ),
                ]
                mock.assert_has_calls(calls)
                self.assertEqual({"ac": 30, "dc": 15}, ret)
    def test_set_hibernate_timeout_scheme(self):
        """
        Test to make sure we can set the hibernate timeout value
        """
        mock = MagicMock(return_value=0)
        mock.side_effect = [self.query_output]
        with patch.dict(powercfg.__salt__, {"cmd.retcode": mock}):
            # With an explicit scheme there is no /getactivescheme lookup.
            powercfg.set_hibernate_timeout(0, "dc", scheme="SCHEME_MIN")
            mock.assert_called_once_with(
                "powercfg /setdcvalueindex SCHEME_MIN SUB_SLEEP HIBERNATEIDLE 0",
                python_shell=False,
            )
    def test_get_hibernate_timeout_scheme(self):
        """
        Test to make sure we can get the hibernate timeout value with a
        specified scheme
        """
        mock = MagicMock()
        mock.side_effect = [self.query_output]
        with patch.dict(powercfg.__salt__, {"cmd.run": mock}):
            ret = powercfg.get_hibernate_timeout(scheme="SCHEME_MIN")
            mock.assert_called_once_with(
                "powercfg /q SCHEME_MIN SUB_SLEEP HIBERNATEIDLE", python_shell=False
            )
            self.assertEqual({"ac": 30, "dc": 15}, ret)
| |
#!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line msgpack-rpc client
#
# a usage example:
# % PYTHONPATH=. ./bin/rpc-cli \
# --peers=echo-server=localhost:9999,hoge=localhost:9998
# (Cmd) request echo-server echo ["hoge"]
# RESULT hoge
# (Cmd) request echo-server notify ["notify-method", ["param1","param2"]]
# RESULT notify-method
# (Cmd)
# NOTIFICATION from echo-server ['notify-method', ['param1', 'param2']]
# (Cmd)
import ryu.contrib
ryu.contrib.update_module_path()
from ryu import cfg
import cmd
import signal
import socket
import sys
import termios
from ryu.lib import rpc
CONF = cfg.CONF
# Command-line options understood by this tool.
CONF.register_cli_opts([
    # eg. rpc-cli --peers=hoge=localhost:9998,fuga=localhost:9999
    cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(object):
    """A named msgpack-rpc peer.

    Connects eagerly on construction (best effort) and transparently
    reconnects once after a disconnect when issuing a request or
    notification.
    """
    def __init__(self, name, addr):
        self._name = name
        self._addr = addr
        self.client = None
        try:
            self.connect()
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit during startup.  The initial
            # connection is best effort; try_to_connect() retries later.
            pass
    def connect(self):
        """(Re)establish the TCP connection and wrap it in an RPC client."""
        self.client = None
        s = socket.create_connection(self._addr)
        self.client = rpc.Client(s, notification_callback=self.notification)
    def try_to_connect(self, verbose=False):
        """Ensure self.client is usable; raise EOFError if unreachable."""
        if self.client:
            return
        try:
            self.connect()
            assert self.client
        except Exception as e:
            if verbose:
                print("connection failure %s" % e)
            raise EOFError
    def notification(self, n):
        # Invoked by the RPC client whenever the peer pushes a notification.
        print("NOTIFICATION from %s %s" % (self._name, n))
    def call(self, method, params):
        """Issue an RPC request and return its result."""
        return self._do(lambda: self.client.call(method, params))
    def send_notification(self, method, params):
        """Send a one-way RPC notification."""
        self._do(lambda: self.client.send_notification(method, params))
    def _do(self, f):
        # Run f(), reconnecting and retrying exactly once on EOFError.
        def g():
            try:
                return f()
            except EOFError:
                self.client = None
                raise
        self.try_to_connect(verbose=True)
        try:
            return g()
        except EOFError:
            print("disconnected. trying to connect...")
            self.try_to_connect(verbose=True)
            print("connected. retrying the request...")
            return g()
# Registry of known peers keyed by name; shared by Cmd and main().
peers = {}
def add_peer(name, host, port):
    """Create a Peer for (host, port) and register it under *name*."""
    peers[name] = Peer(name, (host, port))
class Cmd(cmd.Cmd):
    """Interactive shell for issuing msgpack-rpc requests/notifications.

    A SIGALRM-driven timer polls all connected peers for asynchronous
    notifications between commands.
    """
    def __init__(self, *args, **kwargs):
        self._in_onecmd = False
        self._notification_check_interval = 1  # seconds; worth to be configurable?
        self._saved_termios = None
        cmd.Cmd.__init__(self, *args, **kwargs)
    def _request(self, line, f):
        # line is "<peer> <method> <params>"; apply f(peer, method, params).
        args = line.split(None, 2)
        try:
            peer = args[0]
            method = args[1]
            # SECURITY NOTE: params are eval'ed.  Acceptable here because
            # the input comes from the interactive operator; never expose
            # this to untrusted input.
            params = eval(args[2])
        except Exception:
            # BUG FIX: was a bare ``except:``, which also caught
            # KeyboardInterrupt while typing a command.
            print("argument error")
            return
        try:
            p = peers[peer]
        except KeyError:
            print("unknown peer %s" % peer)
            return
        try:
            f(p, method, params)
        except rpc.RPCError as e:
            print("RPC ERROR %s" % e)
        except EOFError:
            print("disconnected")
    def _complete_peer(self, text, line, _begidx, _endidx):
        # Only the first argument (the peer name) is completed.
        if len((line + 'x').split()) >= 3:
            return []
        return [name for name in peers if name.startswith(text)]
    def do_request(self, line):
        """request <peer> <method> <params>
        send a msgpack-rpc request and print a response.
        <params> is a python code snippet, it should be eval'ed to a list.
        """
        def f(p, method, params):
            result = p.call(method, params)
            print("RESULT %s" % result)
        self._request(line, f)
    def do_notify(self, line):
        """notify <peer> <method> <params>
        send a msgpack-rpc notification.
        <params> is a python code snippet, it should be eval'ed to a list.
        """
        def f(p, method, params):
            p.send_notification(method, params)
        self._request(line, f)
    def complete_request(self, text, line, begidx, endidx):
        return self._complete_peer(text, line, begidx, endidx)
    def complete_notify(self, text, line, begidx, endidx):
        return self._complete_peer(text, line, begidx, endidx)
    def do_EOF(self, _line):
        sys.exit(0)
    def emptyline(self):
        # An empty command is an opportunity to poll for notifications.
        self._peek_notification()
    def postcmd(self, _stop, _line):
        self._peek_notification()
    def _peek_notification(self):
        # Drain pending notifications from every connected peer.
        for k, p in peers.items():
            if p.client:
                try:
                    p.client.peek_notification()
                except EOFError:
                    p.client = None
                    print("disconnected %s" % k)
    @staticmethod
    def _save_termios():
        return termios.tcgetattr(sys.stdin.fileno())
    @staticmethod
    def _restore_termios(t):
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, t)
    def preloop(self):
        # Remember terminal settings and start the periodic poll timer.
        self._saved_termios = self._save_termios()
        signal.signal(signal.SIGALRM, self._timeout)
        signal.alarm(1)
    def onecmd(self, string):
        # Track whether we are inside command execution so the alarm
        # handler does not touch the terminal mid-command.
        self._in_onecmd = True
        try:
            return cmd.Cmd.onecmd(self, string)
        finally:
            self._in_onecmd = False
    def _timeout(self, _sig, _frame):
        if not self._in_onecmd:
            # restore terminal settings. (cooked/raw, ...)
            # required for pypy at least.
            # this doesn't seem to be needed for cpython readline
            # module but i'm not sure if it's by spec or luck.
            o = self._save_termios()
            self._restore_termios(self._saved_termios)
            self._peek_notification()
            self._restore_termios(o)
        signal.alarm(self._notification_check_interval)
def main(args=None, prog=None):
    """Parse options, register the configured peers, and run the shell."""
    CONF(args=args, prog=prog, project='rpc-cli', version='rpc-cli')
    for peer_spec in CONF.peers:
        # Each entry looks like "name=host:port"; rsplit tolerates IPv6-ish
        # hosts containing colons.
        peer_name, peer_addr = peer_spec.split('=')
        peer_host, peer_port = peer_addr.rsplit(':', 1)
        add_peer(peer_name, peer_host, peer_port)
    Cmd().cmdloop()
if __name__ == "__main__":
    main()
| |
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
import mock
from feaas import managers, storage as api_storage
from feaas.managers import ec2
class EC2ManagerTestCase(unittest.TestCase):
    """Unit tests for feaas.managers.ec2.EC2Manager with boto mocked out."""
    @classmethod
    def setUpClass(cls):
        # Credentials and image/subnet ids are read from the environment
        # by the manager under test.
        os.environ["EC2_ACCESS_KEY"] = cls.access_key = "access"
        os.environ["EC2_SECRET_KEY"] = cls.secret_key = "secret"
        os.environ["AMI_ID"] = cls.ami_id = "ami-123"
        os.environ["SUBNET_ID"] = cls.subnet_id = "subnet-123"
    def setUp(self):
        # Reset the endpoint before every test; individual tests override it.
        os.environ["EC2_ENDPOINT"] = "http://amazonaws.com"
    @mock.patch("boto.ec2.EC2Connection")
    @mock.patch("boto.ec2.RegionInfo")
    def test_connection_http(self, region_mock, ec2_mock):
        m = mock.Mock()
        region_mock.return_value = m
        os.environ["EC2_ENDPOINT"] = "http://amazonaws.com"
        ec2_mock.return_value = "connection to ec2"
        conn = ec2.EC2Manager(None).connection
        self.assertEqual("connection to ec2", conn)
        # http -> port 80, is_secure=False.
        ec2_mock.assert_called_with(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    host="amazonaws.com", port=80,
                                    path="/", is_secure=False,
                                    region=m)
        region_mock.assert_called_with(name="custom", endpoint="amazonaws.com")
    @mock.patch("boto.ec2.EC2Connection")
    @mock.patch("boto.ec2.RegionInfo")
    def test_connection_https(self, region_mock, ec2_mock):
        m = mock.Mock()
        region_mock.return_value = m
        os.environ["EC2_ENDPOINT"] = "https://amazonaws.com"
        ec2_mock.return_value = "connection to ec2"
        conn = ec2.EC2Manager(None).connection
        self.assertEqual("connection to ec2", conn)
        # https -> port 443, is_secure=True.
        ec2_mock.assert_called_with(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    host="amazonaws.com", port=443,
                                    path="/", is_secure=True,
                                    region=m)
        region_mock.assert_called_with(name="custom", endpoint="amazonaws.com")
    @mock.patch("boto.ec2.EC2Connection")
    @mock.patch("boto.ec2.RegionInfo")
    def test_ec2_connection_http_custom_port(self, region_mock, ec2_mock):
        m = mock.Mock()
        region_mock.return_value = m
        os.environ["EC2_ENDPOINT"] = "http://amazonaws.com:8080"
        ec2_mock.return_value = "connection to ec2"
        conn = ec2.EC2Manager(None).connection
        self.assertEqual("connection to ec2", conn)
        # An explicit port in the endpoint URL overrides the scheme default.
        ec2_mock.assert_called_with(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    host="amazonaws.com", port=8080,
                                    path="/", is_secure=False,
                                    region=m)
        region_mock.assert_called_with(name="custom", endpoint="amazonaws.com")
    @mock.patch("boto.ec2.EC2Connection")
    @mock.patch("boto.ec2.RegionInfo")
    def test_ec2_connection_https_custom_port(self, region_mock, ec2_mock):
        m = mock.Mock()
        region_mock.return_value = m
        os.environ["EC2_ENDPOINT"] = "https://amazonaws.com:8080"
        ec2_mock.return_value = "connection to ec2"
        conn = ec2.EC2Manager(None).connection
        self.assertEqual("connection to ec2", conn)
        ec2_mock.assert_called_with(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    host="amazonaws.com", port=8080,
                                    path="/", is_secure=True,
                                    region=m)
        region_mock.assert_called_with(name="custom", endpoint="amazonaws.com")
    @mock.patch("boto.ec2.EC2Connection")
    @mock.patch("boto.ec2.RegionInfo")
    def test_ec2_connection_custom_path(self, region_mock, ec2_mock):
        m = mock.Mock()
        region_mock.return_value = m
        os.environ["EC2_ENDPOINT"] = "https://amazonaws.com:8080/something"
        ec2_mock.return_value = "connection to ec2"
        result = ec2.EC2Manager(None).connection
        self.assertEqual("connection to ec2", result)
        ec2_mock.assert_called_with(aws_access_key_id=self.access_key,
                                    aws_secret_access_key=self.secret_key,
                                    host="amazonaws.com", port=8080,
                                    path="/something", is_secure=True,
                                    region=m)
        region_mock.assert_called_with(name="custom", endpoint="amazonaws.com")
    def test_start_instance(self):
        instance = api_storage.Instance(name="myapp")
        storage = mock.Mock()
        storage.retrieve_instance.return_value = instance
        manager = ec2.EC2Manager(storage)
        manager._add_units = mock.Mock()
        created_instance = manager.start_instance("myapp")
        self.assertEqual(instance, created_instance)
        # Starting an instance adds exactly one unit by default.
        manager._add_units.assert_called_with(instance, 1)
    def test_start_instance_not_found(self):
        storage = mock.Mock()
        storage.retrieve_instance.side_effect = api_storage.InstanceNotFoundError()
        manager = ec2.EC2Manager(storage)
        with self.assertRaises(api_storage.InstanceNotFoundError):
            manager.start_instance("myapp")
    @mock.patch("uuid.uuid4")
    def test_start_instance_ec2_default_userdata(self, uuid4):
        # The uuid becomes the Varnish secret in the generated user data.
        uuid4.return_value = u"abacaxi"
        os.environ["API_PACKAGES"] = "varnish vim-nox"
        def recover():
            del os.environ["API_PACKAGES"]
        self.addCleanup(recover)
        conn = mock.Mock()
        conn.run_instances.return_value = self.get_fake_reservation(
            instances=[{"id": "i-800", "dns_name": "abcd.amazonaws.com"}],
        )
        manager = ec2.EC2Manager(None)
        manager._connection = conn
        manager._run_unit()
        user_data = """apt-get update
apt-get install -y varnish vim-nox
sed -i -e 's/-T localhost:6082/-T :6082/' /etc/default/varnish
sed -i -e 's/-a :6081/-a :8080/' /etc/default/varnish
echo abacaxi > /etc/varnish/secret
service varnish restart
cat > /etc/cron.hourly/dump_vcls <<'END'
{0}
END
chmod +x /etc/cron.hourly/dump_vcls
""".format(open(managers.DUMP_VCL_FILE).read())
        conn.run_instances.assert_called_once_with(image_id=self.ami_id,
                                                   subnet_id=self.subnet_id,
                                                   user_data=user_data)
    @mock.patch("httplib2.Http.request")
    @mock.patch("uuid.uuid4")
    def test_start_instance_ec2_custom_userdata(self, uuid4, request):
        uuid4.return_value = u"abacaxi"
        # Script served from USER_DATA_URL with a VARNISH_SECRET_KEY
        # placeholder.  NOTE(review): the expected user_data below has the
        # uuid substituted in — presumably the manager replaces the
        # placeholder in the fetched script; confirm against the manager.
        return_content = """apt-get update
apt-get install -y varnish vim-nox
sed -i -e 's/-T localhost:6082/-T :6082/' /etc/default/varnish
sed -i -e 's/-a :6081/-a :8080/' /etc/default/varnish
echo VARNISH_SECRET_KEY > /etc/varnish/secret
service varnish restart
cat > /etc/cron.hourly/dump_vcls <<'END'
{0}
END
chmod +x /etc/cron.hourly/dump_vcls
""".format(open(managers.DUMP_VCL_FILE).read())
        request.return_value = (200, return_content)
        os.environ["USER_DATA_URL"] = "http://localhost/custom_user_data_script"
        def recover():
            del os.environ["USER_DATA_URL"]
        self.addCleanup(recover)
        conn = mock.Mock()
        conn.run_instances.return_value = self.get_fake_reservation(
            instances=[{"id": "i-800", "dns_name": "abcd.amazonaws.com"}],
        )
        manager = ec2.EC2Manager(None)
        manager._connection = conn
        manager._run_unit()
        user_data = """apt-get update
apt-get install -y varnish vim-nox
sed -i -e 's/-T localhost:6082/-T :6082/' /etc/default/varnish
sed -i -e 's/-a :6081/-a :8080/' /etc/default/varnish
echo abacaxi > /etc/varnish/secret
service varnish restart
cat > /etc/cron.hourly/dump_vcls <<'END'
{0}
END
chmod +x /etc/cron.hourly/dump_vcls
""".format(open(managers.DUMP_VCL_FILE).read())
        conn.run_instances.assert_called_once_with(image_id=self.ami_id,
                                                   subnet_id=self.subnet_id,
                                                   user_data=user_data)
    def test_remove_instance(self):
        instance = api_storage.Instance(name="secret")
        storage = mock.Mock()
        storage.retrieve_instance.return_value = instance
        manager = ec2.EC2Manager(storage)
        manager.remove_instance("secret")
        # Removal only flips the stored state; termination is separate.
        self.assertEqual("removed", instance.state)
        storage.retrieve_instance.assert_called_with(name="secret")
        storage.store_instance.assert_called_with(instance)
    def test_remove_instance_not_found(self):
        storage = mock.Mock()
        storage.retrieve_instance.side_effect = api_storage.InstanceNotFoundError()
        manager = ec2.EC2Manager(storage)
        with self.assertRaises(api_storage.InstanceNotFoundError):
            manager.remove_instance("secret")
    def test_terminate_instance(self):
        conn = mock.Mock()
        storage = mock.Mock()
        unit = api_storage.Unit(id="i-0800")
        instance = api_storage.Instance(name="secret", units=[unit])
        storage.retrieve_instance.return_value = instance
        manager = ec2.EC2Manager(storage)
        manager._connection = conn
        got_instance = manager.terminate_instance("secret")
        conn.terminate_instances.assert_called_with(instance_ids=["i-0800"])
        storage.retrieve_instance.assert_called_with(name="secret")
        self.assertEqual(instance, got_instance)
    def test_terminate_instance_not_found(self):
        storage = mock.Mock()
        storage.retrieve_instance.side_effect = api_storage.InstanceNotFoundError()
        manager = ec2.EC2Manager(storage)
        with self.assertRaises(api_storage.InstanceNotFoundError):
            manager.terminate_instance("secret")
    @mock.patch("sys.stderr")
    def test_terminate_instance_ec2_failure(self, stderr_mock):
        # EC2 errors are reported to stderr, not raised to the caller.
        conn = mock.Mock()
        conn.terminate_instances.side_effect = ValueError("Something went wrong")
        unit = api_storage.Unit(id="i-0800")
        storage = mock.Mock()
        storage.retrieve_instance.return_value = api_storage.Instance(name="secret",
                                                                      units=[unit])
        manager = ec2.EC2Manager(storage)
        manager._connection = conn
        manager.terminate_instance("someapp")
        msg = "[ERROR] Failed to terminate EC2 instance: Something went wrong"
        stderr_mock.write.assert_called_with(msg)
    def test_physical_scale_add_units(self):
        # Scaling 1 -> 4 units runs _run_unit three times.
        instance = api_storage.Instance(name="secret",
                                        units=[api_storage.Unit(dns_name="secret.cloud.tsuru.io",
                                                                id="i-0800")])
        fake_run_unit, fake_data = self.get_fake_run_unit()
        storage = mock.Mock()
        manager = ec2.EC2Manager(storage)
        manager._run_unit = fake_run_unit
        units = manager.physical_scale(instance, 4)
        self.assertEqual(fake_data["calls"], 3)
        instance.units.extend(fake_data["units"])
        storage.store_instance.assert_called_with(instance)
        self.assertEqual(fake_data["units"], units)
    def test_physical_scale_remove_units(self):
        # Scaling 3 -> 1 terminates the first two units and returns them.
        unit1 = api_storage.Unit(dns_name="secret1.cloud.tsuru.io", id="i-0800")
        unit2 = api_storage.Unit(dns_name="secret2.cloud.tsuru.io", id="i-0801")
        unit3 = api_storage.Unit(dns_name="secret3.cloud.tsuru.io", id="i-0802")
        units = [unit1, unit2, unit3]
        instance = api_storage.Instance(name="secret", units=units)
        storage = mock.Mock()
        manager = ec2.EC2Manager(storage)
        manager._terminate_unit = mock.Mock()
        units = manager.physical_scale(instance, 1)
        expected = [mock.call(unit1), mock.call(unit2)]
        self.assertEqual(expected, manager._terminate_unit.call_args_list)
        self.assertEqual([unit3], instance.units)
        storage.store_instance.assert_called_with(instance)
        self.assertEqual([unit1, unit2], units)
    def get_fake_reservation(self, instances):
        # Build a boto-like reservation whose .instances carry the given attrs.
        reservation = mock.Mock(instances=[])
        for instance in instances:
            reservation.instances.append(mock.Mock(**instance))
        return reservation
    def get_fake_run_unit(self):
        # Return a _run_unit stand-in plus a dict recording calls and units.
        fake_data = {"calls": 0, "units": []}
        def fake_run_unit():
            calls = fake_data["calls"] = fake_data["calls"] + 1
            name = "i-080%d" % calls
            unit = api_storage.Unit(id=name, dns_name="%s.domain.com" % name,
                                    secret="%s-secret" % name)
            fake_data["units"].append(unit)
            return unit
        return fake_run_unit, fake_data
| |
from unittest import TestCase
from protojson import pbliteserializer, alltypes_pb2
def _getExpectedDefaults():
	"""
	Return the pblite (JSON-array) form of a default C{TestAllTypes}
	message.  Index 0 is unused; index i holds the default for field
	number i (C{[]} for repeated fields, C{None} for unset messages).
	"""
	expectedDefaults = [
		None, # 0
		0, # 1
		1, # 2
		0, # and so on
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		1.5,
		0,
		0,
		u'',
		'moo',
		[ # 16
			None, None, None, None, None, None, None, None, None,
			None, None, None, None, None, None, None, None, 0],
		None, # 17
		[None, 0],
		None,
		None,
		1,
		None,
		None,
		None,
		None,
		None,
		None,
		None,
		None,
		None,
		[], # 31
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[],
		[], # 46
		None, # 47
		[], # 48
		[], # 49
		1, # 50
	]
	return expectedDefaults
class PbLiteSerializeTests(TestCase):
	"""
	Tests for L{pbliteserializer.PbLiteSerializer.serialize}.

	(Python 2 code: note the C{4L} long literals below.)
	"""
	def test_defaults(self):
		# A freshly-constructed message serializes to the default table.
		message = alltypes_pb2.TestAllTypes()
		serializer = pbliteserializer.PbLiteSerializer()
		ser = serializer.serialize(message)
		self.assertEqual(_getExpectedDefaults(), ser)
	def test_serializeDeserialize(self):
		"""
		This is a port of Closure Library's closure/goog/proto2/pbserializer_test.html
		testSerializationAndDeserialization.
		"""
		message = alltypes_pb2.TestAllTypes()
		# Set the fields.
		# Singular.
		message.optional_int32 = 101
		message.optional_int64 = 102
		message.optional_uint32 = 103
		message.optional_uint64 = 104
		message.optional_sint32 = 105
		message.optional_sint64 = 106
		message.optional_fixed32 = 107
		message.optional_fixed64 = 108
		message.optional_sfixed32 = 109
		message.optional_sfixed64 = 110
		message.optional_float = 111.5
		message.optional_double = 112.5
		message.optional_bool = True
		message.optional_string = 'test'
		message.optional_bytes = 'abcd'
		# Note: setting OptionGroup.a is wrong and leads to disaster.
		message.optionalgroup.a = 111
		message.optional_nested_message.b = 112
		message.optional_nested_enum = alltypes_pb2.TestAllTypes.FOO
		# Repeated.
		message.repeated_int32.append(201)
		message.repeated_int32.append(202)
		# Skip a few repeated fields so we can test how null array values are
		# handled.
		message.repeated_string.append('foo')
		message.repeated_string.append('bar')
		message.required_int32 = 1
		# Serialize.
		serializer = pbliteserializer.PbLiteSerializer()
		pblite = serializer.serialize(message)
		self.assertTrue(isinstance(pblite, list))
		# Assert that everything serialized properly.
		self.assertEqual(101, pblite[1])
		self.assertEqual(102, pblite[2])
		self.assertEqual(103, pblite[3])
		self.assertEqual(104, pblite[4])
		self.assertEqual(105, pblite[5])
		self.assertEqual(106, pblite[6])
		self.assertEqual(107, pblite[7])
		self.assertEqual(108, pblite[8])
		self.assertEqual(109, pblite[9])
		self.assertEqual(110, pblite[10])
		self.assertEqual(111.5, pblite[11])
		self.assertEqual(112.5, pblite[12])
		self.assertEqual(1, pblite[13]) # True is serialized as 1
		self.assertEqual('test', pblite[14])
		self.assertEqual('abcd', pblite[15])
		self.assertEqual(111, pblite[16][17])
		self.assertEqual(112, pblite[18][1])
		self.assertEqual(None, pblite[19])
		self.assertEqual(None, pblite[20])
		self.assertEqual(alltypes_pb2.TestAllTypes.FOO, pblite[21])
		self.assertEqual(201, pblite[31][0])
		self.assertEqual(202, pblite[31][1])
		self.assertEqual('foo', pblite[44][0])
		self.assertEqual('bar', pblite[44][1])
		self.assertEqual(1, pblite[50])
		# Round-trip: deserializing must reproduce the original message.
		messageDecoded = alltypes_pb2.TestAllTypes()
		serializer.deserialize(messageDecoded, pblite)
		self.assertEqual(
			messageDecoded,
			message,
			"Messages do not match:\n" +
			str(messageDecoded) + "\n!=\n\n" + str(message))
	def test_deserializeSerializeRepeatedMessage(self):
		"""
		Deserializing a repeated Message works. When serialized, it
		matches the original serialized data.
		"""
		serializer = pbliteserializer.PbLiteSerializer()
		pblite = _getExpectedDefaults()
		# Set the repeated_nested_message
		pblite[48] = [[None, 100], [None, 200]]
		messageDecoded = alltypes_pb2.TestAllTypes()
		serializer.deserialize(messageDecoded, pblite)
		pbliteReencoded = serializer.serialize(messageDecoded)
		self.assertEqual([[None, 100], [None, 200]], pbliteReencoded[48])
	def test_wrongTypeForData(self):
		"""
		If a non-indexable object is passed as the second argument to
		L{PbLiteSerializer.deserialize}, it raises L{TypeError}.
		"""
		serializer = pbliteserializer.PbLiteSerializer()
		for pblite in [None, 3, 4L, 0.5]:
			messageDecoded = alltypes_pb2.TestAllTypes()
			self.assertRaises(
				TypeError,
				lambda: serializer.deserialize(messageDecoded, pblite))
	def test_wrongTypeForMessage(self):
		"""
		If a non-L{Message} is passed as the second argument to
		L{PbLiteSerializer.deserialize}, it raises L{AttributeError}.
		"""
		serializer = pbliteserializer.PbLiteSerializer()
		for messageDecoded in [None, 3, 4L, 0.5, {}, set()]:
			self.assertRaises(
				AttributeError,
				lambda: serializer.deserialize(messageDecoded, []))
class PbLiteDeserializeTests(TestCase):
	"""
	Tests for error handling in L{PbLiteSerializer.deserialize}: wrong
	types, bad enum values, missing/extra indices, and C{None} handling.
	"""
	def setUp(self):
		self.serializer = pbliteserializer.PbLiteSerializer()

	def test_stringInsteadOfNumber(self):
		"""
		If an index which should contain an int64 field contains a string,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the optional_int64 to a string
		pblite[2] = u'wrong-type'
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_numberTooBig(self):
		"""
		If an index which should contain an int64 field contains a big number
		2**128, L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the optional_int64 to a big number
		pblite[2] = 2**128
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_stringInRepeatedNumber(self):
		"""
		If an index which should contain a list of int64s contains a list of strings,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the repeated_int32
		pblite[31] = [4, u'wrong-type', 5]
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_noneInsteadOfRepeatedNumber(self):
		"""
		If an index which should contain a list of int64s contains a None,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the repeated_int32
		pblite[31] = None
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_noneInsteadOfRepeatedMessage(self):
		"""
		If an index which should contain a list of Messages (more lists)
		contains a None, L{PbLiteSerializer.deserialize} raises
		L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the repeated_nested_message
		pblite[48] = None
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_stringInsteadOfBool(self):
		"""
		If an index which should contain a bool (or bool number) contains
		a string, L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the optional_bool
		pblite[13] = u'wrong-type'
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_stringInRepeatedBool(self):
		"""
		If an index which should contain a list of bools (or bool numbers)
		contains a string, L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the repeated_bool
		pblite[43] = [1, u'wrong-type', 0]
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_badEnumValue(self):
		"""
		If a serialized message has an invalid enum value,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the optional_nested_enum
		pblite[21] = 99 # not a valid enum value
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_badRepeatedEnumValue(self):
		"""
		If a serialized message has an invalid repeated enum value,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the repeated_nested_enum
		pblite[49] = [1, 2, 99, 3] # 99 is not a valid enum value; the others are
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_messageMissingAnIndex(self):
		"""
		If a serialized message is missing an index which it should have,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		pblite.pop()
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_messageExtraIndexOkay(self):
		"""
		If a serialized message has more indices than it should have,
		L{PbLiteSerializer.deserialize} ignores it.
		"""
		pblite = _getExpectedDefaults()
		pblite.append(u'extra-field')
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.serializer.deserialize(messageDecoded, pblite)

	def test_requiredFieldIsNone(self):
		"""
		If a serialized message has a C{None} for a required field,
		L{PbLiteSerializer.deserialize} raises L{PbDecodeError}.
		"""
		pblite = _getExpectedDefaults()
		# Set the required_int32
		pblite[50] = None
		messageDecoded = alltypes_pb2.TestAllTypes()
		self.assertRaises(
			pbliteserializer.PbDecodeError,
			lambda: self.serializer.deserialize(messageDecoded, pblite))

	def test_optionalFieldWithDefaultIsNone(self):
		"""
		If a serialized message has a C{None} for a optional field with
		a default, L{PbLiteSerializer.deserialize} ignores the None and
		uses the default value.
		"""
		pblite = _getExpectedDefaults()
		# Set the optional_int64
		pblite[2] = None
		messageDecoded = alltypes_pb2.TestAllTypes()
		# Fix: actually run the deserializer. Previously these assertions
		# were made against a freshly-constructed message, so they passed
		# vacuously without exercising deserialize at all.
		self.serializer.deserialize(messageDecoded, pblite)
		self.assertFalse(messageDecoded.HasField("optional_int64"))
		self.assertEqual(1, messageDecoded.optional_int64)

	def test_optionalFieldWithoutDefaultIsNone(self):
		"""
		If a serialized message has a C{None} for a optional field without
		a default, L{PbLiteSerializer.deserialize} ignores the None and
		the decoded Message is missing the field.
		"""
		pblite = _getExpectedDefaults()
		# Set the optional_int32
		pblite[1] = None
		messageDecoded = alltypes_pb2.TestAllTypes()
		# Fix: actually run the deserializer (see note in
		# test_optionalFieldWithDefaultIsNone).
		self.serializer.deserialize(messageDecoded, pblite)
		self.assertFalse(messageDecoded.HasField("optional_int32"))
		self.assertEqual(0, messageDecoded.optional_int32)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import (
GetAppVersion, DeadlineExceededError, IsDevServer, logservice)
from branch_utility import BranchUtility
from caching_file_system import CachingFileSystem
from compiled_file_system import CompiledFileSystem
from empty_dir_file_system import EmptyDirFileSystem
from github_file_system import GithubFileSystem
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from subversion_file_system import SubversionFileSystem
import svn_constants
from third_party.json_schema_compiler.memoize import memoize
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
  '''RenderServlet delegate that hands back a single pre-built
  ServerInstance for every channel request.
  '''
  def __init__(self, server_instance):
    self._server_instance = server_instance
  def CreateServerInstanceForChannel(self, channel):
    # |channel| is deliberately ignored: the cron renders everything through
    # the one ServerInstance it was constructed with.
    return self._server_instance
class CronServlet(Servlet):
  '''Servlet which runs a cron job.

  The cron pre-renders every publicly accessible file so that caches are
  warm and rendering errors show up in the logs rather than for users.
  '''
  def __init__(self, request, delegate_for_test=None):
    Servlet.__init__(self, request)
    # The channel ('stable', 'beta', ...) is encoded in the request path.
    self._channel = request.path.strip('/')
    self._delegate = delegate_for_test or CronServlet.Delegate()

  class Delegate(object):
    '''CronServlet's runtime dependencies. Override for testing.
    '''
    def CreateBranchUtility(self, object_store_creator):
      return BranchUtility.Create(object_store_creator)

    def CreateHostFileSystemForBranchAndRevision(self, branch, revision):
      return SubversionFileSystem.Create(branch, revision=revision)

    def CreateAppSamplesFileSystem(self, object_store_creator):
      # TODO(kalman): CachingFileSystem wrapper for GithubFileSystem, but it's
      # not supported yet (see comment there).
      return (EmptyDirFileSystem() if IsDevServer() else
              GithubFileSystem.Create(object_store_creator))

    def GetAppVersion(self):
      return GetAppVersion()

  def Get(self):
    # Crons often time out, and when they do *and* then eventually try to
    # flush logs they die. Turn off autoflush and manually do so at the end.
    logservice.AUTOFLUSH_ENABLED = False
    try:
      return self._GetImpl()
    finally:
      logservice.flush()

  def _GetImpl(self):
    # Cron strategy:
    #
    # Find all public template files and static files, and render them. Most of
    # the time these won't have changed since the last cron run, so it's a
    # little wasteful, but hopefully rendering is really fast (if it isn't we
    # have a problem).
    channel = self._channel
    logging.info('cron/%s: starting' % channel)
    # This is returned every time RenderServlet wants to create a new
    # ServerInstance.
    server_instance = self._GetSafeServerInstance()

    def get_via_render_servlet(path):
      return RenderServlet(
          Request(path, self._request.host, self._request.headers),
          _SingletonRenderServletDelegate(server_instance)).Get()

    def run_cron_for_dir(d, path_prefix=''):
      '''Renders every file under |d|; returns False if any render failed.'''
      success = True
      start_time = time.time()
      files = [f for f in server_instance.content_cache.GetFromFileListing(d)
               if not f.endswith('/')]
      logging.info('cron/%s: rendering %s files from %s...' % (
          channel, len(files), d))
      try:
        for i, f in enumerate(files):
          error = None
          path = '%s%s' % (path_prefix, f)
          try:
            response = get_via_render_servlet(path)
            if response.status != 200:
              error = 'Got %s response' % response.status
          except DeadlineExceededError:
            logging.error(
                'cron/%s: deadline exceeded rendering %s (%s of %s): %s' % (
                    channel, path, i + 1, len(files), traceback.format_exc()))
            raise
          except Exception:
            # BUG FIX: this previously read `except error:` which catches
            # nothing (|error| is None here), so any non-deadline rendering
            # exception aborted the entire cron run. Record it and continue
            # with the remaining files instead.
            error = 'Caught exception: %s' % traceback.format_exc()
          if error:
            logging.error('cron/%s: error rendering %s: %s' % (
                channel, path, error))
            success = False
      finally:
        logging.info('cron/%s: rendering %s files from %s took %s seconds' % (
            channel, len(files), d, time.time() - start_time))
      return success

    success = True
    try:
      # Render all of the publicly accessible files.
      cron_runs = [
        # Note: rendering the public templates will pull in all of the private
        # templates.
        (svn_constants.PUBLIC_TEMPLATE_PATH, ''),
        # Note: rendering the public templates will have pulled in the .js
        # and manifest.json files (for listing examples on the API reference
        # pages), but there are still images, CSS, etc.
        (svn_constants.STATIC_PATH, 'static/'),
      ]
      if not IsDevServer():
        cron_runs.append(
            (svn_constants.EXAMPLES_PATH, 'extensions/examples/'))
      # Note: don't try to short circuit any of this stuff. We want to run
      # the cron for all the directories regardless of intermediate
      # failures.
      for path, path_prefix in cron_runs:
        success = run_cron_for_dir(path, path_prefix=path_prefix) and success
      # TODO(kalman): Generic way for classes to request cron access. The next
      # two special cases are ugly. It would potentially greatly speed up cron
      # runs, too.
      # Extension examples have zip files too. Well, so do apps, but the app
      # file system doesn't get the Offline treatment so they don't need cron.
      if not IsDevServer():
        manifest_json = '/manifest.json'
        example_zips = [
            '%s.zip' % filename[:-len(manifest_json)]
            for filename in server_instance.content_cache.GetFromFileListing(
                svn_constants.EXAMPLES_PATH)
            if filename.endswith(manifest_json)]
        logging.info('cron/%s: rendering %s example zips...' % (
            channel, len(example_zips)))
        start_time = time.time()
        try:
          # BUG FIX: this was `success = success and all(...)`, which
          # short-circuits and skips rendering the zips entirely once an
          # earlier step failed — contrary to the no-short-circuit note
          # above. Always render, then fold into |success|.
          zips_success = all(
              get_via_render_servlet('extensions/examples/%s' % z).status == 200
              for z in example_zips)
          success = zips_success and success
        finally:
          logging.info('cron/%s: rendering %s example zips took %s seconds' % (
              channel, len(example_zips), time.time() - start_time))
      # Also trigger a redirect so that PathCanonicalizer has an opportunity to
      # cache file listings.
      logging.info('cron/%s: triggering a redirect...' % channel)
      redirect_response = get_via_render_servlet('storage.html')
      success = success and redirect_response.status == 302
    except DeadlineExceededError:
      success = False
    logging.info('cron/%s: finished' % channel)
    return (Response.Ok('Success') if success else
            Response.InternalError('Failure'))

  def _GetSafeServerInstance(self):
    '''Returns a ServerInstance with a host file system at a safe revision,
    meaning the last revision that the current running version of the server
    existed.
    '''
    channel = self._channel
    delegate = self._delegate
    # HEAD is only safe if app.yaml at HEAD still matches the app version
    # that is actually running; check via AppYamlHelper below.
    server_instance_at_head = self._CreateServerInstance(channel, None)
    get_branch_for_channel = self._GetBranchForChannel
    class AppYamlHelperDelegate(AppYamlHelper.Delegate):
      def GetHostFileSystemForRevision(self, revision):
        return delegate.CreateHostFileSystemForBranchAndRevision(
            get_branch_for_channel(channel),
            revision)
    app_yaml_handler = AppYamlHelper(
        svn_constants.APP_YAML_PATH,
        server_instance_at_head.host_file_system,
        AppYamlHelperDelegate(),
        server_instance_at_head.object_store_creator)
    if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
      # TODO(kalman): return a new ServerInstance at an explicit revision in
      # case the HEAD version changes underneath us.
      return server_instance_at_head
    # The version in app.yaml is greater than the currently running app's.
    # The safe version is the one before it changed.
    safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
        delegate.GetAppVersion()) - 1
    logging.info('cron/%s: app version %s is out of date, safe is %s' % (
        channel, delegate.GetAppVersion(), safe_revision))
    return self._CreateServerInstance(channel, safe_revision)

  def _CreateObjectStoreCreator(self, channel):
    # start_empty=True: the cron repopulates object stores from scratch
    # rather than reading possibly-stale cached data.
    return ObjectStoreCreator(channel, start_empty=True)

  def _GetBranchForChannel(self, channel):
    object_store_creator = self._CreateObjectStoreCreator(channel)
    return (self._delegate.CreateBranchUtility(object_store_creator)
                .GetBranchForChannel(channel))

  def _CreateServerInstance(self, channel, revision):
    '''Creates a ServerInstance with its host file system at |revision|;
    None means HEAD.
    '''
    object_store_creator = self._CreateObjectStoreCreator(channel)
    host_file_system = CachingFileSystem(
        self._delegate.CreateHostFileSystemForBranchAndRevision(
            self._GetBranchForChannel(channel),
            revision),
        object_store_creator)
    app_samples_file_system = self._delegate.CreateAppSamplesFileSystem(
        object_store_creator)
    compiled_host_fs_factory = CompiledFileSystem.Factory(
        host_file_system,
        object_store_creator)
    return ServerInstance(channel,
                          object_store_creator,
                          host_file_system,
                          app_samples_file_system,
                          '/static' if channel == 'stable' else
                          '/%s/static' % channel,
                          compiled_host_fs_factory)
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_utils import strutils
from six.moves.urllib import parse
from ironic.common import exception
from ironic.common.glance_service import service_utils as glance_service_utils
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import image_service as service
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
# Module-level logger for this driver module.
LOG = logging.getLogger(__name__)
# NOTE(rameshg87): This file now registers some of opts in pxe group.
# This is acceptable for now as a future refactoring into
# separate boot and deploy interfaces is planned, and moving config
# options twice is not recommended. Hence we would move the parameters
# to the appropriate place in the final refactoring.
pxe_opts = [
    cfg.StrOpt('pxe_append_params',
               default='nofb nomodeset vga=normal',
               help='Additional append parameters for baremetal PXE boot.'),
    cfg.StrOpt('default_ephemeral_format',
               default='ext4',
               help='Default file system format for ephemeral partition, '
                    'if one is created.'),
    cfg.StrOpt('images_path',
               default='/var/lib/ironic/images/',
               help='On the ironic-conductor node, directory where images are '
                    'stored on disk.'),
    cfg.StrOpt('instance_master_path',
               default='/var/lib/ironic/master_images',
               help='On the ironic-conductor node, directory where master '
                    'instance images are stored on disk.'),
    cfg.IntOpt('image_cache_size',
               default=20480,
               help='Maximum size (in MiB) of cache for master images, '
                    'including those in use.'),
    # 10080 here is 1 week - 60*24*7. It is entirely arbitrary in the absence
    # of a facility to disable the ttl entirely.
    cfg.IntOpt('image_cache_ttl',
               default=10080,
               help='Maximum TTL (in minutes) for old master images in '
                    'cache.'),
    cfg.StrOpt('disk_devices',
               default='cciss/c0d0,sda,hda,vda',
               help='The disk devices to scan while doing the deploy.'),
]
CONF = cfg.CONF
# Registered under the [pxe] group: values are read as CONF.pxe.<name>.
CONF.register_opts(pxe_opts, group='pxe')
@image_cache.cleanup(priority=50)
class InstanceImageCache(image_cache.ImageCache):
    """Image cache for instance images, rooted at the configured
    [pxe]instance_master_path directory.
    """

    def __init__(self, image_service=None):
        # NOTE: name the class explicitly instead of `super(self.__class__,
        # self)` — the latter recurses infinitely if this class is ever
        # subclassed, because self.__class__ is then the subclass.
        super(InstanceImageCache, self).__init__(
            CONF.pxe.instance_master_path,
            # MiB -> B
            cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
            # min -> sec
            cache_ttl=CONF.pxe.image_cache_ttl * 60,
            image_service=image_service)
def _get_image_dir_path(node_uuid):
    """Return the per-node directory that holds the instance's disk image."""
    images_root = CONF.pxe.images_path
    return os.path.join(images_root, node_uuid)
def _get_image_file_path(node_uuid):
    """Return the full path of the 'disk' image file for a node."""
    node_dir = _get_image_dir_path(node_uuid)
    return os.path.join(node_dir, 'disk')
def parse_instance_info(node):
    """Gets the instance specific Node deployment info.

    This method validates whether the 'instance_info' property of the
    supplied node contains the required information for this driver to
    deploy images to the node.

    :param node: a single Node.
    :returns: A dict with the instance_info values.
    :raises: MissingParameterValue, if any of the required parameters are
        missing.
    :raises: InvalidParameterValue, if any of the parameters have invalid
        value.
    """
    info = node.instance_info
    i_info = {}
    i_info['image_source'] = info.get('image_source')
    is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
    # Partition images need extra parameters; non-Glance sources must also
    # carry their own kernel/ramdisk references.
    if not is_whole_disk_image:
        if (i_info['image_source'] and
            not glance_service_utils.is_glance_image(i_info['image_source'])):
            i_info['kernel'] = info.get('kernel')
            i_info['ramdisk'] = info.get('ramdisk')
        i_info['root_gb'] = info.get('root_gb')
    # Everything collected so far is mandatory; missing keys raise
    # MissingParameterValue here.
    error_msg = _("Cannot validate iSCSI deploy. Some parameters were missing"
                  " in node's instance_info")
    deploy_utils.check_for_missing_params(i_info, error_msg)
    # Internal use only
    i_info['deploy_key'] = info.get('deploy_key')
    i_info['swap_mb'] = info.get('swap_mb', 0)
    i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
    # NOTE: err_msg_invalid is reused further down for preserve_ephemeral.
    err_msg_invalid = _("Cannot validate parameter for iSCSI deploy. "
                        "Invalid parameter %(param)s. Reason: %(reason)s")
    for param in ('root_gb', 'swap_mb', 'ephemeral_gb'):
        try:
            int(i_info[param])
        except ValueError:
            reason = _("%s is not an integer value.") % i_info[param]
            raise exception.InvalidParameterValue(err_msg_invalid %
                                                  {'param': param,
                                                   'reason': reason})
    # Whole disk images bring their own partitioning, so swap/ephemeral
    # requests are contradictory; return early without partition-only keys.
    if is_whole_disk_image:
        if int(i_info['swap_mb']) > 0 or int(i_info['ephemeral_gb']) > 0:
            err_msg_invalid = _("Cannot deploy whole disk image with "
                                "swap or ephemeral size set")
            raise exception.InvalidParameterValue(err_msg_invalid)
        return i_info
    # Partition-image-only options from here on.
    i_info['ephemeral_format'] = info.get('ephemeral_format')
    i_info['configdrive'] = info.get('configdrive')
    if i_info['ephemeral_gb'] and not i_info['ephemeral_format']:
        i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
    preserve_ephemeral = info.get('preserve_ephemeral', False)
    try:
        # strict=True: reject anything that is not a recognized boolean string.
        i_info['preserve_ephemeral'] = strutils.bool_from_string(
            preserve_ephemeral, strict=True)
    except ValueError as e:
        raise exception.InvalidParameterValue(err_msg_invalid %
            {'param': 'preserve_ephemeral', 'reason': e})
    return i_info
def check_image_size(task):
    """Ensure the requested image fits inside the root partition.

    :param task: a TaskManager instance containing the node to act on.
    :raises: InstanceDeployFailure if size of the image is greater than root
        partition.
    """
    instance_info = parse_instance_info(task.node)
    root_mb = 1024 * int(instance_info['root_gb'])
    image_mb = deploy_utils.get_image_mb(_get_image_file_path(task.node.uuid))
    if image_mb <= root_mb:
        return
    msg = (_('Root partition is too small for requested image. '
             'Image size: %(image_mb)d MB, Root size: %(root_mb)d MB')
           % {'image_mb': image_mb, 'root_mb': root_mb})
    raise exception.InstanceDeployFailure(msg)
def cache_instance_image(ctx, node):
    """Fetch the instance's image from Glance

    This method pulls the AMI and writes them to the appropriate place
    on local disk.

    :param ctx: context
    :param node: an ironic node object
    :returns: a tuple containing the uuid of the image and the path in
        the filesystem where image is cached.
    """
    instance_info = parse_instance_info(node)
    image_id = instance_info['image_source']
    fileutils.ensure_tree(_get_image_dir_path(node.uuid))
    disk_path = _get_image_file_path(node.uuid)
    LOG.debug("Fetching image %(ami)s for node %(uuid)s",
              {'ami': image_id, 'uuid': node.uuid})
    deploy_utils.fetch_images(ctx, InstanceImageCache(),
                              [(image_id, disk_path)],
                              CONF.force_raw_images)
    return (image_id, disk_path)
def destroy_images(node_uuid):
    """Delete instance's image file.

    :param node_uuid: the uuid of the ironic node.
    """
    disk_path = _get_image_file_path(node_uuid)
    image_dir = _get_image_dir_path(node_uuid)
    # Best-effort removal: neither helper raises if the path is gone.
    utils.unlink_without_raise(disk_path)
    utils.rmtree_without_raise(image_dir)
    InstanceImageCache().clean_up()
def get_deploy_info(node, **kwargs):
    """Returns the information required for doing iSCSI deploy in a dictionary.

    :param node: ironic node object
    :param kwargs: the keyword args passed from the conductor node.
    :raises: MissingParameterValue, if some required parameters were not
        passed.
    :raises: InvalidParameterValue, if any of the parameters have invalid
        value.
    """
    i_info = parse_instance_info(node)
    # The ramdisk must echo back the deploy key that was stored on the node.
    if i_info['deploy_key'] != kwargs.get('key'):
        raise exception.InvalidParameterValue(_("Deploy key does not match"))
    params = {
        'address': kwargs.get('address'),
        'port': kwargs.get('port', '3260'),
        'iqn': kwargs.get('iqn'),
        'lun': kwargs.get('lun', '1'),
        'image_path': _get_image_file_path(node.uuid),
        'node_uuid': node.uuid,
    }
    is_whole_disk_image = node.driver_internal_info['is_whole_disk_image']
    if not is_whole_disk_image:
        # Partition images additionally need sizing and boot information.
        params['root_mb'] = 1024 * int(i_info['root_gb'])
        params['swap_mb'] = int(i_info['swap_mb'])
        params['ephemeral_mb'] = 1024 * int(i_info['ephemeral_gb'])
        params['preserve_ephemeral'] = i_info['preserve_ephemeral']
        params['boot_option'] = get_boot_option(node)
        params['boot_mode'] = _get_boot_mode(node)
    # Any None value means the conductor was not passed everything it needs.
    missing = [key for key in params if params[key] is None]
    if missing:
        raise exception.MissingParameterValue(_(
            "Parameters %s were not passed to ironic"
            " for deploy.") % missing)
    if is_whole_disk_image:
        return params
    # configdrive and ephemeral_format are nullable
    params['ephemeral_format'] = i_info.get('ephemeral_format')
    params['configdrive'] = i_info.get('configdrive')
    return params
def continue_deploy(task, **kwargs):
    """Resume a deployment upon getting POST data from deploy ramdisk.

    This method raises no exceptions because it is intended to be
    invoked asynchronously as a callback from the deploy ramdisk.

    :param task: a TaskManager instance containing the node to act on.
    :param kwargs: the kwargs to be passed to deploy.
    :raises: InvalidState if the event is not allowed by the associated
        state machine.
    :returns: a dictionary containing the following keys:
        For partition image:
            'root uuid': UUID of root partition
            'efi system partition uuid': UUID of the uefi system partition
            (if boot mode is uefi).
            NOTE: If key exists but value is None, it means partition doesn't
            exist.
        For whole disk image:
            'disk identifier': ID of the disk to which image was deployed.
    """
    node = task.node
    params = get_deploy_info(node, **kwargs)
    ramdisk_error = kwargs.get('error')
    def _fail_deploy(task, msg):
        """Fail the deploy after logging and setting error states."""
        LOG.error(msg)
        deploy_utils.set_failed_state(task, msg)
        destroy_images(task.node.uuid)
        raise exception.InstanceDeployFailure(msg)
    if ramdisk_error:
        msg = _('Error returned from deploy ramdisk: %s') % ramdisk_error
        _fail_deploy(task, msg)
    # NOTE(lucasagomes): Let's make sure we don't log the full content
    # of the config drive here because it can be up to 64MB in size,
    # so instead let's log "***" in case config drive is enabled.
    # NOTE(review): 'logging' is ironic.openstack.common.log; logging.logging
    # is presumably the stdlib logging module it wraps — confirm.
    if LOG.isEnabledFor(logging.logging.DEBUG):
        log_params = {
            k: params[k] if k != 'configdrive' else '***'
            for k in params.keys()
        }
        LOG.debug('Continuing deployment for node %(node)s, params %(params)s',
                  {'node': node.uuid, 'params': log_params})
    uuid_dict_returned = {}
    try:
        # Write the image to the node's disk over iSCSI; the two helpers
        # differ in whether they create partitions first.
        if node.driver_internal_info['is_whole_disk_image']:
            uuid_dict_returned = deploy_utils.deploy_disk_image(**params)
        else:
            uuid_dict_returned = deploy_utils.deploy_partition_image(**params)
    except Exception as e:
        msg = (_('Deploy failed for instance %(instance)s. '
                 'Error: %(error)s') %
               {'instance': node.instance_uuid, 'error': e})
        _fail_deploy(task, msg)
    root_uuid_or_disk_id = uuid_dict_returned.get(
        'root uuid', uuid_dict_returned.get('disk identifier'))
    if not root_uuid_or_disk_id:
        msg = (_("Couldn't determine the UUID of the root "
                 "partition or the disk identifier after deploying "
                 "node %s") % node.uuid)
        _fail_deploy(task, msg)
    # Deploy succeeded; the locally cached image copy is no longer needed.
    destroy_images(node.uuid)
    return uuid_dict_returned
def do_agent_iscsi_deploy(task, agent_client):
    """Method invoked when deployed with the agent ramdisk.

    This method is invoked by drivers for doing iSCSI deploy
    using agent ramdisk. This method assumes that the agent
    is booted up on the node and is heartbeating.

    :param task: a TaskManager object containing the node.
    :param agent_client: an instance of agent_client.AgentClient
        which will be used during iscsi deploy (for exposing node's
        target disk via iSCSI, for install boot loader, etc).
    :returns: a dictionary containing the following keys:
        For partition image:
            'root uuid': UUID of root partition
            'efi system partition uuid': UUID of the uefi system partition
            (if boot mode is uefi).
            NOTE: If key exists but value is None, it means partition doesn't
            exist.
        For whole disk image:
            'disk identifier': ID of the disk to which image was deployed.
    :raises: InstanceDeployFailure, if it encounters some error
        during the deploy.
    """
    node = task.node
    # Also persists the deploy key on the node (via node.save()).
    iscsi_options = build_deploy_ramdisk_options(node)
    iqn = iscsi_options['iscsi_target_iqn']
    # Ask the agent to expose the node's disk as an iSCSI target.
    result = agent_client.start_iscsi_target(node, iqn)
    if result['command_status'] == 'FAILED':
        msg = (_("Failed to start the iSCSI target to deploy the "
                 "node %(node)s. Error: %(error)s") %
               {'node': node.uuid, 'error': result['command_error']})
        deploy_utils.set_failed_state(task, msg)
        raise exception.InstanceDeployFailure(reason=msg)
    # The agent's callback URL gives us the address the target is reachable
    # on; only the hostname part is needed.
    address = parse.urlparse(node.driver_internal_info['agent_url'])
    address = address.hostname
    # TODO(lucasagomes): The 'error' and 'key' parameters in the
    # dictionary below are just being passed because it's needed for
    # the iscsi_deploy.continue_deploy() method, we are fooling it
    # for now. The agent driver doesn't use/need those. So we need to
    # refactor this bits here later.
    iscsi_params = {'error': result['command_error'],
                    'iqn': iqn,
                    'key': iscsi_options['deployment_key'],
                    'address': address}
    uuid_dict_returned = continue_deploy(task, **iscsi_params)
    root_uuid_or_disk_id = uuid_dict_returned.get(
        'root uuid', uuid_dict_returned.get('disk identifier'))
    # TODO(lucasagomes): Move this bit saving the root_uuid to
    # iscsi_deploy.continue_deploy()
    driver_internal_info = node.driver_internal_info
    driver_internal_info['root_uuid_or_disk_id'] = root_uuid_or_disk_id
    node.driver_internal_info = driver_internal_info
    node.save()
    return uuid_dict_returned
def get_boot_option(node):
    """Return the requested boot option for the node.

    :param node: A single Node.
    :raises: InvalidParameterValue if the capabilities string is not a
        dict or is malformed.
    :returns: A string representing the boot option type. Defaults to
        'netboot'.
    """
    caps = deploy_utils.parse_instance_info_capabilities(node)
    option = caps.get('boot_option', 'netboot')
    return option.lower()
def _get_boot_mode(node):
    """Return the boot mode for the node, falling back to 'bios'.

    :param node: A single Node.
    :returns: A string representing the boot mode type. Defaults to 'bios'.
    """
    # Any falsy value from the helper means "unspecified" -> use 'bios'.
    return deploy_utils.get_boot_mode_for_deploy(node) or "bios"
def build_deploy_ramdisk_options(node):
    """Build the ramdisk config options for a node

    This method builds the ramdisk options for a node,
    given all the required parameters for doing iscsi deploy.

    :param node: a single Node.
    :returns: A dictionary of options to be passed to ramdisk for performing
        the deploy.
    """
    # NOTE: we should strip '/' from the end because this is intended for
    # hardcoded ramdisk script
    ironic_api = (CONF.conductor.api_url or
                  keystone.get_service_url()).rstrip('/')
    # One-time key the ramdisk must echo back; persisted on the node so
    # get_deploy_info() can verify it on the callback.
    deploy_key = utils.random_alnum(32)
    i_info = node.instance_info
    i_info['deploy_key'] = deploy_key
    node.instance_info = i_info
    node.save()
    # XXX(jroll) DIB relies on boot_option=local to decide whether or not to
    # lay down a bootloader. Hack this for now; fix it for real in Liberty.
    # See also bug #1441556.
    boot_option = get_boot_option(node)
    if node.driver_internal_info.get('is_whole_disk_image'):
        boot_option = 'netboot'
    deploy_options = {
        'deployment_id': node['uuid'],
        'deployment_key': deploy_key,
        'iscsi_target_iqn': "iqn-%s" % node.uuid,
        'ironic_api_url': ironic_api,
        'disk': CONF.pxe.disk_devices,
        'boot_option': boot_option,
        'boot_mode': _get_boot_mode(node),
        # NOTE: The below entry is a temporary workaround for bug/1433812
        'coreos.configdrive': 0,
    }
    root_device = deploy_utils.parse_root_device_hints(node)
    if root_device:
        deploy_options['root_device'] = root_device
    return deploy_options
def validate_image_properties(ctx, deploy_info, properties):
    """Validate the image referenced by the deploy info.

    For Glance images this checks that the image exists in Glance and that
    its properties or the deployment info contain the requested properties.
    For non-Glance images it checks that the deployment info contains the
    needed properties.

    :param ctx: security context
    :param deploy_info: the deploy_info to be validated
    :param properties: the list of image meta-properties to be validated.
    :raises: InvalidParameterValue if:
        * connection to glance failed;
        * authorization for accessing image failed;
        * HEAD request to image URL failed or returned response code != 200;
        * HEAD request response does not contain Content-Length header;
        * the protocol specified in image URL is not supported.
    :raises: MissingParameterValue if the image doesn't contain
        the mentioned properties.
    """
    image_href = deploy_info['image_source']
    try:
        img_service = service.get_image_service(image_href, context=ctx)
        image_props = img_service.show(image_href)['properties']
    except (exception.GlanceConnectionFailed,
            exception.ImageNotAuthorized,
            exception.Invalid):
        raise exception.InvalidParameterValue(_(
            "Failed to connect to Glance to get the properties "
            "of the image %s") % image_href)
    except exception.ImageNotFound:
        raise exception.InvalidParameterValue(_(
            "Image %s can not be found.") % image_href)
    except exception.ImageRefValidationFailed as e:
        raise exception.InvalidParameterValue(e)

    # A property may come from either the deploy info or the image itself.
    missing_props = [p for p in properties
                     if not (deploy_info.get(p) or image_props.get(p))]
    if missing_props:
        raise exception.MissingParameterValue(_(
            "Image %(image)s is missing the following properties: "
            "%(properties)s") % {'image': image_href,
                                 'properties': ', '.join(missing_props)})
def validate(task):
    """Validate the pre-requisites for iSCSI deploy.

    Checks that the node in the given task has at least one port enrolled,
    and that the URL of the Ironic API service can be resolved either from
    the CONF file or from the keystone catalog.

    :param task: a TaskManager instance containing the node to act on.
    :raises: InvalidParameterValue if the URL of the Ironic API service is not
        configured in config file and is not accessible via Keystone
        catalog.
    :raises: MissingParameterValue if no ports are enrolled for the given node.
    """
    node = task.node
    macs = driver_utils.get_node_mac_addresses(task)
    if not macs:
        raise exception.MissingParameterValue(_("Node %s does not have "
                            "any port associated with it.") % node.uuid)
    try:
        # TODO(lucasagomes): Validate the format of the URL
        CONF.conductor.api_url or keystone.get_service_url()
    except (exception.KeystoneFailure,
            exception.CatalogNotFound,
            exception.KeystoneUnauthorized) as e:
        raise exception.InvalidParameterValue(_(
            "Couldn't get the URL of the Ironic API service from the "
            "configuration file or keystone catalog. Keystone error: %s") % e)
    # Validate the root device hints
    deploy_utils.parse_root_device_hints(node)
def validate_pass_bootloader_info_input(task, input_params):
    """Validate the input of the 'pass_bootloader_info' passthru.

    Checks that all mandatory parameters were supplied and that the deploy
    key sent by the ramdisk matches the one stored on the node.

    :param task: A TaskManager object.
    :param input_params: A dictionary of params sent as input to passthru.
    :raises: InvalidParameterValue, if deploy key passed doesn't match the
        one stored in instance_info.
    :raises: MissingParameterValue, if some input is missing.
    """
    params = {name: input_params.get(name)
              for name in ('address', 'key', 'status')}
    msg = _("Some mandatory input missing in 'pass_bootloader_info' "
            "vendor passthru from ramdisk.")
    deploy_utils.check_for_missing_params(params, msg)
    expected_key = task.node.instance_info['deploy_key']
    sent_key = input_params.get('key')
    if sent_key != expected_key:
        raise exception.InvalidParameterValue(
            _("Deploy key %(key_sent)s does not match "
              "with %(expected_key)s") %
            {'key_sent': sent_key, 'expected_key': expected_key})
def validate_bootloader_install_status(task, input_params):
    """Validate that the ramdisk reported a successful bootloader install.

    :param task: A TaskManager object.
    :param input_params: A dictionary of params sent as input to passthru.
    :raises: InstanceDeployFailure, if bootloader installation was
        reported from ramdisk as failure.
    """
    node = task.node
    if input_params['status'] == 'SUCCEEDED':
        LOG.info(_LI('Bootloader successfully installed on node %s'),
                 node.uuid)
        return
    # Any status other than SUCCEEDED means the install failed: record the
    # failure on the node, then raise.
    msg = (_('Failed to install bootloader on node %(node)s. '
             'Error: %(error)s.') %
           {'node': node.uuid, 'error': input_params.get('error')})
    LOG.error(msg)
    deploy_utils.set_failed_state(task, msg)
    raise exception.InstanceDeployFailure(msg)
def finish_deploy(task, address):
    """Notifies the ramdisk to reboot the node and makes the instance active.

    This method notifies the ramdisk to proceed to reboot and then
    makes the instance active.

    :param task: a TaskManager object.
    :param address: The IP address of the bare metal node.
    :raises: InstanceDeployFailure, if notifying ramdisk failed.
    """
    node = task.node
    try:
        deploy_utils.notify_ramdisk_to_proceed(address)
    except Exception as e:
        # Any failure to reach the ramdisk aborts the deploy: record the
        # failure on the node before re-raising as InstanceDeployFailure.
        LOG.error(_LE('Deploy failed for instance %(instance)s. '
                      'Error: %(error)s'),
                  {'instance': node.instance_uuid, 'error': e})
        msg = (_('Failed to notify ramdisk to reboot after bootloader '
                 'installation. Error: %s') % e)
        deploy_utils.set_failed_state(task, msg)
        raise exception.InstanceDeployFailure(msg)
    # TODO(lucasagomes): When deploying a node with the DIB ramdisk
    # Ironic will not power control the node at the end of the deployment,
    # it's the DIB ramdisk that reboots the node. But, for the SSH driver
    # some changes like setting the boot device only gets applied when the
    # machine is powered off and on again. So the code below is enforcing
    # it. For Liberty we need to change the DIB ramdisk so that Ironic
    # always controls the power state of the node for all drivers.
    if get_boot_option(node) == "local" and 'ssh' in node.driver:
        manager_utils.node_power_action(task, states.REBOOT)
    LOG.info(_LI('Deployment to node %s done'), node.uuid)
    task.process_event('done')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/RiskAssessment) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class RiskAssessment(domainresource.DomainResource):
    """ Potential outcomes for a subject with likelihood.

    An assessment of the likely outcome(s) for a patient or other subject as
    well as the likelihood of each outcome.
    """
    # NOTE: this class is generated from the FHIR 4.0.0 StructureDefinition
    # (see module header); prefer fixing the generator over hand-editing.
    resource_type = "RiskAssessment"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.basedOn = None
        """ Request fulfilled by this assessment.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.basis = None
        """ Information used in assessment.
        List of `FHIRReference` items (represented as `dict` in JSON). """
        self.code = None
        """ Type of assessment.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.condition = None
        """ Condition assessed.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.encounter = None
        """ Where was assessment performed?.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.identifier = None
        """ Unique identifier for the assessment.
        List of `Identifier` items (represented as `dict` in JSON). """
        self.method = None
        """ Evaluation mechanism.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.mitigation = None
        """ How to reduce risk.
        Type `str`. """
        self.note = None
        """ Comments on the risk assessment.
        List of `Annotation` items (represented as `dict` in JSON). """
        self.occurrenceDateTime = None
        """ When was assessment made?.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.occurrencePeriod = None
        """ When was assessment made?.
        Type `Period` (represented as `dict` in JSON). """
        self.parent = None
        """ Part of this occurrence.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.performer = None
        """ Who did assessment?.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.prediction = None
        """ Outcome predicted.
        List of `RiskAssessmentPrediction` items (represented as `dict` in JSON). """
        self.reasonCode = None
        """ Why the assessment was necessary?.
        List of `CodeableConcept` items (represented as `dict` in JSON). """
        self.reasonReference = None
        """ Why the assessment was necessary?.
        List of `FHIRReference` items (represented as `dict` in JSON). """
        self.status = None
        """ registered | preliminary | final | amended +.
        Type `str`. """
        self.subject = None
        """ Who/what does assessment apply to?.
        Type `FHIRReference` (represented as `dict` in JSON). """
        # The superclass applies jsondict (validating when strict) after all
        # attributes above exist.
        super(RiskAssessment, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Each tuple appears to be (attribute name, JSON name, type, is_list,
        # "one of many" group, required) — presumably consumed by the
        # FHIRAbstractBase serialization machinery; verify against that class.
        js = super(RiskAssessment, self).elementProperties()
        js.extend([
            ("basedOn", "basedOn", fhirreference.FHIRReference, False, None, False),
            ("basis", "basis", fhirreference.FHIRReference, True, None, False),
            ("code", "code", codeableconcept.CodeableConcept, False, None, False),
            ("condition", "condition", fhirreference.FHIRReference, False, None, False),
            ("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("method", "method", codeableconcept.CodeableConcept, False, None, False),
            ("mitigation", "mitigation", str, False, None, False),
            ("note", "note", annotation.Annotation, True, None, False),
            ("occurrenceDateTime", "occurrenceDateTime", fhirdate.FHIRDate, False, "occurrence", False),
            ("occurrencePeriod", "occurrencePeriod", period.Period, False, "occurrence", False),
            ("parent", "parent", fhirreference.FHIRReference, False, None, False),
            ("performer", "performer", fhirreference.FHIRReference, False, None, False),
            ("prediction", "prediction", RiskAssessmentPrediction, True, None, False),
            ("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
            ("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, True),
        ])
        return js
from . import backboneelement
class RiskAssessmentPrediction(backboneelement.BackboneElement):
    """ Outcome predicted.

    Describes the expected outcome for the subject.
    """
    # NOTE: generated code (see module header); see RiskAssessment above for
    # the same pattern.
    resource_type = "RiskAssessmentPrediction"
    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.
        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.outcome = None
        """ Possible outcome for the subject.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.probabilityDecimal = None
        """ Likelihood of specified outcome.
        Type `float`. """
        self.probabilityRange = None
        """ Likelihood of specified outcome.
        Type `Range` (represented as `dict` in JSON). """
        self.qualitativeRisk = None
        """ Likelihood of specified outcome as a qualitative value.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.rationale = None
        """ Explanation of prediction.
        Type `str`. """
        self.relativeRisk = None
        """ Relative likelihood.
        Type `float`. """
        self.whenPeriod = None
        """ Timeframe or age range.
        Type `Period` (represented as `dict` in JSON). """
        self.whenRange = None
        """ Timeframe or age range.
        Type `Range` (represented as `dict` in JSON). """
        super(RiskAssessmentPrediction, self).__init__(jsondict=jsondict, strict=strict)
    def elementProperties(self):
        # Tuple layout matches RiskAssessment.elementProperties; note the
        # "probability" and "when" choice-of-type groups.
        js = super(RiskAssessmentPrediction, self).elementProperties()
        js.extend([
            ("outcome", "outcome", codeableconcept.CodeableConcept, False, None, False),
            ("probabilityDecimal", "probabilityDecimal", float, False, "probability", False),
            ("probabilityRange", "probabilityRange", range.Range, False, "probability", False),
            ("qualitativeRisk", "qualitativeRisk", codeableconcept.CodeableConcept, False, None, False),
            ("rationale", "rationale", str, False, None, False),
            ("relativeRisk", "relativeRisk", float, False, None, False),
            ("whenPeriod", "whenPeriod", period.Period, False, "when", False),
            ("whenRange", "whenRange", range.Range, False, "when", False),
        ])
        return js
# Sibling FHIR element modules are imported here at the bottom of the file,
# presumably to tolerate circular imports between resource modules: when the
# `from . import X` form fails mid-import, fall back to the (possibly
# partially initialized) module object already registered in sys.modules.
import sys
try:
    from . import annotation
except ImportError:
    annotation = sys.modules[__package__ + '.annotation']
try:
    from . import codeableconcept
except ImportError:
    codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
    from . import fhirdate
except ImportError:
    fhirdate = sys.modules[__package__ + '.fhirdate']
try:
    from . import fhirreference
except ImportError:
    fhirreference = sys.modules[__package__ + '.fhirreference']
try:
    from . import identifier
except ImportError:
    identifier = sys.modules[__package__ + '.identifier']
try:
    from . import period
except ImportError:
    period = sys.modules[__package__ + '.period']
try:
    # NOTE: this shadows the builtin `range` at module scope (generated code).
    from . import range
except ImportError:
    range = sys.modules[__package__ + '.range']
| |
# -*- coding: utf-8 -*-
import weakref
from ..Qt import QtCore, QtGui
from .Container import *
from .DockDrop import *
from .Dock import Dock
from .. import debug as debug
from ..python2_3 import basestring
class DockArea(Container, QtGui.QWidget, DockDrop):
    """Top-level widget managing an arrangement of Docks.

    A DockArea is simultaneously a QWidget, the root Container (type 'top')
    holding a single ``topContainer``, and a DockDrop target. It can spawn
    temporary floating areas (wrapped in TempAreaWindow) when docks are
    floated; temporary areas keep a reference to their ``home`` area.
    """
    def __init__(self, temporary=False, home=None):
        Container.__init__(self, self)
        QtGui.QWidget.__init__(self)
        DockDrop.__init__(self, allowedAreas=['left', 'right', 'top', 'bottom'])
        self.layout = QtGui.QVBoxLayout()
        self.layout.setContentsMargins(0,0,0,0)
        self.layout.setSpacing(0)
        self.setLayout(self.layout)
        # Registry of docks by name. Weak values so this registry does not
        # keep a dock alive after it is closed elsewhere.
        self.docks = weakref.WeakValueDictionary()
        self.topContainer = None
        self.raiseOverlay()
        # Temporary areas are floating windows owned by a 'home' area; they
        # remove themselves from home.tempAreas when emptied (see apoptose).
        self.temporary = temporary
        self.tempAreas = []
        self.home = home
    def type(self):
        # Container-protocol type tag: the area itself is always the root.
        return "top"
    def addDock(self, dock=None, position='bottom', relativeTo=None, **kwds):
        """Adds a dock to this area.

        ============== =================================================================
        **Arguments:**
        dock           The new Dock object to add. If None, then a new Dock will be
                       created.
        position       'bottom', 'top', 'left', 'right', 'above', or 'below'
        relativeTo     If relativeTo is None, then the new Dock is added to fill an
                       entire edge of the window. If relativeTo is another Dock, then
                       the new Dock is placed adjacent to it (or in a tabbed
                       configuration for 'above' and 'below').
        ============== =================================================================

        All extra keyword arguments are passed to Dock.__init__() if *dock* is
        None.
        """
        if dock is None:
            dock = Dock(**kwds)
        ## Determine the container to insert this dock into.
        ## If there is no neighbor, then the container is the top.
        if relativeTo is None or relativeTo is self:
            if self.topContainer is None:
                container = self
                neighbor = None
            else:
                container = self.topContainer
                neighbor = None
        else:
            # relativeTo may be given as a dock name rather than a Dock.
            if isinstance(relativeTo, basestring):
                relativeTo = self.docks[relativeTo]
            container = self.getContainer(relativeTo)
            if container is None:
                raise TypeError("Dock %s is not contained in a DockArea; cannot add another dock relative to it." % relativeTo)
            neighbor = relativeTo
        ## what container type do we need?
        neededContainer = {
            'bottom': 'vertical',
            'top': 'vertical',
            'left': 'horizontal',
            'right': 'horizontal',
            'above': 'tab',
            'below': 'tab'
        }[position]
        ## Can't insert new containers into a tab container; insert outside instead.
        if neededContainer != container.type() and container.type() == 'tab':
            neighbor = container
            container = container.container()
        ## Decide if the container we have is suitable.
        ## If not, insert a new container inside.
        if neededContainer != container.type():
            if neighbor is None:
                container = self.addContainer(neededContainer, self.topContainer)
            else:
                container = self.addContainer(neededContainer, neighbor)
        ## Insert the new dock before/after its neighbor
        insertPos = {
            'bottom': 'after',
            'top': 'before',
            'left': 'before',
            'right': 'after',
            'above': 'before',
            'below': 'after'
        }[position]
        #print "request insert", dock, insertPos, neighbor
        # Remember the dock's previous container so it can clean itself up
        # (apoptose) once the dock has been re-parented.
        old = dock.container()
        container.insert(dock, insertPos, neighbor)
        self.docks[dock.name()] = dock
        if old is not None:
            old.apoptose()
        return dock
    def moveDock(self, dock, position, neighbor):
        """
        Move an existing Dock to a new location.
        """
        ## Moving to the edge of a tabbed dock causes a drop outside the tab box
        if position in ['left', 'right', 'top', 'bottom'] and neighbor is not None and neighbor.container() is not None and neighbor.container().type() == 'tab':
            neighbor = neighbor.container()
        self.addDock(dock, position, neighbor)
    def getContainer(self, obj):
        # The container of None (i.e. "no neighbor") is the area itself.
        if obj is None:
            return self
        return obj.container()
    def makeContainer(self, typ):
        # Factory for the three container flavors.
        # NOTE(review): an unrecognized typ leaves `new` unbound and raises
        # UnboundLocalError here rather than a descriptive error.
        if typ == 'vertical':
            new = VContainer(self)
        elif typ == 'horizontal':
            new = HContainer(self)
        elif typ == 'tab':
            new = TContainer(self)
        return new
    def addContainer(self, typ, obj):
        """Add a new container around obj"""
        new = self.makeContainer(typ)
        container = self.getContainer(obj)
        container.insert(new, 'before', obj)
        #print "Add container:", new, " -> ", container
        if obj is not None:
            new.insert(obj)
        self.raiseOverlay()
        return new
    def insert(self, new, pos=None, neighbor=None):
        # Container-protocol insert: the area holds exactly one child, the
        # topContainer; pos/neighbor are accepted but unused at this level.
        if self.topContainer is not None:
            # Adding new top-level container; addContainer() should
            # take care of giving the old top container a new home.
            self.topContainer.containerChanged(None)
        self.layout.addWidget(new)
        new.containerChanged(self)
        self.topContainer = new
        self.raiseOverlay()
    def count(self):
        # Container-protocol child count: 0 or 1 (the topContainer).
        if self.topContainer is None:
            return 0
        return 1
    def resizeEvent(self, ev):
        # Keep the drag/drop overlay sized to match the area.
        self.resizeOverlay(self.size())
    def addTempArea(self):
        # Temporary areas are always created/tracked by the root ('home')
        # area; a temporary area delegates back to its home.
        if self.home is None:
            area = DockArea(temporary=True, home=self)
            self.tempAreas.append(area)
            win = TempAreaWindow(area)
            area.win = win
            win.show()
        else:
            area = self.home.addTempArea()
        #print "added temp area", area, area.window()
        return area
    def floatDock(self, dock):
        """Removes *dock* from this DockArea and places it in a new window."""
        area = self.addTempArea()
        area.win.resize(dock.size())
        area.moveDock(dock, 'top', None)
    def removeTempArea(self, area):
        # Called by a temporary area's apoptose() once it is empty.
        self.tempAreas.remove(area)
        #print "close window", area.window()
        area.window().close()
    def saveState(self):
        """
        Return a serialized (storable) representation of the state of
        all Docks in this DockArea."""
        if self.topContainer is None:
            main = None
        else:
            main = self.childState(self.topContainer)
        # 'float' holds (state, window geometry) pairs for each temp area.
        state = {'main': main, 'float': []}
        for a in self.tempAreas:
            geo = a.win.geometry()
            geo = (geo.x(), geo.y(), geo.width(), geo.height())
            state['float'].append((a.saveState(), geo))
        return state
    def childState(self, obj):
        # Recursively serialize a container tree into nested
        # (type, contents, state) tuples; docks are leaves.
        if isinstance(obj, Dock):
            return ('dock', obj.name(), {})
        else:
            childs = []
            for i in range(obj.count()):
                childs.append(self.childState(obj.widget(i)))
            return (obj.type(), childs, obj.saveState())
    def restoreState(self, state, missing='error', extra='bottom'):
        """
        Restore Dock configuration as generated by saveState.

        This function does not create any Docks--it will only
        restore the arrangement of an existing set of Docks.

        By default, docks that are described in *state* but do not exist
        in the dock area will cause an exception to be raised. This behavior
        can be changed by setting *missing* to 'ignore' or 'create'.

        Extra docks that are in the dockarea but that are not mentioned in
        *state* will be added to the bottom of the dockarea, unless otherwise
        specified by the *extra* argument.
        """
        ## 1) make dict of all docks and list of existing containers
        containers, docks = self.findAll()
        oldTemps = self.tempAreas[:]
        #print "found docks:", docks
        ## 2) create container structure, move docks into new containers
        if state['main'] is not None:
            self.buildFromState(state['main'], docks, self, missing=missing)
        ## 3) create floating areas, populate
        for s in state['float']:
            a = self.addTempArea()
            a.buildFromState(s[0]['main'], docks, a, missing=missing)
            a.win.setGeometry(*s[1])
            a.apoptose()  # ask temp area to close itself if it is empty
        ## 4) Add any remaining docks to a float
        for d in docks.values():
            if extra == 'float':
                a = self.addTempArea()
                a.addDock(d, 'below')
            else:
                self.moveDock(d, extra, None)
        #print "\nKill old containers:"
        ## 5) kill old containers
        for c in containers:
            c.close()
        for a in oldTemps:
            a.apoptose()
    def buildFromState(self, state, docks, root, depth=0, missing='error'):
        # Recursively rebuild the container tree described by a childState()
        # tuple, pulling docks out of the *docks* dict as they are placed.
        typ, contents, state = state
        pfx = "  " * depth
        if typ == 'dock':
            try:
                obj = docks[contents]
                del docks[contents]
            except KeyError:
                if missing == 'error':
                    raise Exception('Cannot restore dock state; no dock with name "%s"' % contents)
                elif missing == 'create':
                    obj = Dock(name=contents)
                elif missing == 'ignore':
                    return
                else:
                    raise ValueError('"missing" argument must be one of "error", "create", or "ignore".')
        else:
            obj = self.makeContainer(typ)
        root.insert(obj, 'after')
        #print pfx+"Add:", obj, " -> ", root
        if typ != 'dock':
            for o in contents:
                self.buildFromState(o, docks, obj, depth+1, missing=missing)
            # remove this container if possible. (there are valid situations when a restore will
            # generate empty containers, such as when using missing='ignore')
            obj.apoptose(propagate=False)
            obj.restoreState(state)  ## this has to be done later?
    def findAll(self, obj=None, c=None, d=None):
        # Walk the container tree, returning (list of containers,
        # {name: Dock}) for this area plus all of its temp areas.
        if obj is None:
            obj = self.topContainer
        ## check all temp areas first
        if c is None:
            c = []
            d = {}
            for a in self.tempAreas:
                c1, d1 = a.findAll()
                c.extend(c1)
                d.update(d1)
        if isinstance(obj, Dock):
            d[obj.name()] = obj
        elif obj is not None:
            c.append(obj)
            for i in range(obj.count()):
                o2 = obj.widget(i)
                c2, d2 = self.findAll(o2)
                c.extend(c2)
                d.update(d2)
        return (c, d)
    def apoptose(self, propagate=True):
        # remove top container if possible, close this area if it is temporary.
        #print "apoptose area:", self.temporary, self.topContainer, self.topContainer.count()
        if self.topContainer is None or self.topContainer.count() == 0:
            self.topContainer = None
            if self.temporary:
                self.home.removeTempArea(self)
                #self.close()
    def clear(self):
        # Close every dock in this area (and its temp areas).
        docks = self.findAll()[1]
        for dock in docks.values():
            dock.close()
    ## PySide bug: We need to explicitly redefine these methods
    ## or else drag/drop events will not be delivered.
    def dragEnterEvent(self, *args):
        DockDrop.dragEnterEvent(self, *args)
    def dragMoveEvent(self, *args):
        DockDrop.dragMoveEvent(self, *args)
    def dragLeaveEvent(self, *args):
        DockDrop.dragLeaveEvent(self, *args)
    def dropEvent(self, *args):
        DockDrop.dropEvent(self, *args)
    def printState(self, state=None, name='Main'):
        # for debugging
        if state is None:
            state = self.saveState()
        print("=== %s dock area ===" % name)
        if state['main'] is None:
            print("   (empty)")
        else:
            self._printAreaState(state['main'])
        for i, float in enumerate(state['float']):
            self.printState(float[0], name='float %d' % i)
    def _printAreaState(self, area, indent=0):
        # Debug helper: recursively pretty-print a childState() tuple.
        if area[0] == 'dock':
            print("  " * indent + area[0] + " " + str(area[1:]))
            return
        else:
            print("  " * indent + area[0])
            for ch in area[1]:
                self._printAreaState(ch, indent+1)
class TempAreaWindow(QtGui.QWidget):
    """Top-level window hosting a temporary (floating) DockArea."""
    def __init__(self, area, **kwargs):
        QtGui.QWidget.__init__(self, **kwargs)
        grid = QtGui.QGridLayout()
        grid.setContentsMargins(0, 0, 0, 0)
        self.layout = grid
        self.setLayout(grid)
        self.dockarea = area
        grid.addWidget(area)
    def closeEvent(self, *args):
        # Close all docks in the area before the window itself goes away.
        self.dockarea.clear()
        QtGui.QWidget.closeEvent(self, *args)
| |
import numpy
import chainer
from chainer.backends import cuda
from chainer.functions.activation import lstm
from chainer.functions.array import reshape
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.connection import n_step_rnn
from chainer.utils import argument
# Alias the cuDNN handles at module scope when CuPy was built with cuDNN
# support; presumably used by the cuDNN-backed n-step RNN path — verify
# against the n_step_rnn module.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cuda.cuda.cudnn
def _stack_weight(ws):
    """Stack per-gate weight variables along axis 1 and flatten to 2-D.

    TODO(unno): Input of the current LSTM implementation is shuffled.
    """
    stacked = stack.stack(ws, axis=1)
    dim0, dim1 = stacked.shape[0], stacked.shape[1]
    return reshape.reshape(stacked, (dim0 * dim1,) + stacked.shape[2:])
class NStepLSTM(n_step_rnn.BaseNStepRNN):
    """Uni-directional stacked n-step LSTM function node."""
    def __init__(self, n_layers, states, lengths):
        super(NStepLSTM, self).__init__(
            n_layers, states, lengths, rnn_dir='uni', rnn_mode='lstm')
class NStepBiLSTM(n_step_rnn.BaseNStepRNN):
    """Bi-directional stacked n-step LSTM function node."""
    def __init__(self, n_layers, states, lengths):
        super(NStepBiLSTM, self).__init__(
            n_layers, states, lengths, rnn_dir='bi', rnn_mode='lstm')
def n_step_lstm(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
    """n_step_lstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)

    Stacked Uni-directional Long Short-Term Memory function.

    This function calculates stacked Uni-directional LSTM with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial cell
    state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
    and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for each
    time :math:`t` from input :math:`x_t`.

    .. math::
       i_t &= \\sigma(W_0 x_t + W_4 h_{t-1} + b_0 + b_4) \\\\
       f_t &= \\sigma(W_1 x_t + W_5 h_{t-1} + b_1 + b_5) \\\\
       o_t &= \\sigma(W_2 x_t + W_6 h_{t-1} + b_2 + b_6) \\\\
       a_t &= \\tanh(W_3 x_t + W_7 h_{t-1} + b_3 + b_7) \\\\
       c_t &= f_t \\cdot c_{t-1} + i_t \\cdot a_t \\\\
       h_t &= o_t \\cdot \\tanh(c_t)

    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Eight weight matrices and eight bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`8S` weight matrices and :math:`8S` bias vectors.

    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except the first layer may have different
    shape from the first layer.

    .. warning::
       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing eight matrices.
            ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(I, N)``-shaped as
            they are multiplied with input variables, where ``I`` is the size
            of the input and ``N`` is the dimension of the hidden units. All
            other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)`` where ``N`` is the dimension
            of the hidden units.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When sequences has different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.

    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.

        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
          the mini-batch size for time ``t``, and ``N`` is size of hidden
          units. Note that ``B_t`` is the same value as ``xs[t]``.

    .. note::
       The dimension of hidden units is limited to only one size ``N``. If you
       want to use variable dimension of hidden units, please use
       :class:`chainer.functions.lstm`.

    .. seealso::
       :func:`chainer.functions.lstm`

    .. admonition:: Example

        >>> batchs = [3, 2, 1]  # support variable length sequences
        >>> in_size, out_size, n_layers = 3, 2, 2
        >>> dropout_ratio = 0.0
        >>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
        >>> [x.shape for x in xs]
        [(3, 3), (2, 3), (1, 3)]
        >>> h_shape = (n_layers, batchs[0], out_size)
        >>> hx = np.ones(h_shape).astype(np.float32)
        >>> cx = np.ones(h_shape).astype(np.float32)
        >>> w_in = lambda i, j: in_size if i == 0 and j < 4 else out_size
        >>> ws = []
        >>> bs = []
        >>> for n in range(n_layers):
        ...     ws.append([np.ones((out_size, w_in(n, i))).astype(np.float32) \
for i in range(8)])
        ...     bs.append([np.ones((out_size,)).astype(np.float32) \
for _ in range(8)])
        ...
        >>> ws[0][0].shape  # ws[0][:4].shape are (out_size, in_size)
        (2, 3)
        >>> ws[1][0].shape  # others are (out_size, out_size)
        (2, 2)
        >>> bs[0][0].shape
        (2,)
        >>> hy, cy, ys = F.n_step_lstm(
        ...     n_layers, dropout_ratio, hx, cx, ws, bs, xs)
        >>> hy.shape
        (2, 3, 2)
        >>> cy.shape
        (2, 3, 2)
        >>> [y.shape for y in ys]
        [(3, 2), (2, 2), (1, 2)]

    """
    # Delegate to the shared implementation with the uni-directional flag.
    return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
                            use_bi_direction=False, **kwargs)
def n_step_bilstm(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, **kwargs):
    """n_step_bilstm(n_layers, dropout_ratio, hx, cx, ws, bs, xs)

    Stacked Bi-directional Long Short-Term Memory function.

    This function calculates stacked Bi-directional LSTM with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial cell
    state :math:`c_0`, an input sequence :math:`x`, weight matrices :math:`W`,
    and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for
    each time :math:`t` from input :math:`x_t`.

    .. math::
        i^{f}_t &=& \\sigma(W^{f}_0 x_t + W^{f}_4 h_{t-1} + b^{f}_0 + b^{f}_4),
        \\\\
        f^{f}_t &=& \\sigma(W^{f}_1 x_t + W^{f}_5 h_{t-1} + b^{f}_1 + b^{f}_5),
        \\\\
        o^{f}_t &=& \\sigma(W^{f}_2 x_t + W^{f}_6 h_{t-1} + b^{f}_2 + b^{f}_6),
        \\\\
        a^{f}_t &=& \\tanh(W^{f}_3 x_t + W^{f}_7 h_{t-1} + b^{f}_3 + b^{f}_7),
        \\\\
        c^{f}_t &=& f^{f}_t \\cdot c^{f}_{t-1} + i^{f}_t \\cdot a^{f}_t,
        \\\\
        h^{f}_t &=& o^{f}_t \\cdot \\tanh(c^{f}_t),
        \\\\
        i^{b}_t &=& \\sigma(W^{b}_0 x_t + W^{b}_4 h_{t-1} + b^{b}_0 + b^{b}_4),
        \\\\
        f^{b}_t &=& \\sigma(W^{b}_1 x_t + W^{b}_5 h_{t-1} + b^{b}_1 + b^{b}_5),
        \\\\
        o^{b}_t &=& \\sigma(W^{b}_2 x_t + W^{b}_6 h_{t-1} + b^{b}_2 + b^{b}_6),
        \\\\
        a^{b}_t &=& \\tanh(W^{b}_3 x_t + W^{b}_7 h_{t-1} + b^{b}_3 + b^{b}_7),
        \\\\
        c^{b}_t &=& f^{b}_t \\cdot c^{b}_{t-1} + i^{b}_t \\cdot a^{b}_t, \\\\
        h^{b}_t &=& o^{b}_t \\cdot \\tanh(c^{b}_t), \\\\
        h_t &=& [h^{f}_t; h^{b}_t]

    where :math:`W^{f}` is the weight matrices for forward-LSTM, :math:`W^{b}`
    is weight matrices for backward-LSTM.

    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Eight weight matrices and eight bias vectors are
    required for each layer of each direction. So, when :math:`S` layers
    exist, you need to prepare :math:`16S` weight matrices and :math:`16S`
    bias vectors.

    If the number of layers ``n_layers`` is greater than :math:`1`, the input
    of the ``k``-th layer is the hidden state ``h_t`` of the ``k-1``-th layer.
    Note that all input variables except the first layer may have different
    shape from the first layer.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(2S, B, N)`` where ``S`` is the number of layers
            and is equal to ``n_layers``, ``B`` is the mini-batch size, and
            ``N`` is the dimension of the hidden units. Because of
            bi-direction, the first dimension length is ``2S``.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[2 * l + m]`` represents the weights for the l-th layer of
            the m-th direction. (``m == 0`` means the forward direction and
            ``m == 1`` means the backward direction.) Each ``ws[i]`` is a
            list containing eight matrices. ``ws[i][j]`` corresponds to
            :math:`W_j` in the equation. ``ws[0][j]`` and ``ws[1][j]`` where
            ``0 <= j < 4`` are ``(I, N)``-shaped because they are multiplied
            with input variables, where ``I`` is the size of the input.
            ``ws[i][j]`` where ``2 <= i`` and ``0 <= j < 4`` are
            ``(N, 2N)``-shaped because they are multiplied with two hidden
            layers :math:`h_t = [h^{f}_t; h^{b}_t]`. All other matrices are
            ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[2 * l + m]`` represents the biases for the l-th layer of
            m-th direction. (``m == 0`` means the forward direction and
            ``m == 1`` means the backward direction.)
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)``.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When the sequences have different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.

    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.

        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, 2N)`` where ``B_t``
          is the mini-batch size for time ``t``, and ``N`` is size of
          hidden units. Note that ``B_t`` is the same value as ``xs[t]``.

    .. admonition:: Example

        >>> batchs = [3, 2, 1]  # support variable length sequences
        >>> in_size, out_size, n_layers = 3, 2, 2
        >>> dropout_ratio = 0.0
        >>> xs = [np.ones((b, in_size)).astype(np.float32) for b in batchs]
        >>> [x.shape for x in xs]
        [(3, 3), (2, 3), (1, 3)]
        >>> h_shape = (n_layers * 2, batchs[0], out_size)
        >>> hx = np.ones(h_shape).astype(np.float32)
        >>> cx = np.ones(h_shape).astype(np.float32)
        >>> def w_in(i, j):
        ...     if i == 0 and j < 4:
        ...         return in_size
        ...     elif i > 0 and j < 4:
        ...         return out_size * 2
        ...     else:
        ...         return out_size
        ...
        >>> ws = []
        >>> bs = []
        >>> for n in range(n_layers):
        ...     for direction in (0, 1):
        ...         ws.append(
        ...             [np.ones((out_size, w_in(n, i))).astype(np.float32)
        ...              for i in range(8)])
        ...         bs.append(
        ...             [np.ones((out_size,)).astype(np.float32)
        ...              for _ in range(8)])
        ...
        >>> ws[0][0].shape  # ws[0:2][:4].shape are (out_size, in_size)
        (2, 3)
        >>> ws[2][0].shape  # ws[2:][:4].shape are (out_size, 2 * out_size)
        (2, 4)
        >>> ws[0][4].shape  # others are (out_size, out_size)
        (2, 2)
        >>> bs[0][0].shape
        (2,)
        >>> hy, cy, ys = F.n_step_bilstm(
        ...     n_layers, dropout_ratio, hx, cx, ws, bs, xs)
        >>> hy.shape
        (4, 3, 2)
        >>> cy.shape
        (4, 3, 2)
        >>> [y.shape for y in ys]
        [(3, 4), (2, 4), (1, 4)]

    """
    # Delegate to the shared stacked-LSTM implementation in bidirectional
    # mode; the uni-directional variant goes through the same helper.
    return n_step_lstm_base(n_layers, dropout_ratio, hx, cx, ws, bs, xs,
                            use_bi_direction=True, **kwargs)
def n_step_lstm_base(
        n_layers, dropout_ratio, hx, cx, ws, bs, xs, use_bi_direction,
        **kwargs):
    """Base function for Stack LSTM/BiLSTM functions.

    This function is used at :func:`chainer.functions.n_step_lstm` and
    :func:`chainer.functions.n_step_bilstm`.
    This function's behavior depends on the ``use_bi_direction`` argument.

    Args:
        n_layers(int): The number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (~chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is the number of layers and
            is equal to ``n_layers``, ``B`` is the mini-batch size, and ``N``
            is the dimension of the hidden units.
        cx (~chainer.Variable): Variable holding stacked cell states.
            It has the same shape as ``hx``.
        ws (list of list of :class:`~chainer.Variable`): Weight matrices.
            ``ws[i]`` represents the weights for the i-th layer.
            Each ``ws[i]`` is a list containing eight matrices.
            ``ws[i][j]`` corresponds to :math:`W_j` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 4`` are ``(I, N)``-shape as
            they are multiplied with input variables, where ``I`` is the size
            of the input and ``N`` is the dimension of the hidden units. All
            other matrices are ``(N, N)``-shaped.
        bs (list of list of :class:`~chainer.Variable`): Bias vectors.
            ``bs[i]`` represents the biases for the i-th layer.
            Each ``bs[i]`` is a list containing eight vectors.
            ``bs[i][j]`` corresponds to :math:`b_j` in the equation.
            The shape of each matrix is ``(N,)``.
        xs (list of :class:`~chainer.Variable`):
            A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is the
            mini-batch size for time ``t``. The sequences must be transposed.
            :func:`~chainer.functions.transpose_sequence` can be used to
            transpose a list of :class:`~chainer.Variable`\\ s each
            representing a sequence.
            When the sequences have different lengths, they must be
            sorted in descending order of their lengths before transposing.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
        use_bi_direction (bool): If ``True``, this function uses
            Bi-directional LSTM.

    Returns:
        tuple: This function returns a tuple containing three elements,
        ``hy``, ``cy`` and ``ys``.

        - ``hy`` is an updated hidden states whose shape is the same as
          ``hx``.
        - ``cy`` is an updated cell states whose shape is the same as
          ``cx``.
        - ``ys`` is a list of :class:`~chainer.Variable` . Each element
          ``ys[t]`` holds hidden states of the last layer corresponding
          to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
          the mini-batch size for time ``t``. Note that ``B_t`` is the same
          value as ``xs[t]``.

    .. seealso::

       :func:`chainer.functions.n_step_lstm`
       :func:`chainer.functions.n_step_bilstm`

    """
    if kwargs:
        # ``train``/``use_cudnn`` were removed in v2; fail fast with a
        # message that points at chainer.using_config instead of raising a
        # generic TypeError.
        argument.check_unexpected_kwargs(
            kwargs, train='train argument is not supported anymore. '
            'Use chainer.using_config',
            use_cudnn='use_cudnn argument is not supported anymore. '
            'Use chainer.using_config')
        argument.assert_kwargs_empty(kwargs)

    xp = cuda.get_array_module(hx, hx.data)

    if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):
        # Fast path: run the whole stacked (Bi)LSTM as one cuDNN RNN call
        # (the 5000 argument gates this on cuDNN v5+).
        handle = cudnn.get_handle()
        states = cuda.get_cudnn_dropout_states()
        cudnn.set_dropout_descriptor(states._desc, handle, dropout_ratio)

        # cuDNN consumes a single flat input tensor, so concatenate the
        # per-timestep mini-batches and remember their sizes.
        lengths = [len(x) for x in xs]
        xs = chainer.functions.concat(xs, axis=0)
        # Pack the per-layer weight/bias lists into cuDNN's flat layout.
        w = n_step_rnn.cudnn_rnn_weight_concat(
            n_layers, states, use_bi_direction, 'lstm', ws, bs)

        if use_bi_direction:
            rnn = NStepBiLSTM
        else:
            rnn = NStepLSTM

        hy, cy, ys = rnn(n_layers, states, lengths)(hx, cx, w, xs)
        # Split the flat output back into the per-timestep list layout.
        sections = numpy.cumsum(lengths[:-1])
        ys = chainer.functions.split_axis(ys, sections, 0)
        return hy, cy, ys

    else:
        # Fallback: build the network step by step from the elementary
        # ``_lstm`` cell (CPU, or GPU without a usable cuDNN).
        return n_step_rnn.n_step_rnn_impl(
            _lstm, n_layers, dropout_ratio, hx, cx, ws, bs, xs,
            use_bi_direction)
def _lstm(x, h, c, w, b):
    """Single LSTM step used by the non-cuDNN fallback implementation.

    ``w`` and ``b`` each hold eight parameter slots: slots 0-3 act on the
    input ``x`` and slots 4-7 on the previous hidden state ``h``.  The
    slots are restacked in the order ``(2, 0, 1, 3)`` before the two
    linear projections — presumably to match the gate layout expected by
    :func:`chainer.functions.lstm` (TODO confirm against that function).
    Returns the pair ``(h_new, c_new)``.
    """
    gate_order = (2, 0, 1, 3)
    xw = _stack_weight([w[i] for i in gate_order])
    hw = _stack_weight([w[i + 4] for i in gate_order])
    xb = _stack_weight([b[i] for i in gate_order])
    hb = _stack_weight([b[i + 4] for i in gate_order])
    lstm_in = linear.linear(x, xw, xb) + linear.linear(h, hw, hb)
    c_new, h_new = lstm.lstm(c, lstm_in)
    return h_new, c_new
| |
from __future__ import unicode_literals
import boto.kinesis
from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException
import sure # noqa
from moto import mock_kinesis
@mock_kinesis
def test_create_cluster():
    """describe_stream reports the metadata of a freshly created stream."""
    client = boto.kinesis.connect_to_region("us-west-2")
    client.create_stream("my_stream", 2)

    description = client.describe_stream("my_stream")["StreamDescription"]
    description["StreamName"].should.equal("my_stream")
    description["HasMoreShards"].should.equal(False)
    description["StreamARN"].should.equal(
        "arn:aws:kinesis:us-west-2:123456789012:my_stream")
    description["StreamStatus"].should.equal("ACTIVE")
    # Both requested shards must be present.
    description['Shards'].should.have.length_of(2)
@mock_kinesis
def test_describe_non_existant_stream():
    """Describing an unknown stream raises ResourceNotFoundException."""
    client = boto.kinesis.connect_to_region("us-east-1")
    client.describe_stream.when.called_with(
        "not-a-stream").should.throw(ResourceNotFoundException)
@mock_kinesis
def test_list_and_delete_stream():
    """Streams show up in list_streams and disappear once deleted."""
    client = boto.kinesis.connect_to_region("us-west-2")
    client.create_stream("stream1", 1)
    client.create_stream("stream2", 1)
    client.list_streams()['StreamNames'].should.have.length_of(2)

    client.delete_stream("stream2")
    client.list_streams()['StreamNames'].should.have.length_of(1)

    # Deleting a stream that never existed is an error.
    client.delete_stream.when.called_with(
        "not-a-stream").should.throw(ResourceNotFoundException)
@mock_kinesis
def test_basic_shard_iterator():
    """A TRIM_HORIZON iterator on an empty stream yields no records.

    Fix: the original rebound ``shard_iterator`` to ``NextShardIterator``
    after the final read and never used it — the dead assignment is gone.
    """
    conn = boto.kinesis.connect_to_region("us-west-2")
    stream_name = "my_stream"
    conn.create_stream(stream_name, 1)

    response = conn.describe_stream(stream_name)
    shard_id = response['StreamDescription']['Shards'][0]['ShardId']

    response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
    shard_iterator = response['ShardIterator']

    response = conn.get_records(shard_iterator)
    response['Records'].should.equal([])
@mock_kinesis
def test_get_invalid_shard_iterator():
    """Requesting an iterator for an unknown shard id raises an error."""
    client = boto.kinesis.connect_to_region("us-west-2")
    client.create_stream("my_stream", 1)
    # "123" is not a real shard id of the stream.
    client.get_shard_iterator.when.called_with(
        "my_stream", "123", 'TRIM_HORIZON'
    ).should.throw(ResourceNotFoundException)
@mock_kinesis
def test_put_records():
    """A record written with put_record is read back intact by get_records.

    Fix: the original rebound ``shard_iterator`` to ``NextShardIterator``
    after reading and never used it — the dead assignment is gone.
    """
    conn = boto.kinesis.connect_to_region("us-west-2")
    stream_name = "my_stream"
    conn.create_stream(stream_name, 1)

    data = "hello world"
    partition_key = "1234"
    conn.put_record(stream_name, data, partition_key)

    response = conn.describe_stream(stream_name)
    shard_id = response['StreamDescription']['Shards'][0]['ShardId']

    response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
    shard_iterator = response['ShardIterator']

    response = conn.get_records(shard_iterator)
    response['Records'].should.have.length_of(1)
    record = response['Records'][0]

    record["Data"].should.equal("hello world")
    record["PartitionKey"].should.equal("1234")
    record["SequenceNumber"].should.equal("1")
@mock_kinesis
def test_get_records_limit():
    """The ``limit`` argument caps how many records one get_records returns."""
    client = boto.kinesis.connect_to_region("us-west-2")
    stream = "my_stream"
    client.create_stream(stream, 1)

    # Write five identical records.
    for seq in range(5):
        client.put_record(stream, "hello world", seq)

    shard_id = client.describe_stream(
        stream)['StreamDescription']['Shards'][0]['ShardId']
    iterator = client.get_shard_iterator(
        stream, shard_id, 'TRIM_HORIZON')['ShardIterator']

    # First call is capped at 3 records...
    first_batch = client.get_records(iterator, limit=3)
    first_batch['Records'].should.have.length_of(3)

    # ...and the continuation iterator yields the remaining 2.
    second_batch = client.get_records(first_batch['NextShardIterator'])
    second_batch['Records'].should.have.length_of(2)
@mock_kinesis
def test_get_records_at_sequence_number():
    """AT_SEQUENCE_NUMBER starts reading exactly at the given sequence number."""
    client = boto.kinesis.connect_to_region("us-west-2")
    stream = "my_stream"
    client.create_stream(stream, 1)

    # Seed the stream with records whose payloads are "1" through "4".
    for i in range(1, 5):
        client.put_record(stream, str(i), i)

    shard_id = client.describe_stream(
        stream)['StreamDescription']['Shards'][0]['ShardId']
    iterator = client.get_shard_iterator(
        stream, shard_id, 'TRIM_HORIZON')['ShardIterator']

    # Read two records and remember the second one's sequence number.
    second_sequence_id = client.get_records(
        iterator, limit=2)['Records'][1]['SequenceNumber']

    # An iterator anchored AT that sequence number replays that record first.
    iterator = client.get_shard_iterator(
        stream, shard_id, 'AT_SEQUENCE_NUMBER',
        second_sequence_id)['ShardIterator']
    records = client.get_records(iterator)['Records']
    records[0]['SequenceNumber'].should.equal(second_sequence_id)
    records[0]['Data'].should.equal('2')
@mock_kinesis
def test_get_records_after_sequence_number():
    """AFTER_SEQUENCE_NUMBER starts reading just past the given number."""
    client = boto.kinesis.connect_to_region("us-west-2")
    stream = "my_stream"
    client.create_stream(stream, 1)

    # Seed the stream with records whose payloads are "1" through "4".
    for i in range(1, 5):
        client.put_record(stream, str(i), i)

    shard_id = client.describe_stream(
        stream)['StreamDescription']['Shards'][0]['ShardId']
    iterator = client.get_shard_iterator(
        stream, shard_id, 'TRIM_HORIZON')['ShardIterator']

    # Read two records and remember the second one's sequence number.
    second_sequence_id = client.get_records(
        iterator, limit=2)['Records'][1]['SequenceNumber']

    # An iterator anchored AFTER that number begins at the third record.
    iterator = client.get_shard_iterator(
        stream, shard_id, 'AFTER_SEQUENCE_NUMBER',
        second_sequence_id)['ShardIterator']
    records = client.get_records(iterator)['Records']
    records[0]['Data'].should.equal('3')
@mock_kinesis
def test_get_records_latest():
    """LATEST only returns records written after the iterator was created."""
    client = boto.kinesis.connect_to_region("us-west-2")
    stream = "my_stream"
    client.create_stream(stream, 1)

    # Seed the stream with records whose payloads are "1" through "4".
    for i in range(1, 5):
        client.put_record(stream, str(i), i)

    shard_id = client.describe_stream(
        stream)['StreamDescription']['Shards'][0]['ShardId']
    iterator = client.get_shard_iterator(
        stream, shard_id, 'TRIM_HORIZON')['ShardIterator']

    second_sequence_id = client.get_records(
        iterator, limit=2)['Records'][1]['SequenceNumber']

    # A LATEST iterator starts at the tip of the shard even though a
    # sequence number is supplied.
    iterator = client.get_shard_iterator(
        stream, shard_id, 'LATEST', second_sequence_id)['ShardIterator']

    # Only data written after this point should be visible.
    client.put_record(stream, "last_record", "last_record")

    records = client.get_records(iterator)['Records']
    records.should.have.length_of(1)
    records[0]['PartitionKey'].should.equal('last_record')
    records[0]['Data'].should.equal('last_record')
@mock_kinesis
def test_invalid_shard_iterator_type():
    """An unknown shard-iterator type raises InvalidArgumentException.

    Fix: the original bound the ``.when.called_with(...).should.throw(...)``
    assertion chain to an unused ``response`` variable — the pointless
    binding is gone.
    """
    conn = boto.kinesis.connect_to_region("us-west-2")
    stream_name = "my_stream"
    conn.create_stream(stream_name, 1)

    response = conn.describe_stream(stream_name)
    shard_id = response['StreamDescription']['Shards'][0]['ShardId']

    conn.get_shard_iterator.when.called_with(
        stream_name, shard_id, 'invalid-type').should.throw(
        InvalidArgumentException)
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/prediction_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow_serving.apis import classification_pb2 as tensorflow__serving_dot_apis_dot_classification__pb2
from tensorflow_serving.apis import get_model_metadata_pb2 as tensorflow__serving_dot_apis_dot_get__model__metadata__pb2
from tensorflow_serving.apis import inference_pb2 as tensorflow__serving_dot_apis_dot_inference__pb2
from tensorflow_serving.apis import predict_pb2 as tensorflow__serving_dot_apis_dot_predict__pb2
from tensorflow_serving.apis import regression_pb2 as tensorflow__serving_dot_apis_dot_regression__pb2
# File descriptor for tensorflow_serving/apis/prediction_service.proto.
# NOTE(review): this module is emitted by protoc ("DO NOT EDIT" header) —
# regenerate from the .proto file rather than hand-editing the serialized
# descriptor below.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow_serving/apis/prediction_service.proto',
  package='tensorflow.serving',
  syntax='proto3',
  serialized_pb=_b('\n0tensorflow_serving/apis/prediction_service.proto\x12\x12tensorflow.serving\x1a,tensorflow_serving/apis/classification.proto\x1a\x30tensorflow_serving/apis/get_model_metadata.proto\x1a\'tensorflow_serving/apis/inference.proto\x1a%tensorflow_serving/apis/predict.proto\x1a(tensorflow_serving/apis/regression.proto2\xfc\x03\n\x11PredictionService\x12\x61\n\x08\x43lassify\x12).tensorflow.serving.ClassificationRequest\x1a*.tensorflow.serving.ClassificationResponse\x12X\n\x07Regress\x12%.tensorflow.serving.RegressionRequest\x1a&.tensorflow.serving.RegressionResponse\x12R\n\x07Predict\x12\".tensorflow.serving.PredictRequest\x1a#.tensorflow.serving.PredictResponse\x12g\n\x0eMultiInference\x12).tensorflow.serving.MultiInferenceRequest\x1a*.tensorflow.serving.MultiInferenceResponse\x12m\n\x10GetModelMetadata\x12+.tensorflow.serving.GetModelMetadataRequest\x1a,.tensorflow.serving.GetModelMetadataResponseB\x03\xf8\x01\x01\x62\x06proto3')
  ,
  dependencies=[tensorflow__serving_dot_apis_dot_classification__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_inference__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_predict__pb2.DESCRIPTOR,tensorflow__serving_dot_apis_dot_regression__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Attach the serialized file-level options blob to the descriptor
# (presumably the proto file's option settings — confirm against the .proto).
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
  # NOTE(review): generated gRPC compatibility code — the comment at the top
  # of the module says these elements are deprecated in favor of the
  # *_pb2_grpc.py file; regenerate rather than editing by hand.
  class PredictionServiceStub(object):
    """open source marker; do not remove
    PredictionService provides access to machine-learned models loaded by
    model_servers.
    """

    def __init__(self, channel):
      """Constructor.
      Args:
        channel: A grpc.Channel.
      """
      # One unary-unary callable per service method, each paired with the
      # matching protobuf request serializer / response deserializer.
      self.Classify = channel.unary_unary(
          '/tensorflow.serving.PredictionService/Classify',
          request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,
          response_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString,
          )
      self.Regress = channel.unary_unary(
          '/tensorflow.serving.PredictionService/Regress',
          request_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString,
          response_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString,
          )
      self.Predict = channel.unary_unary(
          '/tensorflow.serving.PredictionService/Predict',
          request_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString,
          response_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString,
          )
      self.MultiInference = channel.unary_unary(
          '/tensorflow.serving.PredictionService/MultiInference',
          request_serializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.SerializeToString,
          response_deserializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.FromString,
          )
      self.GetModelMetadata = channel.unary_unary(
          '/tensorflow.serving.PredictionService/GetModelMetadata',
          request_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString,
          response_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString,
          )
  # NOTE(review): generated server-side base class — every method is an
  # UNIMPLEMENTED placeholder that a concrete servicer must override.
  class PredictionServiceServicer(object):
    """open source marker; do not remove
    PredictionService provides access to machine-learned models loaded by
    model_servers.
    """

    def Classify(self, request, context):
      """Classify.
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def Regress(self, request, context):
      """Regress.
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def Predict(self, request, context):
      """Predict -- provides access to loaded TensorFlow model.
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def MultiInference(self, request, context):
      """MultiInference API for multi-headed models.
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')

    def GetModelMetadata(self, request, context):
      """GetModelMetadata - provides access to metadata for loaded models.
      """
      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      context.set_details('Method not implemented!')
      raise NotImplementedError('Method not implemented!')
  def add_PredictionServiceServicer_to_server(servicer, server):
    # Generated registration helper: wraps each servicer method in a
    # unary-unary handler with its protobuf (de)serializers and registers
    # them on *server* under the fully-qualified service name.
    rpc_method_handlers = {
        'Classify': grpc.unary_unary_rpc_method_handler(
            servicer.Classify,
            request_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.FromString,
            response_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.SerializeToString,
        ),
        'Regress': grpc.unary_unary_rpc_method_handler(
            servicer.Regress,
            request_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.FromString,
            response_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.SerializeToString,
        ),
        'Predict': grpc.unary_unary_rpc_method_handler(
            servicer.Predict,
            request_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.FromString,
            response_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.SerializeToString,
        ),
        'MultiInference': grpc.unary_unary_rpc_method_handler(
            servicer.MultiInference,
            request_deserializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.FromString,
            response_serializer=tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.SerializeToString,
        ),
        'GetModelMetadata': grpc.unary_unary_rpc_method_handler(
            servicer.GetModelMetadata,
            request_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.FromString,
            response_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'tensorflow.serving.PredictionService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
  # NOTE(review): deprecated Beta-API servicer kept only for grpcio<0.15.0
  # migration; the second triple-quoted string below is a plain expression
  # statement (only the first string is the docstring) — generated as-is.
  class BetaPredictionServiceServicer(object):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This class was generated
    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
    """open source marker; do not remove
    PredictionService provides access to machine-learned models loaded by
    model_servers.
    """

    def Classify(self, request, context):
      """Classify.
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def Regress(self, request, context):
      """Regress.
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def Predict(self, request, context):
      """Predict -- provides access to loaded TensorFlow model.
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def MultiInference(self, request, context):
      """MultiInference API for multi-headed models.
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def GetModelMetadata(self, request, context):
      """GetModelMetadata - provides access to metadata for loaded models.
      """
      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
  # NOTE(review): deprecated Beta-API stub skeleton; every method raises and
  # the ``<method>.future = None`` assignments mirror the Beta stub protocol.
  class BetaPredictionServiceStub(object):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This class was generated
    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
    """open source marker; do not remove
    PredictionService provides access to machine-learned models loaded by
    model_servers.
    """

    def Classify(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """Classify.
      """
      raise NotImplementedError()
    Classify.future = None

    def Regress(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """Regress.
      """
      raise NotImplementedError()
    Regress.future = None

    def Predict(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """Predict -- provides access to loaded TensorFlow model.
      """
      raise NotImplementedError()
    Predict.future = None

    def MultiInference(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """MultiInference API for multi-headed models.
      """
      raise NotImplementedError()
    MultiInference.future = None

    def GetModelMetadata(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
      """GetModelMetadata - provides access to metadata for loaded models.
      """
      raise NotImplementedError()
    GetModelMetadata.future = None
  def beta_create_PredictionService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """The Beta API is deprecated for 0.15.0 and later.
    It is recommended to use the GA API (classes and functions in this
    file not marked beta) for all further purposes. This function was
    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
    # Map every (service, method) pair to its request deserializer,
    # response serializer, and the servicer's unary-unary implementation,
    # then build a Beta-API server from those tables.
    request_deserializers = {
      ('tensorflow.serving.PredictionService', 'Classify'): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.FromString,
      ('tensorflow.serving.PredictionService', 'GetModelMetadata'): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.FromString,
      ('tensorflow.serving.PredictionService', 'MultiInference'): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.FromString,
      ('tensorflow.serving.PredictionService', 'Predict'): tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.FromString,
      ('tensorflow.serving.PredictionService', 'Regress'): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.FromString,
    }
    response_serializers = {
      ('tensorflow.serving.PredictionService', 'Classify'): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.SerializeToString,
      ('tensorflow.serving.PredictionService', 'GetModelMetadata'): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.SerializeToString,
      ('tensorflow.serving.PredictionService', 'MultiInference'): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.SerializeToString,
      ('tensorflow.serving.PredictionService', 'Predict'): tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.SerializeToString,
      ('tensorflow.serving.PredictionService', 'Regress'): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.SerializeToString,
    }
    method_implementations = {
      ('tensorflow.serving.PredictionService', 'Classify'): face_utilities.unary_unary_inline(servicer.Classify),
      ('tensorflow.serving.PredictionService', 'GetModelMetadata'): face_utilities.unary_unary_inline(servicer.GetModelMetadata),
      ('tensorflow.serving.PredictionService', 'MultiInference'): face_utilities.unary_unary_inline(servicer.MultiInference),
      ('tensorflow.serving.PredictionService', 'Predict'): face_utilities.unary_unary_inline(servicer.Predict),
      ('tensorflow.serving.PredictionService', 'Regress'): face_utilities.unary_unary_inline(servicer.Regress),
    }
    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)
def beta_create_PredictionService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """The Beta API is deprecated for 0.15.0 and later.

  It is recommended to use the GA API (classes and functions in this
  file not marked beta) for all further purposes. This function was
  generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
  # NOTE: auto-generated gRPC beta stub factory -- do not edit by hand.
  # Maps (service, method) -> protobuf request serializer for outgoing calls.
  request_serializers = {
    ('tensorflow.serving.PredictionService', 'Classify'): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,
    ('tensorflow.serving.PredictionService', 'GetModelMetadata'): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString,
    ('tensorflow.serving.PredictionService', 'MultiInference'): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceRequest.SerializeToString,
    ('tensorflow.serving.PredictionService', 'Predict'): tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString,
    ('tensorflow.serving.PredictionService', 'Regress'): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString,
  }
  # Maps (service, method) -> protobuf response deserializer for replies.
  response_deserializers = {
    ('tensorflow.serving.PredictionService', 'Classify'): tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString,
    ('tensorflow.serving.PredictionService', 'GetModelMetadata'): tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString,
    ('tensorflow.serving.PredictionService', 'MultiInference'): tensorflow__serving_dot_apis_dot_inference__pb2.MultiInferenceResponse.FromString,
    ('tensorflow.serving.PredictionService', 'Predict'): tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString,
    ('tensorflow.serving.PredictionService', 'Regress'): tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString,
  }
  # All five RPCs are simple request/response (unary-unary) methods.
  cardinalities = {
    'Classify': cardinality.Cardinality.UNARY_UNARY,
    'GetModelMetadata': cardinality.Cardinality.UNARY_UNARY,
    'MultiInference': cardinality.Cardinality.UNARY_UNARY,
    'Predict': cardinality.Cardinality.UNARY_UNARY,
    'Regress': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'tensorflow.serving.PredictionService', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| |
import sys
import os
import gc
import threading
import numpy as np
from numpy.testing import assert_equal, assert_, assert_allclose
from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix,
bsr_matrix, dia_matrix)
from scipy.sparse.sputils import supported_dtypes, matrix
from scipy._lib._testutils import check_free_memory
import pytest
from pytest import raises as assert_raises
def test_exception():
    # The sparsetools C++ error hook should surface a bad_alloc as a
    # Python MemoryError.
    with assert_raises(MemoryError):
        _sparsetools.test_throw_error()
def test_threads():
    # Smoke test for parallel threaded execution; doesn't actually check
    # that code runs in parallel, only that the results are correct.
    num_workers = 10
    iterations = 100
    size = 20

    shared = csr_matrix(np.ones([size, size]))
    results = []

    def work():
        # Each worker repeatedly computes shared + shared into its own copy.
        local = shared.copy()
        for _ in range(iterations):
            _sparsetools.csr_plus_csr(size, size,
                                      shared.indptr, shared.indices, shared.data,
                                      shared.indptr, shared.indices, shared.data,
                                      local.indptr, local.indices, local.data)
        results.append(local)

    workers = [threading.Thread(target=work) for _ in range(num_workers)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # Every worker must end up with an all-twos matrix (1 + 1).
    for local in results:
        assert_(np.all(local.toarray() == 2))
def test_regression_std_vector_dtypes():
    # Regression test for gh-3780, checking the std::vector typemaps
    # in sparsetools.cxx are complete.
    for dtype in supported_dtypes:
        dense = matrix([[1, 2], [3, 4]]).astype(dtype)
        sparse = csr_matrix(dense, dtype=dtype)

        # getcol is one function using std::vector typemaps, and should not fail
        assert_equal(sparse.getcol(0).todense(), dense[:, 0])
@pytest.mark.slow
def test_nnz_overflow():
    # Regression test for gh-7230 / gh-7871, checking that coo_todense
    # with nnz > int32max doesn't overflow.
    nnz = np.iinfo(np.int32).max + 1
    # Ensure ~20 GB of RAM is free to run this test.
    check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5)

    # Use nnz duplicate entries at (0, 0) to keep the dense result 1x1.
    rows = np.zeros(nnz, dtype=np.int32)
    cols = np.zeros(nnz, dtype=np.int32)
    vals = np.zeros(nnz, dtype=np.int8)
    vals[-1] = 4
    sparse = coo_matrix((vals, (rows, cols)), shape=(1, 1), copy=False)

    # Summing the nnz duplicates must produce a 1x1 array containing 4.
    dense = sparse.toarray()
    assert_allclose(dense, [[4]])
@pytest.mark.skipif(not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8),
                    reason="test requires 64-bit Linux")
class TestInt32Overflow(object):
    """
    Some of the sparsetools routines use dense 2D matrices whose
    total size is not bounded by the nnz of the sparse matrix. These
    routines used to suffer from int32 wraparounds; here, we try to
    check that the wraparounds don't occur any more.
    """
    # choose n large enough: n**2 exceeds the int32 range, so any int32
    # index arithmetic over a dense n x n extent would wrap around
    # (verified explicitly in setup_method)
    n = 50000

    def setup_method(self):
        assert self.n**2 > np.iinfo(np.int32).max

        # check there's enough memory even if everything is run at the
        # same time (PYTEST_XDIST_WORKER_COUNT is set by pytest-xdist;
        # an unparseable value disables the test via an infinite demand)
        try:
            parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1'))
        except ValueError:
            parallel_count = np.inf

        check_free_memory(3000 * parallel_count)

    def teardown_method(self):
        # Free the huge temporaries promptly between tests.
        gc.collect()

    def test_coo_todense(self):
        # Check *_todense routines (cf. gh-2179)
        #
        # All of them in the end call coo_matrix.todense
        n = self.n
        i = np.array([0, n-1])
        j = np.array([0, n-1])
        data = np.array([1, 2], dtype=np.int8)
        m = coo_matrix((data, (i, j)))

        r = m.todense()
        # Corner entries prove the full index range was addressed correctly.
        assert_equal(r[0,0], 1)
        assert_equal(r[-1,-1], 2)
        del r
        gc.collect()

    @pytest.mark.slow
    def test_matvecs(self):
        # Check *_matvecs routines
        n = self.n
        i = np.array([0, n-1])
        j = np.array([0, n-1])
        data = np.array([1, 2], dtype=np.int8)
        m = coo_matrix((data, (i, j)))

        b = np.ones((n, n), dtype=np.int8)
        for sptype in (csr_matrix, csc_matrix, bsr_matrix):
            m2 = sptype(m)
            r = m2.dot(b)
            assert_equal(r[0,0], 1)
            assert_equal(r[-1,-1], 2)
            del r
            gc.collect()

        del b
        gc.collect()

    @pytest.mark.slow
    def test_dia_matvec(self):
        # Check: huge dia_matrix _matvec
        n = self.n
        data = np.ones((n, n), dtype=np.int8)
        offsets = np.arange(n)
        m = dia_matrix((data, offsets), shape=(n, n))
        v = np.ones(m.shape[1], dtype=np.int8)
        r = m.dot(v)
        # Row 0 touches every diagonal once, so its sum is exactly n (mod 256
        # via the int8 cast, matching the product's dtype).
        assert_equal(r[0], np.int8(n))
        del data, offsets, m, v, r
        gc.collect()

    # BSR operations to exercise; the expensive ones are marked xslow.
    _bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow),
                pytest.param("matvecs", marks=pytest.mark.xslow),
                "matvec",
                "diagonal",
                "sort_indices",
                pytest.param("transpose", marks=pytest.mark.xslow)]

    @pytest.mark.slow
    @pytest.mark.parametrize("op", _bsr_ops)
    def test_bsr_1_block(self, op):
        # Check: huge bsr_matrix (1-block)
        #
        # The point here is that indices inside a block may overflow.

        def get_matrix():
            # Factory so the huge matrix is only built inside the try/finally
            # below, after the previous one has been collected.
            n = self.n
            data = np.ones((1, n, n), dtype=np.int8)
            indptr = np.array([0, 1], dtype=np.int32)
            indices = np.array([0], dtype=np.int32)
            m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False)
            del data, indptr, indices
            return m

        gc.collect()
        try:
            # Dispatch to the matching _check_bsr_* helper by name.
            getattr(self, "_check_bsr_" + op)(get_matrix)
        finally:
            gc.collect()

    @pytest.mark.slow
    @pytest.mark.parametrize("op", _bsr_ops)
    def test_bsr_n_block(self, op):
        # Check: huge bsr_matrix (n-block)
        #
        # The point here is that while indices within a block don't
        # overflow, accumulators across many block may.

        def get_matrix():
            n = self.n
            data = np.ones((n, n, 1), dtype=np.int8)
            indptr = np.array([0, n], dtype=np.int32)
            indices = np.arange(n, dtype=np.int32)
            m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False)
            del data, indptr, indices
            return m

        gc.collect()
        try:
            getattr(self, "_check_bsr_" + op)(get_matrix)
        finally:
            gc.collect()

    # Each helper below receives the factory `m`, builds the matrix, and
    # exercises one BSR routine.

    def _check_bsr_matvecs(self, m):
        m = m()
        n = self.n

        # _matvecs
        r = m.dot(np.ones((n, 2), dtype=np.int8))
        assert_equal(r[0,0], np.int8(n))

    def _check_bsr_matvec(self, m):
        m = m()
        n = self.n

        # _matvec
        r = m.dot(np.ones((n,), dtype=np.int8))
        assert_equal(r[0], np.int8(n))

    def _check_bsr_diagonal(self, m):
        m = m()
        n = self.n

        # _diagonal
        r = m.diagonal()
        assert_equal(r, np.ones(n))

    def _check_bsr_sort_indices(self, m):
        # _sort_indices
        m = m()
        m.sort_indices()

    def _check_bsr_transpose(self, m):
        # _transpose
        m = m()
        m.transpose()

    def _check_bsr_matmat(self, m):
        m = m()
        n = self.n

        # _bsr_matmat
        m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2))
        m.dot(m2)  # shouldn't SIGSEGV
        del m2

        # _bsr_matmat
        m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0]))
        m2.dot(m)  # shouldn't SIGSEGV
@pytest.mark.skip(reason="64-bit indices in sparse matrices not available")
def test_csr_matmat_int64_overflow():
    n = 3037000500
    assert n**2 > np.iinfo(np.int64).max

    # the test would take crazy amounts of memory
    check_free_memory(n * (8*2 + 1) * 3 / 1e6)

    # int64 overflow
    vals = np.ones((n,), dtype=np.int8)
    ptrs = np.arange(n+1, dtype=np.int64)
    idxs = np.zeros(n, dtype=np.int64)
    mat = csr_matrix((vals, idxs, ptrs))
    mat_t = mat.T

    assert_raises(RuntimeError, mat.dot, mat_t)
def test_upcast():
    a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
    b0 = np.array([256+1j, 2**32], dtype=complex)

    def realized(template, dtype):
        # Cast the complex template to dtype, dropping the imaginary part
        # first when the target dtype is not complex.
        if np.issubdtype(dtype, np.complexfloating):
            return template.copy().astype(dtype)
        return template.real.copy().astype(dtype)

    for a_dtype in supported_dtypes:
        for b_dtype in supported_dtypes:
            msg = "(%r, %r)" % (a_dtype, b_dtype)

            a = realized(a0, a_dtype)
            b = realized(b0, b_dtype)

            # A bool output buffer is only acceptable for bool x bool input.
            if not (a_dtype == np.bool_ and b_dtype == np.bool_):
                c = np.zeros((2,), dtype=np.bool_)
                assert_raises(ValueError, _sparsetools.csr_matvec,
                              2, 2, a.indptr, a.indices, a.data, b, c)

            # Mixing exactly one complex operand with a real output buffer
            # must also be rejected.
            complex_a = np.issubdtype(a_dtype, np.complexfloating)
            complex_b = np.issubdtype(b_dtype, np.complexfloating)
            if complex_a != complex_b:
                c = np.zeros((2,), dtype=np.float64)
                assert_raises(ValueError, _sparsetools.csr_matvec,
                              2, 2, a.indptr, a.indices, a.data, b, c)

            # With the correctly upcast buffer the product must succeed.
            c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
            _sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
            assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
def test_endianness():
    d = np.ones((3, 4))
    offsets = [-1, 0, 1]

    # Little- and big-endian storage of the same data must give the same
    # matvec result.
    little = dia_matrix((d.astype('<f8'), offsets), (4, 4))
    big = dia_matrix((d.astype('>f8'), offsets), (4, 4))
    v = np.arange(4)

    for m in (little, big):
        assert_allclose(m.dot(v), [1, 3, 6, 5])
| |
#!/usr/bin/python
#
# Copyright Google 2007-2008, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import gdata
import gdata.service
import gdata.spreadsheet
import gdata.spreadsheet.service
import gdata.docs
import gdata.docs.service
"""Make the Google Documents API feel more like using a database.
This module contains a client and other classes which make working with the
Google Documents List Data API and the Google Spreadsheets Data API look a
bit more like working with a hierarchical database. Using the DatabaseClient,
you can create or find spreadsheets and use them like a database, with
worksheets representing tables and rows representing records.
Example Usage:
# Create a new database, a new table, and add records.
client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com',
password='12345')
database = client.CreateDatabase('My Text Database')
table = database.CreateTable('addresses', ['name','email',
'phonenumber', 'mailingaddress'])
record = table.AddRecord({'name':'Bob', 'email':'bob@example.com',
'phonenumber':'555-555-1234', 'mailingaddress':'900 Imaginary St.'})
# Edit a record
record.content['email'] = 'bob2@example.com'
record.Push()
# Delete a table
table.Delete()
Warnings:
Care should be exercised when using this module on spreadsheets
which contain formulas. This module treats all rows as containing text and
updating a row will overwrite any formula with the output of the formula.
The intended use case is to allow easy storage of text data in a spreadsheet.
Error: Domain specific extension of Exception.
BadCredentials: Error raised if the username or password was incorrect.
CaptchaRequired: Raised if a login attempt failed and a CAPTCHA challenge
was issued.
DatabaseClient: Communicates with Google Docs APIs servers.
Database: Represents a spreadsheet and interacts with tables.
Table: Represents a worksheet and interacts with records.
RecordResultSet: A list of records in a table.
Record: Represents a row in a worksheet allows manipulation of text data.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
class Error(Exception):
    """Base exception for all errors raised by this module."""
class BadCredentials(Error):
    """Raised when the supplied username or password is rejected."""
class CaptchaRequired(Error):
    """Raised when a login attempt fails with a CAPTCHA challenge."""
class DatabaseClient(object):
    """Allows creation and finding of Google Spreadsheets databases.

    The DatabaseClient simplifies the process of creating and finding Google
    Spreadsheets and will talk to both the Google Spreadsheets API and the
    Google Documents List API.
    """

    def __init__(self, username=None, password=None):
        """Creates the underlying API clients and optionally logs in.

        If both username and password are present, the constructor contacts
        the Google servers to authenticate.

        Args:
          username: str (optional) Example: jo@example.com
          password: str (optional)
        """
        self._docs_service = gdata.docs.service.DocsService()
        self._spreadsheet_service = (
            gdata.spreadsheet.service.SpreadsheetsService())
        self.SetCredentials(username, password)

    def SetCredentials(self, username, password):
        """Attempts to log in to Google APIs using the provided credentials.

        If the username or password are None, no auth tokens are requested.

        Args:
          username: str (optional) Example: jo@example.com
          password: str (optional)

        Raises:
          CaptchaRequired: The login attempt triggered a CAPTCHA challenge.
          BadCredentials: The username/password pair was rejected.
        """
        for service in (self._docs_service, self._spreadsheet_service):
            service.email = username
            service.password = password
        if not (username and password):
            return
        try:
            self._docs_service.ProgrammaticLogin()
            self._spreadsheet_service.ProgrammaticLogin()
        except gdata.service.CaptchaRequired:
            raise CaptchaRequired('Please visit https://www.google.com/accounts/'
                                  'DisplayUnlockCaptcha to unlock your account.')
        except gdata.service.BadAuthentication:
            raise BadCredentials('Username or password incorrect.')

    def CreateDatabase(self, name):
        """Creates a new Google Spreadsheet with the desired name.

        Args:
          name: str The title for the spreadsheet.

        Returns:
          A Database instance representing the new spreadsheet.
        """
        # A spreadsheet is created by uploading a minimal CSV file through
        # the Google Documents List API; it then backs this database.
        csv_stream = io.StringIO(',,,')
        media = gdata.MediaSource(file_handle=csv_stream,
                                  content_type='text/csv', content_length=3)
        entry = self._docs_service.UploadSpreadsheet(media, name)
        return Database(spreadsheet_entry=entry, database_client=self)

    def GetDatabases(self, spreadsheet_key=None, name=None):
        """Finds spreadsheets which have the unique key or title.

        Querying by spreadsheet_key yields at most one result; searching by
        name can yield several.

        Args:
          spreadsheet_key: str The unique key for the spreadsheet, this
              usually in the the form 'pk23...We' or 'o23...423.12,,,3'.
          name: str The title of the spreadsheets.

        Returns:
          A list of Database objects representing the desired spreadsheets.
        """
        if spreadsheet_key:
            entry = self._docs_service.GetDocumentListEntry(
                r'/feeds/documents/private/full/spreadsheet%3A' + spreadsheet_key)
            return [Database(spreadsheet_entry=entry, database_client=self)]
        title_query = gdata.docs.service.DocumentQuery()
        title_query['title'] = name
        feed = self._docs_service.QueryDocumentListFeed(title_query.ToUri())
        return [Database(spreadsheet_entry=entry, database_client=self)
                for entry in feed.entry]

    def _GetDocsClient(self):
        # Internal accessor used by Database/Table/Record helpers.
        return self._docs_service

    def _GetSpreadsheetsClient(self):
        # Internal accessor used by Database/Table/Record helpers.
        return self._spreadsheet_service
class Database(object):
    """Provides an interface to find and create tables.

    The database represents a single Google Spreadsheet.
    """

    def __init__(self, spreadsheet_entry=None, database_client=None):
        """Constructor for a database object.

        Args:
          spreadsheet_entry: gdata.docs.DocumentListEntry The Atom entry
              which represents the Google Spreadsheet. The spreadsheet's key
              is extracted from the entry and stored as a member.
          database_client: DatabaseClient A client which can talk to the
              Google Spreadsheets servers to perform operations on worksheets
              within this spreadsheet.
        """
        self.entry = spreadsheet_entry
        if spreadsheet_entry:
            # The key is the last path segment of the Atom ID, minus the
            # URL-escaped 'spreadsheet:' prefix.
            last_segment = spreadsheet_entry.id.text.split('/')[-1]
            self.spreadsheet_key = last_segment.replace('spreadsheet%3A', '')
        self.client = database_client

    def CreateTable(self, name, fields=None):
        """Adds a new worksheet to this spreadsheet and fills in column names.

        Args:
          name: str The title of the new worksheet.
          fields: list of strings The column names which are placed in the
              first row of this worksheet. These names are converted into XML
              tags by the server, so all-lowercase alphabetic names survive
              the translation unchanged.

        Returns:
          Table representing the newly created worksheet.
        """
        worksheet = self.client._GetSpreadsheetsClient().AddWorksheet(
            title=name, row_count=1, col_count=len(fields),
            key=self.spreadsheet_key)
        return Table(name=name, worksheet_entry=worksheet,
                     database_client=self.client,
                     spreadsheet_key=self.spreadsheet_key, fields=fields)

    def GetTables(self, worksheet_id=None, name=None):
        """Searches for a worksheet with the specified ID or name.

        Args:
          worksheet_id: str The ID of the worksheet, example: 'od6'
          name: str The title of the worksheet.

        Returns:
          A list of matching Table objects (a list so this method feels like
          GetDatabases and GetRecords).
        """
        sheets_client = self.client._GetSpreadsheetsClient()
        if worksheet_id:
            entry = sheets_client.GetWorksheetsFeed(
                self.spreadsheet_key, wksht_id=worksheet_id)
            return [Table(name=entry.title.text, worksheet_entry=entry,
                          database_client=self.client,
                          spreadsheet_key=self.spreadsheet_key)]
        query = None
        if name:
            query = gdata.spreadsheet.service.DocumentQuery()
            query.title = name
        feed = sheets_client.GetWorksheetsFeed(self.spreadsheet_key,
                                               query=query)
        return [Table(name=entry.title.text, worksheet_entry=entry,
                      database_client=self.client,
                      spreadsheet_key=self.spreadsheet_key)
                for entry in feed.entry]

    def Delete(self):
        """Deletes the entire database spreadsheet from Google Spreadsheets."""
        docs_client = self.client._GetDocsClient()
        entry = docs_client.Get(
            r'http://docs.google.com/feeds/documents/private/full/spreadsheet%3A' +
            self.spreadsheet_key)
        docs_client.Delete(entry.GetEditLink().href)
class Table(object):
    # Represents a single worksheet inside a spreadsheet and provides
    # record (row) level operations on it.

    def __init__(self, name=None, worksheet_entry=None, database_client=None,
                 spreadsheet_key=None, fields=None):
        # Args:
        #   name: str (optional) Title of the worksheet.
        #   worksheet_entry: the Atom entry for the worksheet; its ID is the
        #       last path segment of the entry's Atom ID.
        #   database_client: DatabaseClient used for server operations.
        #   spreadsheet_key: str Key of the containing spreadsheet.
        #   fields: list of str (optional) Column names; when provided they
        #       are immediately written to row 1 via SetFields.
        self.name = name
        self.entry = worksheet_entry
        id_parts = worksheet_entry.id.text.split('/')
        self.worksheet_id = id_parts[-1]
        self.spreadsheet_key = spreadsheet_key
        self.client = database_client
        self.fields = fields or []
        if fields:
            self.SetFields(fields)

    def LookupFields(self):
        """Queries to find the column names in the first row of the worksheet.

        Useful when you have retrieved the table from the server and you don't
        know the column names.
        """
        if self.entry:
            first_row_contents = []
            # Restrict the cells feed to row 1 only.
            query = gdata.spreadsheet.service.CellQuery()
            query.max_row = '1'
            query.min_row = '1'
            feed = self.client._GetSpreadsheetsClient().GetCellsFeed(
                self.spreadsheet_key, wksht_id=self.worksheet_id, query=query)
            for entry in feed.entry:
                first_row_contents.append(entry.content.text)
            # Get the next set of cells if needed (the feed is paginated).
            next_link = feed.GetNextLink()
            while next_link:
                feed = self.client._GetSpreadsheetsClient().Get(next_link.href,
                    converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString)
                for entry in feed.entry:
                    first_row_contents.append(entry.content.text)
                next_link = feed.GetNextLink()
            # Convert the contents of the cells to valid headers.
            self.fields = ConvertStringsToColumnHeaders(first_row_contents)

    def SetFields(self, fields):
        """Changes the contents of the cells in the first row of this worksheet.

        Args:
          fields: list of strings The names in the list comprise the
              first row of the worksheet. These names are converted into XML
              tags by the server. To avoid changes during the translation
              process I recommend using all lowercase alphabetic names. For
              example ['somelongname', 'theothername']
        """
        # TODO: If the table already had fields, we might want to clear out the,
        # current column headers.
        self.fields = fields
        i = 0
        for column_name in fields:
            i = i + 1
            # TODO: speed this up by using a batch request to update cells.
            # Cells are addressed as (row=1, col=i), 1-based.
            self.client._GetSpreadsheetsClient().UpdateCell(1, i, column_name,
                self.spreadsheet_key, self.worksheet_id)

    def Delete(self):
        """Deletes this worksheet from the spreadsheet."""
        # Re-fetch the worksheet entry so the delete uses a fresh edit link.
        worksheet = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
            self.spreadsheet_key, wksht_id=self.worksheet_id)
        self.client._GetSpreadsheetsClient().DeleteWorksheet(
            worksheet_entry=worksheet)

    def AddRecord(self, data):
        """Adds a new row to this worksheet.

        Args:
          data: dict of strings Mapping of string values to column names.

        Returns:
          Record which represents this row of the spreadsheet.
        """
        new_row = self.client._GetSpreadsheetsClient().InsertRow(data,
            self.spreadsheet_key, wksht_id=self.worksheet_id)
        return Record(content=data, row_entry=new_row,
            spreadsheet_key=self.spreadsheet_key, worksheet_id=self.worksheet_id,
            database_client=self.client)

    def GetRecord(self, row_id=None, row_number=None):
        """Gets a single record from the worksheet based on row ID or number.

        Args:
          row_id: The ID for the individual row.
          row_number: str or int The position of the desired row. Numbering
              begins at 1, which refers to the second row in the worksheet since
              the first row is used for column names.

        Returns:
          Record for the desired row, or None when a row_number lookup finds
          no matching row.
        """
        if row_id:
            row_entry = self.client._GetSpreadsheetsClient().GetListFeed(
                self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=row_id)
            return Record(content=None, row_entry=row_entry,
                spreadsheet_key=self.spreadsheet_key,
                worksheet_id=self.worksheet_id, database_client=self.client)
        else:
            # Fetch exactly one row starting at row_number.
            row_query = gdata.spreadsheet.service.ListQuery()
            row_query.start_index = str(row_number)
            row_query.max_results = '1'
            row_feed = self.client._GetSpreadsheetsClient().GetListFeed(
                self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
            if len(row_feed.entry) >= 1:
                return Record(content=None, row_entry=row_feed.entry[0],
                    spreadsheet_key=self.spreadsheet_key,
                    worksheet_id=self.worksheet_id, database_client=self.client)
            else:
                return None

    def GetRecords(self, start_row, end_row):
        """Gets all rows between the start and end row numbers inclusive.

        Args:
          start_row: str or int
          end_row: str or int

        Returns:
          RecordResultSet for the desired rows.
        """
        start_row = int(start_row)
        end_row = int(end_row)
        max_rows = end_row - start_row + 1
        row_query = gdata.spreadsheet.service.ListQuery()
        row_query.start_index = str(start_row)
        row_query.max_results = str(max_rows)
        rows_feed = self.client._GetSpreadsheetsClient().GetListFeed(
            self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
        return RecordResultSet(rows_feed, self.client, self.spreadsheet_key,
            self.worksheet_id)

    def FindRecords(self, query_string):
        """Performs a query against the worksheet to find rows which match.

        For details on query string syntax see the section on sq under
        http://code.google.com/apis/spreadsheets/reference.html#list_Parameters

        Args:
          query_string: str Examples: 'name == john' to find all rows with john
              in the name column, '(cost < 19.50 and name != toy) or cost > 500'

        Returns:
          RecordResultSet with the first group of matches.
        """
        row_query = gdata.spreadsheet.service.ListQuery()
        row_query.sq = query_string
        matching_feed = self.client._GetSpreadsheetsClient().GetListFeed(
            self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
        return RecordResultSet(matching_feed, self.client,
            self.spreadsheet_key, self.worksheet_id)
class RecordResultSet(list):
    """A collection of rows which allows fetching of the next set of results.

    The server may not send all rows in the requested range because there are
    too many. Using this result set you can access the first set of results
    as if it is a list, then get the next batch (if there are more results) by
    calling GetNext().
    """

    def __init__(self, feed, client, spreadsheet_key, worksheet_id):
        """Wraps the entries of a list feed as Record objects.

        Args:
          feed: The spreadsheets list feed whose entries populate this set.
          client: DatabaseClient Used to fetch the next page of results.
          spreadsheet_key: str Key of the source spreadsheet.
          worksheet_id: str ID of the source worksheet.
        """
        # Initialize the list base class. The original code called
        # ``list(self)``, which built and discarded a copy -- a no-op.
        list.__init__(self)
        self.client = client
        self.spreadsheet_key = spreadsheet_key
        self.worksheet_id = worksheet_id
        self.feed = feed
        for entry in feed.entry:
            self.append(Record(content=None, row_entry=entry,
                spreadsheet_key=spreadsheet_key, worksheet_id=worksheet_id,
                database_client=client))

    def GetNext(self):
        """Fetches the next batch of rows in the result set.

        Returns:
          A new RecordResultSet, or None when the feed has no next link.
        """
        next_link = self.feed.GetNextLink()
        if next_link and next_link.href:
            new_feed = self.client._GetSpreadsheetsClient().Get(next_link.href,
                converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
            return RecordResultSet(new_feed, self.client, self.spreadsheet_key,
                self.worksheet_id)
        return None
class Record(object):
    """Represents one row in a worksheet and provides a dictionary of values.

    Attributes:
      content: dict The contents of the row, with cell values mapped to
          column headers.
    """

    def __init__(self, content=None, row_entry=None, spreadsheet_key=None,
                 worksheet_id=None, database_client=None):
        """Constructor for a record.

        Args:
          content: dict of strings Mapping of string values to column names.
          row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry
              representing this row in the worksheet.
          spreadsheet_key: str The ID of the spreadsheet in which this row
              belongs.
          worksheet_id: str The ID of the worksheet in which this row belongs.
          database_client: DatabaseClient The client which can be used to talk
              the Google Spreadsheets server to edit this row.
        """
        self.entry = row_entry
        self.spreadsheet_key = spreadsheet_key
        self.worksheet_id = worksheet_id
        # The row ID is the last path segment of the entry's Atom ID.
        self.row_id = row_entry.id.text.split('/')[-1] if row_entry else None
        self.client = database_client
        self.content = content or {}
        if not content:
            self.ExtractContentFromEntry(row_entry)

    def ExtractContentFromEntry(self, entry):
        """Populates the content and row_id based on content of the entry.

        This method is used in the Record's constructor.

        Args:
          entry: gdata.spreadsheet.SpreadsheetsList The Atom entry
              representing this row in the worksheet.
        """
        self.content = {}
        if not entry:
            return
        self.row_id = entry.id.text.split('/')[-1]
        for label, cell in entry.custom.items():
            self.content[label] = cell.text

    def Push(self):
        """Sends the content of the record to the server to edit the row.

        All items in the content dictionary will be sent. Items which have
        been removed from the content may remain in the row. The content
        member is not modified, so fields added server-side may be absent
        from this local copy.
        """
        updated_entry = self.client._GetSpreadsheetsClient().UpdateRow(
            self.entry, self.content)
        self.entry = updated_entry

    def Pull(self):
        """Queries Google Spreadsheets for the latest data from the server.

        Fetches the entry for this row and repopulates the content dictionary
        with the data found in the row.
        """
        if not self.row_id:
            return
        self.entry = self.client._GetSpreadsheetsClient().GetListFeed(
            self.spreadsheet_key, wksht_id=self.worksheet_id,
            row_id=self.row_id)
        self.ExtractContentFromEntry(self.entry)

    def Delete(self):
        """Deletes this row's entry from the worksheet."""
        self.client._GetSpreadsheetsClient().DeleteRow(self.entry)
def ConvertStringsToColumnHeaders(proposed_headers):
    """Converts a list of strings to column names which spreadsheets accepts.

    When setting values in a record, the keys which represent column names must
    fit certain rules. They are all lower case, contain no spaces or special
    characters. If two columns have the same name after being sanitized, the
    columns further to the right have _2, _3, _4, etc. appended to them.

    If there are column names which consist of all special characters, or if
    the column header is blank, an obfuscated value will be used for a column
    name. This method does not handle blank column names or column names with
    only special characters.

    Args:
      proposed_headers: list of str The raw first-row cell contents.

    Returns:
      list of str The sanitized headers, de-duplicated with _n suffixes.
    """
    headers = []
    # Track how many times each sanitized base name has been seen. This fixes
    # the original headers.count() approach, which only counted exact matches
    # and therefore produced a duplicate '<name>_2' once a name occurred three
    # or more times (already-suffixed '<name>_2' entries were not counted).
    occurrences = {}
    for input_string in proposed_headers:
        # TODO: probably a more efficient way to do this. Perhaps regex.
        sanitized = input_string.lower().replace('_', '').replace(
            ':', '').replace(' ', '')
        # When the same sanitized header appears multiple times in the first
        # row of a spreadsheet, _n is appended to the name to make it unique.
        count = occurrences.get(sanitized, 0)
        occurrences[sanitized] = count + 1
        if count > 0:
            headers.append('%s_%i' % (sanitized, count + 1))
        else:
            headers.append(sanitized)
    return headers
| |
#!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - UINT16
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT16 :
- unsigned
- size = 16
- range : [0, 1000]
Test cases :
------------
- UINT16 parameter min value = 0
- UINT16 parameter min value out of bounds = -1
- UINT16 parameter max value = 1000
- UINT16 parameter max value out of bounds = 1001
- UINT16 parameter in nominal case = 50
"""
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type UINT16 - range [0, 1000]
class TestCases(PfwTestCase):
    def setUp(self):
        # Parameter under test: UINT16, range [0, 1000] (see module docstring).
        self.param_name = "/Test/Test/TEST_DIR/UINT16"
        # Tuning mode must be enabled before setParameter can modify values.
        self.pfw.sendCmd("setTuningMode", "on")
    def tearDown(self):
        # Restore the parameter framework to its normal (non-tuning) state.
        self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing UINT16 in nominal case = 50
-----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT16 parameter in nominal case = 50
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT16 parameter set to 50
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
log.I("UINT16 parameter in nominal case = 50")
value = "50"
hex_value = "0x32"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
"""
Testing UINT16 minimal value = 0
--------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT16 parameter min value = 0
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT16 parameter set to 0
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin.__doc__)
log.I("UINT16 parameter min value = 0")
value = "0"
hex_value = "0x0"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
"""
Testing UINT16 parameter value out of negative range
----------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT16 to -1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- UINT16 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin_Overflow.__doc__)
log.I("UINT16 parameter min value out of bounds = -1")
value = "-1"
param_check = commands.getoutput('cat $PFW_RESULT/UINT16')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT16') == param_check, log.F("FILESYSTEM : Forbiden parameter change")
log.I("test OK")
def test_TypeMax(self):
"""
Testing UINT16 parameter maximum value
--------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT16 to 1000
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT16 parameter set to 1000
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax.__doc__)
log.I("UINT16 parameter max value = 1000")
value = "1000"
hex_value = "0x3e8"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
"""
Testing UINT16 parameter value out of positive range
----------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT16 to 1001
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- UINT16 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax_Overflow.__doc__)
log.I("UINT16 parameter max value out of bounds = 1001")
value = "1001"
param_check = commands.getoutput('cat $PFW_RESULT/UINT16')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT16') == param_check, log.F("FILESYSTEM : Forbiden parameter change")
log.I("test OK")
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import re
import shutil
import string
import yaml
from tempfile import mkdtemp
from bloom.git import branch_exists
from bloom.git import create_branch
from bloom.git import has_changes
from bloom.git import get_remotes
from bloom.git import get_root
from bloom.git import inbranch
from bloom.git import show
from bloom.git import track_branches
from bloom.logging import error
from bloom.logging import fmt
from bloom.logging import info
from bloom.logging import sanitize
from bloom.util import execute_command
from bloom.util import my_copytree
from bloom.util import get_distro_list_prompt
# Git branch on which all bloom configuration (tracks.yaml, overlays) lives.
BLOOM_CONFIG_BRANCH = 'master'
# Marker file left on the deprecated 'bloom' branch after migration
# (see upconvert_bloom_to_config_branch below).
PLACEHOLDER_FILE = 'CONTENT_MOVED_TO_{0}_BRANCH'.format(BLOOM_CONFIG_BRANCH.upper())
# Interactive help text for each configurable track setting, keyed as
# {setting_name: {acceptable_value_or_placeholder: description}}.
# These strings are shown verbatim to the user by PromptEntry.__str__.
config_spec = {
    'name': {
        '<name>': 'Name of the repository (used in the archive name)',
        'upstream': 'Default value, leave this as upstream if you are unsure'
    },
    'vcs_uri': {
        '<uri>': '''\
Any valid URI. This variable can be templated, for example an svn url
can be templated as such: "https://svn.foo.com/foo/tags/foo-:{version}"
where the :{version} token will be replaced with the version for this release.\
'''
    },
    'vcs_type': {
        'git': 'Upstream URI is a git repository',
        'hg': 'Upstream URI is a hg repository',
        'svn': 'Upstream URI is a svn repository',
        'tar': 'Upstream URI is a tarball'
    },
    'version': {
        ':{auto}': '''\
This means the version will be guessed from the devel branch.
This means that the devel branch must be set, the devel branch must exist,
and there must be a valid package.xml in the upstream devel branch.''',
        ':{ask}': '''\
This means that the user will be prompted for the version each release.
This also means that the upstream devel will be ignored.''',
        '<version>': '''\
This will be the version used.
It must be updated for each new upstream version.'''
    },
    'release_tag': {
        ':{version}': '''\
This means that the release tag will match the :{version} tag.
This can be further templated, for example: "foo-:{version}" or "v:{version}"
This can describe any vcs reference. For git that means {tag, branch, hash},
for hg that means {tag, branch, hash}, for svn that means a revision number.
For tar this value doubles as the sub directory (if the repository is
in foo/ of the tar ball, putting foo here will cause the contents of
foo/ to be imported to upstream instead of foo itself).
''',
        ':{ask}': '''\
This means the user will be prompted for the release tag on each release.
''',
        ':{none}': '''\
For svn and tar only you can set the release tag to :{none}, so that
it is ignored. For svn this means no revision number is used.
'''
    },
    'devel_branch': {
        '<vcs reference>': '''\
Branch in upstream repository on which to search for the version.
This is used only when version is set to ':{auto}'.
''',
    },
    'ros_distro': {
        '<ROS distro>': "This can be any valid ROS distro, e.g. %s" %
                        get_distro_list_prompt()
    },
    'patches': {
        '<path in bloom branch>': '''\
This can be any valid relative path in the bloom branch. The contents
of this folder will be overlaid onto the upstream branch after each
import-upstream. Additionally, any package.xml files found in the
overlay will have the :{version} string replaced with the current
version being released.''',
        ':{none}': '''\
Use this if you want to disable overlaying of files.'''
    },
    'release_repo_url': {
        '<url>': '''\
(optional) Used when pushing to remote release repositories. This is only
needed when the release uri which is in the rosdistro file is not writable.
This is useful, for example, when a releaser would like to use a ssh url
to push rather than a https:// url.
''',
        ':{none}': '''\
This indicates that the default release url should be used.
'''
    }
}
class PromptEntry(object):
    """A single interactive configuration prompt.

    Holds the prompt's display name, default value, an optional list of
    acceptable values, and either a free-form ``prompt`` string or a
    ``spec`` dict of {value: description} help entries (see config_spec).
    Assigning to ``default`` is validated against ``values``.
    """
    def __init__(self, name, default=None, values=None, prompt='', spec=None):
        # ``values`` is assigned first so the validation in __setattr__
        # can consult it when ``default`` is assigned below.
        self.values = values
        self.name = name
        self.default = default
        self.prompt = prompt
        self.spec = spec
    def __setattr__(self, key, value):
        # Validate assignments to ``default`` against the acceptable values.
        # ROBUSTNESS: getattr with a fallback keeps this safe even if
        # ``default`` is ever assigned before ``values`` exists (the original
        # relied on __init__'s exact assignment order and short-circuiting).
        if key == 'default' and getattr(self, 'values', None):
            if value not in self.values:
                error(
                    "Invalid input '{0}' for '{1}', acceptable values: {2}."
                    .format(value, self.name, self.values),
                    exit=True
                )
        object.__setattr__(self, key, value)
    def __str__(self):
        # Render the prompt shown to the user: name, help (spec table or
        # free-form prompt), then the highlighted default value.
        msg = fmt('@_' + sanitize(self.name) + ':@|')
        if self.spec is not None:
            for key, val in self.spec.items():
                msg += '\n ' + key
                for line in val.splitlines():
                    msg += '\n ' + line
        else:
            msg += '\n ' + self.prompt
        msg += '\n '
        if self.default is None:
            msg += fmt(" @![@{yf}None@|@!]@|: ")
        else:
            msg += fmt(" @!['@{yf}" + sanitize(self.default) + "@|@!']@|: ")
        return msg
# The full set of settings every track must carry (see verify_track), with
# the interactive PromptEntry used to ask for each one.  'actions' is the
# ordered list of shell commands run per release; ':{...}' tokens are
# expanded by template_str/ConfigTemplate.
DEFAULT_TEMPLATE = {
    'name': PromptEntry('Repository Name', spec=config_spec['name'], default='upstream'),
    'vcs_uri': PromptEntry('Upstream Repository URI', spec=config_spec['vcs_uri']),
    'vcs_type': PromptEntry(
        'Upstream VCS Type', default='git', spec=config_spec['vcs_type'],
        values=['git', 'hg', 'svn', 'tar']),
    'version': PromptEntry('Version', default=':{auto}', spec=config_spec['version']),
    'release_tag': PromptEntry('Release Tag', default=':{version}', spec=config_spec['release_tag']),
    'devel_branch': PromptEntry('Upstream Devel Branch', spec=config_spec['devel_branch']),
    'patches': PromptEntry('Patches Directory', spec=config_spec['patches']),
    'ros_distro': PromptEntry('ROS Distro', default='indigo', spec=config_spec['ros_distro']),
    'release_repo_url': PromptEntry('Release Repository Push URL', spec=config_spec['release_repo_url']),
    'release_inc': -1,
    'actions': [
        'bloom-export-upstream :{vcs_local_uri} :{vcs_type}'
        ' --tag :{release_tag} --display-uri :{vcs_uri}'
        ' --name :{name} --output-dir :{archive_dir_path}',
        'git-bloom-import-upstream :{archive_path} :{patches}'
        ' --release-version :{version} --replace',
        'git-bloom-generate -y rosrelease :{ros_distro}'
        ' --source upstream -i :{release_inc}',
        'git-bloom-generate -y rosdebian --prefix release/:{ros_distro}'
        ' :{ros_distro} -i :{release_inc}',
        'git-bloom-generate -y rosrpm --prefix release/:{ros_distro}'
        ' :{ros_distro} -i :{release_inc}'
    ]
}
# Overrides applied on top of DEFAULT_TEMPLATE for specific track types.
CUSTOM_TEMPLATE = {
    'reference': ':{ask}',
    'patches': ':{name}'
}
# Track-type -> override template; None is the plain (no-override) default.
config_template = {
    'third-party': CUSTOM_TEMPLATE,
    None: {}
}
def verify_track(track_name, track):
    """Exit with an error if *track* lacks any key required by DEFAULT_TEMPLATE."""
    upconvert_bloom_to_config_branch()
    for required_key in DEFAULT_TEMPLATE:
        if required_key in track:
            continue
        error("Track '{0}' is missing configuration ".format(track_name) +
              "'{0}', it may be out of date, please run 'git-bloom-config edit {1}'."
              .format(required_key, track_name), exit=True)
class ConfigTemplate(string.Template):
    """string.Template variant using ':' as the delimiter, so track settings
    can embed tokens like ':{version}' or ':{name}'."""
    delimiter = ':'
def template_str(line, settings):
    """Expand ':{key}' placeholders in *line* from the *settings* mapping."""
    return ConfigTemplate(line).substitute(settings)
def write_tracks_dict_raw(tracks_dict, cmt_msg=None, directory=None):
    """Serialize *tracks_dict* to tracks.yaml on the config branch and commit.

    :param tracks_dict: data to dump (callers pass a dict with a 'tracks' key)
    :param cmt_msg: commit message, defaults to 'Modified tracks.yaml'
    :param directory: working directory passed to the git commands
    """
    upconvert_bloom_to_config_branch()
    cmt_msg = cmt_msg if cmt_msg is not None else 'Modified tracks.yaml'
    # The file must be written while the config branch is checked out.
    # NOTE(review): the file is written relative to the process CWD while git
    # runs in *directory* -- assumes these coincide; confirm with callers.
    with inbranch(BLOOM_CONFIG_BRANCH):
        with open('tracks.yaml', 'w') as f:
            f.write(yaml.dump(tracks_dict, indent=2, default_flow_style=False))
        execute_command('git add tracks.yaml', cwd=directory)
        # --allow-empty: commit succeeds even if the yaml content is unchanged.
        execute_command('git commit --allow-empty -m "{0}"'.format(cmt_msg),
                        cwd=directory)
# A release version must look like MAJOR.MINOR.PATCH.
version_regex = re.compile(r'^\d+\.\d+\.\d+$')


def validate_track_versions(tracks_dict):
    """Raise ValueError for any track whose 'version' is neither one of the
    special tokens ':{ask}'/':{auto}' nor a MAJOR.MINOR.PATCH string."""
    for track in tracks_dict['tracks'].values():
        if 'version' not in track:
            continue
        version = track['version']
        if version in (':{ask}', ':{auto}'):
            continue
        if not version_regex.match(version):
            raise ValueError(
                "Invalid version '{0}', it must be formatted as 'MAJOR.MINOR.PATCH'"
                .format(version))
def get_tracks_dict_raw(directory=None):
    """Return the parsed tracks.yaml from the config branch.

    On first use, creates the orphaned config branch and seeds an empty
    tracks.yaml.  Raises ValueError (via validate_track_versions) if any
    track carries a malformed version.

    :param directory: working directory for the underlying git commands
    """
    upconvert_bloom_to_config_branch()
    if not branch_exists(BLOOM_CONFIG_BRANCH):
        info("Creating '{0}' branch.".format(BLOOM_CONFIG_BRANCH))
        create_branch(BLOOM_CONFIG_BRANCH, orphaned=True, directory=directory)
    tracks_yaml = show(BLOOM_CONFIG_BRANCH, 'tracks.yaml', directory=directory)
    if not tracks_yaml:
        # First run: seed an empty tracks file, then re-read it.
        write_tracks_dict_raw(
            {'tracks': {}}, 'Initial tracks.yaml', directory=directory
        )
        tracks_yaml = show(BLOOM_CONFIG_BRANCH, 'tracks.yaml',
                           directory=directory)
    # FIX: use safe_load -- tracks.yaml is plain data written by yaml.dump
    # above, and yaml.load without an explicit Loader is unsafe and
    # deprecated in PyYAML (executes arbitrary tags).
    tracks_dict = yaml.safe_load(tracks_yaml)
    validate_track_versions(tracks_dict)
    return tracks_dict
# Module-level guard so upconvert_bloom_to_config_branch() performs its
# git migration work at most once per process.
_has_checked_bloom_branch = False
def check_for_multiple_remotes():
    """Exit with an error unless the current git repo has exactly one remote.

    No-op when executed outside of a git repository.
    """
    if get_root() is None:
        return
    remotes = get_remotes()
    # BUG FIX: this previously tested ``len(remotes) < 0``, which can never
    # be true, so the "no remotes" error was silently unreachable.
    if not remotes:
        error("Current git repository has no remotes. "
              "If you are running bloom-release, please change directories.",
              exit=True)
    if len(remotes) > 1:
        error("Current git repository has multiple remotes. "
              "If you are running bloom-release, please change directories.",
              exit=True)
def upconvert_bloom_to_config_branch():
    """One-time migration: move configuration files from the deprecated
    'bloom' branch onto BLOOM_CONFIG_BRANCH, leaving a placeholder behind.

    Safe to call repeatedly: no-ops outside a git repository, when the
    'bloom' branch does not exist, or when the migration already happened
    (placeholder present / bloom.conf still awaiting its own upconvert).
    """
    global _has_checked_bloom_branch
    if _has_checked_bloom_branch:
        return
    # Assert that this repository does not have multiple remotes
    check_for_multiple_remotes()
    if get_root() is None:
        # Not a git repository
        return
    # Make sure both branches are locally tracked before inspecting them.
    track_branches(['bloom', BLOOM_CONFIG_BRANCH])
    if show('bloom', PLACEHOLDER_FILE) is not None:
        # Migration already performed (placeholder present on 'bloom').
        return
    if show('bloom', 'bloom.conf') is not None:
        # Wait for the bloom.conf upconvert...
        return
    if not branch_exists('bloom'):
        return
    # NOTE(review): the guard flag is only set on this path; the early
    # returns above re-run the git checks on every call -- confirm intended.
    _has_checked_bloom_branch = True
    info("Moving configurations from deprecated 'bloom' branch "
         "to the '{0}' branch.".format(BLOOM_CONFIG_BRANCH))
    tmp_dir = mkdtemp()
    git_root = get_root()
    try:
        # Copy the new upstream source into the temporary directory
        with inbranch('bloom'):
            ignores = ('.git', '.gitignore', '.svn', '.hgignore', '.hg', 'CVS')
            configs = os.path.join(tmp_dir, 'configs')
            my_copytree(git_root, configs, ignores)
            # Empty the 'bloom' branch and drop the placeholder file in.
            if [x for x in os.listdir(os.getcwd()) if x not in ignores]:
                execute_command('git rm -rf ./*')
            with open(PLACEHOLDER_FILE, 'w') as f:
                f.write("""\
This branch ('bloom') has been deprecated in favor of storing settings and overlay files in the master branch.
Please goto the master branch for anything which referenced the bloom branch.
You can delete this branch at your convenience.
""")
            execute_command('git add ' + PLACEHOLDER_FILE)
            if has_changes():
                execute_command('git commit -m "DEPRECATING BRANCH"')
        if not branch_exists(BLOOM_CONFIG_BRANCH):
            info("Creating '{0}' branch.".format(BLOOM_CONFIG_BRANCH))
            create_branch(BLOOM_CONFIG_BRANCH, orphaned=True)
        # Replay the saved configs onto the config branch.
        with inbranch(BLOOM_CONFIG_BRANCH):
            my_copytree(configs, git_root)
            execute_command('git add ./*')
            if has_changes():
                execute_command('git commit -m '
                                '"Moving configs from bloom branch"')
    finally:
        # Clean up
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)
| |
#! /usr/bin/env python2
import argparse
from RecBlastUtils import *
import csv_transformer
import taxa_to_taxid
from uuid import uuid4
import part_one
import part_two
import part_three
from RecBlastFigures import *
# this will be the stand alone version of RecBlast for linux.
# NOTE: this script is Python 2 (print statements, dict.iteritems below).
__version__ = "1.1.2"
# flags:
# DEBUG = False
TAXA_ID_PROVIDED = False  # if the user provides a file with a list of taxa id.
GENE_CSV_PROVIDED = False
# If false, we will take a file of taxa names
# BLAST PARAMS
# defaults (each is overridable via the CLI flags defined below)
E_VALUE_THRESH = 1e-7
IDENTITY_THRESHOLD = 37
COVERAGE_THRESHOLD = 50
MAX_TARGET_SEQS = '1000000'
BACK_E_VALUE_THRESH = E_VALUE_THRESH  # because it doesn't matter?
# BACK_E_VALUE_THRESH = 1e-15 # for the other direction
MAX_ATTEMPTS_TO_COMPLETE_REC_BLAST = 100
# CPU = 1
# fixed:
# OUTFMT = '6 staxids sseqid pident qcovs evalue sscinames sblastnames' # changed it to enable parsing
# Tabular BLAST output columns parsed by parts 2/3.
OUTFMT = '6 staxids sseqid pident qcovs evalue sscinames sblastnames qseqid'
ACCESSION_REGEX = re.compile(r'([A-Z0-9\._]+) ?')
DESCRIPTION_REGEX = re.compile(r'\([^)]*\)')
ORIGINAL_ID = 1  # start part_two from 0. change this when you want to start from mid-file
# NCBI Entrez requires a contact email and tool name on every request.
APP_CONTACT_EMAIL = "recblast@gmail.com"
Entrez.email = APP_CONTACT_EMAIL
Entrez.tool = "RecBlast"
# comparison
TEXTUAL_MATCH = 0.4
TEXTUAL_SEQ_MATCH = 0.99
####################
# parse arguments  #
####################
# Command-line interface; defaults come from the module constants above.
parser = argparse.ArgumentParser()
parser.add_argument("origin_species", help="The species of origin (for which we start the blast)")
parser.add_argument("gene_file", help="A file containing a list of gene names to perform the reciprocal blast on.")
parser.add_argument("taxa_list_file", help="A file containing a list of taxa names to perform the reciprocal blast on. "
                                           "They must not include the original taxon!")
parser.add_argument("--output_path", help="A folder in which to keep all RecBlast output.")
parser.add_argument("--gene_csv", help="This flag means the gene file provided is already a CSV file containing the "
                                       "required genes as well as their description and uniprot id.",
                    action="store_true", default=False)
parser.add_argument("--run_name", help="The name the run will receive (will determine the folder names)")
parser.add_argument("--max_attempt_to_complete_recblast",
                    help="The maximum number of matches to perform the reciprocal blast on.",
                    default=MAX_ATTEMPTS_TO_COMPLETE_REC_BLAST)
# blast parameters
parser.add_argument("--num_threads", help="The number of threads (CPU) dedicated for parallel blast run.",
                    default=1, type=int)
parser.add_argument("--evalue", help="The e-value threshold for matches of the first blast.", default=E_VALUE_THRESH)
parser.add_argument("--evalue_back", help="The e-value threshold for matches of the second blast.",
                    default=BACK_E_VALUE_THRESH)
parser.add_argument("--identity", help="The minimum identity required for blast matches.",
                    default=IDENTITY_THRESHOLD)
parser.add_argument("--coverage", help="The minimum query and hit coverage required for blast matches.",
                    default=COVERAGE_THRESHOLD)
parser.add_argument("--max_seqs", help="The maximum number of sequences reported by blast.", default=MAX_TARGET_SEQS)
parser.add_argument("--db_first_run", help="The path to the BLASTP database for the first run (should be NR).")
parser.add_argument("--target_db", help="The path to the BLASTP database for the second run "
                                        "(protein database of the species of origin)")
parser.add_argument("--string_similarity", help="The string similarity value for comparing the gene names/descriptions",
                    default=TEXTUAL_MATCH)
parser.add_argument("--run_even_if_no_db_found", help="Performs a heavy reciprocal blast. "
                                                      "Not recommended in most cases. See documentation for use cases.",
                    action="store_true")
parser.add_argument("--run_all", help="Runs BLAST on all sequences together instead of running separately."
                                      "Recommended mostly in small runs in machines with large available RAM. "
                                      "Not recommended in most cases. See documentation for use cases.",
                    action="store_true", default=False)  # added to enable case 2
parser.add_argument("--keep_files", help="Keeps intermediate files after completion", action="store_true",
                    default=False)
parser.add_argument("--try_uniprot", help="Looks for the sequences in UniProt too", action="store_true",
                    default=False)
parser.add_argument("--skip_first_blast", help="Skips the first blast run (and runs files from the"
                                               "local folder. Use this if your run was interrupted and you wish to "
                                               "continue it from the second blast phase.", action="store_true",
                    default=False)
parser.add_argument("--skip_second_blast", help="Skips the first AND second blast runs (and runs files from the"
                                                "local folder). Use this if your run was interrupted and you wish to "
                                                "continue it from the analysis stage of the second blast.",
                    action="store_true", default=False)
parser.add_argument("--debug", help="Adds debug prints in various stages of the run.", action="store_true",
                    default=False)
# NOTE(review): "in formation" below looks like a typo for "information";
# it is user-visible help text, so left unchanged here.
parser.add_argument("-v", "--version", help="Prints version in formation.", action="store_true",
                    default=False)
args = parser.parse_args()
# DEBUG flags
DEBUG = args.debug


def debug(s):
    """Emit *s* as a debug message (via RecBlastUtils.debug_s), gated on --debug."""
    return debug_s(s, DEBUG)


if args.version:
    print("RecBlast stand alone version: {}".format(__version__))
    print("All rights reserved to Efrat Rapoport and Moran Neuhof, 2016")
    # BUG FIX: --version is a successful invocation, so exit with status 0;
    # the previous exit(1) signalled failure to shells and wrapper scripts.
    exit(0)
# DATA VALIDATION, and local preparation of paths:
# making sure the files we received exist and are not empty:
if exists_not_empty(args.gene_file):
    debug("The gene file {} exists and not empty".format(args.gene_file))
else:
    print("gene_file {} does not exist or is empty!".format(args.gene_file))
    exit(1)
if exists_not_empty(args.taxa_list_file):
    debug("The taxa list file {} exists and not empty".format(args.taxa_list_file))
else:
    print("taxa_list_file {} does not exist or is empty!".format(args.taxa_list_file))
    exit(1)
# locating BLASTP path on your system
BLASTP_PATH = "Not valid"
try:
    # `which blastp` fails (non-zero exit) when blastp is not on $PATH.
    BLASTP_PATH = strip(subprocess.check_output(["which", "blastp"], universal_newlines=True))
    debug("BLASTP found in {}".format(BLASTP_PATH))
except subprocess.CalledProcessError:
    print("No BLASTP found. Please check install blast properly or make sure it's in $PATH. Aborting.")
    exit(1)
CPU = args.num_threads
RUN_ALL = args.run_all
# script folder
SCRIPT_FOLDER = os.path.dirname(os.path.abspath(__file__))
# defining run folder
base_folder = os.getcwd()  # current folder
# Run folder resolution: explicit --output_path wins when a run name is
# given; otherwise a folder named after the (possibly random) run name.
if args.run_name:
    run_name = args.run_name
    if args.output_path:
        run_folder = args.output_path  # assigning run_folder (default)
    else:
        run_folder = join_folder(base_folder, run_name)
else:
    run_name = str(uuid4())  # randomly assigned run_name
    run_folder = join_folder(base_folder, run_name)
create_folder_if_needed(run_folder)  # creating the folder
# creating the rest of the folders:
# folders:
FIRST_BLAST_FOLDER = join_folder(run_folder, "first_blast")
create_folder_if_needed(FIRST_BLAST_FOLDER)  # creating the folder
SECOND_BLAST_FOLDER = join_folder(run_folder, "second_blast")
create_folder_if_needed(SECOND_BLAST_FOLDER)  # creating the folder
FASTA_PATH = join_folder(run_folder, "fasta_path")
create_folder_if_needed(FASTA_PATH)
FASTA_OUTPUT_FOLDER = join_folder(run_folder, "fasta_output")
create_folder_if_needed(FASTA_OUTPUT_FOLDER)
# CSV_OUTPUT_FILENAME = join_folder(run_folder, "output_table.csv")
CSV_RBH_OUTPUT_FILENAME = join_folder(run_folder, "output_table_RBH.csv")
CSV_STRICT_OUTPUT_FILENAME = join_folder(run_folder, "output_table_strict.csv")
CSV_NS_OUTPUT_FILENAME = join_folder(run_folder, "output_table_non-strict.csv")
# Decide on taxa input:
TAX_DB = join_folder(SCRIPT_FOLDER, "DB/taxdump/tax_names.txt")
# database location
DB_FOLDER = join_folder(SCRIPT_FOLDER, "DB")
# parsing and creating taxa files and parameters:
# name -> taxid mapping, and its inverse (Python 2: dict.iteritems).
tax_name_dict = taxa_to_taxid.create_tax_dict(TAX_DB)
tax_id_dict = dict((v, k) for k, v in tax_name_dict.iteritems())  # the reverse dict
# processing original species
# The origin species may be given either as a numeric tax id or a tax name;
# resolve both ORIGIN_SPECIES (name) and ORG_TAX_ID (id) from it.
if is_number(args.origin_species):  # already a tax_id
    ORG_TAX_ID = args.origin_species
    try:
        ORIGIN_SPECIES = tax_id_dict[args.origin_species]
        debug("Tax id given: {0}, which is {1}, no need to convert.".format(ORG_TAX_ID, ORIGIN_SPECIES))
    except KeyError:
        print("Unknown tax id for the reference species: {}!".format(ORG_TAX_ID))
        ORIGIN_SPECIES = ""
        exit(1)
else:  # it's a tax name
    ORIGIN_SPECIES = args.origin_species
    # convert it to tax id
    try:
        ORG_TAX_ID = tax_name_dict[ORIGIN_SPECIES]
        debug("Tax name given: {0}, which is tax_id {1}, converted.".format(ORIGIN_SPECIES, ORG_TAX_ID))
    except KeyError:
        print("Unknown tax name for the reference species: {}!".format(ORIGIN_SPECIES))
        ORG_TAX_ID = ""
        exit(1)
# parsing the genes_csv if it's a csv, and transforming it if it's a gene list file
if args.gene_csv:
    CSV_PATH = args.gene_file  # hope it's a good and valid file...
    print("Provided CSV with {} genes". format(file_len(CSV_PATH) - 1))
else:  # if the csv is not provided, create it from a gene file
    CSV_PATH = csv_transformer.gene_file_to_csv(remove_commas(args.gene_file), ORG_TAX_ID, try_uniprot=args.try_uniprot)
    # quits if no input could be converted
    print("Generated CSV with {} genes". format(file_len(CSV_PATH) - 1))
# validating the taxa list files
# converting taxa names list
(TAXA_LIST_FILE, bad_tax_list, good_tax_list) = taxa_to_taxid.convert_tax_to_taxid(tax_name_dict, tax_id_dict,
                                                                                   args.taxa_list_file, ORIGIN_SPECIES,
                                                                                   ORG_TAX_ID)
# Unrecognized taxa are reported and then ignored rather than aborting.
if len(bad_tax_list) > 0:
    print("Bad taxa names found in the file provided:")
    print("\n".join(bad_tax_list))
    print("Ignoring them.")
debug("Converted tax list to tax ID files, saved new list in: {}".format(TAXA_LIST_FILE))
# processing DB information:
#############################
# forward DB:
DB = "Not valid"  # default value. Doesn't matter at this point
if args.db_first_run:  # if the user provided a DB
    DB = args.db_first_run
else:  # find the nr DB yourself
    BLASTDB_PATH = ""
    try:
        BLASTDB_PATH = os.environ["BLASTDB"]
        # BLASTDB_PATH = strip(subprocess.check_output(["echo", "$BLASTDB"], universal_newlines=True))
        # doesn't work all the time
        if BLASTDB_PATH == "":  # depends on the system
            blastdb_exit()  # quits and sends a message
    # except subprocess.CalledProcessError:
    except KeyError:
        blastdb_exit()
    print("Found $BLASTDB path in {}".format(BLASTDB_PATH))
    # check if nr exists on the local system
    # NOTE(review): BLAST+ protein DB volumes use .phr/.pin/.psq extensions;
    # 'nr.00.phd' here looks like a typo for 'nr.00.phr' -- confirm against
    # an actual nr installation before changing.
    if exists_not_empty(join_folder(BLASTDB_PATH, "nr.00.phd")):
        DB = join_folder(BLASTDB_PATH, "nr")
    else:
        print("Could not find the nr protein database in your $BLASTDB folder ({}).".format(BLASTDB_PATH))
        print("This means one of the following:\n"
              "1. You have the database but it's somewhere else.\n"
              "   If you do, please change the $BLASTDB variable to the nr location on your machine.\n"
              "2. You don't have the nr database at all.\n"
              "   If that's the case, please install it using this simple script: update_blastdb.pl\n"
              "   The script should already be installed as it comes with the Blast+ installation.\n"
              "Then run RecBlast again.")
        exit(1)
# target db (database of the original species)
if args.target_db:  # if the user provided
    TARGET_DB = args.target_db
else:
    TARGET_DB_FOLDER = join_folder(DB_FOLDER, ORG_TAX_ID)  # this is where our db should be
    # check if it exists - if so use it as a db
    if os.path.exists(TARGET_DB_FOLDER):
        TARGET_DB = join_folder(TARGET_DB_FOLDER, 'db')
        print "{} already has a local version of BLASTP DB!" .format(ORG_TAX_ID)
    else:  # if not create an alias
        # Build a species-restricted subset of the forward DB via a GI list.
        print("No local version of {} database exists. Creating a subset now.".format(ORG_TAX_ID))
        gi_file = join_folder(run_folder, "taxon_gi_file_list.txt")  # the path to the new file
        TARGET_DB = subset_db(ORG_TAX_ID, gi_file, DB_FOLDER, DB, args.run_even_if_no_db_found, DEBUG, debug)
# skipping the first and the second blasts:
# --skip_second_blast implies skipping the first blast as well.
run_first_blast = not (args.skip_first_blast or args.skip_second_blast)
run_second_blast = not args.skip_second_blast
#################
# Run main code #
#################
print "Welcome to RecBlast."
print "Run {0} started at: {1}".format(run_name, strftime('%H:%M:%S'))
# part 1: forward BLAST of origin-species genes against the (nr) DB.
print("starting to perform part_one.py")
id_dic, blast1_output_files, local_id_dic = part_one.main(CSV_PATH, APP_CONTACT_EMAIL, run_folder, FASTA_PATH,
                                                          FIRST_BLAST_FOLDER, FASTA_OUTPUT_FOLDER, BLASTP_PATH, DB,
                                                          TAXA_LIST_FILE, OUTFMT, MAX_TARGET_SEQS, E_VALUE_THRESH,
                                                          COVERAGE_THRESHOLD, CPU, RUN_ALL, DEBUG, debug,
                                                          run_first_blast)
print("BLASTP part 1 done!")
print("*******************")
# part 2: reciprocal BLAST of the part-1 hits back against the origin DB.
second_blast_for_ids_dict, blast2_output_files, blast2_gene_id_paths = part_two.main(local_id_dic, FIRST_BLAST_FOLDER,
                                                                                     SECOND_BLAST_FOLDER, ORIGINAL_ID,
                                                                                     E_VALUE_THRESH, IDENTITY_THRESHOLD,
                                                                                     COVERAGE_THRESHOLD,
                                                                                     ACCESSION_REGEX, run_folder,
                                                                                     BLASTP_PATH, TARGET_DB, OUTFMT,
                                                                                     MAX_TARGET_SEQS,
                                                                                     BACK_E_VALUE_THRESH, CPU,
                                                                                     ORG_TAX_ID, RUN_ALL, DEBUG, debug,
                                                                                     run_second_blast,
                                                                                     input_list=blast1_output_files)
print("BLASTP part 2 done!")
print("*******************")
# part 3: analyze the reciprocal hits and write the result CSV tables.
if part_three.main(SECOND_BLAST_FOLDER, BACK_E_VALUE_THRESH, IDENTITY_THRESHOLD, COVERAGE_THRESHOLD, TEXTUAL_MATCH,
                   TEXTUAL_SEQ_MATCH, ORIGIN_SPECIES, ACCESSION_REGEX,DESCRIPTION_REGEX, run_folder,
                   MAX_ATTEMPTS_TO_COMPLETE_REC_BLAST, CSV_RBH_OUTPUT_FILENAME, CSV_STRICT_OUTPUT_FILENAME,
                   CSV_NS_OUTPUT_FILENAME, FASTA_OUTPUT_FOLDER, DEBUG, debug, good_tax_list, id_dic,
                   second_blast_for_ids_dict, blast2_gene_id_paths):
    print("part 3 done!")
    print("*******************")
# Visual output:
try:
    debug("Creating images:")
    image_paths = generate_visual_graphs(CSV_RBH_OUTPUT_FILENAME, CSV_STRICT_OUTPUT_FILENAME, CSV_NS_OUTPUT_FILENAME)
    debug("Image paths saved!")
    print("Printed all viz files to folder {}".format(run_folder))
except Exception, e:
    print "Exception occurred while running the visualization part: {}".format(e)
# Zip results:
# NOTE(review): if the visualization block above failed before assigning
# image_paths, the reference below raises NameError -- it is caught by the
# broad except here, but the zip is then skipped entirely; confirm intended.
try:
    debug("Zipping results:")
    files_to_zip = [CSV_RBH_OUTPUT_FILENAME, CSV_STRICT_OUTPUT_FILENAME, CSV_NS_OUTPUT_FILENAME] + image_paths.values()
    zip_output_path = zip_results(FASTA_OUTPUT_FOLDER, files_to_zip, run_folder)
    print("saved zip output to: {}".format(zip_output_path))
except Exception, e:
    print "Exception occurred while running zip on output files: {}".format(e)
# cleaning:
if not DEBUG and not args.keep_files:
    if cleanup(run_folder, base_folder, run_name):
        print("Files archived, compressed and cleaned.")
print("Program done.")
| |
"""Factory function to initialize KNX devices from config."""
from __future__ import annotations
from xknx import XKNX
from xknx.devices import (
BinarySensor as XknxBinarySensor,
Climate as XknxClimate,
ClimateMode as XknxClimateMode,
Cover as XknxCover,
Device as XknxDevice,
Fan as XknxFan,
Light as XknxLight,
Notification as XknxNotification,
Scene as XknxScene,
Sensor as XknxSensor,
Switch as XknxSwitch,
Weather as XknxWeather,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME, CONF_TYPE
from homeassistant.helpers.typing import ConfigType
from .const import KNX_ADDRESS, ColorTempModes, SupportedPlatforms
from .schema import (
BinarySensorSchema,
ClimateSchema,
CoverSchema,
FanSchema,
LightSchema,
SceneSchema,
SensorSchema,
SwitchSchema,
WeatherSchema,
)
def create_knx_device(
    platform: SupportedPlatforms,
    knx_module: XKNX,
    config: ConfigType,
) -> XknxDevice:
    """Return the requested XKNX device built from a validated config."""
    # One dedicated factory helper per supported platform.
    factories = {
        SupportedPlatforms.LIGHT: _create_light,
        SupportedPlatforms.COVER: _create_cover,
        SupportedPlatforms.CLIMATE: _create_climate,
        SupportedPlatforms.SWITCH: _create_switch,
        SupportedPlatforms.SENSOR: _create_sensor,
        SupportedPlatforms.NOTIFY: _create_notify,
        SupportedPlatforms.SCENE: _create_scene,
        SupportedPlatforms.BINARY_SENSOR: _create_binary_sensor,
        SupportedPlatforms.WEATHER: _create_weather,
        SupportedPlatforms.FAN: _create_fan,
    }
    factory = factories.get(platform)
    if factory is not None:
        return factory(knx_module, config)
    # Mirror the original implicit fall-through for unknown platforms.
    return None
def _create_cover(knx_module: XKNX, config: ConfigType) -> XknxCover:
    """Build an XKNX Cover from the validated platform configuration."""
    # Optional group addresses come back as None when not configured.
    optional = config.get
    return XknxCover(
        knx_module,
        name=config[CONF_NAME],
        group_address_long=optional(CoverSchema.CONF_MOVE_LONG_ADDRESS),
        group_address_short=optional(CoverSchema.CONF_MOVE_SHORT_ADDRESS),
        group_address_stop=optional(CoverSchema.CONF_STOP_ADDRESS),
        group_address_position=optional(CoverSchema.CONF_POSITION_ADDRESS),
        group_address_position_state=optional(
            CoverSchema.CONF_POSITION_STATE_ADDRESS
        ),
        group_address_angle=optional(CoverSchema.CONF_ANGLE_ADDRESS),
        group_address_angle_state=optional(CoverSchema.CONF_ANGLE_STATE_ADDRESS),
        travel_time_down=config[CoverSchema.CONF_TRAVELLING_TIME_DOWN],
        travel_time_up=config[CoverSchema.CONF_TRAVELLING_TIME_UP],
        invert_position=config[CoverSchema.CONF_INVERT_POSITION],
        invert_angle=config[CoverSchema.CONF_INVERT_ANGLE],
        device_class=optional(CONF_DEVICE_CLASS),
    )
def _create_light_color(
    color: str, config: ConfigType
) -> tuple[str | None, str | None, str | None, str | None]:
    """Return (switch, switch_state, brightness, brightness_state) addresses
    for one color channel, or all-None when the channel is not configured."""
    if "individual_colors" not in config or color not in config["individual_colors"]:
        # No per-channel configuration for this color.
        return None, None, None, None
    channel_conf = config["individual_colors"][color]
    return (
        channel_conf.get(KNX_ADDRESS),
        channel_conf.get(LightSchema.CONF_STATE_ADDRESS),
        channel_conf.get(LightSchema.CONF_BRIGHTNESS_ADDRESS),
        channel_conf.get(LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS),
    )
def _create_light(knx_module: XKNX, config: ConfigType) -> XknxLight:
    """Return a KNX Light device to be used within XKNX."""
    # The same two config keys (color_temp address / state address) feed
    # either the absolute (color_temperature) or the relative
    # (tunable_white) kwargs, depending on the configured mode; the unused
    # pair stays None.
    group_address_tunable_white = None
    group_address_tunable_white_state = None
    group_address_color_temp = None
    group_address_color_temp_state = None
    if config[LightSchema.CONF_COLOR_TEMP_MODE] == ColorTempModes.ABSOLUTE:
        group_address_color_temp = config.get(LightSchema.CONF_COLOR_TEMP_ADDRESS)
        group_address_color_temp_state = config.get(
            LightSchema.CONF_COLOR_TEMP_STATE_ADDRESS
        )
    elif config[LightSchema.CONF_COLOR_TEMP_MODE] == ColorTempModes.RELATIVE:
        group_address_tunable_white = config.get(LightSchema.CONF_COLOR_TEMP_ADDRESS)
        group_address_tunable_white_state = config.get(
            LightSchema.CONF_COLOR_TEMP_STATE_ADDRESS
        )

    # Per-channel addresses for individual-color setups; each tuple is
    # (switch, switch_state, brightness, brightness_state), all None when
    # the channel is not configured.
    (
        red_switch,
        red_switch_state,
        red_brightness,
        red_brightness_state,
    ) = _create_light_color(LightSchema.CONF_RED, config)
    (
        green_switch,
        green_switch_state,
        green_brightness,
        green_brightness_state,
    ) = _create_light_color(LightSchema.CONF_GREEN, config)
    (
        blue_switch,
        blue_switch_state,
        blue_brightness,
        blue_brightness_state,
    ) = _create_light_color(LightSchema.CONF_BLUE, config)
    (
        white_switch,
        white_switch_state,
        white_brightness,
        white_brightness_state,
    ) = _create_light_color(LightSchema.CONF_WHITE, config)

    return XknxLight(
        knx_module,
        name=config[CONF_NAME],
        group_address_switch=config.get(KNX_ADDRESS),
        group_address_switch_state=config.get(LightSchema.CONF_STATE_ADDRESS),
        group_address_brightness=config.get(LightSchema.CONF_BRIGHTNESS_ADDRESS),
        group_address_brightness_state=config.get(
            LightSchema.CONF_BRIGHTNESS_STATE_ADDRESS
        ),
        group_address_color=config.get(LightSchema.CONF_COLOR_ADDRESS),
        group_address_color_state=config.get(LightSchema.CONF_COLOR_STATE_ADDRESS),
        group_address_rgbw=config.get(LightSchema.CONF_RGBW_ADDRESS),
        group_address_rgbw_state=config.get(LightSchema.CONF_RGBW_STATE_ADDRESS),
        group_address_tunable_white=group_address_tunable_white,
        group_address_tunable_white_state=group_address_tunable_white_state,
        group_address_color_temperature=group_address_color_temp,
        group_address_color_temperature_state=group_address_color_temp_state,
        group_address_switch_red=red_switch,
        group_address_switch_red_state=red_switch_state,
        group_address_brightness_red=red_brightness,
        group_address_brightness_red_state=red_brightness_state,
        group_address_switch_green=green_switch,
        group_address_switch_green_state=green_switch_state,
        group_address_brightness_green=green_brightness,
        group_address_brightness_green_state=green_brightness_state,
        group_address_switch_blue=blue_switch,
        group_address_switch_blue_state=blue_switch_state,
        group_address_brightness_blue=blue_brightness,
        group_address_brightness_blue_state=blue_brightness_state,
        group_address_switch_white=white_switch,
        group_address_switch_white_state=white_switch_state,
        group_address_brightness_white=white_brightness,
        group_address_brightness_white_state=white_brightness_state,
        min_kelvin=config[LightSchema.CONF_MIN_KELVIN],
        max_kelvin=config[LightSchema.CONF_MAX_KELVIN],
    )
def _create_climate(knx_module: XKNX, config: ConfigType) -> XknxClimate:
    """Return a KNX Climate device to be used within XKNX."""
    # Mode handling (operation / controller modes, heat-cool switching)
    # lives in a companion ClimateMode device that is attached to the
    # Climate device below via its `mode` kwarg.
    climate_mode = XknxClimateMode(
        knx_module,
        name=f"{config[CONF_NAME]} Mode",
        group_address_operation_mode=config.get(
            ClimateSchema.CONF_OPERATION_MODE_ADDRESS
        ),
        group_address_operation_mode_state=config.get(
            ClimateSchema.CONF_OPERATION_MODE_STATE_ADDRESS
        ),
        group_address_controller_status=config.get(
            ClimateSchema.CONF_CONTROLLER_STATUS_ADDRESS
        ),
        group_address_controller_status_state=config.get(
            ClimateSchema.CONF_CONTROLLER_STATUS_STATE_ADDRESS
        ),
        group_address_controller_mode=config.get(
            ClimateSchema.CONF_CONTROLLER_MODE_ADDRESS
        ),
        group_address_controller_mode_state=config.get(
            ClimateSchema.CONF_CONTROLLER_MODE_STATE_ADDRESS
        ),
        group_address_operation_mode_protection=config.get(
            ClimateSchema.CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS
        ),
        group_address_operation_mode_night=config.get(
            ClimateSchema.CONF_OPERATION_MODE_NIGHT_ADDRESS
        ),
        group_address_operation_mode_comfort=config.get(
            ClimateSchema.CONF_OPERATION_MODE_COMFORT_ADDRESS
        ),
        group_address_operation_mode_standby=config.get(
            ClimateSchema.CONF_OPERATION_MODE_STANDBY_ADDRESS
        ),
        group_address_heat_cool=config.get(ClimateSchema.CONF_HEAT_COOL_ADDRESS),
        group_address_heat_cool_state=config.get(
            ClimateSchema.CONF_HEAT_COOL_STATE_ADDRESS
        ),
        operation_modes=config.get(ClimateSchema.CONF_OPERATION_MODES),
        controller_modes=config.get(ClimateSchema.CONF_CONTROLLER_MODES),
    )
    # The Climate device itself handles temperature readout, target
    # temperature and setpoint shifting.
    return XknxClimate(
        knx_module,
        name=config[CONF_NAME],
        group_address_temperature=config[ClimateSchema.CONF_TEMPERATURE_ADDRESS],
        group_address_target_temperature=config.get(
            ClimateSchema.CONF_TARGET_TEMPERATURE_ADDRESS
        ),
        group_address_target_temperature_state=config[
            ClimateSchema.CONF_TARGET_TEMPERATURE_STATE_ADDRESS
        ],
        group_address_setpoint_shift=config.get(
            ClimateSchema.CONF_SETPOINT_SHIFT_ADDRESS
        ),
        group_address_setpoint_shift_state=config.get(
            ClimateSchema.CONF_SETPOINT_SHIFT_STATE_ADDRESS
        ),
        setpoint_shift_mode=config[ClimateSchema.CONF_SETPOINT_SHIFT_MODE],
        setpoint_shift_max=config[ClimateSchema.CONF_SETPOINT_SHIFT_MAX],
        setpoint_shift_min=config[ClimateSchema.CONF_SETPOINT_SHIFT_MIN],
        temperature_step=config[ClimateSchema.CONF_TEMPERATURE_STEP],
        group_address_on_off=config.get(ClimateSchema.CONF_ON_OFF_ADDRESS),
        group_address_on_off_state=config.get(ClimateSchema.CONF_ON_OFF_STATE_ADDRESS),
        min_temp=config.get(ClimateSchema.CONF_MIN_TEMP),
        max_temp=config.get(ClimateSchema.CONF_MAX_TEMP),
        mode=climate_mode,
        on_off_invert=config[ClimateSchema.CONF_ON_OFF_INVERT],
        create_temperature_sensors=config[
            ClimateSchema.CONF_CREATE_TEMPERATURE_SENSORS
        ],
    )
def _create_switch(knx_module: XKNX, config: ConfigType) -> XknxSwitch:
    """Build an XKNX Switch from the validated platform configuration."""
    switch = XknxSwitch(
        knx_module,
        name=config[CONF_NAME],
        group_address=config[KNX_ADDRESS],
        group_address_state=config.get(SwitchSchema.CONF_STATE_ADDRESS),
        invert=config[SwitchSchema.CONF_INVERT],
    )
    return switch
def _create_sensor(knx_module: XKNX, config: ConfigType) -> XknxSensor:
    """Build an XKNX Sensor from the validated platform configuration."""
    sensor = XknxSensor(
        knx_module,
        name=config[CONF_NAME],
        group_address_state=config[SensorSchema.CONF_STATE_ADDRESS],
        value_type=config[CONF_TYPE],
        sync_state=config[SensorSchema.CONF_SYNC_STATE],
        always_callback=config[SensorSchema.CONF_ALWAYS_CALLBACK],
    )
    return sensor
def _create_notify(knx_module: XKNX, config: ConfigType) -> XknxNotification:
    """Build an XKNX Notification from the validated platform configuration."""
    device_name = config[CONF_NAME]
    return XknxNotification(
        knx_module,
        name=device_name,
        group_address=config[KNX_ADDRESS],
    )
def _create_scene(knx_module: XKNX, config: ConfigType) -> XknxScene:
    """Build an XKNX Scene from the validated platform configuration."""
    scene = XknxScene(
        knx_module,
        name=config[CONF_NAME],
        group_address=config[KNX_ADDRESS],
        scene_number=config[SceneSchema.CONF_SCENE_NUMBER],
    )
    return scene
def _create_binary_sensor(knx_module: XKNX, config: ConfigType) -> XknxBinarySensor:
    """Build an XKNX BinarySensor from the validated platform configuration."""
    return XknxBinarySensor(
        knx_module,
        name=config[CONF_NAME],
        group_address_state=config[BinarySensorSchema.CONF_STATE_ADDRESS],
        invert=config[BinarySensorSchema.CONF_INVERT],
        sync_state=config[BinarySensorSchema.CONF_SYNC_STATE],
        ignore_internal_state=config[BinarySensorSchema.CONF_IGNORE_INTERNAL_STATE],
        device_class=config.get(CONF_DEVICE_CLASS),
        context_timeout=config.get(BinarySensorSchema.CONF_CONTEXT_TIMEOUT),
        reset_after=config.get(BinarySensorSchema.CONF_RESET_AFTER),
    )
def _create_weather(knx_module: XKNX, config: ConfigType) -> XknxWeather:
    """Return a KNX weather device to be used within XKNX."""
    # Only the temperature address is mandatory; every other measurement
    # and alarm address is optional and defaults to None.
    return XknxWeather(
        knx_module,
        name=config[CONF_NAME],
        sync_state=config[WeatherSchema.CONF_SYNC_STATE],
        create_sensors=config[WeatherSchema.CONF_KNX_CREATE_SENSORS],
        group_address_temperature=config[WeatherSchema.CONF_KNX_TEMPERATURE_ADDRESS],
        group_address_brightness_south=config.get(
            WeatherSchema.CONF_KNX_BRIGHTNESS_SOUTH_ADDRESS
        ),
        group_address_brightness_east=config.get(
            WeatherSchema.CONF_KNX_BRIGHTNESS_EAST_ADDRESS
        ),
        group_address_brightness_west=config.get(
            WeatherSchema.CONF_KNX_BRIGHTNESS_WEST_ADDRESS
        ),
        group_address_brightness_north=config.get(
            WeatherSchema.CONF_KNX_BRIGHTNESS_NORTH_ADDRESS
        ),
        group_address_wind_speed=config.get(WeatherSchema.CONF_KNX_WIND_SPEED_ADDRESS),
        group_address_wind_bearing=config.get(
            WeatherSchema.CONF_KNX_WIND_BEARING_ADDRESS
        ),
        group_address_rain_alarm=config.get(WeatherSchema.CONF_KNX_RAIN_ALARM_ADDRESS),
        group_address_frost_alarm=config.get(
            WeatherSchema.CONF_KNX_FROST_ALARM_ADDRESS
        ),
        group_address_wind_alarm=config.get(WeatherSchema.CONF_KNX_WIND_ALARM_ADDRESS),
        group_address_day_night=config.get(WeatherSchema.CONF_KNX_DAY_NIGHT_ADDRESS),
        group_address_air_pressure=config.get(
            WeatherSchema.CONF_KNX_AIR_PRESSURE_ADDRESS
        ),
        group_address_humidity=config.get(WeatherSchema.CONF_KNX_HUMIDITY_ADDRESS),
    )
def _create_fan(knx_module: XKNX, config: ConfigType) -> XknxFan:
    """Build an XKNX Fan from the validated platform configuration."""
    return XknxFan(
        knx_module,
        name=config[CONF_NAME],
        group_address_speed=config.get(KNX_ADDRESS),
        group_address_speed_state=config.get(FanSchema.CONF_STATE_ADDRESS),
        group_address_oscillation=config.get(FanSchema.CONF_OSCILLATION_ADDRESS),
        group_address_oscillation_state=config.get(
            FanSchema.CONF_OSCILLATION_STATE_ADDRESS
        ),
        max_step=config.get(FanSchema.CONF_MAX_STEP),
    )
| |
""" test parquet compat """
import pytest
import datetime
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.compat import PY3, is_platform_windows, is_platform_mac
from pandas.io.parquet import (to_parquet, read_parquet, get_engine,
PyArrowImpl, FastParquetImpl)
from pandas.util import testing as tm
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
# setup engines & skips
@pytest.fixture(params=[
    pytest.param(
        'fastparquet',
        marks=pytest.mark.skipif(
            not _HAVE_FASTPARQUET,
            reason='fastparquet is not installed')),
    pytest.param(
        'pyarrow',
        marks=pytest.mark.skipif(
            not _HAVE_PYARROW,
            reason='pyarrow is not installed')),
])
def engine(request):
    """Parametrized fixture yielding each parquet engine available here."""
    return request.param
@pytest.fixture
def pa():
    """pyarrow engine name; skips the test when pyarrow is absent."""
    if _HAVE_PYARROW:
        return 'pyarrow'
    pytest.skip("pyarrow is not installed")
@pytest.fixture
def pa_lt_070():
    """pyarrow engine restricted to installed versions older than 0.7.0."""
    if not _HAVE_PYARROW:
        pytest.skip("pyarrow is not installed")
    if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
        return 'pyarrow'
    pytest.skip("pyarrow is >= 0.7.0")
@pytest.fixture
def pa_ge_070():
    """pyarrow engine restricted to installed versions >= 0.7.0."""
    if not _HAVE_PYARROW:
        pytest.skip("pyarrow is not installed")
    if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'):
        return 'pyarrow'
    pytest.skip("pyarrow is < 0.7.0")
@pytest.fixture
def fp():
    """fastparquet engine name; skips the test when fastparquet is absent."""
    if _HAVE_FASTPARQUET:
        return 'fastparquet'
    pytest.skip("fastparquet is not installed")
@pytest.fixture
def fp_lt_014():
    """fastparquet engine restricted to versions older than 0.1.4."""
    if not _HAVE_FASTPARQUET:
        pytest.skip("fastparquet is not installed")
    if LooseVersion(fastparquet.__version__) < LooseVersion('0.1.4'):
        return 'fastparquet'
    pytest.skip("fastparquet is >= 0.1.4")
@pytest.fixture
def df_compat():
    """Small frame used for engine/option compatibility tests."""
    data = {'A': [1, 2, 3], 'B': 'foo'}
    return pd.DataFrame(data)
@pytest.fixture
def df_cross_compat():
    """Frame with dtypes readable by both engines.

    Unsigned ints ('c'), tz-aware datetimes ('g') and ns-frequency
    datetimes ('h') are deliberately omitted: they do not round-trip
    across both engines.
    """
    return pd.DataFrame({
        'a': list('abc'),
        'b': list(range(1, 4)),
        'd': np.arange(4.0, 7.0, dtype='float64'),
        'e': [True, False, True],
        'f': pd.date_range('20130101', periods=3),
    })
@pytest.fixture
def df_full():
    """Frame exercising every dtype both engines are expected to support."""
    columns = {
        'string': list('abc'),
        'string_with_nan': ['a', np.nan, 'c'],
        'string_with_none': ['a', None, 'c'],
        'bytes': [b'foo', b'bar', b'baz'],
        'unicode': [u'foo', u'bar', u'baz'],
        'int': list(range(1, 4)),
        'uint': np.arange(3, 6).astype('u1'),
        'float': np.arange(4.0, 7.0, dtype='float64'),
        'float_with_nan': [2., np.nan, 3.],
        'bool': [True, False, True],
        'datetime': pd.date_range('20130101', periods=3),
        'datetime_with_nat': [pd.Timestamp('20130101'),
                              pd.NaT,
                              pd.Timestamp('20130103')],
    }
    return pd.DataFrame(columns)
def check_round_trip(df, engine=None, path=None,
                     write_kwargs=None, read_kwargs=None,
                     expected=None, check_names=True,
                     repeat=2):
    """Verify parquet serializer and deserializer produce the same results.

    Writes *df* to parquet and reads it back *repeat* times, comparing
    each deserialized frame against *expected*.

    Parameters
    ----------
    df: DataFrame
    engine: str, optional
        'pyarrow' or 'fastparquet'
    path: str, optional
        Target location; a temporary file is used when omitted.
    write_kwargs: dict of str:str, optional
    read_kwargs: dict of str:str, optional
    expected: DataFrame, optional
        Expected deserialization result, otherwise will be equal to `df`
    check_names: bool, optional
        Whether index/column names must survive the round trip.
    repeat: int, optional
        How many times to repeat the test
    """
    write_kwargs = write_kwargs or {'compression': None}
    read_kwargs = read_kwargs or {}
    expected = df if expected is None else expected

    if engine:
        write_kwargs['engine'] = engine
        read_kwargs['engine'] = engine

    def _round_trip(times):
        # Serialize repeatedly to catch state leaking between writes.
        for _ in range(times):
            df.to_parquet(path, **write_kwargs)
            with catch_warnings(record=True):
                actual = read_parquet(path, **read_kwargs)
                tm.assert_frame_equal(expected, actual,
                                      check_names=check_names)

    if path is None:
        with tm.ensure_clean() as path:
            _round_trip(repeat)
    else:
        _round_trip(repeat)
def test_invalid_engine(df_compat):
    """An unknown engine name must raise ValueError."""
    bad_engine = 'foo'
    with pytest.raises(ValueError):
        check_round_trip(df_compat, bad_engine, 'bar')
def test_options_py(df_compat, pa):
    """Round trip honours io.parquet.engine='pyarrow' set via option."""
    option_name = 'io.parquet.engine'
    with pd.option_context(option_name, 'pyarrow'):
        check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
    """Round trip honours io.parquet.engine='fastparquet' set via option."""
    option_name = 'io.parquet.engine'
    with pd.option_context(option_name, 'fastparquet'):
        check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
    """Round trip works with io.parquet.engine='auto' (engine picked)."""
    option_name = 'io.parquet.engine'
    with pd.option_context(option_name, 'auto'):
        check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
    """Explicit engine names always resolve; 'auto' follows the option."""
    assert isinstance(get_engine('pyarrow'), PyArrowImpl)
    assert isinstance(get_engine('fastparquet'), FastParquetImpl)

    # (option value, implementation 'auto' should resolve to)
    expectations = [('pyarrow', PyArrowImpl),
                    ('fastparquet', FastParquetImpl),
                    ('auto', PyArrowImpl)]
    for option_value, auto_impl in expectations:
        with pd.option_context('io.parquet.engine', option_value):
            assert isinstance(get_engine('auto'), auto_impl)
            # explicit names are never affected by the option
            assert isinstance(get_engine('pyarrow'), PyArrowImpl)
            assert isinstance(get_engine('fastparquet'), FastParquetImpl)
@pytest.mark.xfail(is_platform_windows() or is_platform_mac(),
                   reason="reading pa metadata failing on Windows/mac")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
    """Write with pyarrow, read back with fastparquet."""
    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=pa, compression=None)

        full = read_parquet(path, engine=fp)
        tm.assert_frame_equal(full, df)

        subset = read_parquet(path, engine=fp, columns=['a', 'd'])
        tm.assert_frame_equal(subset, df[['a', 'd']])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
    """Write with fastparquet, read back with pyarrow."""
    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=fp, compression=None)

        with catch_warnings(record=True):
            full = read_parquet(path, engine=pa)
            tm.assert_frame_equal(full, df)

            subset = read_parquet(path, engine=pa, columns=['a', 'd'])
            tm.assert_frame_equal(subset, df[['a', 'd']])
class Base(object):
    """Shared helpers for the parquet engine test classes."""

    def check_error_on_write(self, df, engine, exc):
        """Assert that serializing *df* via *engine* raises *exc*."""
        with tm.ensure_clean() as path, pytest.raises(exc):
            to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
    """Engine-agnostic round-trip and error-path tests."""

    def test_error(self, engine):
        # Only DataFrames are serializable; anything else must raise.
        for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
                    np.array([1, 2, 3])]:
            self.check_error_on_write(obj, engine, ValueError)

    def test_columns_dtypes(self, engine):
        df = pd.DataFrame({'string': list('abc'),
                           'int': list(range(1, 4))})

        # unicode
        df.columns = [u'foo', u'bar']
        check_round_trip(df, engine)

    def test_columns_dtypes_invalid(self, engine):
        # Parquet requires string column names; other dtypes must raise.
        df = pd.DataFrame({'string': list('abc'),
                           'int': list(range(1, 4))})

        # numeric
        df.columns = [0, 1]
        self.check_error_on_write(df, engine, ValueError)

        if PY3:
            # bytes on PY3, on PY2 these are str
            df.columns = [b'foo', b'bar']
            self.check_error_on_write(df, engine, ValueError)

        # python object
        df.columns = [datetime.datetime(2011, 1, 1, 0, 0),
                      datetime.datetime(2011, 1, 1, 1, 1)]
        self.check_error_on_write(df, engine, ValueError)

    @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli'])
    def test_compression(self, engine, compression):
        # codec availability varies by environment; skip when missing
        if compression == 'snappy':
            pytest.importorskip('snappy')
        elif compression == 'brotli':
            pytest.importorskip('brotli')

        df = pd.DataFrame({'A': [1, 2, 3]})
        check_round_trip(df, engine, write_kwargs={'compression': compression})

    def test_read_columns(self, engine):
        # GH18154
        df = pd.DataFrame({'string': list('abc'),
                           'int': list(range(1, 4))})

        expected = pd.DataFrame({'string': list('abc')})
        check_round_trip(df, engine, expected=expected,
                         read_kwargs={'columns': ['string']})

    def test_write_index(self, engine):
        # fastparquet does not preserve index names
        check_names = engine != 'fastparquet'

        if engine == 'pyarrow':
            import pyarrow
            if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'):
                pytest.skip("pyarrow is < 0.7.0")

        df = pd.DataFrame({'A': [1, 2, 3]})
        check_round_trip(df, engine)

        indexes = [
            [2, 3, 4],
            pd.date_range('20130101', periods=3),
            list('abc'),
            [1, 3, 4],
        ]
        # non-default index
        for index in indexes:
            df.index = index
            check_round_trip(df, engine, check_names=check_names)

        # index with meta-data
        df.index = [0, 1, 2]
        df.index.name = 'foo'
        check_round_trip(df, engine)

    def test_write_multiindex(self, pa_ge_070):
        # Not supported in fastparquet as of 0.1.3 or older pyarrow version
        engine = pa_ge_070

        df = pd.DataFrame({'A': [1, 2, 3]})
        index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
        df.index = index
        check_round_trip(df, engine)

    def test_write_column_multiindex(self, engine):
        # column multi-index is rejected by both engines
        mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
        df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
        self.check_error_on_write(df, engine, ValueError)

    def test_multiindex_with_columns(self, pa_ge_070):
        engine = pa_ge_070
        dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS')
        df = pd.DataFrame(np.random.randn(2 * len(dates), 3),
                          columns=list('ABC'))
        index1 = pd.MultiIndex.from_product(
            [['Level1', 'Level2'], dates],
            names=['level', 'date'])
        # round-trips both with and without level names
        index2 = index1.copy(names=None)
        for index in [index1, index2]:
            df.index = index
            check_round_trip(df, engine)
            check_round_trip(df, engine, read_kwargs={'columns': ['A', 'B']},
                             expected=df[['A', 'B']])
class TestParquetPyArrow(Base):
    """pyarrow-engine specific behaviour."""

    def test_basic(self, pa, df_full):
        df = df_full

        # additional supported types for pyarrow
        import pyarrow
        if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'):
            df['datetime_tz'] = pd.date_range('20130101', periods=3,
                                              tz='Europe/Brussels')
        df['bool_with_none'] = [True, None, True]

        check_round_trip(df, pa)

    @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)")
    def test_basic_subset_columns(self, pa, df_full):
        # GH18628
        df = df_full
        # additional supported types for pyarrow
        df['datetime_tz'] = pd.date_range('20130101', periods=3,
                                          tz='Europe/Brussels')

        check_round_trip(df, pa, expected=df[['string', 'int']],
                         read_kwargs={'columns': ['string', 'int']})

    def test_duplicate_columns(self, pa):
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3),
                          columns=list('aaa')).copy()
        self.check_error_on_write(df, pa, ValueError)

    def test_unsupported(self, pa):
        # period
        df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
        self.check_error_on_write(df, pa, ValueError)

        # timedelta
        df = pd.DataFrame({'a': pd.timedelta_range('1 day',
                                                   periods=3)})
        self.check_error_on_write(df, pa, NotImplementedError)

        # mixed python objects
        df = pd.DataFrame({'a': ['a', 1, 2.0]})
        self.check_error_on_write(df, pa, ValueError)

    def test_categorical(self, pa_ge_070):
        pa = pa_ge_070

        # supported in >= 0.7.0
        df = pd.DataFrame({'a': pd.Categorical(list('abc'))})

        # de-serialized as object
        expected = df.assign(a=df.a.astype(object))
        check_round_trip(df, pa, expected=expected)

    def test_categorical_unsupported(self, pa_lt_070):
        pa = pa_lt_070

        # supported in >= 0.7.0
        df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
        self.check_error_on_write(df, pa, NotImplementedError)

    def test_s3_roundtrip(self, df_compat, s3_resource, pa):
        # GH #19134
        check_round_trip(df_compat, pa,
                         path='s3://pandas-test/pyarrow.parquet')
class TestParquetFastParquet(Base):
    """fastparquet-engine specific behaviour."""

    def test_basic(self, fp, df_full):
        df = df_full

        # additional supported types for fastparquet
        if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'):
            df['datetime_tz'] = pd.date_range('20130101', periods=3,
                                              tz='US/Eastern')
        df['timedelta'] = pd.timedelta_range('1 day', periods=3)
        check_round_trip(df, fp)

    @pytest.mark.skip(reason="not supported")
    def test_duplicate_columns(self, fp):
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3),
                          columns=list('aaa')).copy()
        self.check_error_on_write(df, fp, ValueError)

    def test_bool_with_none(self, fp):
        # object bools with missing values come back as float16
        df = pd.DataFrame({'a': [True, None, False]})
        expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16')
        check_round_trip(df, fp, expected=expected)

    def test_unsupported(self, fp):
        # period
        df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
        self.check_error_on_write(df, fp, ValueError)

        # mixed
        df = pd.DataFrame({'a': ['a', 1, 2.0]})
        self.check_error_on_write(df, fp, ValueError)

    def test_categorical(self, fp):
        if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"):
            pytest.skip("CategoricalDtype not supported for older fp")
        df = pd.DataFrame({'a': pd.Categorical(list('abc'))})
        check_round_trip(df, fp)

    def test_datetime_tz(self, fp_lt_014):
        # fastparquet<0.1.4 doesn't preserve tz
        df = pd.DataFrame({'a': pd.date_range('20130101', periods=3,
                                              tz='US/Eastern')})

        # warns on the coercion
        with catch_warnings(record=True):
            check_round_trip(df, fp_lt_014,
                             expected=df.astype('datetime64[ns]'))

    def test_filter_row_groups(self, fp):
        # fastparquet-only: predicate pushdown via `filters`
        d = {'a': list(range(0, 3))}
        df = pd.DataFrame(d)
        with tm.ensure_clean() as path:
            df.to_parquet(path, fp, compression=None,
                          row_group_offsets=1)
            result = read_parquet(path, fp, filters=[('a', '==', 0)])
        assert len(result) == 1

    def test_s3_roundtrip(self, df_compat, s3_resource, fp):
        # GH #19134
        check_round_trip(df_compat, fp,
                         path='s3://pandas-test/fastparquet.parquet')
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the library page."""
import json
import logging
import string
from constants import constants
from core.domain import acl_decorators
from core.controllers import base
from core.domain import collection_services
from core.domain import exp_services
from core.domain import summary_services
from core.domain import user_services
from core.platform import models
import feconf
import utils
(base_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration])
current_user_services = models.Registry.import_current_user_services()
def get_matching_activity_dicts(query_string, search_cursor):
    """Given a query string and a search cursor, returns a list of activity
    dicts that satisfy the search query.

    Args:
        query_string: str. The search query.
        search_cursor: str or None. Cursor for paging through exploration
            results; collections are only fetched on the initial page.

    Returns:
        A 2-tuple of (activity_list, new_search_cursor) where activity_list
        contains displayable collection summary dicts followed by
        displayable exploration summary dicts.
    """
    # We only populate collections in the initial load, since the current
    # frontend search infrastructure is set up to only deal with one search
    # cursor at a time.
    # TODO(sll): Remove this special casing.
    collection_ids = []
    if not search_cursor:
        collection_ids, _ = (
            collection_services.get_collection_ids_matching_query(
                query_string))

    exp_ids, new_search_cursor = (
        exp_services.get_exploration_ids_matching_query(
            query_string, cursor=search_cursor))

    # Fix: removed a dead `activity_list = []` assignment that was
    # immediately overwritten by the line below.
    activity_list = (
        summary_services.get_displayable_collection_summary_dicts_matching_ids(
            collection_ids))
    activity_list += (
        summary_services.get_displayable_exp_summary_dicts_matching_ids(
            exp_ids))

    if len(activity_list) == feconf.DEFAULT_QUERY_LIMIT:
        logging.error(
            '%s activities were fetched to load the library page. '
            'You may be running up against the default query limits.'
            % feconf.DEFAULT_QUERY_LIMIT)
    return activity_list, new_search_cursor
class LibraryPage(base.BaseHandler):
    """The main library page. Used for both the default list of categories and
    for search results.
    """

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests."""
        # Search mode is detected purely from the requested URL.
        search_mode = 'search' in self.request.url
        if search_mode:
            page_mode = feconf.LIBRARY_PAGE_MODE_SEARCH
            meta_description = feconf.SEARCH_PAGE_DESCRIPTION
        else:
            page_mode = feconf.LIBRARY_PAGE_MODE_INDEX
            meta_description = feconf.LIBRARY_PAGE_DESCRIPTION

        self.values.update({
            'meta_description': meta_description,
            'nav_mode': feconf.NAV_MODE_LIBRARY,
            'has_fully_registered': bool(
                self.user_id and
                user_services.has_fully_registered(self.user_id)),
            'LANGUAGE_CODES_AND_NAMES': (
                utils.get_all_language_codes_and_names()),
            'page_mode': page_mode,
            'SEARCH_DROPDOWN_CATEGORIES': feconf.SEARCH_DROPDOWN_CATEGORIES,
        })
        self.render_template('pages/library/library.html')
class LibraryIndexHandler(base.BaseHandler):
    """Provides data for the default library index page."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests."""
        # TODO(sll): Support index pages for other language codes.
        summary_dicts_by_category = summary_services.get_library_groups([
            constants.DEFAULT_LANGUAGE_CODE])
        top_rated_activity_summary_dicts = (
            summary_services.get_top_rated_exploration_summary_dicts(
                [constants.DEFAULT_LANGUAGE_CODE],
                feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FOR_LIBRARY_PAGE))
        featured_activity_summary_dicts = (
            summary_services.get_featured_activity_summary_dicts(
                [constants.DEFAULT_LANGUAGE_CODE]))

        preferred_language_codes = [constants.DEFAULT_LANGUAGE_CODE]
        if self.user_id:
            user_settings = user_services.get_user_settings(self.user_id)
            preferred_language_codes = user_settings.preferred_language_codes

        # Both special groups are inserted at position 0, so the final
        # ordering is: featured first, then top-rated, then the regular
        # category groups.
        if top_rated_activity_summary_dicts:
            summary_dicts_by_category.insert(0, {
                'activity_summary_dicts': top_rated_activity_summary_dicts,
                'categories': [],
                'header_i18n_id': (
                    feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS),
                'has_full_results_page': True,
                'full_results_url': feconf.LIBRARY_TOP_RATED_URL,
                'protractor_id': 'top-rated',
            })
        if featured_activity_summary_dicts:
            summary_dicts_by_category.insert(0, {
                'activity_summary_dicts': featured_activity_summary_dicts,
                'categories': [],
                'header_i18n_id': feconf.LIBRARY_CATEGORY_FEATURED_ACTIVITIES,
                'has_full_results_page': False,
                'full_results_url': None,
            })

        self.values.update({
            'activity_summary_dicts_by_category': (
                summary_dicts_by_category),
            'preferred_language_codes': preferred_language_codes,
        })
        self.render_json(self.values)
class LibraryGroupPage(base.BaseHandler):
    """The page for displaying top rated and recently published explorations.
    """

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests."""
        has_fully_registered = bool(
            self.user_id and
            user_services.has_fully_registered(self.user_id))
        self.values.update({
            'meta_description': (
                feconf.LIBRARY_GROUP_PAGE_DESCRIPTION),
            'nav_mode': feconf.NAV_MODE_LIBRARY,
            'has_fully_registered': has_fully_registered,
            'LANGUAGE_CODES_AND_NAMES': (
                utils.get_all_language_codes_and_names()),
            'page_mode': feconf.LIBRARY_PAGE_MODE_GROUP,
            'SEARCH_DROPDOWN_CATEGORIES': feconf.SEARCH_DROPDOWN_CATEGORIES,
        })
        self.render_template('pages/library/library.html')
class LibraryGroupIndexHandler(base.BaseHandler):
    """Provides data for categories such as top rated and recently published."""

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests for group pages.

        Raises:
            PageNotFoundException: if the requested group_name is unknown.
        """
        # TODO(sll): Support index pages for other language codes.
        group_name = self.request.get('group_name')
        activity_list = []
        header_i18n_id = ''

        if group_name == feconf.LIBRARY_GROUP_RECENTLY_PUBLISHED:
            recently_published_summary_dicts = (
                summary_services.get_recently_published_exp_summary_dicts(
                    feconf.RECENTLY_PUBLISHED_QUERY_LIMIT_FULL_PAGE))
            if recently_published_summary_dicts:
                activity_list = recently_published_summary_dicts
                header_i18n_id = feconf.LIBRARY_CATEGORY_RECENTLY_PUBLISHED
        elif group_name == feconf.LIBRARY_GROUP_TOP_RATED:
            top_rated_activity_summary_dicts = (
                summary_services.get_top_rated_exploration_summary_dicts(
                    [constants.DEFAULT_LANGUAGE_CODE],
                    feconf.NUMBER_OF_TOP_RATED_EXPLORATIONS_FULL_PAGE))
            if top_rated_activity_summary_dicts:
                activity_list = top_rated_activity_summary_dicts
                header_i18n_id = feconf.LIBRARY_CATEGORY_TOP_RATED_EXPLORATIONS
        else:
            # Fix: the exception was previously *returned* instead of raised,
            # so unknown group names fell through and rendered an empty JSON
            # payload rather than a 404.
            raise self.PageNotFoundException

        preferred_language_codes = [constants.DEFAULT_LANGUAGE_CODE]
        if self.user_id:
            user_settings = user_services.get_user_settings(self.user_id)
            preferred_language_codes = user_settings.preferred_language_codes

        self.values.update({
            'activity_list': activity_list,
            'header_i18n_id': header_i18n_id,
            'preferred_language_codes': preferred_language_codes,
        })
        self.render_json(self.values)
class SearchHandler(base.BaseHandler):
    """Provides data for activity search results."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests."""
        raw_query = utils.unescape_encoded_uri_component(
            self.request.get('q'))
        # Delete all punctuation characters from the query string.
        # See http://stackoverflow.com/a/266162 and
        # http://stackoverflow.com/a/11693937
        punctuation_table = {
            ord(char): None for char in string.punctuation}
        query_string = raw_query.translate(punctuation_table)

        # Optional filters are appended to the query in key=value form.
        if self.request.get('category'):
            query_string += ' category=%s' % self.request.get('category')
        if self.request.get('language_code'):
            query_string += ' language_code=%s' % self.request.get(
                'language_code')
        search_cursor = self.request.get('cursor', None)

        activity_list, new_search_cursor = get_matching_activity_dicts(
            query_string, search_cursor)

        self.values.update({
            'activity_list': activity_list,
            'search_cursor': new_search_cursor,
        })
        self.render_json(self.values)
class LibraryRedirectPage(base.BaseHandler):
    """Redirects the legacy 'gallery' URL to the library index page."""

    @acl_decorators.open_access
    def get(self):
        """Issues a redirect to the library index page."""
        self.redirect('/library')
class ExplorationSummariesHandler(base.BaseHandler):
    """Returns summaries corresponding to ids of public explorations. This
    controller supports returning private explorations for the given user.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests."""
        try:
            exp_ids = json.loads(self.request.get('stringified_exp_ids'))
        except Exception:
            # Malformed or missing JSON in the query parameter.
            raise self.PageNotFoundException

        include_private_exps_str = self.request.get(
            'include_private_explorations')
        wants_private = (
            include_private_exps_str.lower() == 'true'
            if include_private_exps_str else False)
        # Private explorations can only be included for a logged-in user.
        include_private_exps = wants_private and bool(self.user_id)

        # Every id must be a string inside a list; anything else is a 404.
        valid_ids = isinstance(exp_ids, list) and all(
            [isinstance(exp_id, basestring) for exp_id in exp_ids])
        if not valid_ids:
            raise self.PageNotFoundException

        if include_private_exps:
            summaries = (
                summary_services.get_displayable_exp_summary_dicts_matching_ids(
                    exp_ids, user=self.user))
        else:
            summaries = (
                summary_services.get_displayable_exp_summary_dicts_matching_ids(
                    exp_ids))

        self.values.update({
            'summaries': summaries
        })
        self.render_json(self.values)
class CollectionSummariesHandler(base.BaseHandler):
    """Returns collection summaries corresponding to collection ids."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.open_access
    def get(self):
        """Handles GET requests."""
        try:
            collection_ids = json.loads(
                self.request.get('stringified_collection_ids'))
        except Exception:
            # Malformed or missing JSON in the query parameter.
            raise self.PageNotFoundException

        summary_dicts = (
            summary_services.get_displayable_collection_summary_dicts_matching_ids( # pylint: disable=line-too-long
                collection_ids))

        self.values.update({
            'summaries': summary_dicts
        })
        self.render_json(self.values)
| |
#!/usr/bin/env python
import os
import hashlib
import sys
import json
import time
################################################################################
# Overview
################################################################################
# Cashier hashes a directory. Any modification to file contents, or directory
# structure is reflected in the hash (SHA1).
#
# Cashier produces .cash_file json files in sub-directories with contents:
# - modification time
# - hash of subdirectory contents
# - namehash of subdirectory structure (file names)
#
# These files allow for quick iterative hashing.
#
# The final produced hash is SHA1(namehash + hash) of the root directory.
#
########################################
# Usage
########################################
# $ cashier.py path_to_root_directory [clean]
# - where the clean option removes all .cash_file's
#
########################################
# Algorithm
########################################
#
# Let cash_i be a tuple where:
# i = a directory or file
# cash_i_mtime = latest modification time in subdirectory
# cash_i_hash = hash of all contents of subdirectory
# cash_i_namehash = hash of (dir/filename + namehash) for all subdirectories
#
# As a convention: cash_i where i is a file becomes:
# cash_i_mtime = latest modification time of file
# cash_i_hash = SHA1(contents of file)
# cash_i_namehash = SHA1(filename)
#
# Then we compute cash_i with the following recurrence given subdir(i) is
# a list of immediate children of i sorted lexicographically.
#
# cash_i_mtime = max(cash_u_mtime for all u in subdir(i))
# cash_i_hash = SHA1(concat(cash_u_hash for all u in subdir(i)))
# cash_i_namehash = SHA1(concat(u_name+cash_u_namehash for all u in subdir(i)))
#
# Then output = SHA1(cash_r_namehash + cash_r_hash) where r is the root dir
#
# Efficiency comes from pruning of subdirs using modification time for
# detection. Only subdirs containing a newer mod time or a differing namehash
# require recomputation of content hashes.
#
################################################################################
# ---- Command-line handling ----
# Bug fix: usage / bad-path errors previously exited with status 0, so
# calling scripts could not distinguish failure from success. The print
# calls are parenthesized so the file runs under both Python 2 and 3.
if len(sys.argv) < 2:
    print('Usage: cashier.py path_to_root_dir [clean]')
    sys.exit(1)
rootDir = sys.argv[1]
if not os.path.exists(rootDir):
    print('Error: %s does not exist.' % rootDir)
    sys.exit(1)

# True when the optional second argument requests removal of all .cash_file's.
shouldClean = len(sys.argv) > 2 and sys.argv[2] == 'clean'

# Name of the per-directory cache file holding {hash, mtime, namehash}.
CASH_FILE_NAME = ".cash_file"
# Placeholder dir name for CashFile entries representing plain files.
# NOTE(review): currently unused in this script — kept for compatibility.
FILE_DIRNAME_PLACEHOLDER = "FILE"
class CashFile:
def __init__(self, dirName, hash=None, mtime=None, namehash=None):
self.stats = {}
self.dirName = dirName
self.stats['hash'] = hash
self.stats['mtime'] = mtime
self.stats['namehash'] = namehash
def writeCashFile(self):
cash_path = os.path.join(self.dirName, CASH_FILE_NAME)
with open(cash_path, 'w') as f:
f.write(json.dumps(self.stats) + '\n')
def setHash(self, hashValue):
self.stats['hash'] = hashValue
def getHash(self):
return self.stats['hash']
def setMTime(self, mtime):
self.stats['mtime'] = mtime
def getMTime(self):
return self.stats['mtime']
def setNameHash(self, nameHashValue):
self.stats['namehash'] = nameHashValue
def getNameHash(self):
return self.stats['namehash']
@staticmethod
def loadCashFile(dirName):
cashFile = CashFile(dirName)
cash_path = os.path.join(dirName, CASH_FILE_NAME)
if not os.path.isfile(cash_path):
return None
with open(cash_path, 'r') as f:
cashFile.stats = json.loads(f.read())
return cashFile
@staticmethod
def combineCashFiles(dirName, listCashFiles):
combinedCashFile = CashFile(dirName)
maxTime = 0
m = hashlib.sha1()
mnamehash = hashlib.sha1()
for cashFile in listCashFiles:
m.update(cashFile.getHash())
mnamehash.update(cashFile.getNameHash())
maxTime = max(maxTime, cashFile.getMTime())
combinedCashFile.stats['mtime'] = maxTime
combinedCashFile.stats['hash'] = m.hexdigest()
combinedCashFile.stats['namehash'] = mnamehash.hexdigest()
return combinedCashFile
@staticmethod
def hashFile(filePath):
if not os.path.isfile(filePath):
return None
m = hashlib.sha1()
with open(filePath, 'rb') as f:
m.update(f.read())
return m.hexdigest()
@staticmethod
def hashNames(listCashFiles):
m = hashlib.sha1()
for cashFile in listCashFiles:
m.update(os.path.basename(cashFile.dirName).lower()) # file or dir name
m.update(cashFile.getNameHash()) # subdir name hash
return m.hexdigest()
currentCashFile = None
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
    if shouldClean:
        # Bug fix: was `os.system('rm %s' % path)`, which breaks on paths
        # containing spaces/shell metacharacters and prints an error when
        # no cache file exists. os.remove is portable and safe.
        try:
            os.remove(os.path.join(dirName, CASH_FILE_NAME))
        except OSError:
            # No cache file in this directory; nothing to clean.
            pass
        continue

    currentCashFile = CashFile.loadCashFile(dirName)

    # Hidden entries (leading '.') are excluded; sorting keeps the hash
    # independent of os.walk's listing order.
    filteredSubDirList = sorted(
        name for name in subdirList if not name.startswith('.'))
    filteredFileList = sorted(
        name for name in fileList if not name.startswith('.'))

    if not currentCashFile:
        # No cache yet: mtime 0 and empty namehash force a recompute below.
        currentCashFile = CashFile(dirName, "", 0)

    # Directories first.
    cashDirList = []
    for subdir in filteredSubDirList:
        subdirPath = os.path.join(dirName, subdir)
        if os.path.islink(subdirPath):
            continue
        tempCashFile = CashFile.loadCashFile(subdirPath)
        if tempCashFile:
            cashDirList.append(tempCashFile)
        else:
            # topdown=False means children are walked first, so a missing
            # child cache file indicates an empty (or unreadable) subdir.
            sys.stderr.write('Error! Sub-directory [%s] contains no files.\n' %
                             subdirPath)

    cashFileList = []
    # Then files (ignores hidden files).
    for fn in filteredFileList:
        fp = os.path.join(dirName, fn)
        if os.path.islink(fp) or not os.path.isfile(fp):
            continue
        fpMTime = os.path.getmtime(fp)
        # Content hash (first field after dirName) is filled in lazily,
        # only when an update turns out to be needed.
        cashFileList.append(CashFile(fp, None, fpMTime, fn.lower()))

    totalCashList = cashDirList + cashFileList

    # An update is needed if any child is newer than the cached mtime, or
    # if the set of names changed (rename/add/delete with an older mtime).
    needsUpdate = any(
        entry.getMTime() > currentCashFile.getMTime()
        for entry in totalCashList)
    newNameHash = CashFile.hashNames(totalCashList)
    if newNameHash != currentCashFile.getNameHash():
        needsUpdate = True

    # No update needed, keep on going.
    if not needsUpdate:
        continue

    # Populate file content hashes.
    for cashfile in cashFileList:
        cashfile.setHash(CashFile.hashFile(cashfile.dirName))

    currentCashFile = CashFile.combineCashFiles(dirName, totalCashList)
    # combineCashFiles folds the children's namehashes; overwrite with the
    # structural name hash computed above.
    currentCashFile.setNameHash(newNameHash)
    currentCashFile.writeCashFile()

# Final combined hash: SHA1(namehash + contents hash) of the root directory.
# Bug fix: after a `clean` run (or when the root yields no walk entries)
# currentCashFile is None, and the old unguarded print crashed with
# AttributeError. The print is parenthesized for Python 2/3 compatibility.
if currentCashFile is not None:
    print(hashlib.sha1(currentCashFile.getNameHash() +
                       currentCashFile.getHash()).hexdigest())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.